mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 17:18:05 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
16
arch/blackfin/mach-common/Makefile
Normal file
16
arch/blackfin/mach-common/Makefile
Normal file
|
@ -0,0 +1,16 @@
|
|||
#
|
||||
# arch/blackfin/mach-common/Makefile
|
||||
#
|
||||
|
||||
obj-y := \
|
||||
cache.o cache-c.o entry.o head.o \
|
||||
interrupt.o arch_checks.o ints-priority.o
|
||||
|
||||
obj-$(CONFIG_PM) += pm.o
|
||||
ifneq ($(CONFIG_BF60x),y)
|
||||
obj-$(CONFIG_PM) += dpmc_modes.o
|
||||
endif
|
||||
obj-$(CONFIG_SCB_PRIORITY) += scb-init.o
|
||||
obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
|
||||
obj-$(CONFIG_SMP) += smp.o
|
||||
obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
|
66
arch/blackfin/mach-common/arch_checks.c
Normal file
66
arch/blackfin/mach-common/arch_checks.c
Normal file
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Do some checking to make sure things are OK
|
||||
*
|
||||
* Copyright 2007-2010 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <asm/fixed_code.h>
|
||||
#include <mach/anomaly.h>
|
||||
#include <asm/clocks.h>
|
||||
|
||||
#ifdef CONFIG_BFIN_KERNEL_CLOCK
|
||||
|
||||
# if (CONFIG_VCO_HZ > CONFIG_MAX_VCO_HZ)
|
||||
# error "VCO selected is more than maximum value. Please change the VCO multipler"
|
||||
# endif
|
||||
|
||||
# if (CONFIG_SCLK_HZ > CONFIG_MAX_SCLK_HZ)
|
||||
# error "Sclk value selected is more than maximum. Please select a proper value for SCLK multiplier"
|
||||
# endif
|
||||
|
||||
# if (CONFIG_SCLK_HZ < CONFIG_MIN_SCLK_HZ)
|
||||
# error "Sclk value selected is less than minimum. Please select a proper value for SCLK multiplier"
|
||||
# endif
|
||||
|
||||
# if (ANOMALY_05000273) && (CONFIG_SCLK_HZ * 2 > CONFIG_CCLK_HZ)
|
||||
# error "ANOMALY 05000273, please make sure CCLK is at least 2x SCLK"
|
||||
# endif
|
||||
|
||||
# if (CONFIG_SCLK_HZ > CONFIG_CCLK_HZ) && (CONFIG_SCLK_HZ != CONFIG_CLKIN_HZ) && (CONFIG_CCLK_HZ != CONFIG_CLKIN_HZ)
|
||||
# error "Please select sclk less than cclk"
|
||||
# endif
|
||||
|
||||
#endif /* CONFIG_BFIN_KERNEL_CLOCK */
|
||||
|
||||
#if CONFIG_BOOT_LOAD < FIXED_CODE_END
|
||||
# error "The kernel load address must be after the fixed code section"
|
||||
#endif
|
||||
|
||||
#if (CONFIG_BOOT_LOAD & 0x3)
|
||||
# error "The kernel load address must be 4 byte aligned"
|
||||
#endif
|
||||
|
||||
/* The entire kernel must be able to make a 24bit pcrel call to start of L1 */
|
||||
#if ((0xffffffff - L1_CODE_START + 1) + CONFIG_BOOT_LOAD) > 0x1000000
|
||||
# error "The kernel load address is too high; keep it below 10meg for safety"
|
||||
#endif
|
||||
|
||||
#if ANOMALY_05000263 && defined(CONFIG_MPU)
|
||||
# error the MPU will not function safely while Anomaly 05000263 applies
|
||||
#endif
|
||||
|
||||
#if ANOMALY_05000448
|
||||
# error You are using a part with anomaly 05000448, this issue causes random memory read/write failures - that means random crashes.
|
||||
#endif
|
||||
|
||||
/* if 220 exists, can not set External Memory WB and L2 not_cached, either External Memory not_cached and L2 WB */
|
||||
#if ANOMALY_05000220 && \
|
||||
(defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK))
|
||||
# error "Anomaly 05000220 does not allow you to use Write Back cache with L2 or External Memory"
|
||||
#endif
|
||||
|
||||
#if ANOMALY_05000491 && !defined(CONFIG_ICACHE_FLUSH_L1)
|
||||
# error You need IFLUSH in L1 inst while Anomaly 05000491 applies
|
||||
#endif
|
85
arch/blackfin/mach-common/cache-c.c
Normal file
85
arch/blackfin/mach-common/cache-c.c
Normal file
|
@ -0,0 +1,85 @@
|
|||
/*
|
||||
* Blackfin cache control code (simpler control-style functions)
|
||||
*
|
||||
* Copyright 2004-2009 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <asm/blackfin.h>
|
||||
#include <asm/cplbinit.h>
|
||||
|
||||
/* Invalidate the Entire Data cache by
|
||||
* clearing DMC[1:0] bits
|
||||
*/
|
||||
void blackfin_invalidate_entire_dcache(void)
|
||||
{
|
||||
u32 dmem = bfin_read_DMEM_CONTROL();
|
||||
bfin_write_DMEM_CONTROL(dmem & ~0xc);
|
||||
SSYNC();
|
||||
bfin_write_DMEM_CONTROL(dmem);
|
||||
SSYNC();
|
||||
}
|
||||
|
||||
/* Invalidate the Entire Instruction cache by
|
||||
* clearing IMC bit
|
||||
*/
|
||||
void blackfin_invalidate_entire_icache(void)
|
||||
{
|
||||
u32 imem = bfin_read_IMEM_CONTROL();
|
||||
bfin_write_IMEM_CONTROL(imem & ~0x4);
|
||||
SSYNC();
|
||||
bfin_write_IMEM_CONTROL(imem);
|
||||
SSYNC();
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
|
||||
|
||||
static void
|
||||
bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
|
||||
unsigned long cplb_data, unsigned long mem_control,
|
||||
unsigned long mem_mask)
|
||||
{
|
||||
int i;
|
||||
#ifdef CONFIG_L1_PARITY_CHECK
|
||||
u32 ctrl;
|
||||
|
||||
if (cplb_addr == DCPLB_ADDR0) {
|
||||
ctrl = bfin_read32(mem_control) | (1 << RDCHK);
|
||||
CSYNC();
|
||||
bfin_write32(mem_control, ctrl);
|
||||
SSYNC();
|
||||
}
|
||||
#endif
|
||||
|
||||
for (i = 0; i < MAX_CPLBS; i++) {
|
||||
bfin_write32(cplb_addr + i * 4, cplb_tbl[i].addr);
|
||||
bfin_write32(cplb_data + i * 4, cplb_tbl[i].data);
|
||||
}
|
||||
|
||||
_enable_cplb(mem_control, mem_mask);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BFIN_ICACHE
|
||||
void bfin_icache_init(struct cplb_entry *icplb_tbl)
|
||||
{
|
||||
bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
|
||||
(IMC | ENICPLB));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BFIN_DCACHE
|
||||
void bfin_dcache_init(struct cplb_entry *dcplb_tbl)
|
||||
{
|
||||
/*
|
||||
* Anomaly notes:
|
||||
* 05000287 - We implement workaround #2 - Change the DMEM_CONTROL
|
||||
* register, so that the port preferences for DAG0 and DAG1 are set
|
||||
* to port B
|
||||
*/
|
||||
bfin_cache_init(dcplb_tbl, DCPLB_ADDR0, DCPLB_DATA0, DMEM_CONTROL,
|
||||
(DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0)));
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
124
arch/blackfin/mach-common/cache.S
Normal file
124
arch/blackfin/mach-common/cache.S
Normal file
|
@ -0,0 +1,124 @@
|
|||
/*
|
||||
* Blackfin cache control code
|
||||
*
|
||||
* Copyright 2004-2008 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/blackfin.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
/* 05000443 - IFLUSH cannot be last instruction in hardware loop */
|
||||
#if ANOMALY_05000443
|
||||
# define BROK_FLUSH_INST "IFLUSH"
|
||||
#else
|
||||
# define BROK_FLUSH_INST "no anomaly! yeah!"
|
||||
#endif
|
||||
|
||||
/* Since all L1 caches work the same way, we use the same method for flushing
|
||||
* them. Only the actual flush instruction differs. We write this in asm as
|
||||
* GCC can be hard to coax into writing nice hardware loops.
|
||||
*
|
||||
* Also, we assume the following register setup:
|
||||
* R0 = start address
|
||||
* R1 = end address
|
||||
*/
|
||||
.macro do_flush flushins:req label
|
||||
|
||||
R2 = -L1_CACHE_BYTES;
|
||||
|
||||
/* start = (start & -L1_CACHE_BYTES) */
|
||||
R0 = R0 & R2;
|
||||
|
||||
/* end = ((end - 1) & -L1_CACHE_BYTES) + L1_CACHE_BYTES; */
|
||||
R1 += -1;
|
||||
R1 = R1 & R2;
|
||||
R1 += L1_CACHE_BYTES;
|
||||
|
||||
/* count = (end - start) >> L1_CACHE_SHIFT */
|
||||
R2 = R1 - R0;
|
||||
R2 >>= L1_CACHE_SHIFT;
|
||||
P1 = R2;
|
||||
|
||||
.ifnb \label
|
||||
\label :
|
||||
.endif
|
||||
P0 = R0;
|
||||
|
||||
LSETUP (1f, 2f) LC1 = P1;
|
||||
1:
|
||||
.ifeqs "\flushins", BROK_FLUSH_INST
|
||||
\flushins [P0++];
|
||||
nop;
|
||||
nop;
|
||||
2: nop;
|
||||
.else
|
||||
2: \flushins [P0++];
|
||||
.endif
|
||||
|
||||
RTS;
|
||||
.endm
|
||||
|
||||
#ifdef CONFIG_ICACHE_FLUSH_L1
|
||||
.section .l1.text
|
||||
#else
|
||||
.text
|
||||
#endif
|
||||
|
||||
/* Invalidate all instruction cache lines assocoiated with this memory area */
|
||||
#ifdef CONFIG_SMP
|
||||
# define _blackfin_icache_flush_range _blackfin_icache_flush_range_l1
|
||||
#endif
|
||||
ENTRY(_blackfin_icache_flush_range)
|
||||
do_flush IFLUSH
|
||||
ENDPROC(_blackfin_icache_flush_range)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
.text
|
||||
# undef _blackfin_icache_flush_range
|
||||
ENTRY(_blackfin_icache_flush_range)
|
||||
p0.L = LO(DSPID);
|
||||
p0.H = HI(DSPID);
|
||||
r3 = [p0];
|
||||
r3 = r3.b (z);
|
||||
p2 = r3;
|
||||
p0.L = _blackfin_iflush_l1_entry;
|
||||
p0.H = _blackfin_iflush_l1_entry;
|
||||
p0 = p0 + (p2 << 2);
|
||||
p1 = [p0];
|
||||
jump (p1);
|
||||
ENDPROC(_blackfin_icache_flush_range)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DCACHE_FLUSH_L1
|
||||
.section .l1.text
|
||||
#else
|
||||
.text
|
||||
#endif
|
||||
|
||||
/* Throw away all D-cached data in specified region without any obligation to
|
||||
* write them back. Since the Blackfin ISA does not have an "invalidate"
|
||||
* instruction, we use flush/invalidate. Perhaps as a speed optimization we
|
||||
* could bang on the DTEST MMRs ...
|
||||
*/
|
||||
ENTRY(_blackfin_dcache_invalidate_range)
|
||||
do_flush FLUSHINV
|
||||
ENDPROC(_blackfin_dcache_invalidate_range)
|
||||
|
||||
/* Flush all data cache lines assocoiated with this memory area */
|
||||
ENTRY(_blackfin_dcache_flush_range)
|
||||
do_flush FLUSH, .Ldfr
|
||||
ENDPROC(_blackfin_dcache_flush_range)
|
||||
|
||||
/* Our headers convert the page structure to an address, so just need to flush
|
||||
* its contents like normal. We know the start address is page aligned (which
|
||||
* greater than our cache alignment), as is the end address. So just jump into
|
||||
* the middle of the dcache flush function.
|
||||
*/
|
||||
ENTRY(_blackfin_dflush_page)
|
||||
P1 = 1 << (PAGE_SHIFT - L1_CACHE_SHIFT);
|
||||
jump .Ldfr;
|
||||
ENDPROC(_blackfin_dflush_page)
|
27
arch/blackfin/mach-common/clock.h
Normal file
27
arch/blackfin/mach-common/clock.h
Normal file
|
@ -0,0 +1,27 @@
|
|||
#ifndef __MACH_COMMON_CLKDEV_H
|
||||
#define __MACH_COMMON_CLKDEV_H
|
||||
|
||||
#include <linux/clk.h>
|
||||
|
||||
struct clk_ops {
|
||||
unsigned long (*get_rate)(struct clk *clk);
|
||||
unsigned long (*round_rate)(struct clk *clk, unsigned long rate);
|
||||
int (*set_rate)(struct clk *clk, unsigned long rate);
|
||||
int (*enable)(struct clk *clk);
|
||||
int (*disable)(struct clk *clk);
|
||||
};
|
||||
|
||||
struct clk {
|
||||
const char *name;
|
||||
unsigned long rate;
|
||||
spinlock_t lock;
|
||||
u32 flags;
|
||||
const struct clk_ops *ops;
|
||||
const struct params *params;
|
||||
void __iomem *reg;
|
||||
u32 mask;
|
||||
u32 shift;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
121
arch/blackfin/mach-common/clocks-init.c
Normal file
121
arch/blackfin/mach-common/clocks-init.c
Normal file
|
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* arch/blackfin/mach-common/clocks-init.c - reprogram clocks / memory
|
||||
*
|
||||
* Copyright 2004-2008 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/blackfin.h>
|
||||
|
||||
#include <asm/dma.h>
|
||||
#include <asm/clocks.h>
|
||||
#include <asm/mem_init.h>
|
||||
#include <asm/dpmc.h>
|
||||
|
||||
#ifdef CONFIG_BF60x
|
||||
|
||||
#define CGU_CTL_VAL ((CONFIG_VCO_MULT << 8) | CLKIN_HALF)
|
||||
#define CGU_DIV_VAL \
|
||||
((CONFIG_CCLK_DIV << CSEL_OFFSET) | \
|
||||
(CONFIG_SCLK_DIV << SYSSEL_OFFSET) | \
|
||||
(CONFIG_SCLK0_DIV << S0SEL_OFFSET) | \
|
||||
(CONFIG_SCLK1_DIV << S1SEL_OFFSET) | \
|
||||
(CONFIG_DCLK_DIV << DSEL_OFFSET))
|
||||
|
||||
#define CONFIG_BFIN_DCLK (((CONFIG_CLKIN_HZ * CONFIG_VCO_MULT) / CONFIG_DCLK_DIV) / 1000000)
|
||||
#if ((CONFIG_BFIN_DCLK != 125) && \
|
||||
(CONFIG_BFIN_DCLK != 133) && (CONFIG_BFIN_DCLK != 150) && \
|
||||
(CONFIG_BFIN_DCLK != 166) && (CONFIG_BFIN_DCLK != 200) && \
|
||||
(CONFIG_BFIN_DCLK != 225) && (CONFIG_BFIN_DCLK != 250))
|
||||
#error "DCLK must be in (125, 133, 150, 166, 200, 225, 250)MHz"
|
||||
#endif
|
||||
|
||||
#else
|
||||
#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
|
||||
#define PLL_CTL_VAL \
|
||||
(((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
|
||||
(PLL_BYPASS << 8) | (ANOMALY_05000305 ? 0 : 0x8000))
|
||||
#endif
|
||||
|
||||
__attribute__((l1_text))
|
||||
static void do_sync(void)
|
||||
{
|
||||
__builtin_bfin_ssync();
|
||||
}
|
||||
|
||||
__attribute__((l1_text))
|
||||
void init_clocks(void)
|
||||
{
|
||||
/* Kill any active DMAs as they may trigger external memory accesses
|
||||
* in the middle of reprogramming things, and that'll screw us up.
|
||||
* For example, any automatic DMAs left by U-Boot for splash screens.
|
||||
*/
|
||||
#ifdef CONFIG_BF60x
|
||||
init_cgu(CGU_DIV_VAL, CGU_CTL_VAL);
|
||||
init_dmc(CONFIG_BFIN_DCLK);
|
||||
#else
|
||||
size_t i;
|
||||
for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
|
||||
struct dma_register *dma = dma_io_base_addr[i];
|
||||
dma->cfg = 0;
|
||||
}
|
||||
|
||||
do_sync();
|
||||
|
||||
#ifdef SIC_IWR0
|
||||
bfin_write_SIC_IWR0(IWR_ENABLE(0));
|
||||
# ifdef SIC_IWR1
|
||||
/* BF52x system reset does not properly reset SIC_IWR1 which
|
||||
* will screw up the bootrom as it relies on MDMA0/1 waking it
|
||||
* up from IDLE instructions. See this report for more info:
|
||||
* http://blackfin.uclinux.org/gf/tracker/4323
|
||||
*/
|
||||
if (ANOMALY_05000435)
|
||||
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
|
||||
else
|
||||
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
|
||||
# endif
|
||||
# ifdef SIC_IWR2
|
||||
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
|
||||
# endif
|
||||
#else
|
||||
bfin_write_SIC_IWR(IWR_ENABLE(0));
|
||||
#endif
|
||||
do_sync();
|
||||
#ifdef EBIU_SDGCTL
|
||||
bfin_write_EBIU_SDGCTL(bfin_read_EBIU_SDGCTL() | SRFS);
|
||||
do_sync();
|
||||
#endif
|
||||
|
||||
#ifdef CLKBUFOE
|
||||
bfin_write16(VR_CTL, bfin_read_VR_CTL() | CLKBUFOE);
|
||||
do_sync();
|
||||
__asm__ __volatile__("IDLE;");
|
||||
#endif
|
||||
bfin_write_PLL_LOCKCNT(0x300);
|
||||
do_sync();
|
||||
/* We always write PLL_CTL thus avoiding Anomaly 05000242 */
|
||||
bfin_write16(PLL_CTL, PLL_CTL_VAL);
|
||||
__asm__ __volatile__("IDLE;");
|
||||
bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
|
||||
#ifdef EBIU_SDGCTL
|
||||
bfin_write_EBIU_SDRRC(mem_SDRRC);
|
||||
bfin_write_EBIU_SDGCTL((bfin_read_EBIU_SDGCTL() & SDGCTL_WIDTH) | mem_SDGCTL);
|
||||
#else
|
||||
bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ));
|
||||
do_sync();
|
||||
bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() | 0x1);
|
||||
bfin_write_EBIU_DDRCTL0(mem_DDRCTL0);
|
||||
bfin_write_EBIU_DDRCTL1(mem_DDRCTL1);
|
||||
bfin_write_EBIU_DDRCTL2(mem_DDRCTL2);
|
||||
#ifdef CONFIG_MEM_EBIU_DDRQUE
|
||||
bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE);
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
do_sync();
|
||||
bfin_read16(0);
|
||||
|
||||
}
|
164
arch/blackfin/mach-common/dpmc.c
Normal file
164
arch/blackfin/mach-common/dpmc.c
Normal file
|
@ -0,0 +1,164 @@
|
|||
/*
|
||||
* Copyright 2008 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/cpufreq.h>
|
||||
|
||||
#include <asm/delay.h>
|
||||
#include <asm/dpmc.h>
|
||||
|
||||
#define DRIVER_NAME "bfin dpmc"
|
||||
|
||||
struct bfin_dpmc_platform_data *pdata;
|
||||
|
||||
/**
|
||||
* bfin_set_vlev - Update VLEV field in VR_CTL Reg.
|
||||
* Avoid BYPASS sequence
|
||||
*/
|
||||
static void bfin_set_vlev(unsigned int vlev)
|
||||
{
|
||||
unsigned pll_lcnt;
|
||||
|
||||
pll_lcnt = bfin_read_PLL_LOCKCNT();
|
||||
|
||||
bfin_write_PLL_LOCKCNT(1);
|
||||
bfin_write_VR_CTL((bfin_read_VR_CTL() & ~VLEV) | vlev);
|
||||
bfin_write_PLL_LOCKCNT(pll_lcnt);
|
||||
}
|
||||
|
||||
/**
|
||||
* bfin_get_vlev - Get CPU specific VLEV from platform device data
|
||||
*/
|
||||
static unsigned int bfin_get_vlev(unsigned int freq)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!pdata)
|
||||
goto err_out;
|
||||
|
||||
freq >>= 16;
|
||||
|
||||
for (i = 0; i < pdata->tabsize; i++)
|
||||
if (freq <= (pdata->tuple_tab[i] & 0xFFFF))
|
||||
return pdata->tuple_tab[i] >> 16;
|
||||
|
||||
err_out:
|
||||
printk(KERN_WARNING "DPMC: No suitable CCLK VDDINT voltage pair found\n");
|
||||
return VLEV_120;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
# ifdef CONFIG_SMP
|
||||
static void bfin_idle_this_cpu(void *info)
|
||||
{
|
||||
unsigned long flags = 0;
|
||||
unsigned long iwr0, iwr1, iwr2;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
local_irq_save_hw(flags);
|
||||
bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);
|
||||
|
||||
platform_clear_ipi(cpu, IRQ_SUPPLE_0);
|
||||
SSYNC();
|
||||
asm("IDLE;");
|
||||
bfin_iwr_restore(iwr0, iwr1, iwr2);
|
||||
|
||||
local_irq_restore_hw(flags);
|
||||
}
|
||||
|
||||
static void bfin_idle_cpu(void)
|
||||
{
|
||||
smp_call_function(bfin_idle_this_cpu, NULL, 0);
|
||||
}
|
||||
|
||||
static void bfin_wakeup_cpu(void)
|
||||
{
|
||||
unsigned int cpu;
|
||||
unsigned int this_cpu = smp_processor_id();
|
||||
cpumask_t mask;
|
||||
|
||||
cpumask_copy(&mask, cpu_online_mask);
|
||||
cpumask_clear_cpu(this_cpu, &mask);
|
||||
for_each_cpu(cpu, &mask)
|
||||
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
|
||||
}
|
||||
|
||||
# else
|
||||
static void bfin_idle_cpu(void) {}
|
||||
static void bfin_wakeup_cpu(void) {}
|
||||
# endif
|
||||
|
||||
static int
|
||||
vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
|
||||
{
|
||||
struct cpufreq_freqs *freq = data;
|
||||
|
||||
if (freq->cpu != CPUFREQ_CPU)
|
||||
return 0;
|
||||
|
||||
if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) {
|
||||
bfin_idle_cpu();
|
||||
bfin_set_vlev(bfin_get_vlev(freq->new));
|
||||
udelay(pdata->vr_settling_time); /* Wait until Volatge settled */
|
||||
bfin_wakeup_cpu();
|
||||
} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) {
|
||||
bfin_idle_cpu();
|
||||
bfin_set_vlev(bfin_get_vlev(freq->new));
|
||||
bfin_wakeup_cpu();
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct notifier_block vreg_cpufreq_notifier_block = {
|
||||
.notifier_call = vreg_cpufreq_notifier
|
||||
};
|
||||
#endif /* CONFIG_CPU_FREQ */
|
||||
|
||||
/**
|
||||
* bfin_dpmc_probe -
|
||||
*
|
||||
*/
|
||||
static int bfin_dpmc_probe(struct platform_device *pdev)
|
||||
{
|
||||
if (pdev->dev.platform_data)
|
||||
pdata = pdev->dev.platform_data;
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
return cpufreq_register_notifier(&vreg_cpufreq_notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
}
|
||||
|
||||
/**
|
||||
* bfin_dpmc_remove -
|
||||
*/
|
||||
static int bfin_dpmc_remove(struct platform_device *pdev)
|
||||
{
|
||||
pdata = NULL;
|
||||
return cpufreq_unregister_notifier(&vreg_cpufreq_notifier_block,
|
||||
CPUFREQ_TRANSITION_NOTIFIER);
|
||||
}
|
||||
|
||||
struct platform_driver bfin_dpmc_device_driver = {
|
||||
.probe = bfin_dpmc_probe,
|
||||
.remove = bfin_dpmc_remove,
|
||||
.driver = {
|
||||
.name = DRIVER_NAME,
|
||||
}
|
||||
};
|
||||
module_platform_driver(bfin_dpmc_device_driver);
|
||||
|
||||
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
|
||||
MODULE_DESCRIPTION("cpu power management driver for Blackfin");
|
||||
MODULE_LICENSE("GPL");
|
320
arch/blackfin/mach-common/dpmc_modes.S
Normal file
320
arch/blackfin/mach-common/dpmc_modes.S
Normal file
|
@ -0,0 +1,320 @@
|
|||
/*
|
||||
* Copyright 2004-2008 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/blackfin.h>
|
||||
#include <mach/irq.h>
|
||||
#include <asm/dpmc.h>
|
||||
|
||||
.section .l1.text
|
||||
ENTRY(_sleep_mode)
|
||||
[--SP] = (R7:4, P5:3);
|
||||
[--SP] = RETS;
|
||||
|
||||
call _set_sic_iwr;
|
||||
|
||||
P0.H = hi(PLL_CTL);
|
||||
P0.L = lo(PLL_CTL);
|
||||
R1 = W[P0](z);
|
||||
BITSET (R1, 3);
|
||||
W[P0] = R1.L;
|
||||
|
||||
CLI R2;
|
||||
SSYNC;
|
||||
IDLE;
|
||||
STI R2;
|
||||
|
||||
call _test_pll_locked;
|
||||
|
||||
R0 = IWR_ENABLE(0);
|
||||
R1 = IWR_DISABLE_ALL;
|
||||
R2 = IWR_DISABLE_ALL;
|
||||
|
||||
call _set_sic_iwr;
|
||||
|
||||
P0.H = hi(PLL_CTL);
|
||||
P0.L = lo(PLL_CTL);
|
||||
R7 = w[p0](z);
|
||||
BITCLR (R7, 3);
|
||||
BITCLR (R7, 5);
|
||||
w[p0] = R7.L;
|
||||
IDLE;
|
||||
|
||||
bfin_init_pm_bench_cycles;
|
||||
|
||||
call _test_pll_locked;
|
||||
|
||||
RETS = [SP++];
|
||||
(R7:4, P5:3) = [SP++];
|
||||
RTS;
|
||||
ENDPROC(_sleep_mode)
|
||||
|
||||
/*
|
||||
* This func never returns as it puts the part into hibernate, and
|
||||
* is only called from do_hibernate, so we don't bother saving or
|
||||
* restoring any of the normal C runtime state. When we wake up,
|
||||
* the entry point will be in do_hibernate and not here.
|
||||
*
|
||||
* We accept just one argument -- the value to write to VR_CTL.
|
||||
*/
|
||||
|
||||
ENTRY(_hibernate_mode)
|
||||
/* Save/setup the regs we need early for minor pipeline optimization */
|
||||
R4 = R0;
|
||||
|
||||
P3.H = hi(VR_CTL);
|
||||
P3.L = lo(VR_CTL);
|
||||
/* Disable all wakeup sources */
|
||||
R0 = IWR_DISABLE_ALL;
|
||||
R1 = IWR_DISABLE_ALL;
|
||||
R2 = IWR_DISABLE_ALL;
|
||||
call _set_sic_iwr;
|
||||
call _set_dram_srfs;
|
||||
SSYNC;
|
||||
|
||||
/* Finally, we climb into our cave to hibernate */
|
||||
W[P3] = R4.L;
|
||||
|
||||
bfin_init_pm_bench_cycles;
|
||||
|
||||
CLI R2;
|
||||
IDLE;
|
||||
.Lforever:
|
||||
jump .Lforever;
|
||||
ENDPROC(_hibernate_mode)
|
||||
|
||||
ENTRY(_sleep_deeper)
|
||||
[--SP] = (R7:4, P5:3);
|
||||
[--SP] = RETS;
|
||||
|
||||
CLI R4;
|
||||
|
||||
P3 = R0;
|
||||
P4 = R1;
|
||||
P5 = R2;
|
||||
|
||||
R0 = IWR_ENABLE(0);
|
||||
R1 = IWR_DISABLE_ALL;
|
||||
R2 = IWR_DISABLE_ALL;
|
||||
|
||||
call _set_sic_iwr;
|
||||
call _set_dram_srfs; /* Set SDRAM Self Refresh */
|
||||
|
||||
P0.H = hi(PLL_DIV);
|
||||
P0.L = lo(PLL_DIV);
|
||||
R6 = W[P0](z);
|
||||
R0.L = 0xF;
|
||||
W[P0] = R0.l; /* Set Max VCO to SCLK divider */
|
||||
|
||||
P0.H = hi(PLL_CTL);
|
||||
P0.L = lo(PLL_CTL);
|
||||
R5 = W[P0](z);
|
||||
R0.L = (CONFIG_MIN_VCO_HZ/CONFIG_CLKIN_HZ) << 9;
|
||||
W[P0] = R0.l; /* Set Min CLKIN to VCO multiplier */
|
||||
|
||||
SSYNC;
|
||||
IDLE;
|
||||
|
||||
call _test_pll_locked;
|
||||
|
||||
P0.H = hi(VR_CTL);
|
||||
P0.L = lo(VR_CTL);
|
||||
R7 = W[P0](z);
|
||||
R1 = 0x6;
|
||||
R1 <<= 16;
|
||||
R2 = 0x0404(Z);
|
||||
R1 = R1|R2;
|
||||
|
||||
R2 = DEPOSIT(R7, R1);
|
||||
W[P0] = R2; /* Set Min Core Voltage */
|
||||
|
||||
SSYNC;
|
||||
IDLE;
|
||||
|
||||
call _test_pll_locked;
|
||||
|
||||
R0 = P3;
|
||||
R1 = P4;
|
||||
R3 = P5;
|
||||
call _set_sic_iwr; /* Set Awake from IDLE */
|
||||
|
||||
P0.H = hi(PLL_CTL);
|
||||
P0.L = lo(PLL_CTL);
|
||||
R0 = W[P0](z);
|
||||
BITSET (R0, 3);
|
||||
W[P0] = R0.L; /* Turn CCLK OFF */
|
||||
SSYNC;
|
||||
IDLE;
|
||||
|
||||
call _test_pll_locked;
|
||||
|
||||
R0 = IWR_ENABLE(0);
|
||||
R1 = IWR_DISABLE_ALL;
|
||||
R2 = IWR_DISABLE_ALL;
|
||||
|
||||
call _set_sic_iwr; /* Set Awake from IDLE PLL */
|
||||
|
||||
P0.H = hi(VR_CTL);
|
||||
P0.L = lo(VR_CTL);
|
||||
W[P0]= R7;
|
||||
|
||||
SSYNC;
|
||||
IDLE;
|
||||
|
||||
bfin_init_pm_bench_cycles;
|
||||
|
||||
call _test_pll_locked;
|
||||
|
||||
P0.H = hi(PLL_DIV);
|
||||
P0.L = lo(PLL_DIV);
|
||||
W[P0]= R6; /* Restore CCLK and SCLK divider */
|
||||
|
||||
P0.H = hi(PLL_CTL);
|
||||
P0.L = lo(PLL_CTL);
|
||||
w[p0] = R5; /* Restore VCO multiplier */
|
||||
IDLE;
|
||||
call _test_pll_locked;
|
||||
|
||||
call _unset_dram_srfs; /* SDRAM Self Refresh Off */
|
||||
|
||||
STI R4;
|
||||
|
||||
RETS = [SP++];
|
||||
(R7:4, P5:3) = [SP++];
|
||||
RTS;
|
||||
ENDPROC(_sleep_deeper)
|
||||
|
||||
ENTRY(_set_dram_srfs)
|
||||
/* set the dram to self refresh mode */
|
||||
SSYNC;
|
||||
#if defined(EBIU_RSTCTL) /* DDR */
|
||||
P0.H = hi(EBIU_RSTCTL);
|
||||
P0.L = lo(EBIU_RSTCTL);
|
||||
R2 = [P0];
|
||||
BITSET(R2, 3); /* SRREQ enter self-refresh mode */
|
||||
[P0] = R2;
|
||||
SSYNC;
|
||||
1:
|
||||
R2 = [P0];
|
||||
CC = BITTST(R2, 4);
|
||||
if !CC JUMP 1b;
|
||||
#else /* SDRAM */
|
||||
P0.L = lo(EBIU_SDGCTL);
|
||||
P0.H = hi(EBIU_SDGCTL);
|
||||
P1.L = lo(EBIU_SDSTAT);
|
||||
P1.H = hi(EBIU_SDSTAT);
|
||||
|
||||
R2 = [P0];
|
||||
BITSET(R2, 24); /* SRFS enter self-refresh mode */
|
||||
[P0] = R2;
|
||||
SSYNC;
|
||||
|
||||
1:
|
||||
R2 = w[P1];
|
||||
SSYNC;
|
||||
cc = BITTST(R2, 1); /* SDSRA poll self-refresh status */
|
||||
if !cc jump 1b;
|
||||
|
||||
R2 = [P0];
|
||||
BITCLR(R2, 0); /* SCTLE disable CLKOUT */
|
||||
[P0] = R2;
|
||||
#endif
|
||||
RTS;
|
||||
ENDPROC(_set_dram_srfs)
|
||||
|
||||
ENTRY(_unset_dram_srfs)
|
||||
/* set the dram out of self refresh mode */
|
||||
|
||||
#if defined(EBIU_RSTCTL) /* DDR */
|
||||
P0.H = hi(EBIU_RSTCTL);
|
||||
P0.L = lo(EBIU_RSTCTL);
|
||||
R2 = [P0];
|
||||
BITCLR(R2, 3); /* clear SRREQ bit */
|
||||
[P0] = R2;
|
||||
#elif defined(EBIU_SDGCTL) /* SDRAM */
|
||||
/* release CLKOUT from self-refresh */
|
||||
P0.L = lo(EBIU_SDGCTL);
|
||||
P0.H = hi(EBIU_SDGCTL);
|
||||
|
||||
R2 = [P0];
|
||||
BITSET(R2, 0); /* SCTLE enable CLKOUT */
|
||||
[P0] = R2
|
||||
SSYNC;
|
||||
|
||||
/* release SDRAM from self-refresh */
|
||||
R2 = [P0];
|
||||
BITCLR(R2, 24); /* clear SRFS bit */
|
||||
[P0] = R2
|
||||
#endif
|
||||
|
||||
SSYNC;
|
||||
RTS;
|
||||
ENDPROC(_unset_dram_srfs)
|
||||
|
||||
ENTRY(_set_sic_iwr)
|
||||
#ifdef SIC_IWR0
|
||||
P0.H = hi(SYSMMR_BASE);
|
||||
P0.L = lo(SYSMMR_BASE);
|
||||
[P0 + (SIC_IWR0 - SYSMMR_BASE)] = R0;
|
||||
[P0 + (SIC_IWR1 - SYSMMR_BASE)] = R1;
|
||||
# ifdef SIC_IWR2
|
||||
[P0 + (SIC_IWR2 - SYSMMR_BASE)] = R2;
|
||||
# endif
|
||||
#else
|
||||
P0.H = hi(SIC_IWR);
|
||||
P0.L = lo(SIC_IWR);
|
||||
[P0] = R0;
|
||||
#endif
|
||||
|
||||
SSYNC;
|
||||
RTS;
|
||||
ENDPROC(_set_sic_iwr)
|
||||
|
||||
ENTRY(_test_pll_locked)
|
||||
P0.H = hi(PLL_STAT);
|
||||
P0.L = lo(PLL_STAT);
|
||||
1:
|
||||
R0 = W[P0] (Z);
|
||||
CC = BITTST(R0,5);
|
||||
IF !CC JUMP 1b;
|
||||
RTS;
|
||||
ENDPROC(_test_pll_locked)
|
||||
|
||||
.section .text
|
||||
ENTRY(_do_hibernate)
|
||||
bfin_cpu_reg_save;
|
||||
bfin_sys_mmr_save;
|
||||
bfin_core_mmr_save;
|
||||
|
||||
/* Setup args to hibernate mode early for pipeline optimization */
|
||||
R0 = M3;
|
||||
P1.H = _hibernate_mode;
|
||||
P1.L = _hibernate_mode;
|
||||
|
||||
/* Save Magic, return address and Stack Pointer */
|
||||
P0 = 0;
|
||||
R1.H = 0xDEAD; /* Hibernate Magic */
|
||||
R1.L = 0xBEEF;
|
||||
R2.H = .Lpm_resume_here;
|
||||
R2.L = .Lpm_resume_here;
|
||||
[P0++] = R1; /* Store Hibernate Magic */
|
||||
[P0++] = R2; /* Save Return Address */
|
||||
[P0++] = SP; /* Save Stack Pointer */
|
||||
|
||||
/* Must use an indirect call as we need to jump to L1 */
|
||||
call (P1); /* Goodbye */
|
||||
|
||||
.Lpm_resume_here:
|
||||
|
||||
bfin_core_mmr_restore;
|
||||
bfin_sys_mmr_restore;
|
||||
bfin_cpu_reg_restore;
|
||||
|
||||
[--sp] = RETI; /* Clear Global Interrupt Disable */
|
||||
SP += 4;
|
||||
|
||||
RTS;
|
||||
ENDPROC(_do_hibernate)
|
1701
arch/blackfin/mach-common/entry.S
Normal file
1701
arch/blackfin/mach-common/entry.S
Normal file
File diff suppressed because it is too large
Load diff
229
arch/blackfin/mach-common/head.S
Normal file
229
arch/blackfin/mach-common/head.S
Normal file
|
@ -0,0 +1,229 @@
|
|||
/*
|
||||
* Common Blackfin startup code
|
||||
*
|
||||
* Copyright 2004-2008 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/blackfin.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/trace.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
__INIT
|
||||
|
||||
ENTRY(__init_clear_bss)
|
||||
r2 = r2 - r1;
|
||||
cc = r2 == 0;
|
||||
if cc jump .L_bss_done;
|
||||
r2 >>= 2;
|
||||
p1 = r1;
|
||||
p2 = r2;
|
||||
lsetup (1f, 1f) lc0 = p2;
|
||||
1: [p1++] = r0;
|
||||
.L_bss_done:
|
||||
rts;
|
||||
ENDPROC(__init_clear_bss)
|
||||
|
||||
ENTRY(__start)
|
||||
/* R0: argument of command line string, passed from uboot, save it */
|
||||
R7 = R0;
|
||||
|
||||
/* Enable Cycle Counter and Nesting Of Interrupts */
|
||||
#ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
|
||||
R0 = SYSCFG_SNEN;
|
||||
#else
|
||||
R0 = SYSCFG_SNEN | SYSCFG_CCEN;
|
||||
#endif
|
||||
SYSCFG = R0;
|
||||
|
||||
/* Optimization register tricks: keep a base value in the
|
||||
* reserved P registers so we use the load/store with an
|
||||
* offset syntax. R0 = [P5 + <constant>];
|
||||
* P5 - core MMR base
|
||||
* R6 - 0
|
||||
*/
|
||||
r6 = 0;
|
||||
p5.l = 0;
|
||||
p5.h = hi(COREMMR_BASE);
|
||||
|
||||
/* Zero out registers required by Blackfin ABI */
|
||||
|
||||
/* Disable circular buffers */
|
||||
L0 = r6;
|
||||
L1 = r6;
|
||||
L2 = r6;
|
||||
L3 = r6;
|
||||
|
||||
/* Disable hardware loops in case we were started by 'go' */
|
||||
LC0 = r6;
|
||||
LC1 = r6;
|
||||
|
||||
/*
|
||||
* Clear ITEST_COMMAND and DTEST_COMMAND registers,
|
||||
* Leaving these as non-zero can confuse the emulator
|
||||
*/
|
||||
[p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
|
||||
[p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
|
||||
CSYNC;
|
||||
|
||||
trace_buffer_init(p0,r0);
|
||||
|
||||
/* Turn off the icache */
|
||||
r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
|
||||
BITCLR (r1, ENICPLB_P);
|
||||
[p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
|
||||
SSYNC;
|
||||
|
||||
/* Turn off the dcache */
|
||||
r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
|
||||
BITCLR (r1, ENDCPLB_P);
|
||||
[p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
|
||||
SSYNC;
|
||||
|
||||
/* in case of double faults, save a few things */
|
||||
p1.l = _initial_pda;
|
||||
p1.h = _initial_pda;
|
||||
r4 = RETX;
|
||||
#ifdef CONFIG_DEBUG_DOUBLEFAULT
|
||||
/* Only save these if we are storing them,
|
||||
* This happens here, since L1 gets clobbered
|
||||
* below
|
||||
*/
|
||||
GET_PDA(p0, r0);
|
||||
r0 = [p0 + PDA_DF_RETX];
|
||||
r1 = [p0 + PDA_DF_DCPLB];
|
||||
r2 = [p0 + PDA_DF_ICPLB];
|
||||
r3 = [p0 + PDA_DF_SEQSTAT];
|
||||
[p1 + PDA_INIT_DF_RETX] = r0;
|
||||
[p1 + PDA_INIT_DF_DCPLB] = r1;
|
||||
[p1 + PDA_INIT_DF_ICPLB] = r2;
|
||||
[p1 + PDA_INIT_DF_SEQSTAT] = r3;
|
||||
#endif
|
||||
[p1 + PDA_INIT_RETX] = r4;
|
||||
|
||||
/* Initialize stack pointer */
|
||||
sp.l = _init_thread_union + THREAD_SIZE;
|
||||
sp.h = _init_thread_union + THREAD_SIZE;
|
||||
fp = sp;
|
||||
usp = sp;
|
||||
|
||||
#ifdef CONFIG_EARLY_PRINTK
|
||||
call _init_early_exception_vectors;
|
||||
r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
|
||||
sti r0;
|
||||
#endif
|
||||
|
||||
r0 = r6;
|
||||
/* Zero out all of the fun bss regions */
|
||||
#if L1_DATA_A_LENGTH > 0
|
||||
r1.l = __sbss_l1;
|
||||
r1.h = __sbss_l1;
|
||||
r2.l = __ebss_l1;
|
||||
r2.h = __ebss_l1;
|
||||
call __init_clear_bss
|
||||
#endif
|
||||
#if L1_DATA_B_LENGTH > 0
|
||||
r1.l = __sbss_b_l1;
|
||||
r1.h = __sbss_b_l1;
|
||||
r2.l = __ebss_b_l1;
|
||||
r2.h = __ebss_b_l1;
|
||||
call __init_clear_bss
|
||||
#endif
|
||||
#if L2_LENGTH > 0
|
||||
r1.l = __sbss_l2;
|
||||
r1.h = __sbss_l2;
|
||||
r2.l = __ebss_l2;
|
||||
r2.h = __ebss_l2;
|
||||
call __init_clear_bss
|
||||
#endif
|
||||
r1.l = ___bss_start;
|
||||
r1.h = ___bss_start;
|
||||
r2.l = ___bss_stop;
|
||||
r2.h = ___bss_stop;
|
||||
call __init_clear_bss
|
||||
|
||||
/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
|
||||
call _bfin_relocate_l1_mem;
|
||||
|
||||
#ifdef CONFIG_ROMKERNEL
|
||||
call _bfin_relocate_xip_data;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BFIN_KERNEL_CLOCK
|
||||
/* Only use on-chip scratch space for stack when absolutely required
|
||||
* to avoid Anomaly 05000227 ... we know the init_clocks() func only
|
||||
* uses L1 text and stack space and no other memory region.
|
||||
*/
|
||||
# define KERNEL_CLOCK_STACK (L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
|
||||
sp.l = lo(KERNEL_CLOCK_STACK);
|
||||
sp.h = hi(KERNEL_CLOCK_STACK);
|
||||
call _init_clocks;
|
||||
sp = usp; /* usp hasn't been touched, so restore from there */
|
||||
#endif
|
||||
|
||||
/* This section keeps the processor in supervisor mode
|
||||
* during kernel boot. Switches to user mode at end of boot.
|
||||
* See page 3-9 of Hardware Reference manual for documentation.
|
||||
*/
|
||||
|
||||
/* EVT15 = _real_start */
|
||||
|
||||
p1.l = _real_start;
|
||||
p1.h = _real_start;
|
||||
[p5 + (EVT15 - COREMMR_BASE)] = p1;
|
||||
csync;
|
||||
|
||||
#ifdef CONFIG_EARLY_PRINTK
|
||||
r0 = (EVT_IVG15 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU) (z);
|
||||
#else
|
||||
r0 = EVT_IVG15 (z);
|
||||
#endif
|
||||
sti r0;
|
||||
|
||||
raise 15;
|
||||
#ifdef CONFIG_EARLY_PRINTK
|
||||
p0.l = _early_trap;
|
||||
p0.h = _early_trap;
|
||||
#else
|
||||
p0.l = .LWAIT_HERE;
|
||||
p0.h = .LWAIT_HERE;
|
||||
#endif
|
||||
reti = p0;
|
||||
#if ANOMALY_05000281
|
||||
nop; nop; nop;
|
||||
#endif
|
||||
rti;
|
||||
|
||||
.LWAIT_HERE:
|
||||
jump .LWAIT_HERE;
|
||||
ENDPROC(__start)
|
||||
|
||||
/* A little BF561 glue ... */
|
||||
#ifndef WDOG_CTL
|
||||
# define WDOG_CTL WDOGA_CTL
|
||||
#endif
|
||||
|
||||
ENTRY(_real_start)
|
||||
/* Enable nested interrupts */
|
||||
[--sp] = reti;
|
||||
/* watchdog off for now */
|
||||
p0.l = lo(WDOG_CTL);
|
||||
p0.h = hi(WDOG_CTL);
|
||||
r0 = 0xAD6(z);
|
||||
w[p0] = r0;
|
||||
ssync;
|
||||
/* Pass the u-boot arguments to the global value command line */
|
||||
R0 = R7;
|
||||
call _cmdline_init;
|
||||
|
||||
sp += -12 + 4; /* +4 is for reti loading above */
|
||||
call _init_pda
|
||||
sp += 12;
|
||||
jump.l _start_kernel;
|
||||
ENDPROC(_real_start)
|
||||
|
||||
__FINIT
|
326
arch/blackfin/mach-common/interrupt.S
Normal file
326
arch/blackfin/mach-common/interrupt.S
Normal file
|
@ -0,0 +1,326 @@
|
|||
/*
|
||||
* Interrupt Entries
|
||||
*
|
||||
* Copyright 2005-2009 Analog Devices Inc.
|
||||
* D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
|
||||
* Kenneth Albanowski <kjahds@kjahds.com>
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <asm/blackfin.h>
|
||||
#include <mach/irq.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/entry.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/trace.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/thread_info.h>
|
||||
|
||||
#include <asm/context.S>
|
||||
|
||||
.extern _ret_from_exception
|
||||
|
||||
#ifdef CONFIG_I_ENTRY_L1
|
||||
.section .l1.text
|
||||
#else
|
||||
.text
|
||||
#endif
|
||||
|
||||
.align 4 /* just in case */
|
||||
|
||||
/* Common interrupt entry code. First we do CLI, then push
|
||||
* RETI, to keep interrupts disabled, but to allow this state to be changed
|
||||
* by local_bh_enable.
|
||||
* R0 contains the interrupt number, while R1 may contain the value of IPEND,
|
||||
* or garbage if IPEND won't be needed by the ISR. */
|
||||
__common_int_entry:
|
||||
[--sp] = fp;
|
||||
[--sp] = usp;
|
||||
|
||||
[--sp] = i0;
|
||||
[--sp] = i1;
|
||||
[--sp] = i2;
|
||||
[--sp] = i3;
|
||||
|
||||
[--sp] = m0;
|
||||
[--sp] = m1;
|
||||
[--sp] = m2;
|
||||
[--sp] = m3;
|
||||
|
||||
[--sp] = l0;
|
||||
[--sp] = l1;
|
||||
[--sp] = l2;
|
||||
[--sp] = l3;
|
||||
|
||||
[--sp] = b0;
|
||||
[--sp] = b1;
|
||||
[--sp] = b2;
|
||||
[--sp] = b3;
|
||||
[--sp] = a0.x;
|
||||
[--sp] = a0.w;
|
||||
[--sp] = a1.x;
|
||||
[--sp] = a1.w;
|
||||
|
||||
[--sp] = LC0;
|
||||
[--sp] = LC1;
|
||||
[--sp] = LT0;
|
||||
[--sp] = LT1;
|
||||
[--sp] = LB0;
|
||||
[--sp] = LB1;
|
||||
|
||||
[--sp] = ASTAT;
|
||||
|
||||
[--sp] = r0; /* Skip reserved */
|
||||
[--sp] = RETS;
|
||||
r2 = RETI;
|
||||
[--sp] = r2;
|
||||
[--sp] = RETX;
|
||||
[--sp] = RETN;
|
||||
[--sp] = RETE;
|
||||
[--sp] = SEQSTAT;
|
||||
[--sp] = r1; /* IPEND - R1 may or may not be set up before jumping here. */
|
||||
|
||||
/* Switch to other method of keeping interrupts disabled. */
|
||||
#ifdef CONFIG_DEBUG_HWERR
|
||||
r1 = 0x3f;
|
||||
sti r1;
|
||||
#else
|
||||
cli r1;
|
||||
#endif
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
[--sp] = r0;
|
||||
sp += -12;
|
||||
call _trace_hardirqs_off;
|
||||
sp += 12;
|
||||
r0 = [sp++];
|
||||
#endif
|
||||
[--sp] = RETI; /* orig_pc */
|
||||
/* Clear all L registers. */
|
||||
r1 = 0 (x);
|
||||
l0 = r1;
|
||||
l1 = r1;
|
||||
l2 = r1;
|
||||
l3 = r1;
|
||||
#ifdef CONFIG_FRAME_POINTER
|
||||
fp = 0;
|
||||
#endif
|
||||
|
||||
ANOMALY_283_315_WORKAROUND(p5, r7)
|
||||
|
||||
r1 = sp;
|
||||
SP += -12;
|
||||
#ifdef CONFIG_IPIPE
|
||||
call ___ipipe_grab_irq
|
||||
SP += 12;
|
||||
cc = r0 == 0;
|
||||
if cc jump .Lcommon_restore_context;
|
||||
#else /* CONFIG_IPIPE */
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
r7 = sp;
|
||||
r4.l = lo(ALIGN_PAGE_MASK);
|
||||
r4.h = hi(ALIGN_PAGE_MASK);
|
||||
r7 = r7 & r4;
|
||||
p5 = r7;
|
||||
r7 = [p5 + TI_PREEMPT]; /* get preempt count */
|
||||
r7 += 1; /* increment it */
|
||||
[p5 + TI_PREEMPT] = r7;
|
||||
#endif
|
||||
pseudo_long_call _do_irq, p2;
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
r7 += -1;
|
||||
[p5 + TI_PREEMPT] = r7; /* restore preempt count */
|
||||
#endif
|
||||
|
||||
SP += 12;
|
||||
#endif /* CONFIG_IPIPE */
|
||||
pseudo_long_call _return_from_int, p2;
|
||||
.Lcommon_restore_context:
|
||||
RESTORE_CONTEXT
|
||||
rti;
|
||||
|
||||
/* interrupt routine for ivhw - 5 */
|
||||
ENTRY(_evt_ivhw)
|
||||
/* In case a single action kicks off multiple memory transactions, (like
|
||||
* a cache line fetch, - this can cause multiple hardware errors, let's
|
||||
* catch them all. First - make sure all the actions are complete, and
|
||||
* the core sees the hardware errors.
|
||||
*/
|
||||
SSYNC;
|
||||
SSYNC;
|
||||
|
||||
SAVE_ALL_SYS
|
||||
#ifdef CONFIG_FRAME_POINTER
|
||||
fp = 0;
|
||||
#endif
|
||||
|
||||
ANOMALY_283_315_WORKAROUND(p5, r7)
|
||||
|
||||
/* Handle all stacked hardware errors
|
||||
* To make sure we don't hang forever, only do it 10 times
|
||||
*/
|
||||
R0 = 0;
|
||||
R2 = 10;
|
||||
1:
|
||||
P0.L = LO(ILAT);
|
||||
P0.H = HI(ILAT);
|
||||
R1 = [P0];
|
||||
CC = BITTST(R1, EVT_IVHW_P);
|
||||
IF ! CC JUMP 2f;
|
||||
/* OK a hardware error is pending - clear it */
|
||||
R1 = EVT_IVHW_P;
|
||||
[P0] = R1;
|
||||
R0 += 1;
|
||||
CC = R1 == R2;
|
||||
if CC JUMP 2f;
|
||||
JUMP 1b;
|
||||
2:
|
||||
# We are going to dump something out, so make sure we print IPEND properly
|
||||
p2.l = lo(IPEND);
|
||||
p2.h = hi(IPEND);
|
||||
r0 = [p2];
|
||||
[sp + PT_IPEND] = r0;
|
||||
|
||||
/* set the EXCAUSE to HWERR for trap_c */
|
||||
r0 = [sp + PT_SEQSTAT];
|
||||
R1.L = LO(VEC_HWERR);
|
||||
R1.H = HI(VEC_HWERR);
|
||||
R0 = R0 | R1;
|
||||
[sp + PT_SEQSTAT] = R0;
|
||||
|
||||
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
|
||||
SP += -12;
|
||||
pseudo_long_call _trap_c, p5;
|
||||
SP += 12;
|
||||
|
||||
#ifdef EBIU_ERRMST
|
||||
/* make sure EBIU_ERRMST is clear */
|
||||
p0.l = LO(EBIU_ERRMST);
|
||||
p0.h = HI(EBIU_ERRMST);
|
||||
r0.l = (CORE_ERROR | CORE_MERROR);
|
||||
w[p0] = r0.l;
|
||||
#endif
|
||||
|
||||
pseudo_long_call _ret_from_exception, p2;
|
||||
|
||||
.Lcommon_restore_all_sys:
|
||||
RESTORE_ALL_SYS
|
||||
rti;
|
||||
ENDPROC(_evt_ivhw)
|
||||
|
||||
/* Interrupt routine for evt2 (NMI).
|
||||
* For inner circle type details, please see:
|
||||
* http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
|
||||
*/
|
||||
ENTRY(_evt_nmi)
|
||||
#ifndef CONFIG_NMI_WATCHDOG
|
||||
.weak _evt_nmi
|
||||
#else
|
||||
/* Not take account of CPLBs, this handler will not return */
|
||||
SAVE_ALL_SYS
|
||||
r0 = sp;
|
||||
r1 = retn;
|
||||
[sp + PT_PC] = r1;
|
||||
trace_buffer_save(p4,r5);
|
||||
|
||||
ANOMALY_283_315_WORKAROUND(p4, r5)
|
||||
|
||||
SP += -12;
|
||||
call _do_nmi;
|
||||
SP += 12;
|
||||
1:
|
||||
jump 1b;
|
||||
#endif
|
||||
rtn;
|
||||
ENDPROC(_evt_nmi)
|
||||
|
||||
/* interrupt routine for core timer - 6 */
|
||||
ENTRY(_evt_timer)
|
||||
TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)
|
||||
|
||||
/* interrupt routine for evt7 - 7 */
|
||||
ENTRY(_evt_evt7)
|
||||
INTERRUPT_ENTRY(EVT_IVG7_P)
|
||||
ENTRY(_evt_evt8)
|
||||
INTERRUPT_ENTRY(EVT_IVG8_P)
|
||||
ENTRY(_evt_evt9)
|
||||
INTERRUPT_ENTRY(EVT_IVG9_P)
|
||||
ENTRY(_evt_evt10)
|
||||
INTERRUPT_ENTRY(EVT_IVG10_P)
|
||||
ENTRY(_evt_evt11)
|
||||
INTERRUPT_ENTRY(EVT_IVG11_P)
|
||||
ENTRY(_evt_evt12)
|
||||
INTERRUPT_ENTRY(EVT_IVG12_P)
|
||||
ENTRY(_evt_evt13)
|
||||
INTERRUPT_ENTRY(EVT_IVG13_P)
|
||||
|
||||
|
||||
/* interrupt routine for system_call - 15 */
|
||||
ENTRY(_evt_system_call)
|
||||
SAVE_CONTEXT_SYSCALL
|
||||
#ifdef CONFIG_FRAME_POINTER
|
||||
fp = 0;
|
||||
#endif
|
||||
pseudo_long_call _system_call, p2;
|
||||
jump .Lcommon_restore_context;
|
||||
ENDPROC(_evt_system_call)
|
||||
|
||||
#ifdef CONFIG_IPIPE
|
||||
/*
|
||||
* __ipipe_call_irqtail: lowers the current priority level to EVT15
|
||||
* before running a user-defined routine, then raises the priority
|
||||
* level to EVT14 to prepare the caller for a normal interrupt
|
||||
* return through RTI.
|
||||
*
|
||||
* We currently use this feature in two occasions:
|
||||
*
|
||||
* - before branching to __ipipe_irq_tail_hook as requested by a high
|
||||
* priority domain after the pipeline delivered an interrupt,
|
||||
* e.g. such as Xenomai, in order to start its rescheduling
|
||||
* procedure, since we may not switch tasks when IRQ levels are
|
||||
* nested on the Blackfin, so we have to fake an interrupt return
|
||||
* so that we may reschedule immediately.
|
||||
*
|
||||
* - before branching to __ipipe_sync_root(), in order to play any interrupt
|
||||
* pending for the root domain (i.e. the Linux kernel). This lowers
|
||||
* the core priority level enough so that Linux IRQ handlers may
|
||||
* never delay interrupts handled by high priority domains; we defer
|
||||
* those handlers until this point instead. This is a substitute
|
||||
* to using a threaded interrupt model for the Linux kernel.
|
||||
*
|
||||
* r0: address of user-defined routine
|
||||
* context: caller must have preempted EVT15, hw interrupts must be off.
|
||||
*/
|
||||
ENTRY(___ipipe_call_irqtail)
|
||||
p0 = r0;
|
||||
r0.l = 1f;
|
||||
r0.h = 1f;
|
||||
reti = r0;
|
||||
rti;
|
||||
1:
|
||||
[--sp] = rets;
|
||||
[--sp] = ( r7:4, p5:3 );
|
||||
sp += -12;
|
||||
call (p0);
|
||||
sp += 12;
|
||||
( r7:4, p5:3 ) = [sp++];
|
||||
rets = [sp++];
|
||||
|
||||
#ifdef CONFIG_DEBUG_HWERR
|
||||
/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
|
||||
r0 = (EVT_IVG14 | EVT_IVHW | \
|
||||
EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
|
||||
#else
|
||||
/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
|
||||
r0 = (EVT_IVG14 | \
|
||||
EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
|
||||
#endif
|
||||
sti r0;
|
||||
raise 14; /* Branches to _evt_evt14 */
|
||||
2:
|
||||
jump 2b; /* Likely paranoid. */
|
||||
ENDPROC(___ipipe_call_irqtail)
|
||||
|
||||
#endif /* CONFIG_IPIPE */
|
1373
arch/blackfin/mach-common/ints-priority.c
Normal file
1373
arch/blackfin/mach-common/ints-priority.c
Normal file
File diff suppressed because it is too large
Load diff
299
arch/blackfin/mach-common/pm.c
Normal file
299
arch/blackfin/mach-common/pm.c
Normal file
|
@ -0,0 +1,299 @@
|
|||
/*
|
||||
* Blackfin power management
|
||||
*
|
||||
* Copyright 2006-2009 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2
|
||||
* based on arm/mach-omap/pm.c
|
||||
* Copyright 2001, Cliff Brake <cbrake@accelent.com> and others
|
||||
*/
|
||||
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
#include <asm/cplb.h>
|
||||
#include <asm/gpio.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/dpmc.h>
|
||||
#include <asm/pm.h>
|
||||
|
||||
#ifdef CONFIG_BF60x
|
||||
struct bfin_cpu_pm_fns *bfin_cpu_pm;
|
||||
#endif
|
||||
|
||||
void bfin_pm_suspend_standby_enter(void)
|
||||
{
|
||||
#if !BFIN_GPIO_PINT
|
||||
bfin_pm_standby_setup();
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_BF60x
|
||||
bfin_cpu_pm->enter(PM_SUSPEND_STANDBY);
|
||||
#else
|
||||
# ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
|
||||
sleep_deeper(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
|
||||
# else
|
||||
sleep_mode(bfin_sic_iwr[0], bfin_sic_iwr[1], bfin_sic_iwr[2]);
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if !BFIN_GPIO_PINT
|
||||
bfin_pm_standby_restore();
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_BF60x
|
||||
#ifdef SIC_IWR0
|
||||
bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
|
||||
# ifdef SIC_IWR1
|
||||
/* BF52x system reset does not properly reset SIC_IWR1 which
|
||||
* will screw up the bootrom as it relies on MDMA0/1 waking it
|
||||
* up from IDLE instructions. See this report for more info:
|
||||
* http://blackfin.uclinux.org/gf/tracker/4323
|
||||
*/
|
||||
if (ANOMALY_05000435)
|
||||
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
|
||||
else
|
||||
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
|
||||
# endif
|
||||
# ifdef SIC_IWR2
|
||||
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
|
||||
# endif
|
||||
#else
|
||||
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
|
||||
#endif
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
int bf53x_suspend_l1_mem(unsigned char *memptr)
|
||||
{
|
||||
dma_memcpy_nocache(memptr, (const void *) L1_CODE_START,
|
||||
L1_CODE_LENGTH);
|
||||
dma_memcpy_nocache(memptr + L1_CODE_LENGTH,
|
||||
(const void *) L1_DATA_A_START, L1_DATA_A_LENGTH);
|
||||
dma_memcpy_nocache(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH,
|
||||
(const void *) L1_DATA_B_START, L1_DATA_B_LENGTH);
|
||||
memcpy(memptr + L1_CODE_LENGTH + L1_DATA_A_LENGTH +
|
||||
L1_DATA_B_LENGTH, (const void *) L1_SCRATCH_START,
|
||||
L1_SCRATCH_LENGTH);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bf53x_resume_l1_mem(unsigned char *memptr)
|
||||
{
|
||||
dma_memcpy_nocache((void *) L1_CODE_START, memptr, L1_CODE_LENGTH);
|
||||
dma_memcpy_nocache((void *) L1_DATA_A_START, memptr + L1_CODE_LENGTH,
|
||||
L1_DATA_A_LENGTH);
|
||||
dma_memcpy_nocache((void *) L1_DATA_B_START, memptr + L1_CODE_LENGTH +
|
||||
L1_DATA_A_LENGTH, L1_DATA_B_LENGTH);
|
||||
memcpy((void *) L1_SCRATCH_START, memptr + L1_CODE_LENGTH +
|
||||
L1_DATA_A_LENGTH + L1_DATA_B_LENGTH, L1_SCRATCH_LENGTH);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
|
||||
# ifdef CONFIG_BF60x
|
||||
__attribute__((l1_text))
|
||||
# endif
|
||||
static void flushinv_all_dcache(void)
|
||||
{
|
||||
register u32 way, bank, subbank, set;
|
||||
register u32 status, addr;
|
||||
u32 dmem_ctl = bfin_read_DMEM_CONTROL();
|
||||
|
||||
for (bank = 0; bank < 2; ++bank) {
|
||||
if (!(dmem_ctl & (1 << (DMC1_P - bank))))
|
||||
continue;
|
||||
|
||||
for (way = 0; way < 2; ++way)
|
||||
for (subbank = 0; subbank < 4; ++subbank)
|
||||
for (set = 0; set < 64; ++set) {
|
||||
|
||||
bfin_write_DTEST_COMMAND(
|
||||
way << 26 |
|
||||
bank << 23 |
|
||||
subbank << 16 |
|
||||
set << 5
|
||||
);
|
||||
CSYNC();
|
||||
status = bfin_read_DTEST_DATA0();
|
||||
|
||||
/* only worry about valid/dirty entries */
|
||||
if ((status & 0x3) != 0x3)
|
||||
continue;
|
||||
|
||||
|
||||
/* construct the address using the tag */
|
||||
addr = (status & 0xFFFFC800) | (subbank << 12) | (set << 5);
|
||||
|
||||
/* flush it */
|
||||
__asm__ __volatile__("FLUSHINV[%0];" : : "a"(addr));
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
int bfin_pm_suspend_mem_enter(void)
|
||||
{
|
||||
int ret;
|
||||
#ifndef CONFIG_BF60x
|
||||
int wakeup;
|
||||
#endif
|
||||
|
||||
unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
|
||||
+ L1_DATA_B_LENGTH + L1_SCRATCH_LENGTH,
|
||||
GFP_ATOMIC);
|
||||
|
||||
if (memptr == NULL) {
|
||||
panic("bf53x_suspend_l1_mem malloc failed");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_BF60x
|
||||
wakeup = bfin_read_VR_CTL() & ~FREQ;
|
||||
wakeup |= SCKELOW;
|
||||
|
||||
#ifdef CONFIG_PM_BFIN_WAKE_PH6
|
||||
wakeup |= PHYWE;
|
||||
#endif
|
||||
#ifdef CONFIG_PM_BFIN_WAKE_GP
|
||||
wakeup |= GPWE;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
ret = blackfin_dma_suspend();
|
||||
|
||||
if (ret) {
|
||||
kfree(memptr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_GPIO_ADI
|
||||
bfin_gpio_pm_hibernate_suspend();
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
|
||||
flushinv_all_dcache();
|
||||
#endif
|
||||
_disable_dcplb();
|
||||
_disable_icplb();
|
||||
bf53x_suspend_l1_mem(memptr);
|
||||
|
||||
#ifndef CONFIG_BF60x
|
||||
do_hibernate(wakeup | vr_wakeup); /* See you later! */
|
||||
#else
|
||||
bfin_cpu_pm->enter(PM_SUSPEND_MEM);
|
||||
#endif
|
||||
|
||||
bf53x_resume_l1_mem(memptr);
|
||||
|
||||
_enable_icplb();
|
||||
_enable_dcplb();
|
||||
|
||||
#ifdef CONFIG_GPIO_ADI
|
||||
bfin_gpio_pm_hibernate_restore();
|
||||
#endif
|
||||
blackfin_dma_resume();
|
||||
|
||||
kfree(memptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* bfin_pm_valid - Tell the PM core that we only support the standby sleep
|
||||
* state
|
||||
* @state: suspend state we're checking.
|
||||
*
|
||||
*/
|
||||
static int bfin_pm_valid(suspend_state_t state)
|
||||
{
|
||||
return (state == PM_SUSPEND_STANDBY
|
||||
#if !(defined(BF533_FAMILY) || defined(CONFIG_BF561))
|
||||
/*
|
||||
* On BF533/2/1:
|
||||
* If we enter Hibernate the SCKE Pin is driven Low,
|
||||
* so that the SDRAM enters Self Refresh Mode.
|
||||
* However when the reset sequence that follows hibernate
|
||||
* state is executed, SCKE is driven High, taking the
|
||||
* SDRAM out of Self Refresh.
|
||||
*
|
||||
* If you reconfigure and access the SDRAM "very quickly",
|
||||
* you are likely to avoid errors, otherwise the SDRAM
|
||||
* start losing its contents.
|
||||
* An external HW workaround is possible using logic gates.
|
||||
*/
|
||||
|| state == PM_SUSPEND_MEM
|
||||
#endif
|
||||
);
|
||||
}
|
||||
|
||||
/*
|
||||
* bfin_pm_enter - Actually enter a sleep state.
|
||||
* @state: State we're entering.
|
||||
*
|
||||
*/
|
||||
static int bfin_pm_enter(suspend_state_t state)
|
||||
{
|
||||
switch (state) {
|
||||
case PM_SUSPEND_STANDBY:
|
||||
bfin_pm_suspend_standby_enter();
|
||||
break;
|
||||
case PM_SUSPEND_MEM:
|
||||
bfin_pm_suspend_mem_enter();
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
|
||||
void bfin_pm_end(void)
|
||||
{
|
||||
u32 cycle, cycle2;
|
||||
u64 usec64;
|
||||
u32 usec;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"1: %0 = CYCLES2\n"
|
||||
"%1 = CYCLES\n"
|
||||
"%2 = CYCLES2\n"
|
||||
"CC = %2 == %0\n"
|
||||
"if ! CC jump 1b\n"
|
||||
: "=d,a" (cycle2), "=d,a" (cycle), "=d,a" (usec) : : "CC"
|
||||
);
|
||||
|
||||
usec64 = ((u64)cycle2 << 32) + cycle;
|
||||
do_div(usec64, get_cclk() / USEC_PER_SEC);
|
||||
usec = usec64;
|
||||
if (usec == 0)
|
||||
usec = 1;
|
||||
|
||||
pr_info("PM: resume of kernel completes after %ld msec %03ld usec\n",
|
||||
usec / USEC_PER_MSEC, usec % USEC_PER_MSEC);
|
||||
}
|
||||
#endif
|
||||
|
||||
static const struct platform_suspend_ops bfin_pm_ops = {
|
||||
.enter = bfin_pm_enter,
|
||||
.valid = bfin_pm_valid,
|
||||
#ifdef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
|
||||
.end = bfin_pm_end,
|
||||
#endif
|
||||
};
|
||||
|
||||
static int __init bfin_pm_init(void)
|
||||
{
|
||||
suspend_set_ops(&bfin_pm_ops);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__initcall(bfin_pm_init);
|
52
arch/blackfin/mach-common/scb-init.c
Normal file
52
arch/blackfin/mach-common/scb-init.c
Normal file
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
* arch/blackfin/mach-common/scb-init.c - reprogram system cross bar priority
|
||||
*
|
||||
* Copyright 2012 Analog Devices Inc.
|
||||
*
|
||||
* Licensed under the GPL-2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/scb.h>
|
||||
|
||||
__attribute__((l1_text))
|
||||
inline void scb_mi_write(unsigned long scb_mi_arbw, unsigned int slots,
|
||||
unsigned char *scb_mi_prio)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < slots; ++i)
|
||||
bfin_write32(scb_mi_arbw, (i << SCB_SLOT_OFFSET) | scb_mi_prio[i]);
|
||||
}
|
||||
|
||||
__attribute__((l1_text))
|
||||
inline void scb_mi_read(unsigned long scb_mi_arbw, unsigned int slots,
|
||||
unsigned char *scb_mi_prio)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < slots; ++i) {
|
||||
bfin_write32(scb_mi_arbw, (0xFF << SCB_SLOT_OFFSET) | i);
|
||||
scb_mi_prio[i] = bfin_read32(scb_mi_arbw);
|
||||
}
|
||||
}
|
||||
|
||||
__attribute__((l1_text))
|
||||
void init_scb(void)
|
||||
{
|
||||
unsigned int i, j;
|
||||
unsigned char scb_tmp_prio[32];
|
||||
|
||||
pr_info("Init System Crossbar\n");
|
||||
for (i = 0; scb_data[i].scb_mi_arbr > 0; ++i) {
|
||||
|
||||
scb_mi_write(scb_data[i].scb_mi_arbw, scb_data[i].scb_mi_slots, scb_data[i].scb_mi_prio);
|
||||
|
||||
pr_debug("scb priority at 0x%lx:\n", scb_data[i].scb_mi_arbr);
|
||||
scb_mi_read(scb_data[i].scb_mi_arbw, scb_data[i].scb_mi_slots, scb_tmp_prio);
|
||||
for (j = 0; j < scb_data[i].scb_mi_slots; ++j)
|
||||
pr_debug("slot %d = %d\n", j, scb_tmp_prio[j]);
|
||||
}
|
||||
|
||||
}
|
433
arch/blackfin/mach-common/smp.c
Normal file
433
arch/blackfin/mach-common/smp.c
Normal file
|
@ -0,0 +1,433 @@
|
|||
/*
|
||||
* IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
|
||||
*
|
||||
* Copyright 2007-2009 Analog Devices Inc.
|
||||
* Philippe Gerum <rpm@xenomai.org>
|
||||
*
|
||||
* Licensed under the GPL-2.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/profile.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/irq_handler.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/time.h>
|
||||
#include <linux/err.h>
|
||||
|
||||
/*
|
||||
* Anomaly notes:
|
||||
* 05000120 - we always define corelock as 32-bit integer in L2
|
||||
*/
|
||||
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
|
||||
|
||||
#ifdef CONFIG_ICACHE_FLUSH_L1
|
||||
unsigned long blackfin_iflush_l1_entry[NR_CPUS];
|
||||
#endif
|
||||
|
||||
struct blackfin_initial_pda initial_pda_coreb;
|
||||
|
||||
enum ipi_message_type {
|
||||
BFIN_IPI_NONE,
|
||||
BFIN_IPI_TIMER,
|
||||
BFIN_IPI_RESCHEDULE,
|
||||
BFIN_IPI_CALL_FUNC,
|
||||
BFIN_IPI_CPU_STOP,
|
||||
};
|
||||
|
||||
struct blackfin_flush_data {
|
||||
unsigned long start;
|
||||
unsigned long end;
|
||||
};
|
||||
|
||||
void *secondary_stack;
|
||||
|
||||
static struct blackfin_flush_data smp_flush_data;
|
||||
|
||||
static DEFINE_SPINLOCK(stop_lock);
|
||||
|
||||
/* A magic number - stress test shows this is safe for common cases */
|
||||
#define BFIN_IPI_MSGQ_LEN 5
|
||||
|
||||
/* Simple FIFO buffer, overflow leads to panic */
|
||||
struct ipi_data {
|
||||
atomic_t count;
|
||||
atomic_t bits;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
|
||||
|
||||
static void ipi_cpu_stop(unsigned int cpu)
|
||||
{
|
||||
spin_lock(&stop_lock);
|
||||
printk(KERN_CRIT "CPU%u: stopping\n", cpu);
|
||||
dump_stack();
|
||||
spin_unlock(&stop_lock);
|
||||
|
||||
set_cpu_online(cpu, false);
|
||||
|
||||
local_irq_disable();
|
||||
|
||||
while (1)
|
||||
SSYNC();
|
||||
}
|
||||
|
||||
static void ipi_flush_icache(void *info)
|
||||
{
|
||||
struct blackfin_flush_data *fdata = info;
|
||||
|
||||
/* Invalidate the memory holding the bounds of the flushed region. */
|
||||
blackfin_dcache_invalidate_range((unsigned long)fdata,
|
||||
(unsigned long)fdata + sizeof(*fdata));
|
||||
|
||||
/* Make sure all write buffers in the data side of the core
|
||||
* are flushed before trying to invalidate the icache. This
|
||||
* needs to be after the data flush and before the icache
|
||||
* flush so that the SSYNC does the right thing in preventing
|
||||
* the instruction prefetcher from hitting things in cached
|
||||
* memory at the wrong time -- it runs much further ahead than
|
||||
* the pipeline.
|
||||
*/
|
||||
SSYNC();
|
||||
|
||||
/* ipi_flaush_icache is invoked by generic flush_icache_range,
|
||||
* so call blackfin arch icache flush directly here.
|
||||
*/
|
||||
blackfin_icache_flush_range(fdata->start, fdata->end);
|
||||
}
|
||||
|
||||
/* Use IRQ_SUPPLE_0 to request reschedule.
|
||||
* When returning from interrupt to user space,
|
||||
* there is chance to reschedule */
|
||||
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
platform_clear_ipi(cpu, IRQ_SUPPLE_0);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
DECLARE_PER_CPU(struct clock_event_device, coretmr_events);
|
||||
void ipi_timer(void)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
|
||||
evt->event_handler(evt);
|
||||
}
|
||||
|
||||
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
|
||||
{
|
||||
struct ipi_data *bfin_ipi_data;
|
||||
unsigned int cpu = smp_processor_id();
|
||||
unsigned long pending;
|
||||
unsigned long msg;
|
||||
|
||||
platform_clear_ipi(cpu, IRQ_SUPPLE_1);
|
||||
|
||||
smp_rmb();
|
||||
bfin_ipi_data = this_cpu_ptr(&bfin_ipi);
|
||||
while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
|
||||
msg = 0;
|
||||
do {
|
||||
msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
|
||||
switch (msg) {
|
||||
case BFIN_IPI_TIMER:
|
||||
ipi_timer();
|
||||
break;
|
||||
case BFIN_IPI_RESCHEDULE:
|
||||
scheduler_ipi();
|
||||
break;
|
||||
case BFIN_IPI_CALL_FUNC:
|
||||
generic_smp_call_function_interrupt();
|
||||
break;
|
||||
case BFIN_IPI_CPU_STOP:
|
||||
ipi_cpu_stop(cpu);
|
||||
break;
|
||||
default:
|
||||
goto out;
|
||||
}
|
||||
atomic_dec(&bfin_ipi_data->count);
|
||||
} while (msg < BITS_PER_LONG);
|
||||
|
||||
}
|
||||
out:
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void bfin_ipi_init(void)
|
||||
{
|
||||
unsigned int cpu;
|
||||
struct ipi_data *bfin_ipi_data;
|
||||
for_each_possible_cpu(cpu) {
|
||||
bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
|
||||
atomic_set(&bfin_ipi_data->bits, 0);
|
||||
atomic_set(&bfin_ipi_data->count, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
|
||||
{
|
||||
unsigned int cpu;
|
||||
struct ipi_data *bfin_ipi_data;
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
for_each_cpu(cpu, cpumask) {
|
||||
bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
|
||||
atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
|
||||
atomic_inc(&bfin_ipi_data->count);
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
smp_wmb();
|
||||
for_each_cpu(cpu, cpumask)
|
||||
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
|
||||
}
|
||||
|
||||
void arch_send_call_function_single_ipi(int cpu)
|
||||
{
|
||||
send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC);
|
||||
}
|
||||
|
||||
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
|
||||
{
|
||||
send_ipi(mask, BFIN_IPI_CALL_FUNC);
|
||||
}
|
||||
|
||||
void smp_send_reschedule(int cpu)
|
||||
{
|
||||
send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void smp_send_msg(const struct cpumask *mask, unsigned long type)
|
||||
{
|
||||
send_ipi(mask, type);
|
||||
}
|
||||
|
||||
void smp_timer_broadcast(const struct cpumask *mask)
|
||||
{
|
||||
smp_send_msg(mask, BFIN_IPI_TIMER);
|
||||
}
|
||||
|
||||
void smp_send_stop(void)
|
||||
{
|
||||
cpumask_t callmap;
|
||||
|
||||
preempt_disable();
|
||||
cpumask_copy(&callmap, cpu_online_mask);
|
||||
cpumask_clear_cpu(smp_processor_id(), &callmap);
|
||||
if (!cpumask_empty(&callmap))
|
||||
send_ipi(&callmap, BFIN_IPI_CPU_STOP);
|
||||
|
||||
preempt_enable();
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int __cpu_up(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
int ret;
|
||||
|
||||
secondary_stack = task_stack_page(idle) + THREAD_SIZE;
|
||||
|
||||
ret = platform_boot_secondary(cpu, idle);
|
||||
|
||||
secondary_stack = NULL;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void setup_secondary(unsigned int cpu)
|
||||
{
|
||||
unsigned long ilat;
|
||||
|
||||
bfin_write_IMASK(0);
|
||||
CSYNC();
|
||||
ilat = bfin_read_ILAT();
|
||||
CSYNC();
|
||||
bfin_write_ILAT(ilat);
|
||||
CSYNC();
|
||||
|
||||
/* Enable interrupt levels IVG7-15. IARs have been already
|
||||
* programmed by the boot CPU. */
|
||||
bfin_irq_flags |= IMASK_IVG15 |
|
||||
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
|
||||
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
|
||||
}
|
||||
|
||||
void secondary_start_kernel(void)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
struct mm_struct *mm = &init_mm;
|
||||
|
||||
if (_bfin_swrst & SWRST_DBL_FAULT_B) {
|
||||
printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
|
||||
#ifdef CONFIG_DEBUG_DOUBLEFAULT
|
||||
printk(KERN_EMERG " While handling exception (EXCAUSE = %#x) at %pF\n",
|
||||
initial_pda_coreb.seqstat_doublefault & SEQSTAT_EXCAUSE,
|
||||
initial_pda_coreb.retx_doublefault);
|
||||
printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n",
|
||||
initial_pda_coreb.dcplb_doublefault_addr);
|
||||
printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n",
|
||||
initial_pda_coreb.icplb_doublefault_addr);
|
||||
#endif
|
||||
printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
|
||||
initial_pda_coreb.retx);
|
||||
}
|
||||
|
||||
/*
|
||||
* We want the D-cache to be enabled early, in case the atomic
|
||||
* support code emulates cache coherence (see
|
||||
* __ARCH_SYNC_CORE_DCACHE).
|
||||
*/
|
||||
init_exception_vectors();
|
||||
|
||||
local_irq_disable();
|
||||
|
||||
/* Attach the new idle task to the global mm. */
|
||||
atomic_inc(&mm->mm_users);
|
||||
atomic_inc(&mm->mm_count);
|
||||
current->active_mm = mm;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
setup_secondary(cpu);
|
||||
|
||||
platform_secondary_init(cpu);
|
||||
/* setup local core timer */
|
||||
bfin_local_timer_setup();
|
||||
|
||||
local_irq_enable();
|
||||
|
||||
bfin_setup_caches(cpu);
|
||||
|
||||
notify_cpu_starting(cpu);
|
||||
/*
|
||||
* Calibrate loops per jiffy value.
|
||||
* IRQs need to be enabled here - D-cache can be invalidated
|
||||
* in timer irq handler, so core B can read correct jiffies.
|
||||
*/
|
||||
calibrate_delay();
|
||||
|
||||
/* We are done with local CPU inits, unblock the boot CPU. */
|
||||
set_cpu_online(cpu, true);
|
||||
cpu_startup_entry(CPUHP_ONLINE);
|
||||
}
|
||||
|
||||
void __init smp_prepare_boot_cpu(void)
|
||||
{
|
||||
}
|
||||
|
||||
/*
 * Prepare to bring secondary cores online: platform-level bring-up,
 * IPI message-queue initialization, and registration of the two
 * supplemental-interrupt IPI lines with their handlers.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	bfin_ipi_init();
	/* NOTE(review): assumes SUPPLE_0/SUPPLE_1 map to the message types
	 * serviced by ipi_handler_int0/int1 respectively — confirm against
	 * the handler definitions earlier in this file. */
	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
/*
 * Final SMP bring-up hook: report how many processors came online and
 * their combined BogoMIPS rating.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned int i;
	unsigned long total_lpj = 0;

	/* Accumulate loops_per_jiffy once per online core. */
	for_each_online_cpu(i)
		total_lpj += loops_per_jiffy;

	printk(KERN_INFO
	       "SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       total_lpj / (500000/HZ),
	       (total_lpj / (5000/HZ)) % 100);
}
/*
 * Ask every other online core to invalidate its I-cache over the given
 * address range.  The request runs synchronously (wait == 1).
 *
 * NOTE(review): smp_flush_data is a shared global that is written before
 * preemption is disabled and with no lock held — concurrent callers could
 * race.  Confirm callers serialize this path.
 */
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	preempt_disable();
	/* smp_call_function() returns nonzero on failure. */
	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
	preempt_enable();
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
#ifdef __ARCH_SYNC_CORE_ICACHE
/* Per-CPU count of full I-cache invalidations (diagnostic counter). */
unsigned long icache_invld_count[NR_CPUS];

/*
 * Discard this core's entire I-cache so it observes code written by the
 * other core.  get_cpu()/put_cpu() pin the task to one CPU while the
 * invalidate and the per-CPU counter update happen.
 */
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE
/* Per-CPU count of full D-cache invalidations (diagnostic counter). */
unsigned long dcache_invld_count[NR_CPUS];
/* Placed in the .l2.bss section (on-chip L2 memory) via the attribute. */
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

/*
 * Discard this core's entire D-cache so it observes data written by the
 * other core.  get_cpu()/put_cpu() pin the task to one CPU while the
 * invalidate and the per-CPU counter update happen.
 */
void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove the calling CPU from the online map in preparation for hotplug
 * removal.  The boot CPU (cpu 0) may never be disabled.
 *
 * Returns 0 on success, -EPERM when called on cpu 0.
 */
int __cpu_disable(void)
{
	unsigned int me = smp_processor_id();

	if (me != 0) {
		set_cpu_online(me, false);
		return 0;
	}

	return -EPERM;
}
/* Signalled by the dying CPU from cpu_die(). */
static DECLARE_COMPLETION(cpu_killed);

/*
 * Called on a surviving CPU: wait for the dying CPU to signal
 * cpu_killed.  Returns 0 on timeout, nonzero (remaining jiffies)
 * otherwise.
 *
 * NOTE(review): the timeout argument to wait_for_completion_timeout()
 * is in jiffies, so the bare 5000 here is HZ-dependent — consider
 * msecs_to_jiffies(5000) if a 5 second wait was intended.
 */
int __cpu_die(unsigned int cpu)
{
	return wait_for_completion_timeout(&cpu_killed, 5000);
}
/*
 * Last code run by a CPU going offline: notify the waiter in
 * __cpu_die(), drop this CPU's references on init_mm (taken in
 * secondary_start_kernel()), then hand off to the platform to actually
 * stop the core.  platform_cpu_die() is not expected to return.
 */
void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	/* No further interrupts on a core that is about to be stopped. */
	local_irq_disable();
	platform_cpu_die();
}
#endif