Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

arch/hexagon/kernel/Makefile (new file)

@ -0,0 +1,19 @@
extra-y := head.o vmlinux.lds
obj-$(CONFIG_SMP) += smp.o
obj-y += setup.o irq_cpu.o traps.o syscalltab.o signal.o time.o
obj-y += process.o trampoline.o reset.o ptrace.o vdso.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += module.o hexagon_ksyms.o
# Modules required to work with the Hexagon Virtual Machine
obj-y += vm_entry.o vm_events.o vm_switch.o vm_ops.o vm_init_segtable.o
obj-y += vm_vectors.o
obj-$(CONFIG_HAS_DMA) += dma.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_VGA_CONSOLE) += screen_info.o

arch/hexagon/kernel/asm-offsets.c (new file)

@ -0,0 +1,105 @@
/*
* Copyright (C) 1996 David S. Miller
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kbuild.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
/* This file is used to produce asm/linkerscript constants from header
files typically used in c. Specifically, it generates asm-offsets.h */
int main(void)
{
COMMENT("This is a comment.");
/* might get these from somewhere else. */
DEFINE(_PAGE_SIZE, PAGE_SIZE);
DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
BLANK();
COMMENT("Hexagon pt_regs definitions");
OFFSET(_PT_SYSCALL_NR, pt_regs, syscall_nr);
OFFSET(_PT_GPUGP, pt_regs, gpugp);
OFFSET(_PT_CS1CS0, pt_regs, cs1cs0);
OFFSET(_PT_R3130, pt_regs, r3130);
OFFSET(_PT_R2928, pt_regs, r2928);
OFFSET(_PT_R2726, pt_regs, r2726);
OFFSET(_PT_R2524, pt_regs, r2524);
OFFSET(_PT_R2322, pt_regs, r2322);
OFFSET(_PT_R2120, pt_regs, r2120);
OFFSET(_PT_R1918, pt_regs, r1918);
OFFSET(_PT_R1716, pt_regs, r1716);
OFFSET(_PT_R1514, pt_regs, r1514);
OFFSET(_PT_R1312, pt_regs, r1312);
OFFSET(_PT_R1110, pt_regs, r1110);
OFFSET(_PT_R0908, pt_regs, r0908);
OFFSET(_PT_R0706, pt_regs, r0706);
OFFSET(_PT_R0504, pt_regs, r0504);
OFFSET(_PT_R0302, pt_regs, r0302);
OFFSET(_PT_R0100, pt_regs, r0100);
OFFSET(_PT_LC0SA0, pt_regs, lc0sa0);
OFFSET(_PT_LC1SA1, pt_regs, lc1sa1);
OFFSET(_PT_M1M0, pt_regs, m1m0);
OFFSET(_PT_PREDSUSR, pt_regs, predsusr);
OFFSET(_PT_EVREC, pt_regs, hvmer);
OFFSET(_PT_ER_VMEL, pt_regs, hvmer.vmel);
OFFSET(_PT_ER_VMEST, pt_regs, hvmer.vmest);
OFFSET(_PT_ER_VMPSP, pt_regs, hvmer.vmpsp);
OFFSET(_PT_ER_VMBADVA, pt_regs, hvmer.vmbadva);
DEFINE(_PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
COMMENT("Hexagon thread_info definitions");
OFFSET(_THREAD_INFO_FLAGS, thread_info, flags);
OFFSET(_THREAD_INFO_PT_REGS, thread_info, regs);
OFFSET(_THREAD_INFO_SP, thread_info, sp);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
BLANK();
COMMENT("Hexagon hexagon_switch_stack definitions");
OFFSET(_SWITCH_R1716, hexagon_switch_stack, r1716);
OFFSET(_SWITCH_R1918, hexagon_switch_stack, r1918);
OFFSET(_SWITCH_R2120, hexagon_switch_stack, r2120);
OFFSET(_SWITCH_R2322, hexagon_switch_stack, r2322);
OFFSET(_SWITCH_R2524, hexagon_switch_stack, r2524);
OFFSET(_SWITCH_R2726, hexagon_switch_stack, r2726);
OFFSET(_SWITCH_FP, hexagon_switch_stack, fp);
OFFSET(_SWITCH_LR, hexagon_switch_stack, lr);
DEFINE(_SWITCH_STACK_SIZE, sizeof(struct hexagon_switch_stack));
BLANK();
COMMENT("Hexagon task_struct definitions");
OFFSET(_TASK_THREAD_INFO, task_struct, stack);
OFFSET(_TASK_STRUCT_THREAD, task_struct, thread);
COMMENT("Hexagon thread_struct definitions");
OFFSET(_THREAD_STRUCT_SWITCH_SP, thread_struct, switch_sp);
return 0;
}
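
The constants produced here are consumed by the assembly sources in this directory; head.S below includes <asm/asm-offsets.h> and uses _THREAD_SIZE and _PAGE_SIZE, for example. As a rough sketch, the generated header is just a list of #define lines of the following shape; the numeric values are illustrative only, since the real ones are computed from the struct layouts at build time:

/* asm-offsets.h (generated) -- illustrative values, not the real ones */
#define _PAGE_SIZE 16384 /* PAGE_SIZE */
#define _THREAD_INFO_SP 8 /* offsetof(struct thread_info, sp) */
#define _PT_REGS_SIZE 192 /* sizeof(struct pt_regs) */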

arch/hexagon/kernel/dma.c (new file)

@ -0,0 +1,229 @@
/*
* DMA implementation for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/dma-mapping.h>
#include <linux/module.h>
#include <asm/page.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
int bad_dma_address; /* globals are automatically initialized to zero */
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
return phys_to_virt((unsigned long) dma_addr);
}
int dma_supported(struct device *dev, u64 mask)
{
if (mask == DMA_BIT_MASK(32))
return 1;
else
return 0;
}
EXPORT_SYMBOL(dma_supported);
int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
EXPORT_SYMBOL(dma_set_mask);
static struct gen_pool *coherent_pool;
/* Allocates from a pool of uncached memory that was reserved at boot time */
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs)
{
void *ret;
/*
* Our max_low_pfn should have been backed off by 16MB in
* mm/init.c to create DMA coherent space. Use that as the VA
* for the pool.
*/
if (coherent_pool == NULL) {
coherent_pool = gen_pool_create(PAGE_SHIFT, -1);
if (coherent_pool == NULL)
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}
ret = (void *) gen_pool_alloc(coherent_pool, size);
if (ret) {
memset(ret, 0, size);
*dma_addr = (dma_addr_t) virt_to_phys(ret);
} else
*dma_addr = ~0;
return ret;
}
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr, struct dma_attrs *attrs)
{
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
static int check_addr(const char *name, struct device *hwdev,
dma_addr_t bus, size_t size)
{
if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
printk(KERN_ERR
"%s: overflow %Lx+%zu of device mask %Lx\n",
name, (long long)bus, size,
(long long)*hwdev->dma_mask);
return 0;
}
return 1;
}
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
WARN_ON(nents == 0 || sg[0].length == 0);
for_each_sg(sg, s, nents, i) {
s->dma_address = sg_phys(s);
if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
return 0;
s->dma_length = s->length;
flush_dcache_range(dma_addr_to_virt(s->dma_address),
dma_addr_to_virt(s->dma_address + s->length));
}
return nents;
}
/*
* address is virtual
*/
static inline void dma_sync(void *addr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
hexagon_clean_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
case DMA_FROM_DEVICE:
hexagon_inv_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
case DMA_BIDIRECTIONAL:
flush_dcache_range((unsigned long) addr,
(unsigned long) addr + size);
break;
default:
BUG();
}
}
/**
* hexagon_map_page() - maps an address for device DMA
* @dev: pointer to DMA device
* @page: pointer to page struct of DMA memory
* @offset: offset within page
* @size: size of memory to map
* @dir: transfer direction
* @attrs: pointer to DMA attrs (not used)
*
* Called to map a memory address to a DMA address prior
* to accesses to/from device.
*
* We don't particularly have many hoops to jump through
* so far. Straight translation between phys and virtual.
*
* DMA is not cache coherent so sync is necessary; this
* seems to be a convenient place to do it.
*
*/
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return bad_dma_address;
dma_sync(dma_addr_to_virt(bus), size, dir);
return bus;
}
static void hexagon_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
static void hexagon_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
struct dma_map_ops hexagon_dma_ops = {
.alloc = hexagon_dma_alloc_coherent,
.free = hexagon_free_coherent,
.map_sg = hexagon_map_sg,
.map_page = hexagon_map_page,
.sync_single_for_cpu = hexagon_sync_single_for_cpu,
.sync_single_for_device = hexagon_sync_single_for_device,
.is_phys = 1,
};
void __init hexagon_dma_init(void)
{
if (dma_ops)
return;
dma_ops = &hexagon_dma_ops;
}
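
For orientation only (nothing below is part of this commit), a driver reaches these ops through the generic DMA API; a minimal sketch, assuming some platform struct device *dev:

/* Hypothetical driver-side use of the generic DMA API backed by the ops above. */
static int example_dma_buffer(struct device *dev)
{
	dma_addr_t dma_handle;
	void *buf;

	if (dma_set_mask(dev, DMA_BIT_MASK(32)))	/* routed to dma_supported() above */
		return -EIO;

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with dma_handle while the CPU uses buf ... */

	dma_free_coherent(dev, PAGE_SIZE, buf, dma_handle);
	return 0;
}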

arch/hexagon/kernel/head.S (new file)

@ -0,0 +1,237 @@
/*
* Early kernel startup code for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>
#define SEGTABLE_ENTRIES #0x0e0
__INIT
ENTRY(stext)
/*
* VMM will already have set up true vector page, MMU, etc.
* To set up initial kernel identity map, we have to pass
* the VMM a pointer to some canonical page tables. In
* this implementation, we're assuming that we've got
* them precompiled. Generate value in R24, as we'll need
* it again shortly.
*/
r24.L = #LO(swapper_pg_dir)
r24.H = #HI(swapper_pg_dir)
/*
* Symbol is kernel segment address, but we need
* the logical/physical address.
*/
r25 = pc;
r2.h = #0xffc0;
r2.l = #0x0000;
r25 = and(r2,r25); /* R25 holds PHYS_OFFSET now */
r1.h = #HI(PAGE_OFFSET);
r1.l = #LO(PAGE_OFFSET);
r24 = sub(r24,r1); /* swapper_pg_dir - PAGE_OFFSET */
r24 = add(r24,r25); /* + PHYS_OFFSET */
r0 = r24; /* aka __pa(swapper_pg_dir) */
/*
* Initialize page dir to make the virtual and physical
* addresses where the kernel was loaded be identical.
* Done in 4MB chunks.
*/
#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_4MB)
/*
* Get number of VA=PA entries; only really needed for jump
* to hyperspace; gets blown away immediately after
*/
{
r1.l = #LO(_end);
r2.l = #LO(stext);
r3 = #1;
}
{
r1.h = #HI(_end);
r2.h = #HI(stext);
r3 = asl(r3, #22);
}
{
r1 = sub(r1, r2);
r3 = add(r3, #-1);
} /* r1 = _end - stext */
r1 = add(r1, r3); /* + (4M-1) */
r26 = lsr(r1, #22); /* / 4M = # of entries */
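/*
 * Worked example (illustrative): for a 6 MB kernel image,
 * r1 = 0x600000 + 0x3fffff, so r26 = r1 >> 22 = 2 and the loop
 * below writes two 4 MB VA==PA page-table entries.
 */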
r1 = r25;
r2.h = #0xffc0;
r2.l = #0x0000; /* round back down to 4MB boundary */
r1 = and(r1,r2);
r2 = lsr(r1, #22) /* 4MB page number */
r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
r0 = add(r0,r2) /* r0 = address of correct PTE */
r2 = #PTE_BITS
r1 = add(r1,r2) /* r1 = 4MB PTE for the first entry */
r2.h = #0x0040
r2.l = #0x0000 /* 4MB increments */
loop0(1f,r26);
1:
memw(r0 ++ #4) = r1
{ r1 = add(r1, r2); } :endloop0
/* Also need to overwrite the initial 0xc0000000 entries */
/* PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift) */
R1.H = #HI(PAGE_OFFSET >> (22 - 2))
R1.L = #LO(PAGE_OFFSET >> (22 - 2))
r0 = add(r1, r24); /* advance to 0xc0000000 entry */
r1 = r25;
r2.h = #0xffc0;
r2.l = #0x0000; /* round back down to 4MB boundary */
r1 = and(r1,r2); /* for huge page */
r2 = #PTE_BITS
r1 = add(r1,r2);
r2.h = #0x0040
r2.l = #0x0000 /* 4MB increments */
loop0(1f,SEGTABLE_ENTRIES);
1:
memw(r0 ++ #4) = r1;
{ r1 = add(r1,r2); } :endloop0
r0 = r24;
/*
* The subroutine wrapper around the virtual instruction touches
* no memory, so we should be able to use it even here.
* Note that in this version, R1 and R2 get "clobbered"; see
* vm_ops.S
*/
r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap;
/* Jump into virtual address range. */
r31.h = #hi(__head_s_vaddr_target)
r31.l = #lo(__head_s_vaddr_target)
jumpr r31
/* Insert trippy space effects. */
__head_s_vaddr_target:
/*
* Tear down VA=PA translation now that we are running
* in kernel virtual space.
*/
r0 = #__HVM_PDE_S_INVALID
r1.h = #0xffc0;
r1.l = #0x0000;
r2 = r25; /* phys_offset */
r2 = and(r1,r2);
r1.l = #lo(swapper_pg_dir)
r1.h = #hi(swapper_pg_dir)
r2 = lsr(r2, #22) /* 4MB page number */
r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
r1 = add(r1,r2);
loop0(1f,r26)
1:
{
memw(R1 ++ #4) = R0
}:endloop0
r0 = r24
r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap
/* Go ahead and install the trap0 return so angel calls work */
r0.h = #hi(_K_provisional_vec)
r0.l = #lo(_K_provisional_vec)
call __vmsetvec
/*
* OK, at this point we should start to be much more careful,
* we're going to enter C code and start touching memory
* in all sorts of places.
* This means:
* SGP needs to be OK
* Need to lock shared resources
* A bunch of other things that will cause
* all kinds of painful bugs
*/
/*
* Stack pointer should be pointed at the init task's
* thread stack, which should have been declared in arch/init_task.c.
* So uhhhhh...
* It's accessible via the init_thread_union, which is a union
* of a thread_info struct and a stack; of course, the top
* of the stack is not for you. The end of the stack
* is simply init_thread_union + THREAD_SIZE.
*/
{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }
/* initialize the register used to point to current_thread_info */
/* Fixme: THREADINFO_REG can't be R2 because of that memset thing. */
{r29 = add(r29,r0); THREADINFO_REG = r29; }
/* Hack: zero bss; */
{ r0.L = #LO(__bss_start); r1 = #0; r2.l = #LO(__bss_stop); }
{ r0.H = #HI(__bss_start); r2.h = #HI(__bss_stop); }
r2 = sub(r2,r0);
call memset;
/* Set PHYS_OFFSET; should be in R25 */
#ifdef CONFIG_HEXAGON_PHYS_OFFSET
r0.l = #LO(__phys_offset);
r0.h = #HI(__phys_offset);
memw(r0) = r25;
#endif
/* Time to make the doughnuts. */
call start_kernel
/*
* Should not reach here.
*/
1:
jump 1b
.p2align PAGE_SHIFT
ENTRY(external_cmdline_buffer)
.fill _PAGE_SIZE,1,0
.data
.p2align PAGE_SHIFT
ENTRY(empty_zero_page)
.fill _PAGE_SIZE,1,0

arch/hexagon/kernel/hexagon_ksyms.c (new file)

@ -0,0 +1,56 @@
/*
* Export of symbols defined in assembly files and/or libgcc.
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/dma-mapping.h>
#include <asm/hexagon_vm.h>
#include <asm/io.h>
#include <asm/uaccess.h>
/* Additional functions */
EXPORT_SYMBOL(__clear_user_hexagon);
EXPORT_SYMBOL(__copy_from_user_hexagon);
EXPORT_SYMBOL(__copy_to_user_hexagon);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__vmgetie);
EXPORT_SYMBOL(__vmsetie);
EXPORT_SYMBOL(__vmyield);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(ioremap_nocache);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
/* Additional variables */
EXPORT_SYMBOL(__phys_offset);
EXPORT_SYMBOL(_dflt_cache_att);
EXPORT_SYMBOL(bad_dma_address);
#define DECLARE_EXPORT(name) \
extern void name(void); EXPORT_SYMBOL(name)
/* Symbols found in libgcc that assorted kernel modules need */
DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
/* Additional functions */
DECLARE_EXPORT(__divsi3);
DECLARE_EXPORT(__modsi3);
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__umodsi3);
DECLARE_EXPORT(csum_tcpudp_magic);
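
For reference, the DECLARE_EXPORT() wrapper used above is nothing more than a forward declaration plus an export; for example:

/* DECLARE_EXPORT(__divsi3) expands to: */
extern void __divsi3(void);
EXPORT_SYMBOL(__divsi3);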

arch/hexagon/kernel/irq_cpu.c (new file)

@ -0,0 +1,90 @@
/*
* First-level interrupt controller model for Hexagon.
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/hexagon_vm.h>
static void mask_irq(struct irq_data *data)
{
__vmintop_locdis((long) data->irq);
}
static void mask_irq_num(unsigned int irq)
{
__vmintop_locdis((long) irq);
}
static void unmask_irq(struct irq_data *data)
{
__vmintop_locen((long) data->irq);
}
/* This is actually all we need for handle_fasteoi_irq */
static void eoi_irq(struct irq_data *data)
{
__vmintop_globen((long) data->irq);
}
/* Power management wake call. We don't need this; however,
* if this is absent, then an -ENXIO error is returned to the
* msm_serial driver, and it fails to correctly initialize.
* This is a bug in the msm_serial driver, but, for now, we
* work around it here, by providing this bogus handler.
* XXX FIXME!!! remove this when msm_serial is fixed.
*/
static int set_wake(struct irq_data *data, unsigned int on)
{
return 0;
}
static struct irq_chip hexagon_irq_chip = {
.name = "HEXAGON",
.irq_mask = mask_irq,
.irq_unmask = unmask_irq,
.irq_set_wake = set_wake,
.irq_eoi = eoi_irq
};
/**
* The hexagon core comes with a first-level interrupt controller
* with 32 total possible interrupts. When the core is embedded
* into different systems/platforms, it is typically wrapped by
* macro cells that provide one or more second-level interrupt
* controllers that are cascaded into one or more of the first-level
* interrupts handled here. The precise wiring of these other
* irqs varies from platform to platform, and are set up & configured
* in the platform-specific files.
*
* The first-level interrupt controller is wrapped by the VM, which
* virtualizes the interrupt controller for us. It provides a very
* simple, fast & efficient API, and so the fasteoi handler is
* appropriate for this case.
*/
void __init init_IRQ(void)
{
int irq;
for (irq = 0; irq < HEXAGON_CPUINTS; irq++) {
mask_irq_num(irq);
irq_set_chip_and_handler(irq, &hexagon_irq_chip,
handle_fasteoi_irq);
}
}
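
As a usage sketch only (nothing below is part of this file), a platform or device driver would then claim one of these 32 VM-managed first-level lines through the usual request_irq() API; the handler, device name, and line number are all hypothetical:

/* Hypothetical consumer of one first-level interrupt line. */
static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int __init my_dev_claim_irq(void)
{
	unsigned int my_irq = 3;	/* hypothetical first-level line */

	return request_irq(my_irq, my_dev_isr, 0, "my-device", NULL);
}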

arch/hexagon/kernel/kgdb.c (new file)

@ -0,0 +1,258 @@
/*
* arch/hexagon/kernel/kgdb.c - Hexagon KGDB Support
*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
/* All registers are 4 bytes, for now */
#define GDB_SIZEOF_REG 4
/* The register names are used during printing of the regs;
* Keep these at three letters to pretty-print. */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
{ " r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, r00)},
{ " r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, r01)},
{ " r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, r02)},
{ " r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, r03)},
{ " r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, r04)},
{ " r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, r05)},
{ " r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, r06)},
{ " r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, r07)},
{ " r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, r08)},
{ " r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, r09)},
{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, r10)},
{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, r11)},
{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, r12)},
{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, r13)},
{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, r14)},
{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, r15)},
{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, r16)},
{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, r17)},
{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, r18)},
{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, r19)},
{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, r20)},
{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, r21)},
{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, r22)},
{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, r23)},
{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, r24)},
{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, r25)},
{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, r26)},
{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, r27)},
{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, r28)},
{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, r29)},
{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, r30)},
{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, r31)},
{ "usr", GDB_SIZEOF_REG, offsetof(struct pt_regs, usr)},
{ "preds", GDB_SIZEOF_REG, offsetof(struct pt_regs, preds)},
{ " m0", GDB_SIZEOF_REG, offsetof(struct pt_regs, m0)},
{ " m1", GDB_SIZEOF_REG, offsetof(struct pt_regs, m1)},
{ "sa0", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa0)},
{ "sa1", GDB_SIZEOF_REG, offsetof(struct pt_regs, sa1)},
{ "lc0", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc0)},
{ "lc1", GDB_SIZEOF_REG, offsetof(struct pt_regs, lc1)},
{ " gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
{ "ugp", GDB_SIZEOF_REG, offsetof(struct pt_regs, ugp)},
{ "cs0", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs0)},
{ "cs1", GDB_SIZEOF_REG, offsetof(struct pt_regs, cs1)},
{ "psp", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmpsp)},
{ "elr", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmel)},
{ "est", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmest)},
{ "badva", GDB_SIZEOF_REG, offsetof(struct pt_regs, hvmer.vmbadva)},
{ "restart_r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, restart_r0)},
{ "syscall_nr", GDB_SIZEOF_REG, offsetof(struct pt_regs, syscall_nr)},
};
struct kgdb_arch arch_kgdb_ops = {
/* trap0(#0xDB) 0x0cdb0054 */
.gdb_bpt_instr = {0x54, 0x00, 0xdb, 0x0c},
};
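/*
 * Note: the byte array above is the little-endian layout of the 32-bit
 * word 0x0cdb0054, i.e. the trap0(#0xDB) encoding cited in the comment.
 */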
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return NULL;
*((unsigned long *) mem) = *((unsigned long *) ((void *)regs +
dbg_reg_def[regno].offset));
return dbg_reg_def[regno].name;
}
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
if (regno >= DBG_MAX_REG_NUM || regno < 0)
return -EINVAL;
*((unsigned long *) ((void *)regs + dbg_reg_def[regno].offset)) =
*((unsigned long *) mem);
return 0;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
instruction_pointer(regs) = pc;
}
#ifdef CONFIG_SMP
/**
* kgdb_roundup_cpus - Get other CPUs into a holding pattern
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
* and get them be in a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in case of MIPS, smp_call_function() is used to roundup CPUs. In
* this case, we have to make sure that interrupts are enabled before
* calling smp_call_function(). The argument to this function is
* the flags that will be used when restoring the interrupts. There is
* local_irq_save() call before kgdb_roundup_cpus().
*
* On non-SMP systems, this is not called.
*/
static void hexagon_kgdb_nmi_hook(void *ignored)
{
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
void kgdb_roundup_cpus(unsigned long flags)
{
local_irq_enable();
smp_call_function(hexagon_kgdb_nmi_hook, NULL, 0);
local_irq_disable();
}
#endif
/* Not yet working */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
struct task_struct *task)
{
struct pt_regs *thread_regs;
if (task == NULL)
return;
/* Initialize to zero */
memset(gdb_regs, 0, NUMREGBYTES);
/* Otherwise, we have only some registers from switch_to() */
thread_regs = task_pt_regs(task);
gdb_regs[0] = thread_regs->r00;
}
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
* @vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
* @remcom_in_buffer: The buffer of the packet we have read.
* @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
* @regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
* as well as packets to set / remove a hardware breakpoint, if used.
* If there are additional packets which the hardware needs to handle,
* they are handled here. The code should return -1 if it wants to
* process more packets, and a %0 or %1 if it wants to exit from the
* kgdb callback.
*
* Not yet working.
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
struct pt_regs *linux_regs)
{
switch (remcom_in_buffer[0]) {
case 's':
case 'c':
return 0;
}
/* Stay in the debugger. */
return -1;
}
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
/* cpu roundup */
if (atomic_read(&kgdb_active) != -1) {
kgdb_nmicallback(smp_processor_id(), args->regs);
return NOTIFY_STOP;
}
if (user_mode(args->regs))
return NOTIFY_DONE;
if (kgdb_handle_exception(args->trapnr & 0xff, args->signr, args->err,
args->regs))
return NOTIFY_DONE;
return NOTIFY_STOP;
}
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
unsigned long flags;
int ret;
local_irq_save(flags);
ret = __kgdb_notify(ptr, cmd);
local_irq_restore(flags);
return ret;
}
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,
/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = -INT_MAX,
};
/**
* kgdb_arch_init - Perform any architecture specific initialization.
*
* This function will handle the initialization of any architecture
* specific callbacks.
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
}
/**
* kgdb_arch_exit - Perform any architecture specific uninitialization.
*
* This function will handle the uninitialization of any architecture
* specific callbacks, for dynamic registration and unregistration.
*/
void kgdb_arch_exit(void)
{
unregister_die_notifier(&kgdb_notifier);
}

arch/hexagon/kernel/module.c (new file)

@ -0,0 +1,162 @@
/*
* Kernel module loader for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/module.h>
#include <linux/elf.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)
#endif
/*
* module_frob_arch_sections - tweak got/plt sections.
* @hdr - pointer to elf header
* @sechdrs - pointer to elf load section headers
* @secstrings - symbol names
* @mod - pointer to module
*/
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
unsigned int i;
int found = 0;
/* Look for .plt and/or .got.plt and/or .init.plt sections */
for (i = 0; i < hdr->e_shnum; i++) {
DEBUGP("Section %d is %s\n", i,
secstrings + sechdrs[i].sh_name);
if (strcmp(secstrings + sechdrs[i].sh_name, ".plt") == 0)
found = i+1;
if (strcmp(secstrings + sechdrs[i].sh_name, ".got.plt") == 0)
found = i+1;
if (strcmp(secstrings + sechdrs[i].sh_name, ".rela.plt") == 0)
found = i+1;
}
/* At this time, we don't support modules compiled with -shared */
if (found) {
printk(KERN_WARNING
"Module '%s' contains unexpected .plt/.got sections.\n",
mod->name);
/* return -ENOEXEC; */
}
return 0;
}
/*
* apply_relocate_add - perform rela relocations.
* @sechdrs - pointer to section headers
* @strtab - pointer to the string table
* @symindex - section index of the symbol table
* @relsec - section index of the relocation section to apply
* @module - pointer to module
*
* Perform rela relocations.
*/
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *module)
{
unsigned int i;
Elf32_Sym *sym;
uint32_t *location;
uint32_t value;
unsigned int nrelocs = sechdrs[relsec].sh_size / sizeof(Elf32_Rela);
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Word sym_info = sechdrs[relsec].sh_info;
Elf32_Sym *sym_base = (Elf32_Sym *) sechdrs[symindex].sh_addr;
void *loc_base = (void *) sechdrs[sym_info].sh_addr;
DEBUGP("Applying relocations in section %u to section %u base=%p\n",
relsec, sym_info, loc_base);
for (i = 0; i < nrelocs; i++) {
/* Symbol to relocate */
sym = sym_base + ELF32_R_SYM(rela[i].r_info);
/* Where to make the change */
location = loc_base + rela[i].r_offset;
/* `Everything is relative'. */
value = sym->st_value + rela[i].r_addend;
DEBUGP("%d: value=%08x loc=%p reloc=%d symbol=%s\n",
i, value, location, ELF32_R_TYPE(rela[i].r_info),
sym->st_name ?
&strtab[sym->st_name] : "(anonymous)");
switch (ELF32_R_TYPE(rela[i].r_info)) {
case R_HEXAGON_B22_PCREL: {
int dist = (int)(value - (uint32_t)location);
if ((dist < -0x00800000) ||
(dist >= 0x00800000)) {
printk(KERN_ERR
"%s: %s: %08x=%08x-%08x %s\n",
module->name,
"R_HEXAGON_B22_PCREL reloc out of range",
dist, value, (uint32_t)location,
sym->st_name ?
&strtab[sym->st_name] : "(anonymous)");
return -ENOEXEC;
}
DEBUGP("B22_PCREL contents: %08X.\n", *location);
*location &= ~0x01ff3fff;
*location |= 0x00003fff & dist;
*location |= 0x01ff0000 & (dist<<2);
DEBUGP("Contents after reloc: %08x\n", *location);
break;
}
case R_HEXAGON_HI16:
value = (value>>16) & 0xffff;
/* fallthrough */
case R_HEXAGON_LO16:
*location &= ~0x00c03fff;
*location |= value & 0x3fff;
*location |= (value & 0xc000) << 8;
break;
case R_HEXAGON_32:
*location = value;
break;
case R_HEXAGON_32_PCREL:
*location = value - (uint32_t)location;
break;
case R_HEXAGON_PLT_B22_PCREL:
case R_HEXAGON_GOTOFF_LO16:
case R_HEXAGON_GOTOFF_HI16:
printk(KERN_ERR "%s: GOT/PLT relocations unsupported\n",
module->name);
return -ENOEXEC;
default:
printk(KERN_ERR "%s: unknown relocation: %u\n",
module->name,
ELF32_R_TYPE(rela[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
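
To make the R_HEXAGON_B22_PCREL range check above concrete, one worked instance with purely illustrative addresses:

/*
 * Illustrative only:
 *   value    = sym->st_value + r_addend   = 0xc0801000
 *   location = address of the word to fix = 0xc0100000
 *   dist     = value - location           = 0x00701000  (about 7 MB)
 * 0x00701000 < 0x00800000, so the target is inside the +/-8 MB window
 * and the relocation is applied rather than rejected with -ENOEXEC.
 */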

arch/hexagon/kernel/process.c (new file)

@ -0,0 +1,227 @@
/*
* Process creation support for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/tick.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/tracehook.h>
/*
* Program thread launch. Often defined as a macro in processor.h,
* but we're shooting for a small footprint and it's not an inner-loop
* performance-critical operation.
*
* The Hexagon ABI specifies that R28 is zero'ed before program launch,
* so that gets automatically done here. If we ever stop doing that here,
* we'll probably want to define the ELF_PLAT_INIT macro.
*/
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
/* Set to run with user-mode data segmentation */
set_fs(USER_DS);
/* We want to zero all data-containing registers. Is this overkill? */
memset(regs, 0, sizeof(*regs));
/* We might want to also zero all Processor registers here */
pt_set_usermode(regs);
pt_set_elr(regs, pc);
pt_set_rte_sp(regs, sp);
}
/*
* Spin, or better still, execute a hardware or VM wait instruction,
* if the hardware or VM offers wait termination even while interrupts
* are disabled.
*/
void arch_cpu_idle(void)
{
__vmwait();
/* interrupts wake us up, but irqs are still disabled */
local_irq_enable();
}
/*
* Return saved PC of a blocked thread
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return 0;
}
/*
* Copy architecture-specific thread state
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct thread_info *ti = task_thread_info(p);
struct hexagon_switch_stack *ss;
struct pt_regs *childregs;
asmlinkage void ret_from_fork(void);
childregs = (struct pt_regs *) (((unsigned long) ti + THREAD_SIZE) -
sizeof(*childregs));
ti->regs = childregs;
/*
* Establish kernel stack pointer and initial PC for new thread
* Note that unlike the usual situation, we do not copy the
* parent's callee-saved here; those are in pt_regs and whatever
* we leave here will be overridden on return to userland.
*/
ss = (struct hexagon_switch_stack *) ((unsigned long) childregs -
sizeof(*ss));
ss->lr = (unsigned long)ret_from_fork;
p->thread.switch_sp = ss;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
/* r24 <- fn, r25 <- arg */
ss->r24 = usp;
ss->r25 = arg;
pt_set_kmode(childregs);
return 0;
}
memcpy(childregs, current_pt_regs(), sizeof(*childregs));
ss->r2524 = 0;
if (usp)
pt_set_rte_sp(childregs, usp);
/* Child sees zero return value */
childregs->r00 = 0;
/*
* The clone syscall has the C signature:
* int [r0] clone(int flags [r0],
* void *child_frame [r1],
* void *parent_tid [r2],
* void *child_tid [r3],
* void *thread_control_block [r4]);
* ugp is used to provide TLS support.
*/
if (clone_flags & CLONE_SETTLS)
childregs->ugp = childregs->r04;
/*
* Parent sees new pid -- not necessary, not even possible at
* this point in the fork process
* Might also want to set things like ti->addr_limit
*/
return 0;
}
/*
* Release any architecture-specific resources locked by thread
*/
void release_thread(struct task_struct *dead_task)
{
}
/*
* Free any architecture-specific thread data structures, etc.
*/
void exit_thread(void)
{
}
/*
* Some archs flush debug and FPU info here
*/
void flush_thread(void)
{
}
/*
* The "wait channel" terminology is archaic, but what we want
* is an identification of the point at which the scheduler
* was invoked by a blocked thread.
*/
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)task_stack_page(p);
fp = ((struct hexagon_switch_stack *)p->thread.switch_sp)->fp;
do {
if (fp < (stack_page + sizeof(struct thread_info)) ||
fp >= (THREAD_SIZE - 8 + stack_page))
return 0;
pc = ((unsigned long *)fp)[1];
if (!in_sched_functions(pc))
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
/*
* Required placeholder.
*/
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
return 0;
}
/*
* Called on the exit path of event entry; see vm_entry.S
*
* Interrupts will already be disabled.
*
* Returns 0 if there's no need to re-check for more work.
*/
int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
{
if (!(thread_info_flags & _TIF_WORK_MASK)) {
return 0;
} /* shortcut -- no work to be done */
local_irq_enable();
if (thread_info_flags & _TIF_NEED_RESCHED) {
schedule();
return 1;
}
if (thread_info_flags & _TIF_SIGPENDING) {
do_signal(regs);
return 1;
}
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
return 1;
}
/* Should not even reach here */
panic("%s: bad thread_info flags 0x%08x\n", __func__,
thread_info_flags);
}

arch/hexagon/kernel/ptrace.c (new file)

@ -0,0 +1,205 @@
/*
* Ptrace support for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <generated/compile.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <asm/user.h>
#if arch_has_single_step()
/* Both called from ptrace_resume */
void user_enable_single_step(struct task_struct *child)
{
pt_set_singlestep(task_pt_regs(child));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
pt_clr_singlestep(task_pt_regs(child));
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
#endif
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
unsigned int dummy;
struct pt_regs *regs = task_pt_regs(target);
if (!regs)
return -EIO;
/* The general idea here is that the copyout must happen in
* exactly the same order in which the userspace expects these
* regs. Now, the sequence in userspace does not match the
* sequence in the kernel, so everything past the 32 gprs
* happens one at a time.
*/
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->r00, 0, 32*sizeof(unsigned long));
#define ONEXT(KPT_REG, USR_REG) \
if (!ret) \
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
offsetof(struct user_regs_struct, USR_REG) + \
sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
ONEXT(&regs->sa0, sa0);
ONEXT(&regs->lc0, lc0);
ONEXT(&regs->sa1, sa1);
ONEXT(&regs->lc1, lc1);
ONEXT(&regs->m0, m0);
ONEXT(&regs->m1, m1);
ONEXT(&regs->usr, usr);
ONEXT(&regs->preds, p3_0);
ONEXT(&regs->gp, gp);
ONEXT(&regs->ugp, ugp);
ONEXT(&pt_elr(regs), pc);
dummy = pt_cause(regs);
ONEXT(&dummy, cause);
ONEXT(&pt_badva(regs), badva);
#if CONFIG_HEXAGON_ARCH_VERSION >=4
ONEXT(&regs->cs0, cs0);
ONEXT(&regs->cs1, cs1);
#endif
/* Pad the rest with zeros, if needed */
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
offsetof(struct user_regs_struct, pad1), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
unsigned long bucket;
struct pt_regs *regs = task_pt_regs(target);
if (!regs)
return -EIO;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->r00, 0, 32*sizeof(unsigned long));
#define INEXT(KPT_REG, USR_REG) \
if (!ret) \
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
offsetof(struct user_regs_struct, USR_REG) + \
sizeof(unsigned long));
/* Must be exactly same sequence as struct user_regs_struct */
INEXT(&regs->sa0, sa0);
INEXT(&regs->lc0, lc0);
INEXT(&regs->sa1, sa1);
INEXT(&regs->lc1, lc1);
INEXT(&regs->m0, m0);
INEXT(&regs->m1, m1);
INEXT(&regs->usr, usr);
INEXT(&regs->preds, p3_0);
INEXT(&regs->gp, gp);
INEXT(&regs->ugp, ugp);
INEXT(&pt_elr(regs), pc);
/* CAUSE and BADVA aren't writeable. */
INEXT(&bucket, cause);
INEXT(&bucket, badva);
#if CONFIG_HEXAGON_ARCH_VERSION >=4
INEXT(&regs->cs0, cs0);
INEXT(&regs->cs1, cs1);
#endif
/* Ignore the rest, if needed */
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
offsetof(struct user_regs_struct, pad1), -1);
if (ret)
return ret;
/*
* This is special; SP is actually restored by the VM via the
* special event record which is set by the special trap.
*/
regs->hvmer.vmpsp = regs->r29;
return 0;
}
enum hexagon_regset {
REGSET_GENERAL,
};
static const struct user_regset hexagon_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(unsigned long),
.align = sizeof(unsigned long),
.get = genregs_get,
.set = genregs_set,
},
};
static const struct user_regset_view hexagon_user_view = {
.name = UTS_MACHINE,
.e_machine = ELF_ARCH,
.ei_osabi = ELF_OSABI,
.regsets = hexagon_regsets,
.e_flags = ELF_CORE_EFLAGS,
.n = ARRAY_SIZE(hexagon_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &hexagon_user_view;
}
void ptrace_disable(struct task_struct *child)
{
/* Boilerplate - resolves to null inline if no HW single-step */
user_disable_single_step(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
return ptrace_request(child, request, addr, data);
}
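
For clarity, each ONEXT()/INEXT() step in genregs_get()/genregs_set() above is a mechanical macro expansion; for instance:

/* ONEXT(&regs->sa0, sa0) in genregs_get() expands to: */
if (!ret)
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
			&regs->sa0,
			offsetof(struct user_regs_struct, sa0),
			offsetof(struct user_regs_struct, sa0) +
				sizeof(unsigned long));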

arch/hexagon/kernel/reset.c (new file)

@ -0,0 +1,37 @@
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/smp.h>
#include <asm/hexagon_vm.h>
void machine_power_off(void)
{
smp_send_stop();
__vmstop();
}
void machine_halt(void)
{
}
void machine_restart(char *cmd)
{
}
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

arch/hexagon/kernel/screen_info.c (new file)

@ -0,0 +1,3 @@
#include <linux/screen_info.h>
struct screen_info screen_info;

arch/hexagon/kernel/setup.c (new file)

@ -0,0 +1,149 @@
/*
* Arch related setup for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/of_fdt.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/hexagon_vm.h>
#include <asm/vm_mmu.h>
#include <asm/time.h>
char cmd_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
int on_simulator;
void calibrate_delay(void)
{
loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
}
/*
* setup_arch - high level architectural setup routine
* @cmdline_p: pointer to pointer to command-line arguments
*/
void __init setup_arch(char **cmdline_p)
{
char *p = &external_cmdline_buffer;
/*
* These will eventually be pulled in via either some hypervisor
* or devicetree description. Hardwiring for now.
*/
pcycle_freq_mhz = 600;
thread_freq_mhz = 100;
sleep_clk_freq = 32000;
/*
* Set up event bindings to handle exceptions and interrupts.
*/
__vmsetvec(_K_VM_event_vector);
printk(KERN_INFO "PHYS_OFFSET=0x%08x\n", PHYS_OFFSET);
/*
* Simulator has a few differences from the hardware.
* For now, check uninitialized-but-mapped memory
* prior to invoking setup_arch_memory().
*/
if (*(int *)((unsigned long)_end + 8) == 0x1f1f1f1f)
on_simulator = 1;
else
on_simulator = 0;
if (p[0] != '\0')
strlcpy(boot_command_line, p, COMMAND_LINE_SIZE);
else
strlcpy(boot_command_line, default_command_line,
COMMAND_LINE_SIZE);
/*
* boot_command_line and the value set up by setup_arch
* are both picked up by the init code. If no reason to
* make them different, pass the same pointer back.
*/
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
parse_early_param();
setup_arch_memory();
#ifdef CONFIG_SMP
smp_start_cpus();
#endif
}
/*
* Functions for dumping CPU info via /proc
* Probably should move to kernel/proc.c or something.
*/
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
/*
* Eventually this will dump information about
* CPU properties like ISA level, TLB size, etc.
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
int cpu = (unsigned long) v - 1;
#ifdef CONFIG_SMP
if (!cpu_online(cpu))
return 0;
#endif
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "model name\t: Hexagon Virtual Machine\n");
seq_printf(m, "BogoMips\t: %lu.%02lu\n",
(loops_per_jiffy * HZ) / 500000,
((loops_per_jiffy * HZ) / 5000) % 100);
seq_printf(m, "\n");
return 0;
}
const struct seq_operations cpuinfo_op = {
.start = &c_start,
.next = &c_next,
.stop = &c_stop,
.show = &show_cpuinfo,
};
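
To make the numbers above concrete: thread_freq_mhz is hard-wired to 100 in setup_arch(), and assuming HZ = 100 (a config choice, so treat this as illustrative), the /proc/cpuinfo figures work out as:

/* calibrate_delay():  loops_per_jiffy = 100 * 1000000 / 100 = 1000000   */
/* show_cpuinfo():     (1000000 * 100) / 500000              = 200       */
/*                     ((1000000 * 100) / 5000) % 100        = 0         */
/* => "BogoMips	: 200.00"                                                 */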

arch/hexagon/kernel/signal.c (new file)

@ -0,0 +1,268 @@
/*
* Signal support for Hexagon processor
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <linux/syscalls.h>
#include <linux/tracehook.h>
#include <asm/registers.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>
#include <asm/signal.h>
#include <asm/vdso.h>
struct rt_sigframe {
unsigned long tramp[2];
struct siginfo info;
struct ucontext uc;
};
static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp = sigsp(regs->r29, ksig);
return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1));
}
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
unsigned long tmp;
int err = 0;
err |= copy_to_user(&sc->sc_regs.r0, &regs->r00,
32*sizeof(unsigned long));
err |= __put_user(regs->sa0, &sc->sc_regs.sa0);
err |= __put_user(regs->lc0, &sc->sc_regs.lc0);
err |= __put_user(regs->sa1, &sc->sc_regs.sa1);
err |= __put_user(regs->lc1, &sc->sc_regs.lc1);
err |= __put_user(regs->m0, &sc->sc_regs.m0);
err |= __put_user(regs->m1, &sc->sc_regs.m1);
err |= __put_user(regs->usr, &sc->sc_regs.usr);
err |= __put_user(regs->preds, &sc->sc_regs.p3_0);
err |= __put_user(regs->gp, &sc->sc_regs.gp);
err |= __put_user(regs->ugp, &sc->sc_regs.ugp);
#if CONFIG_HEXAGON_ARCH_VERSION >= 4
err |= __put_user(regs->cs0, &sc->sc_regs.cs0);
err |= __put_user(regs->cs1, &sc->sc_regs.cs1);
#endif
tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc);
tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause);
tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva);
return err;
}
static int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
unsigned long tmp;
int err = 0;
err |= copy_from_user(&regs->r00, &sc->sc_regs.r0,
32 * sizeof(unsigned long));
err |= __get_user(regs->sa0, &sc->sc_regs.sa0);
err |= __get_user(regs->lc0, &sc->sc_regs.lc0);
err |= __get_user(regs->sa1, &sc->sc_regs.sa1);
err |= __get_user(regs->lc1, &sc->sc_regs.lc1);
err |= __get_user(regs->m0, &sc->sc_regs.m0);
err |= __get_user(regs->m1, &sc->sc_regs.m1);
err |= __get_user(regs->usr, &sc->sc_regs.usr);
err |= __get_user(regs->preds, &sc->sc_regs.p3_0);
err |= __get_user(regs->gp, &sc->sc_regs.gp);
err |= __get_user(regs->ugp, &sc->sc_regs.ugp);
#if CONFIG_HEXAGON_ARCH_VERSION >= 4
err |= __get_user(regs->cs0, &sc->sc_regs.cs0);
err |= __get_user(regs->cs1, &sc->sc_regs.cs1);
#endif
err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp);
return err;
}
/*
* Setup signal stack frame with siginfo structure
*/
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
int err = 0;
struct rt_sigframe __user *frame;
struct hexagon_vdso *vdso = current->mm->context.vdso;
frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe));
if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe)))
return -EFAULT;
if (copy_siginfo_to_user(&frame->info, &ksig->info))
return -EFAULT;
/* The on-stack signal trampoline is no longer executed;
* however, the libgcc signal frame unwinding code checks for
* the presence of these two numeric magic values.
*/
err |= __put_user(0x7800d166, &frame->tramp[0]);
err |= __put_user(0x5400c004, &frame->tramp[1]);
err |= setup_sigcontext(regs, &frame->uc.uc_mcontext);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
err |= __save_altstack(&frame->uc.uc_stack, user_stack_pointer(regs));
if (err)
return -EFAULT;
/* Load r0/r1 pair with signumber/siginfo pointer... */
regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32)
| (unsigned long long)ksig->sig;
regs->r02 = (unsigned long) &frame->uc;
regs->r31 = (unsigned long) vdso->rt_signal_trampoline;
pt_psp(regs) = (unsigned long) frame;
pt_set_elr(regs, (unsigned long)ksig->ka.sa.sa_handler);
return 0;
}
/*
* Setup invocation of signal handler
*/
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
int ret;
/*
* If we're handling a signal that aborted a system call,
* set up the error return value before adding the signal
* frame to the stack.
*/
if (regs->syscall_nr >= 0) {
switch (regs->r00) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
regs->r00 = -EINTR;
break;
case -ERESTARTSYS:
if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
regs->r00 = -EINTR;
break;
}
/* Fall through */
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
pt_set_elr(regs, pt_elr(regs) - 4);
regs->r00 = regs->restart_r0;
break;
default:
break;
}
}
/*
* Set up the stack frame; not doing the SA_SIGINFO thing. We
* only set up the rt_frame flavor.
*/
/* If there was an error on setup, no signal was delivered. */
ret = setup_rt_frame(ksig, sigmask_to_save(), regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
* Called from return-from-event code.
*/
void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
if (!user_mode(regs))
return;
if (get_signal(&ksig)) {
handle_signal(&ksig, regs);
return;
}
/*
* No (more) signals; if we came from a system call, handle the restart.
*/
if (regs->syscall_nr >= 0) {
switch (regs->r00) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->r06 = regs->syscall_nr;
break;
case -ERESTART_RESTARTBLOCK:
regs->r06 = __NR_restart_syscall;
break;
default:
goto no_restart;
}
pt_set_elr(regs, pt_elr(regs) - 4);
regs->r00 = regs->restart_r0;
}
no_restart:
/* If there's no signal to deliver, put the saved sigmask back */
restore_saved_sigmask();
}
/*
* Architecture-specific wrappers for signal-related system calls
*/
asmlinkage int sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
sigset_t blocked;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
frame = (struct rt_sigframe __user *)pt_psp(regs);
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
goto badframe;
set_current_blocked(&blocked);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
/* Restore the user's stack as well */
pt_psp(regs) = regs->r29;
regs->syscall_nr = -1;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return regs->r00;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
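
To unpack the register-pair store in setup_rt_frame() above: r0100 is the r01:r00 pair kept as a single 64-bit slot, so the assignment lands the handler's first two arguments as follows (the frame address is illustrative):

/*
 * Illustrative only: with ksig->sig == 11 (SIGSEGV) and
 * &frame->info == 0xbefff0a0, the assignment yields
 *   regs->r0100 = 0xbefff0a00000000b
 * so r01 (high word) = 0xbefff0a0 -> siginfo_t * (second handler argument)
 *    r00 (low word)  = 0x0000000b -> signal number (first handler argument)
 */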

arch/hexagon/kernel/smp.c (new file)

@ -0,0 +1,266 @@
/*
* SMP support for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <asm/time.h> /* timer_interrupt */
#include <asm/hexagon_vm.h>
#define BASE_IPI_IRQ 26
/*
* cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
* (which is prior to any of our smp_prepare_cpu crap), in order to set
* up the... per_cpu areas.
*/
struct ipi_data {
unsigned long bits;
};
static DEFINE_PER_CPU(struct ipi_data, ipi_data);
static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
int cpu)
{
unsigned long msg = 0;
do {
msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
switch (msg) {
case IPI_TIMER:
ipi_timer();
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
case IPI_CPU_STOP:
/*
* call vmstop()
*/
__vmstop();
break;
case IPI_RESCHEDULE:
scheduler_ipi();
break;
}
} while (msg < BITS_PER_LONG);
}
/* Used for IPI calls from other CPUs to unmask an interrupt */
void smp_vm_unmask_irq(void *info)
{
__vmintop_locen((long) info);
}
/*
* This is based on Alpha's IPI stuff.
* Supposed to take (int, void*) as args now.
* Specifically, first arg is irq, second is the irq_desc.
*/
irqreturn_t handle_ipi(int irq, void *desc)
{
int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
unsigned long ops;
while ((ops = xchg(&ipi->bits, 0)) != 0)
__handle_ipi(&ops, ipi, cpu);
return IRQ_HANDLED;
}
void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
unsigned long flags;
unsigned long cpu;
unsigned long retval;
local_irq_save(flags);
for_each_cpu(cpu, cpumask) {
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
set_bit(msg, &ipi->bits);
/* Possible barrier here */
retval = __vmintop_post(BASE_IPI_IRQ+cpu);
if (retval != 0) {
printk(KERN_ERR "interrupt %ld not configured?\n",
BASE_IPI_IRQ+cpu);
}
}
local_irq_restore(flags);
}
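/*
 * Taken together, send_ipi() and handle_ipi() form a simple producer/
 * consumer handshake: the sender sets the message bit in the target CPU's
 * ipi_data and posts BASE_IPI_IRQ+cpu through the VM; the target xchg()s
 * the whole bitmask back to zero and dispatches every set bit, so several
 * messages raised before the interrupt lands are coalesced into a single
 * pass of __handle_ipi().
 */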
static struct irqaction ipi_intdesc = {
.handler = handle_ipi,
.flags = IRQF_TRIGGER_RISING,
.name = "ipi_handler"
};
void __init smp_prepare_boot_cpu(void)
{
}
/*
* interrupts should already be disabled from the VM
* SP should already be correct; need to set THREADINFO_REG
* to point to current thread info
*/
void start_secondary(void)
{
unsigned int cpu;
unsigned long thread_ptr;
/* Calculate thread_info pointer from stack pointer */
__asm__ __volatile__(
"%0 = SP;\n"
: "=r" (thread_ptr)
);
thread_ptr = thread_ptr & ~(THREAD_SIZE-1);
__asm__ __volatile__(
QUOTED_THREADINFO_REG " = %0;\n"
:
: "r" (thread_ptr)
);
/* Set the memory struct */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
cpu = smp_processor_id();
setup_irq(BASE_IPI_IRQ + cpu, &ipi_intdesc);
/* Register the clock_event dummy */
setup_percpu_clockdev();
printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);
}
/*
* called once for each present cpu
* apparently starts up the CPU and then
* maintains control until "cpu_online(cpu)" is set.
*/
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
struct thread_info *thread = (struct thread_info *)idle->stack;
void *stack_start;
thread->cpu = cpu;
/* Boot to the head. */
stack_start = ((void *) thread) + THREAD_SIZE;
__vmstart(start_secondary, stack_start);
while (!cpu_online(cpu))
barrier();
return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int i;
/*
* should eventually have some sort of machine
* descriptor that has this stuff
*/
/* Right now, let's just fake it. */
for (i = 0; i < max_cpus; i++)
set_cpu_present(i, true);
/* Also need to register the interrupts for IPI */
if (max_cpus > 1)
setup_irq(BASE_IPI_IRQ, &ipi_intdesc);
}
void smp_send_reschedule(int cpu)
{
send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_stop(void)
{
struct cpumask targets;
cpumask_copy(&targets, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &targets);
send_ipi(&targets, IPI_CPU_STOP);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi(mask, IPI_CALL_FUNC);
}
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
void smp_start_cpus(void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
set_cpu_possible(i, true);
}

View file

@ -0,0 +1,66 @@
/*
* Stacktrace support for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
register unsigned long current_frame_pointer asm("r30");
struct stackframe {
unsigned long fp;
unsigned long rets;
};
/*
* Save stack-backtrace addresses into a stack_trace buffer.
*/
void save_stack_trace(struct stack_trace *trace)
{
unsigned long low, high;
unsigned long fp;
struct stackframe *frame;
int skip = trace->skip;
low = (unsigned long)task_stack_page(current);
high = low + THREAD_SIZE;
fp = current_frame_pointer;
while (fp >= low && fp <= (high - sizeof(*frame))) {
frame = (struct stackframe *)fp;
if (skip) {
skip--;
} else {
trace->entries[trace->nr_entries++] = frame->rets;
if (trace->nr_entries >= trace->max_entries)
break;
}
/*
* The next frame must be at a higher address than the
* current frame.
*/
low = fp + sizeof(*frame);
fp = frame->fp;
}
}
EXPORT_SYMBOL_GPL(save_stack_trace);
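/*
 * Illustrative usage sketch (compiled out, exposition only).  It assumes
 * the classic stack_trace API from <linux/stacktrace.h>: the caller owns
 * the entries[] buffer, and print_stack_trace() dumps whatever was
 * captured.  The function name is made up for the example.
 */
#if 0
static void dump_current_backtrace(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* skip this function's own frame */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}
#endif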

View file

@ -0,0 +1,32 @@
/*
* System call table for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/unistd.h>
#include <asm/syscall.h>
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
void *sys_call_table[__NR_syscalls] = {
#include <asm/unistd.h>
};
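/*
 * Illustrative expansion: with the generic numbering pulled in from
 * <asm/unistd.h>, each __SYSCALL(nr, call) line in that header becomes a
 * designated initializer in the table above, e.g.
 *
 *	[__NR_read]  = (sys_read),
 *	[__NR_write] = (sys_write),
 *
 * and C's designated-initializer rules leave any slot the header does not
 * name zero-filled.
 */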

255
arch/hexagon/kernel/time.c Normal file
View file

@ -0,0 +1,255 @@
/*
* Time related functions for Hexagon architecture
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/module.h>
#include <asm/timer-regs.h>
#include <asm/hexagon_vm.h>
/*
* For the clocksource we need:
* pcycle frequency (600MHz)
* For the loops_per_jiffy we need:
* thread/cpu frequency (100MHz)
* And for the timer, we need:
* sleep clock rate
*/
cycles_t pcycle_freq_mhz;
cycles_t thread_freq_mhz;
cycles_t sleep_clk_freq;
static struct resource rtos_timer_resources[] = {
{
.start = RTOS_TIMER_REGS_ADDR,
.end = RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device rtos_timer_device = {
.name = "rtos_timer",
.id = -1,
.num_resources = ARRAY_SIZE(rtos_timer_resources),
.resource = rtos_timer_resources,
};
/* A lot of this stuff should move into a platform specific section. */
struct adsp_hw_timer_struct {
u32 match; /* Match value */
u32 count;
u32 enable; /* [1] - CLR_ON_MATCH_EN, [0] - EN */
u32 clear; /* one-shot register that clears the count */
};
/* Look for "TCX0" for related constants. */
static __iomem struct adsp_hw_timer_struct *rtos_timer;
static cycle_t timer_get_cycles(struct clocksource *cs)
{
return (cycle_t) __vmgettime();
}
static struct clocksource hexagon_clocksource = {
.name = "pcycles",
.rating = 250,
.read = timer_get_cycles,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static int set_next_event(unsigned long delta, struct clock_event_device *evt)
{
/* Assuming the timer will be disabled when we enter here. */
iowrite32(1, &rtos_timer->clear);
iowrite32(0, &rtos_timer->clear);
iowrite32(delta, &rtos_timer->match);
iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
return 0;
}
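/*
 * Worked example (illustrative): delta is expressed in sleep-clock ticks,
 * since the clockevent's mult/shift are derived from sleep_clk_freq in
 * time_init_deferred().  With the ~18750 Hz simulator RTOS clock noted
 * there, a 10 ms one-shot would be programmed as roughly 188 ticks in the
 * match register.
 */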
/*
* Sets the mode (periodic, shutdown, oneshot, etc) of a timer.
*/
static void set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
switch (mode) {
case CLOCK_EVT_MODE_SHUTDOWN:
/* XXX implement me */
default:
break;
}
}
#ifdef CONFIG_SMP
/* Broadcast mechanism */
static void broadcast(const struct cpumask *mask)
{
send_ipi(mask, IPI_TIMER);
}
#endif
static struct clock_event_device hexagon_clockevent_dev = {
.name = "clockevent",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 400,
.irq = RTOS_TIMER_INT,
.set_next_event = set_next_event,
.set_mode = set_mode,
#ifdef CONFIG_SMP
.broadcast = broadcast,
#endif
};
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct clock_event_device, clock_events);
void setup_percpu_clockdev(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
struct clock_event_device *dummy_clock_dev =
&per_cpu(clock_events, cpu);
memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
INIT_LIST_HEAD(&dummy_clock_dev->list);
dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
dummy_clock_dev->cpumask = cpumask_of(cpu);
dummy_clock_dev->mode = CLOCK_EVT_MODE_UNUSED;
clockevents_register_device(dummy_clock_dev);
}
/* Called from smp.c for each CPU's timer ipi call */
void ipi_timer(void)
{
int cpu = smp_processor_id();
struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);
ce_dev->event_handler(ce_dev);
}
#endif /* CONFIG_SMP */
static irqreturn_t timer_interrupt(int irq, void *devid)
{
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
iowrite32(0, &rtos_timer->enable);
ce_dev->event_handler(ce_dev);
return IRQ_HANDLED;
}
/* This should also be pulled from devtree */
static struct irqaction rtos_timer_intdesc = {
.handler = timer_interrupt,
.flags = IRQF_TIMER | IRQF_TRIGGER_RISING,
.name = "rtos_timer"
};
/*
* time_init_deferred - called by start_kernel to set up timer/clock source
*
* Install the IRQ handler for the clock, setup timers.
* This is done late, as that way, we can use ioremap().
*
* This runs just before the delay loop is calibrated, and
* is used for delay calibration.
*/
void __init time_init_deferred(void)
{
struct resource *resource = NULL;
struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
ce_dev->cpumask = cpu_all_mask;
if (!resource)
resource = rtos_timer_device.resource;
/* ioremap here means this has to run later, after paging init */
rtos_timer = ioremap(resource->start, resource_size(resource));
if (!rtos_timer) {
release_mem_region(resource->start, resource_size(resource));
}
clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);
/* Note: the sim generic RTOS clock is apparently really 18750Hz */
/*
* Last arg is some guaranteed seconds for which the conversion will
* work without overflow.
*/
clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);
ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
#ifdef CONFIG_SMP
setup_percpu_clockdev();
#endif
clockevents_register_device(ce_dev);
setup_irq(ce_dev->irq, &rtos_timer_intdesc);
}
void __init time_init(void)
{
late_time_init = time_init_deferred;
}
void __delay(unsigned long cycles)
{
unsigned long long start = __vmgettime();
while ((__vmgettime() - start) < cycles)
cpu_relax();
}
EXPORT_SYMBOL(__delay);
/*
* This could become parametric or perhaps even computed at run-time,
* but for now we take the observed simulator jitter.
*/
static long long fudgefactor = 350; /* Maybe lower if kernel optimized. */
void __udelay(unsigned long usecs)
{
unsigned long long start = __vmgettime();
unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;
while ((__vmgettime() - start) < finish)
cpu_relax(); /* not sure how this improves readability */
}
EXPORT_SYMBOL(__udelay);
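/*
 * Worked example (illustrative): assuming the 600 MHz pcycle figure from
 * the comment at the top of this file, __udelay(10) computes
 * finish = 600 * 10 - 350 = 5650 pcycles and spins until that many have
 * elapsed, i.e. just under 10 us once the fudge factor is taken out.
 */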

View file

@ -0,0 +1,35 @@
/*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Trampoline sequences to be copied onto user stack.
* This consumes a little more space than hand-assembling
* immediate constants for use in C, but is more portable
* to future tweaks to the Hexagon instruction set.
*/
#include <asm/unistd.h>
/* Sig trampolines - call sys_sigreturn or sys_rt_sigreturn as appropriate */
/* plain sigreturn is gone. */
.globl __rt_sigtramp_template
__rt_sigtramp_template:
r6 = #__NR_rt_sigreturn;
trap0(#1);
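/*
 * This sequence is not executed in place: vdso_init() copies the two words
 * at __rt_sigtramp_template into the vDSO page, and setup_rt_frame() points
 * the handler's return address (R31) at that copy.  A returning signal
 * handler therefore lands on this pair of instructions, which load
 * __NR_rt_sigreturn into R6 and issue trap0 #1, entering sys_rt_sigreturn().
 */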

456
arch/hexagon/kernel/traps.c Normal file
View file

@ -0,0 +1,456 @@
/*
* Kernel traps/events for Hexagon processor
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/tracehook.h>
#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <asm/syscall.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/sections.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
#endif
#define TRAP_SYSCALL 1
#define TRAP_DEBUG 0xdb
void __init trap_init(void)
{
}
#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ?? */
int is_valid_bugaddr(unsigned long addr)
{
return 1;
}
#endif /* CONFIG_GENERIC_BUG */
static const char *ex_name(int ex)
{
switch (ex) {
case HVM_GE_C_XPROT:
case HVM_GE_C_XUSER:
return "Execute protection fault";
case HVM_GE_C_RPROT:
case HVM_GE_C_RUSER:
return "Read protection fault";
case HVM_GE_C_WPROT:
case HVM_GE_C_WUSER:
return "Write protection fault";
case HVM_GE_C_XMAL:
return "Misaligned instruction";
case HVM_GE_C_WREG:
return "Multiple writes to same register in packet";
case HVM_GE_C_PCAL:
return "Program counter values that are not properly aligned";
case HVM_GE_C_RMAL:
return "Misaligned data load";
case HVM_GE_C_WMAL:
return "Misaligned data store";
case HVM_GE_C_INVI:
case HVM_GE_C_PRIVI:
return "Illegal instruction";
case HVM_GE_C_BUS:
return "Precise bus error";
case HVM_GE_C_CACHE:
return "Cache error";
case 0xdb:
return "Debugger trap";
default:
return "Unrecognized exception";
}
}
static void do_show_stack(struct task_struct *task, unsigned long *fp,
unsigned long ip)
{
int kstack_depth_to_print = 24;
unsigned long offset, size;
const char *name = NULL;
unsigned long *newfp;
unsigned long low, high;
char tmpstr[128];
char *modname;
int i;
if (task == NULL)
task = current;
printk(KERN_INFO "CPU#%d, %s/%d, Call Trace:\n",
raw_smp_processor_id(), task->comm,
task_pid_nr(task));
if (fp == NULL) {
if (task == current) {
asm("%0 = r30" : "=r" (fp));
} else {
fp = (unsigned long *)
((struct hexagon_switch_stack *)
task->thread.switch_sp)->fp;
}
}
if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
printk(KERN_INFO "-- Corrupt frame pointer %p\n", fp);
return;
}
/* Saved link reg is one word above FP */
if (!ip)
ip = *(fp+1);
/* Expect kernel stack to be in-bounds */
low = (unsigned long)task_stack_page(task);
high = low + THREAD_SIZE - 8;
low += sizeof(struct thread_info);
for (i = 0; i < kstack_depth_to_print; i++) {
name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);
printk(KERN_INFO "[%p] 0x%lx: %s + 0x%lx", fp, ip, name,
offset);
if (((unsigned long) fp < low) || (high < (unsigned long) fp))
printk(KERN_CONT " (FP out of bounds!)");
if (modname)
printk(KERN_CONT " [%s] ", modname);
printk(KERN_CONT "\n");
newfp = (unsigned long *) *fp;
if (((unsigned long) newfp) & 0x3) {
printk(KERN_INFO "-- Corrupt frame pointer %p\n",
newfp);
break;
}
/* Attempt to continue past exception. */
if (0 == newfp) {
struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
+ 8);
if (regs->syscall_nr != -1) {
printk(KERN_INFO "-- trap0 -- syscall_nr: %ld",
regs->syscall_nr);
printk(KERN_CONT " psp: %lx elr: %lx\n",
pt_psp(regs), pt_elr(regs));
break;
} else {
/* really want to see more ... */
kstack_depth_to_print += 6;
printk(KERN_INFO "-- %s (0x%lx) badva: %lx\n",
ex_name(pt_cause(regs)), pt_cause(regs),
pt_badva(regs));
}
newfp = (unsigned long *) regs->r30;
ip = pt_elr(regs);
} else {
ip = *(newfp + 1);
}
/* If link reg is null, we are done. */
if (ip == 0x0)
break;
/* If newfp isn't larger, we're tracing garbage. */
if (newfp > fp)
fp = newfp;
else
break;
}
}
void show_stack(struct task_struct *task, unsigned long *fp)
{
/* Saved link reg is one word above FP */
do_show_stack(task, fp, 0);
}
int die(const char *str, struct pt_regs *regs, long err)
{
static struct {
spinlock_t lock;
int counter;
} die = {
.lock = __SPIN_LOCK_UNLOCKED(die.lock),
.counter = 0
};
console_verbose();
oops_enter();
spin_lock_irq(&die.lock);
bust_spinlocks(1);
printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
NOTIFY_STOP)
return 1;
print_modules();
show_regs(regs);
do_show_stack(current, &regs->r30, pt_elr(regs));
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die.lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
do_exit(err);
return 0;
}
int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
return die(str, regs, err);
else
return 0;
}
/*
* It's not clear that misaligned fetches are ever recoverable.
*/
static void misaligned_instruction(struct pt_regs *regs)
{
die_if_kernel("Misaligned Instruction", regs, 0);
force_sig(SIGBUS, current);
}
/*
* Misaligned loads and stores, on the other hand, can be
* emulated, and probably should be, some day. But for now
* they will be considered fatal.
*/
static void misaligned_data_load(struct pt_regs *regs)
{
die_if_kernel("Misaligned Data Load", regs, 0);
force_sig(SIGBUS, current);
}
static void misaligned_data_store(struct pt_regs *regs)
{
die_if_kernel("Misaligned Data Store", regs, 0);
force_sig(SIGBUS, current);
}
static void illegal_instruction(struct pt_regs *regs)
{
die_if_kernel("Illegal Instruction", regs, 0);
force_sig(SIGILL, current);
}
/*
* Precise bus errors may be recoverable with a retry,
* but for now, treat them as irrecoverable.
*/
static void precise_bus_error(struct pt_regs *regs)
{
die_if_kernel("Precise Bus Error", regs, 0);
force_sig(SIGBUS, current);
}
/*
* If anything is to be done here other than panic,
* it will probably be complex and migrate to another
* source module. For now, just die.
*/
static void cache_error(struct pt_regs *regs)
{
die("Cache Error", regs, 0);
}
/*
* General exception handler
*/
void do_genex(struct pt_regs *regs)
{
/*
* Decode Cause and Dispatch
*/
switch (pt_cause(regs)) {
case HVM_GE_C_XPROT:
case HVM_GE_C_XUSER:
execute_protection_fault(regs);
break;
case HVM_GE_C_RPROT:
case HVM_GE_C_RUSER:
read_protection_fault(regs);
break;
case HVM_GE_C_WPROT:
case HVM_GE_C_WUSER:
write_protection_fault(regs);
break;
case HVM_GE_C_XMAL:
misaligned_instruction(regs);
break;
case HVM_GE_C_WREG:
illegal_instruction(regs);
break;
case HVM_GE_C_PCAL:
misaligned_instruction(regs);
break;
case HVM_GE_C_RMAL:
misaligned_data_load(regs);
break;
case HVM_GE_C_WMAL:
misaligned_data_store(regs);
break;
case HVM_GE_C_INVI:
case HVM_GE_C_PRIVI:
illegal_instruction(regs);
break;
case HVM_GE_C_BUS:
precise_bus_error(regs);
break;
case HVM_GE_C_CACHE:
cache_error(regs);
break;
default:
/* Halt and catch fire */
panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
break;
}
}
/* Indirect system call dispatch */
long sys_syscall(void)
{
printk(KERN_ERR "sys_syscall invoked!\n");
return -ENOSYS;
}
void do_trap0(struct pt_regs *regs)
{
syscall_fn syscall;
switch (pt_cause(regs)) {
case TRAP_SYSCALL:
/* System call is trap0 #1 */
/* allow strace to catch syscall args */
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)))
return; /* return -ENOSYS somewhere? */
/* Interrupts should be re-enabled for syscall processing */
__vmsetie(VM_INT_ENABLE);
/*
* System call number is in r6, arguments in r0..r5.
* Fortunately, no Linux syscall has more than 6 arguments,
* and Hexagon ABI passes first 6 arguments in registers.
* 64-bit arguments are passed in odd/even register pairs.
* Fortunately, we have no system calls that take more
* than three arguments with more than one 64-bit value.
* Should that change, we'd need to redesign to copy
* between user and kernel stacks.
*/
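/*
 * Illustrative note (an ABI detail assumed for the example, not stated
 * here): a hypothetical sys_foo(int fd, loff_t len) would present fd in
 * R0 and the 64-bit len in an adjacent odd/even pair such as R3:2.
 */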
regs->syscall_nr = regs->r06;
/*
* GPR R0 carries the first parameter, and is also used
* to report the return value. We need a backup of
* the user's value in case we need to do a late restart
* of the system call.
*/
regs->restart_r0 = regs->r00;
if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
regs->r00 = -1;
} else {
syscall = (syscall_fn)
(sys_call_table[regs->syscall_nr]);
regs->r00 = syscall(regs->r00, regs->r01,
regs->r02, regs->r03,
regs->r04, regs->r05);
}
/* allow strace to get the syscall return state */
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
tracehook_report_syscall_exit(regs, 0);
break;
case TRAP_DEBUG:
/* Trap0 0xdb is debug breakpoint */
if (user_mode(regs)) {
struct siginfo info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
/*
* Some architectures add some per-thread state
* to distinguish between breakpoint traps and
* trace traps. We may want to do that, and
* set the si_code value appropriately, or we
* may want to use a different trap0 flavor.
*/
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *) pt_elr(regs);
send_sig_info(SIGTRAP, &info, current);
} else {
#ifdef CONFIG_KGDB
kgdb_handle_exception(pt_cause(regs), SIGTRAP,
TRAP_BRKPT, regs);
#endif
}
break;
}
/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}
/*
* Machine check exception handler
*/
void do_machcheck(struct pt_regs *regs)
{
/* Halt and catch fire */
__vmstop();
}
/*
* Treat this like the old 0xdb trap.
*/
void do_debug_exception(struct pt_regs *regs)
{
regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
do_trap0(regs);
}

100
arch/hexagon/kernel/vdso.c Normal file
View file

@ -0,0 +1,100 @@
/*
* vDSO implementation for Hexagon
*
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/binfmts.h>
#include <asm/vdso.h>
static struct page *vdso_page;
/* Create a vDSO page holding the signal trampoline.
* We want this for a non-executable stack.
*/
static int __init vdso_init(void)
{
struct hexagon_vdso *vdso;
vdso_page = alloc_page(GFP_KERNEL);
if (!vdso_page)
panic("Cannot allocate vdso");
vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
if (!vdso)
panic("Cannot map vdso");
clear_page(vdso);
/* Install the signal trampoline; currently looks like this:
* r6 = #__NR_rt_sigreturn;
* trap0(#1);
*/
vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];
vunmap(vdso);
return 0;
}
arch_initcall(vdso_init);
/*
* Called from binfmt_elf. Create a VMA for the vDSO page.
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
int ret;
unsigned long vdso_base;
struct mm_struct *mm = current->mm;
down_write(&mm->mmap_sem);
/* Try to get it loaded right near ld.so/glibc. */
vdso_base = STACK_TOP;
vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
ret = vdso_base;
goto up_fail;
}
/* MAYWRITE to allow gdb to COW and set breakpoints. */
ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
&vdso_page);
if (ret)
goto up_fail;
mm->context.vdso = (void *)vdso_base;
up_fail:
up_write(&mm->mmap_sem);
return ret;
}
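/*
 * The mapping installed above is reported as "[vdso]" in /proc/<pid>/maps
 * by way of arch_vma_name() below.
 */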
const char *arch_vma_name(struct vm_area_struct *vma)
{
if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
return "[vdso]";
return NULL;
}

View file

@ -0,0 +1,393 @@
/*
* Event entry/exit for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/asm-offsets.h> /* assembly-safer versions of C defines */
#include <asm/mem-layout.h> /* sigh, except for page_offset */
#include <asm/hexagon_vm.h>
#include <asm/thread_info.h>
/*
* Entry into guest-mode Linux under Hexagon Virtual Machine.
* Stack pointer points to event record - build pt_regs on top of it,
* set up a plausible C stack frame, and dispatch to the C handler.
* On return, do vmrte virtual instruction with SP where we started.
*
* VM Spec 0.5 uses a trap to fetch HVM record now.
*/
/*
* Save full register state, while setting up thread_info struct
* pointer derived from kernel stack pointer in THREADINFO_REG
* register, putting prior thread_info.regs pointer in a callee-save
* register (R24, which had better not ever be assigned to THREADINFO_REG),
* and updating thread_info.regs to point to current stack frame,
* so as to support nested events in kernel mode.
*
* As this is common code, we set the pt_regs system call number
* to -1 for all events. It will be replaced with the system call
* number in the case where we decode a system call (trap0(#1)).
*/
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define save_pt_regs()\
memd(R0 + #_PT_R3130) = R31:30; \
{ memw(R0 + #_PT_R2928) = R28; \
R31 = memw(R0 + #_PT_ER_VMPSP); }\
{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
R31 = ugp; } \
{ memd(R0 + #_PT_R2726) = R27:26; \
R30 = gp ; } \
memd(R0 + #_PT_R2524) = R25:24; \
memd(R0 + #_PT_R2322) = R23:22; \
memd(R0 + #_PT_R2120) = R21:20; \
memd(R0 + #_PT_R1918) = R19:18; \
memd(R0 + #_PT_R1716) = R17:16; \
memd(R0 + #_PT_R1514) = R15:14; \
memd(R0 + #_PT_R1312) = R13:12; \
{ memd(R0 + #_PT_R1110) = R11:10; \
R15 = lc0; } \
{ memd(R0 + #_PT_R0908) = R9:8; \
R14 = sa0; } \
{ memd(R0 + #_PT_R0706) = R7:6; \
R13 = lc1; } \
{ memd(R0 + #_PT_R0504) = R5:4; \
R12 = sa1; } \
{ memd(R0 + #_PT_GPUGP) = R31:30; \
R11 = m1; \
R2.H = #HI(_THREAD_SIZE); } \
{ memd(R0 + #_PT_LC0SA0) = R15:14; \
R10 = m0; \
R2.L = #LO(_THREAD_SIZE); } \
{ memd(R0 + #_PT_LC1SA1) = R13:12; \
R15 = p3:0; \
R2 = neg(R2); } \
{ memd(R0 + #_PT_M1M0) = R11:10; \
R14 = usr; \
R2 = and(R0,R2); } \
{ memd(R0 + #_PT_PREDSUSR) = R15:14; \
THREADINFO_REG = R2; } \
{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
R2 = #-1; } \
{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
R30 = #0; }
#else
/* V4+ */
/* the # ## # syntax inserts a literal ## */
#define save_pt_regs()\
{ memd(R0 + #_PT_R3130) = R31:30; \
R30 = memw(R0 + #_PT_ER_VMPSP); }\
{ memw(R0 + #_PT_R2928) = R28; \
memw(R0 + #(_PT_R2928 + 4)) = R30; }\
{ R31:30 = C11:10; \
memd(R0 + #_PT_R2726) = R27:26; \
memd(R0 + #_PT_R2524) = R25:24; }\
{ memd(R0 + #_PT_R2322) = R23:22; \
memd(R0 + #_PT_R2120) = R21:20; }\
{ memd(R0 + #_PT_R1918) = R19:18; \
memd(R0 + #_PT_R1716) = R17:16; }\
{ memd(R0 + #_PT_R1514) = R15:14; \
memd(R0 + #_PT_R1312) = R13:12; \
R17:16 = C13:12; }\
{ memd(R0 + #_PT_R1110) = R11:10; \
memd(R0 + #_PT_R0908) = R9:8; \
R15:14 = C1:0; } \
{ memd(R0 + #_PT_R0706) = R7:6; \
memd(R0 + #_PT_R0504) = R5:4; \
R13:12 = C3:2; } \
{ memd(R0 + #_PT_GPUGP) = R31:30; \
memd(R0 + #_PT_LC0SA0) = R15:14; \
R11:10 = C7:6; }\
{ THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \
memd(R0 + #_PT_LC1SA1) = R13:12; \
R15 = p3:0; }\
{ memd(R0 + #_PT_M1M0) = R11:10; \
memw(R0 + #_PT_PREDSUSR + 4) = R15; }\
{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
R2 = #-1; } \
{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
memd(R0 + #_PT_CS1CS0) = R17:16; \
R30 = #0; }
#endif
/*
* Restore registers and thread_info.regs state. THREADINFO_REG
* is assumed to still be sane, and R24 to have been correctly
* preserved. Don't restore R29 (SP) until later.
*/
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define restore_pt_regs() \
{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
R15:14 = memd(R0 + #_PT_PREDSUSR); } \
{ R11:10 = memd(R0 + #_PT_M1M0); \
p3:0 = R15; } \
{ R13:12 = memd(R0 + #_PT_LC1SA1); \
usr = R14; } \
{ R15:14 = memd(R0 + #_PT_LC0SA0); \
m1 = R11; } \
{ R3:2 = memd(R0 + #_PT_R0302); \
m0 = R10; } \
{ R5:4 = memd(R0 + #_PT_R0504); \
lc1 = R13; } \
{ R7:6 = memd(R0 + #_PT_R0706); \
sa1 = R12; } \
{ R9:8 = memd(R0 + #_PT_R0908); \
lc0 = R15; } \
{ R11:10 = memd(R0 + #_PT_R1110); \
sa0 = R14; } \
{ R13:12 = memd(R0 + #_PT_R1312); \
R15:14 = memd(R0 + #_PT_R1514); } \
{ R17:16 = memd(R0 + #_PT_R1716); \
R19:18 = memd(R0 + #_PT_R1918); } \
{ R21:20 = memd(R0 + #_PT_R2120); \
R23:22 = memd(R0 + #_PT_R2322); } \
{ R25:24 = memd(R0 + #_PT_R2524); \
R27:26 = memd(R0 + #_PT_R2726); } \
R31:30 = memd(R0 + #_PT_GPUGP); \
{ R28 = memw(R0 + #_PT_R2928); \
ugp = R31; } \
{ R31:30 = memd(R0 + #_PT_R3130); \
gp = R30; }
#else
/* V4+ */
#define restore_pt_regs() \
{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
R15:14 = memd(R0 + #_PT_PREDSUSR); } \
{ R11:10 = memd(R0 + #_PT_M1M0); \
R13:12 = memd(R0 + #_PT_LC1SA1); \
p3:0 = R15; } \
{ R15:14 = memd(R0 + #_PT_LC0SA0); \
R3:2 = memd(R0 + #_PT_R0302); \
usr = R14; } \
{ R5:4 = memd(R0 + #_PT_R0504); \
R7:6 = memd(R0 + #_PT_R0706); \
C7:6 = R11:10; }\
{ R9:8 = memd(R0 + #_PT_R0908); \
R11:10 = memd(R0 + #_PT_R1110); \
C3:2 = R13:12; }\
{ R13:12 = memd(R0 + #_PT_R1312); \
R15:14 = memd(R0 + #_PT_R1514); \
C1:0 = R15:14; }\
{ R17:16 = memd(R0 + #_PT_R1716); \
R19:18 = memd(R0 + #_PT_R1918); } \
{ R21:20 = memd(R0 + #_PT_R2120); \
R23:22 = memd(R0 + #_PT_R2322); } \
{ R25:24 = memd(R0 + #_PT_R2524); \
R27:26 = memd(R0 + #_PT_R2726); } \
R31:30 = memd(R0 + #_PT_CS1CS0); \
{ C13:12 = R31:30; \
R31:30 = memd(R0 + #_PT_GPUGP) ; \
R28 = memw(R0 + #_PT_R2928); }\
{ C11:10 = R31:30; \
R31:30 = memd(R0 + #_PT_R3130); }
#endif
/*
* Clears off enough space for the rest of pt_regs; evrec is a part
* of pt_regs in HVM mode. Save R0/R1, set handler's address in R1.
* R0 is the address of pt_regs and is the parameter to save_pt_regs.
*/
/*
* Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
* we'll subtract the entire size out and then fill it in ourselves.
* Need to save off R0, R1, R2, R3 immediately.
*/
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define vm_event_entry(CHandler) \
{ \
R29 = add(R29, #-(_PT_REGS_SIZE)); \
memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
} \
{ \
memd(R29 +#_PT_R0302) = R3:2; \
} \
trap1(#HVM_TRAP1_VMGETREGS); \
{ \
memd(R29 + #_PT_ER_VMEL) = R1:0; \
R0 = R29; \
R1.L = #LO(CHandler); \
} \
{ \
memd(R29 + #_PT_ER_VMPSP) = R3:2; \
R1.H = #HI(CHandler); \
jump event_dispatch; \
}
#else
/* V4+ */
/* turn on I$ prefetch early */
/* the # ## # syntax inserts a literal ## */
#define vm_event_entry(CHandler) \
{ \
R29 = add(R29, #-(_PT_REGS_SIZE)); \
memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \
R0 = usr; \
} \
{ \
memw(R29 + #_PT_PREDSUSR) = R0; \
R0 = setbit(R0, #16); \
} \
usr = R0; \
R1:0 = G1:0; \
{ \
memd(R29 + #_PT_ER_VMEL) = R1:0; \
R1 = # ## #(CHandler); \
R3:2 = G3:2; \
} \
{ \
R0 = R29; \
memd(R29 + #_PT_ER_VMPSP) = R3:2; \
jump event_dispatch; \
}
#endif
.text
/*
* Do bulk save/restore in one place.
* Adds a jump to dispatch latency, but
* saves hundreds of bytes.
*/
event_dispatch:
save_pt_regs()
callr r1
/*
* Coming back from the C-world, our thread info pointer
* should be in the designated register (usually R19)
*
* If we were in kernel mode, we don't need to check scheduler
* or signals if CONFIG_PREEMPT is not set. If set, then it has
* to jump to a need_resched kind of block.
* BTW, CONFIG_PREEMPT is not supported yet.
*/
#ifdef CONFIG_PREEMPT
R0 = #VM_INT_DISABLE
trap1(#HVM_TRAP1_VMSETIE)
#endif
/* "Nested control path" -- if the previous mode was kernel */
{
R0 = memw(R29 + #_PT_ER_VMEST);
R26.L = #LO(do_work_pending);
}
{
P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
if (!P0.new) jump:nt restore_all;
R26.H = #HI(do_work_pending);
R0 = #VM_INT_DISABLE;
}
/*
* Check also the return from fork/system call, normally coming back from
* user mode
*
* R26 needs to have do_work_pending, and R0 should have VM_INT_DISABLE
*/
check_work_pending:
/* Disable interrupts while checking TIF */
trap1(#HVM_TRAP1_VMSETIE)
{
R0 = R29; /* regs should still be at top of stack */
R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
callr R26;
}
{
P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending;
R0 = #VM_INT_DISABLE;
}
restore_all:
/*
* Disable interrupts, if they weren't already, before reg restore.
* R0 gets preloaded with #VM_INT_DISABLE before we get here.
*/
trap1(#HVM_TRAP1_VMSETIE)
/* do the setregs here for VM 0.5 */
/* R29 here should already be pointing at pt_regs */
{
R1:0 = memd(R29 + #_PT_ER_VMEL);
R3:2 = memd(R29 + #_PT_ER_VMPSP);
}
#if CONFIG_HEXAGON_ARCH_VERSION < 4
trap1(#HVM_TRAP1_VMSETREGS);
#else
G1:0 = R1:0;
G3:2 = R3:2;
#endif
R0 = R29
restore_pt_regs()
{
R1:0 = memd(R29 + #_PT_R0100);
R29 = add(R29, #_PT_REGS_SIZE);
}
trap1(#HVM_TRAP1_VMRTE)
/* Notreached */
.globl _K_enter_genex
_K_enter_genex:
vm_event_entry(do_genex)
.globl _K_enter_interrupt
_K_enter_interrupt:
vm_event_entry(arch_do_IRQ)
.globl _K_enter_trap0
_K_enter_trap0:
vm_event_entry(do_trap0)
.globl _K_enter_machcheck
_K_enter_machcheck:
vm_event_entry(do_machcheck)
.globl _K_enter_debug
_K_enter_debug:
vm_event_entry(do_debug_exception)
.globl ret_from_fork
ret_from_fork:
{
call schedule_tail
R26.H = #HI(do_work_pending);
}
{
P0 = cmp.eq(R24, #0);
R26.L = #LO(do_work_pending);
R0 = #VM_INT_DISABLE;
}
if P0 jump check_work_pending
{
R0 = R25;
callr R24
}
{
jump check_work_pending
R0 = #VM_INT_DISABLE;
}

View file

@ -0,0 +1,104 @@
/*
* Mostly IRQ support for Hexagon
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <asm/registers.h>
#include <linux/irq.h>
#include <linux/hardirq.h>
/*
* show_regs - print pt_regs structure
* @regs: pointer to pt_regs
*
* To-do: add all the accessor definitions to registers.h
*
* Will make this routine a lot easier to write.
*/
void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_EMERG);
printk(KERN_EMERG "restart_r0: \t0x%08lx syscall_nr: %ld\n",
regs->restart_r0, regs->syscall_nr);
printk(KERN_EMERG "preds: \t\t0x%08lx\n", regs->preds);
printk(KERN_EMERG "lc0: \t0x%08lx sa0: 0x%08lx m0: 0x%08lx\n",
regs->lc0, regs->sa0, regs->m0);
printk(KERN_EMERG "lc1: \t0x%08lx sa1: 0x%08lx m1: 0x%08lx\n",
regs->lc1, regs->sa1, regs->m1);
printk(KERN_EMERG "gp: \t0x%08lx ugp: 0x%08lx usr: 0x%08lx\n",
regs->gp, regs->ugp, regs->usr);
printk(KERN_EMERG "cs0: \t0x%08lx cs1: 0x%08lx\n",
regs->cs0, regs->cs1);
printk(KERN_EMERG "r0: \t0x%08lx %08lx %08lx %08lx\n", regs->r00,
regs->r01,
regs->r02,
regs->r03);
printk(KERN_EMERG "r4: \t0x%08lx %08lx %08lx %08lx\n", regs->r04,
regs->r05,
regs->r06,
regs->r07);
printk(KERN_EMERG "r8: \t0x%08lx %08lx %08lx %08lx\n", regs->r08,
regs->r09,
regs->r10,
regs->r11);
printk(KERN_EMERG "r12: \t0x%08lx %08lx %08lx %08lx\n", regs->r12,
regs->r13,
regs->r14,
regs->r15);
printk(KERN_EMERG "r16: \t0x%08lx %08lx %08lx %08lx\n", regs->r16,
regs->r17,
regs->r18,
regs->r19);
printk(KERN_EMERG "r20: \t0x%08lx %08lx %08lx %08lx\n", regs->r20,
regs->r21,
regs->r22,
regs->r23);
printk(KERN_EMERG "r24: \t0x%08lx %08lx %08lx %08lx\n", regs->r24,
regs->r25,
regs->r26,
regs->r27);
printk(KERN_EMERG "r28: \t0x%08lx %08lx %08lx %08lx\n", regs->r28,
regs->r29,
regs->r30,
regs->r31);
printk(KERN_EMERG "elr: \t0x%08lx cause: 0x%08lx user_mode: %d\n",
pt_elr(regs), pt_cause(regs), user_mode(regs));
printk(KERN_EMERG "psp: \t0x%08lx badva: 0x%08lx int_enabled: %d\n",
pt_psp(regs), pt_badva(regs), ints_enabled(regs));
}
void dummy_handler(struct pt_regs *regs)
{
unsigned int elr = pt_elr(regs);
printk(KERN_ERR "Unimplemented handler; ELR=0x%08x\n", elr);
}
void arch_do_IRQ(struct pt_regs *regs)
{
int irq = pt_cause(regs);
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
generic_handle_irq(irq);
irq_exit();
set_irq_regs(old_regs);
}
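/*
 * Control reaches here from the _K_enter_interrupt entry point built by
 * vm_event_entry(arch_do_IRQ) in vm_entry.S, which the VM event vector in
 * vm_vectors.S jumps to; the IRQ number is simply the event's cause field
 * as read back by pt_cause().
 */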

View file

@ -0,0 +1,442 @@
/*
* Initial page table for Linux kernel under Hexagon VM,
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* These tables are pre-computed and linked into kernel.
*/
#include <asm/vm_mmu.h>
/* #include <asm/iomap.h> */
/*
* Start with mapping PA=0 to both VA=0x0 and VA=0xc0000000 as 16MB large pages.
* No user mode access, RWX, write-back cache. The entry needs
* to be replicated for all 4 virtual segments mapping to the page.
*/
/* "Big Kernel Page" */
#define BKP(pa) (((pa) & __HVM_PTE_PGMASK_4MB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_16MB)
/* No cache version */
#define BKPG_IO(pa) (((pa) & __HVM_PTE_PGMASK_16MB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HVM_PDE_S_16MB | __HEXAGON_C_DEV << 6 )
#define FOURK_IO(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_DEV << 6 )
#define L2_PTR(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
| __HVM_PDE_S_4KB )
#define X __HVM_PDE_S_INVALID
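/*
 * Illustrative reading of the macros above: BKP(0x01000000) produces one
 * segment-table word mapping its slot as a 16MB kernel superpage backed by
 * PA 0x01000000, readable/writable/executable with L2 write-back caching;
 * BKPG_IO() has the same shape but device (uncached) attributes, and X
 * marks an invalid entry.
 */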
.p2align 12
.globl swapper_pg_dir
.globl _K_init_segtable
swapper_pg_dir:
/* VA 0x00000000 */
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/* VA 0x40000000 */
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/* VA 0x80000000 */
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/*0xa8*/.word X,X,X,X
#ifdef CONFIG_COMET_EARLY_UART_DEBUG
UART_PTE_ENTRY:
/*0xa9*/.word BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000)
#else
/*0xa9*/.word X,X,X,X
#endif
/*0xaa*/.word X,X,X,X
/*0xab*/.word X,X,X,X
/*0xac*/.word X,X,X,X
/*0xad*/.word X,X,X,X
/*0xae*/.word X,X,X,X
/*0xaf*/.word X,X,X,X
/*0xb0*/.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
_K_init_segtable:
/* VA 0xC0000000 */
.word BKP(0x00000000), BKP(0x00400000), BKP(0x00800000), BKP(0x00c00000)
.word BKP(0x01000000), BKP(0x01400000), BKP(0x01800000), BKP(0x01c00000)
.word BKP(0x02000000), BKP(0x02400000), BKP(0x02800000), BKP(0x02c00000)
.word BKP(0x03000000), BKP(0x03400000), BKP(0x03800000), BKP(0x03c00000)
.word BKP(0x04000000), BKP(0x04400000), BKP(0x04800000), BKP(0x04c00000)
.word BKP(0x05000000), BKP(0x05400000), BKP(0x05800000), BKP(0x05c00000)
.word BKP(0x06000000), BKP(0x06400000), BKP(0x06800000), BKP(0x06c00000)
.word BKP(0x07000000), BKP(0x07400000), BKP(0x07800000), BKP(0x07c00000)
.word BKP(0x08000000), BKP(0x08400000), BKP(0x08800000), BKP(0x08c00000)
.word BKP(0x09000000), BKP(0x09400000), BKP(0x09800000), BKP(0x09c00000)
.word BKP(0x0a000000), BKP(0x0a400000), BKP(0x0a800000), BKP(0x0ac00000)
.word BKP(0x0b000000), BKP(0x0b400000), BKP(0x0b800000), BKP(0x0bc00000)
.word BKP(0x0c000000), BKP(0x0c400000), BKP(0x0c800000), BKP(0x0cc00000)
.word BKP(0x0d000000), BKP(0x0d400000), BKP(0x0d800000), BKP(0x0dc00000)
.word BKP(0x0e000000), BKP(0x0e400000), BKP(0x0e800000), BKP(0x0ec00000)
.word BKP(0x0f000000), BKP(0x0f400000), BKP(0x0f800000), BKP(0x0fc00000)
.word BKP(0x10000000), BKP(0x10400000), BKP(0x10800000), BKP(0x10c00000)
.word BKP(0x11000000), BKP(0x11400000), BKP(0x11800000), BKP(0x11c00000)
.word BKP(0x12000000), BKP(0x12400000), BKP(0x12800000), BKP(0x12c00000)
.word BKP(0x13000000), BKP(0x13400000), BKP(0x13800000), BKP(0x13c00000)
.word BKP(0x14000000), BKP(0x14400000), BKP(0x14800000), BKP(0x14c00000)
.word BKP(0x15000000), BKP(0x15400000), BKP(0x15800000), BKP(0x15c00000)
.word BKP(0x16000000), BKP(0x16400000), BKP(0x16800000), BKP(0x16c00000)
.word BKP(0x17000000), BKP(0x17400000), BKP(0x17800000), BKP(0x17c00000)
.word BKP(0x18000000), BKP(0x18400000), BKP(0x18800000), BKP(0x18c00000)
.word BKP(0x19000000), BKP(0x19400000), BKP(0x19800000), BKP(0x19c00000)
.word BKP(0x1a000000), BKP(0x1a400000), BKP(0x1a800000), BKP(0x1ac00000)
.word BKP(0x1b000000), BKP(0x1b400000), BKP(0x1b800000), BKP(0x1bc00000)
.word BKP(0x1c000000), BKP(0x1c400000), BKP(0x1c800000), BKP(0x1cc00000)
.word BKP(0x1d000000), BKP(0x1d400000), BKP(0x1d800000), BKP(0x1dc00000)
.word BKP(0x1e000000), BKP(0x1e400000), BKP(0x1e800000), BKP(0x1ec00000)
.word BKP(0x1f000000), BKP(0x1f400000), BKP(0x1f800000), BKP(0x1fc00000)
.word BKP(0x20000000), BKP(0x20400000), BKP(0x20800000), BKP(0x20c00000)
.word BKP(0x21000000), BKP(0x21400000), BKP(0x21800000), BKP(0x21c00000)
.word BKP(0x22000000), BKP(0x22400000), BKP(0x22800000), BKP(0x22c00000)
.word BKP(0x23000000), BKP(0x23400000), BKP(0x23800000), BKP(0x23c00000)
.word BKP(0x24000000), BKP(0x24400000), BKP(0x24800000), BKP(0x24c00000)
.word BKP(0x25000000), BKP(0x25400000), BKP(0x25800000), BKP(0x25c00000)
.word BKP(0x26000000), BKP(0x26400000), BKP(0x26800000), BKP(0x26c00000)
.word BKP(0x27000000), BKP(0x27400000), BKP(0x27800000), BKP(0x27c00000)
.word BKP(0x28000000), BKP(0x28400000), BKP(0x28800000), BKP(0x28c00000)
.word BKP(0x29000000), BKP(0x29400000), BKP(0x29800000), BKP(0x29c00000)
.word BKP(0x2a000000), BKP(0x2a400000), BKP(0x2a800000), BKP(0x2ac00000)
.word BKP(0x2b000000), BKP(0x2b400000), BKP(0x2b800000), BKP(0x2bc00000)
.word BKP(0x2c000000), BKP(0x2c400000), BKP(0x2c800000), BKP(0x2cc00000)
.word BKP(0x2d000000), BKP(0x2d400000), BKP(0x2d800000), BKP(0x2dc00000)
.word BKP(0x2e000000), BKP(0x2e400000), BKP(0x2e800000), BKP(0x2ec00000)
.word BKP(0x2f000000), BKP(0x2f400000), BKP(0x2f800000), BKP(0x2fc00000)
.word BKP(0x30000000), BKP(0x30400000), BKP(0x30800000), BKP(0x30c00000)
.word BKP(0x31000000), BKP(0x31400000), BKP(0x31800000), BKP(0x31c00000)
.word BKP(0x32000000), BKP(0x32400000), BKP(0x32800000), BKP(0x32c00000)
.word BKP(0x33000000), BKP(0x33400000), BKP(0x33800000), BKP(0x33c00000)
.word BKP(0x34000000), BKP(0x34400000), BKP(0x34800000), BKP(0x34c00000)
.word BKP(0x35000000), BKP(0x35400000), BKP(0x35800000), BKP(0x35c00000)
.word BKP(0x36000000), BKP(0x36400000), BKP(0x36800000), BKP(0x36c00000)
.word BKP(0x37000000), BKP(0x37400000), BKP(0x37800000), BKP(0x37c00000)
.word BKP(0x38000000), BKP(0x38400000), BKP(0x38800000), BKP(0x38c00000)
.word BKP(0x39000000), BKP(0x39400000), BKP(0x39800000), BKP(0x39c00000)
.word BKP(0x3a000000), BKP(0x3a400000), BKP(0x3a800000), BKP(0x3ac00000)
.word BKP(0x3b000000), BKP(0x3b400000), BKP(0x3b800000), BKP(0x3bc00000)
.word BKP(0x3c000000), BKP(0x3c400000), BKP(0x3c800000), BKP(0x3cc00000)
.word BKP(0x3d000000), BKP(0x3d400000), BKP(0x3d800000), BKP(0x3dc00000)
_K_io_map:
.word X,X,X,X /* 0x3e000000 - device IO early remap */
.word X,X,X,X /* 0x3f000000 - hypervisor space*/
#if 0
/*
* This is in here as an example for devices which need to be mapped really
* early.
*/
.p2align 12
.globl _K_io_kmap
.globl _K_init_devicetable
_K_init_devicetable: /* Should be 4MB worth of entries */
.word FOURK_IO(MSM_GPIO1_PHYS),FOURK_IO(MSM_GPIO2_PHYS),FOURK_IO(MSM_SIRC_PHYS),X
.word FOURK_IO(TLMM_GPIO1_PHYS),X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
#endif

View file

@ -0,0 +1,102 @@
/*
* Hexagon VM instruction support
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <asm/hexagon_vm.h>
/*
* C wrappers for virtual machine "instructions". These
* could be, and perhaps some day will be, handled as in-line
* macros, but for tracing/debugging it's handy to have
* a single point of invocation for each of them.
* Conveniently, they take parameters and return values
* consistent with the ABI calling convention.
*/
ENTRY(__vmrte)
trap1(#HVM_TRAP1_VMRTE);
jumpr R31;
ENTRY(__vmsetvec)
trap1(#HVM_TRAP1_VMSETVEC);
jumpr R31;
ENTRY(__vmsetie)
trap1(#HVM_TRAP1_VMSETIE);
jumpr R31;
ENTRY(__vmgetie)
trap1(#HVM_TRAP1_VMGETIE);
jumpr R31;
ENTRY(__vmintop)
trap1(#HVM_TRAP1_VMINTOP);
jumpr R31;
ENTRY(__vmclrmap)
trap1(#HVM_TRAP1_VMCLRMAP);
jumpr R31;
ENTRY(__vmnewmap)
r1 = #VM_NEWMAP_TYPE_PGTABLES;
trap1(#HVM_TRAP1_VMNEWMAP);
jumpr R31;
ENTRY(__vmcache)
trap1(#HVM_TRAP1_VMCACHE);
jumpr R31;
ENTRY(__vmgettime)
trap1(#HVM_TRAP1_VMGETTIME);
jumpr R31;
ENTRY(__vmsettime)
trap1(#HVM_TRAP1_VMSETTIME);
jumpr R31;
ENTRY(__vmwait)
trap1(#HVM_TRAP1_VMWAIT);
jumpr R31;
ENTRY(__vmyield)
trap1(#HVM_TRAP1_VMYIELD);
jumpr R31;
ENTRY(__vmstart)
trap1(#HVM_TRAP1_VMSTART);
jumpr R31;
ENTRY(__vmstop)
trap1(#HVM_TRAP1_VMSTOP);
jumpr R31;
ENTRY(__vmvpid)
trap1(#HVM_TRAP1_VMVPID);
jumpr R31;
/* Probably not actually going to use these; see vm_entry.S */
ENTRY(__vmsetregs)
trap1(#HVM_TRAP1_VMSETREGS);
jumpr R31;
ENTRY(__vmgetregs)
trap1(#HVM_TRAP1_VMGETREGS);
jumpr R31;
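/*
 * Illustrative tie-in: C code reaches these stubs through the declarations
 * in <asm/hexagon_vm.h>.  For example, do_trap0() in traps.c calls
 * __vmsetie(VM_INT_ENABLE) before dispatching a system call; that call
 * lands on the __vmsetie entry above, issues trap1(#HVM_TRAP1_VMSETIE),
 * and returns through jumpr R31.
 */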

View file

@ -0,0 +1,95 @@
/*
* Context switch support for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/asm-offsets.h>
.text
/*
* The register used as a fast-path thread information pointer
* is determined as a kernel configuration option. If it happens
* to be a callee-save register, we're going to be saving and
* restoring it twice here.
*
* This code anticipates a revised ABI where R20-23 are added
* to the set of callee-save registers, but this should be
* backward compatible to legacy tools.
*/
/*
* void switch_to(struct task_struct *prev,
* struct task_struct *next, struct task_struct *last);
*/
.p2align 2
.globl __switch_to
.type __switch_to, @function
/*
* When we exit the wormhole, we need to store the previous task
* in the new R0's pointer. Technically it should be R2, but they should
* be the same; seems like a legacy thing. In short, don't butcher
* R0, let it go back out unmolested.
*/
__switch_to:
/*
* Push callee-saves onto "prev" stack.
* Here, we're sneaky because the LR and FP
* storage of the thread_stack structure
* is automagically allocated by allocframe,
* so we pass struct size less 8.
*/
allocframe(#(_SWITCH_STACK_SIZE - 8));
memd(R29+#(_SWITCH_R2726))=R27:26;
memd(R29+#(_SWITCH_R2524))=R25:24;
memd(R29+#(_SWITCH_R2322))=R23:22;
memd(R29+#(_SWITCH_R2120))=R21:20;
memd(R29+#(_SWITCH_R1918))=R19:18;
memd(R29+#(_SWITCH_R1716))=R17:16;
/* Stash thread_info pointer in task_struct */
memw(R0+#_TASK_THREAD_INFO) = THREADINFO_REG;
memw(R0 +#(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP)) = R29;
/* Switch to "next" stack and restore callee saves from there */
R29 = memw(R1 + #(_TASK_STRUCT_THREAD + _THREAD_STRUCT_SWITCH_SP));
{
R27:26 = memd(R29+#(_SWITCH_R2726));
R25:24 = memd(R29+#(_SWITCH_R2524));
}
{
R23:22 = memd(R29+#(_SWITCH_R2322));
R21:20 = memd(R29+#(_SWITCH_R2120));
}
{
R19:18 = memd(R29+#(_SWITCH_R1918));
R17:16 = memd(R29+#(_SWITCH_R1716));
}
{
/* THREADINFO_REG is currently one of the callee-saved regs
* above, and so be sure to re-load it last.
*/
THREADINFO_REG = memw(R1 + #_TASK_THREAD_INFO);
R31:30 = memd(R29+#_SWITCH_FP);
}
{
R29 = add(R29,#_SWITCH_STACK_SIZE);
jumpr R31;
}
.size __switch_to, .-__switch_to
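
For context, the scheduler core never calls __switch_to directly; it goes through the architecture's switch_to() macro. A minimal sketch of that glue, assuming the usual asm/switch_to.h arrangement on Hexagon (treat the exact prototype as an assumption):

struct task_struct;

/* Assumed declaration, normally provided by asm/switch_to.h */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       struct task_struct *last);

/*
 * "r" receives the task that was previously running ("last"), which is
 * why __switch_to above is careful to let R0 go back out unmodified.
 */
#define switch_to(p, n, r) do { \
	(r) = __switch_to((p), (n), (r)); \
} while (0)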

View file

@@ -0,0 +1,48 @@
/*
* Event jump tables
*
* Copyright (c) 2010-2012,2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/hexagon_vm.h>
.text
/* This is registered early on to allow angel */
.global _K_provisional_vec
_K_provisional_vec:
jump 1f;
jump 1f;
jump 1f;
jump 1f;
jump 1f;
trap1(#HVM_TRAP1_VMRTE)
jump 1f;
jump 1f;
.global _K_VM_event_vector
_K_VM_event_vector:
1:
jump 1b; /* Reset */
jump _K_enter_machcheck;
jump _K_enter_genex;
jump _K_enter_debug;
jump 1b; /* 4 Rsvd */
jump _K_enter_trap0;
jump 1b; /* 6 Rsvd */
jump _K_enter_interrupt;
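
The table above only takes effect once its address is handed to the virtual machine. A minimal sketch of how that registration might look from C, reusing the __vmsetvec wrapper from vm_ops.S; the prototype and the call site are assumptions for illustration, not the kernel's actual boot path.

#include <asm/hexagon_vm.h>

/* Defined in vm_vectors.S above; declared here as an assumption. */
extern char _K_VM_event_vector[];

static void register_event_vector(void)
{
	/* Assumed signature: long __vmsetvec(void *vector_base); */
	__vmsetvec(_K_VM_event_vector);
}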

View file

@@ -0,0 +1,82 @@
/*
* Linker script for Hexagon kernel
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h> /* Most of the kernel defines are here */
#include <asm/mem-layout.h> /* except for page_offset */
#include <asm/cache.h> /* and now we're pulling cache line size */
OUTPUT_ARCH(hexagon)
ENTRY(stext)
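/* Hexagon is little-endian, so the 32-bit jiffies symbol can simply
   alias the low word of the 64-bit jiffies_64 counter. */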
jiffies = jiffies_64;
/*
See asm-generic/vmlinux.lds.h for expansion of some of these macros.
See asm-generic/sections.h for seemingly required labels.
*/
#define PAGE_SIZE _PAGE_SIZE
SECTIONS
{
. = PAGE_OFFSET;
__init_begin = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
PERCPU_SECTION(L1_CACHE_BYTES)
__init_end = .;
. = ALIGN(_PAGE_SIZE);
_stext = .;
.text : AT(ADDR(.text)) {
_text = .;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
}
_etext = .;
INIT_DATA_SECTION(PAGE_SIZE)
_sdata = .;
RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)	/* cacheline, pagealigned, inittask */
RO_DATA_SECTION(PAGE_SIZE)
_edata = .;
EXCEPTION_TABLE(16)
NOTES
BSS_SECTION(_PAGE_SIZE, _PAGE_SIZE, _PAGE_SIZE)
_end = .;
/DISCARD/ : {
EXIT_TEXT
EXIT_DATA
EXIT_CALL
}
STABS_DEBUG
DWARF_DEBUG
}
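
The _stext/_etext pair laid down above is consumed elsewhere in the kernel to decide what counts as kernel text. The generic idiom looks roughly like the sketch below, using the linker-provided symbols declared by asm/sections.h; this is illustrative, not Hexagon-specific code.

#include <asm/sections.h>

/* An address is core kernel text if it falls within [_stext, _etext). */
static int addr_is_kernel_text(unsigned long addr)
{
	return addr >= (unsigned long)_stext &&
	       addr <  (unsigned long)_etext;
}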