Fixed MTP to work with TWRP

commit f6dfaef42e
Author: awab228
Date:   2018-06-19 23:16:04 +02:00

50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,30 @@
config PPC_PMAC
bool "Apple PowerMac based machines"
depends on PPC_BOOK3S
select MPIC
select PCI
select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32
select PPC_NATIVE
default y
config PPC_PMAC64
bool
depends on PPC_PMAC && PPC64
select MPIC
select U3_DART
select MPIC_U3_HT_IRQS
select GENERIC_TBSYNC
select PPC_970_NAP
default y
config PPC_PMAC32_PSURGE
bool "Support for powersurge upgrade cards" if EXPERT
depends on SMP && PPC32 && PPC_PMAC
select PPC_SMP_MUXED_IPI
default y
help
The powersurge cpu boards can be used in the generation
of powermacs that have a socket for an upgradeable cpu card,
including the 7500, 8500, 9500, 9600. Support exists for
both dual and quad socket upgrade cards.


@@ -0,0 +1,19 @@
CFLAGS_bootx_init.o += -fPIC
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
endif
obj-y += pic.o setup.o time.o feature.o pci.o \
sleep.o low_i2c.o cache.o pfunc_core.o \
pfunc_base.o udbg_scc.o udbg_adb.o
obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
# CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really
# need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really
# CONFIG_NVRAM=y
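# ($(CONFIG_NVRAM:m=y) is a make substitution reference: a value of "m"
# becomes "y", "y" passes through unchanged, and an unset symbol stays empty)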
obj-$(CONFIG_NVRAM:m=y) += nvram.o
# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
obj-$(CONFIG_PPC64) += nvram.o
obj-$(CONFIG_PPC32) += bootx_init.o
obj-$(CONFIG_SMP) += smp.o


@@ -0,0 +1,220 @@
/*
* Miscellaneous procedures for dealing with the PowerMac hardware.
* Contains support for the backlight.
*
* Copyright (C) 2000 Benjamin Herrenschmidt
* Copyright (C) 2006 Michael Hanselmann <linux-kernel@hansmi.ch>
*
*/
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <asm/prom.h>
#include <asm/backlight.h>
#define OLD_BACKLIGHT_MAX 15
static void pmac_backlight_key_worker(struct work_struct *work);
static void pmac_backlight_set_legacy_worker(struct work_struct *work);
static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
/* Although these variables are used in interrupt context, it makes no sense to
* protect them. No user is able to produce enough key events per second and
* notice the errors that might happen.
*/
static int pmac_backlight_key_queued;
static int pmac_backlight_set_legacy_queued;
/* The via-pmu code allows the backlight to be grabbed, in which case the
* in-kernel control of the brightness needs to be disabled. This should
* only be used by really old PowerBooks.
*/
static atomic_t kernel_backlight_disabled = ATOMIC_INIT(0);
/* Protect the pmac_backlight variable below.
You should hold this lock when using the pmac_backlight pointer to
prevent its potential removal. */
DEFINE_MUTEX(pmac_backlight_mutex);
/* Main backlight storage
*
* Backlight drivers in this variable are required to have the "ops"
* attribute set and to have an update_status function.
*
* We can only store one backlight here, but since Apple laptops have only one
* internal display, it doesn't matter. Other backlight drivers can be used
* independently.
*
*/
struct backlight_device *pmac_backlight;
int pmac_has_backlight_type(const char *type)
{
struct device_node* bk_node = of_find_node_by_name(NULL, "backlight");
if (bk_node) {
const char *prop = of_get_property(bk_node,
"backlight-control", NULL);
if (prop && strncmp(prop, type, strlen(type)) == 0) {
of_node_put(bk_node);
return 1;
}
of_node_put(bk_node);
}
return 0;
}
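/* Return the index of the framebuffer's bl_curve entry whose value is
* closest to the requested brightness. */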
int pmac_backlight_curve_lookup(struct fb_info *info, int value)
{
int level = (FB_BACKLIGHT_LEVELS - 1);
if (info && info->bl_dev) {
int i, max = 0;
/* Look for biggest value */
for (i = 0; i < FB_BACKLIGHT_LEVELS; i++)
max = max((int)info->bl_curve[i], max);
/* Look for nearest value */
for (i = 0; i < FB_BACKLIGHT_LEVELS; i++) {
int diff = abs(info->bl_curve[i] - value);
if (diff < max) {
max = diff;
level = i;
}
}
}
return level;
}
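/* Deferred handler for the brightness keys: step the backlight up or down
* by 1/15th of its range and clamp the result to [0, max_brightness]. */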
static void pmac_backlight_key_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight) {
struct backlight_properties *props;
int brightness;
props = &pmac_backlight->props;
brightness = props->brightness +
((pmac_backlight_key_queued?-1:1) *
(props->max_brightness / 15));
if (brightness < 0)
brightness = 0;
else if (brightness > props->max_brightness)
brightness = props->max_brightness;
props->brightness = brightness;
backlight_update_status(pmac_backlight);
}
mutex_unlock(&pmac_backlight_mutex);
}
/* This function is called in interrupt context */
void pmac_backlight_key(int direction)
{
if (atomic_read(&kernel_backlight_disabled))
return;
/* we can receive multiple interrupts here, but the scheduled work
* will run only once, with the last value
*/
pmac_backlight_key_queued = direction;
schedule_work(&pmac_backlight_key_work);
}
static int __pmac_backlight_set_legacy_brightness(int brightness)
{
int error = -ENXIO;
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight) {
struct backlight_properties *props;
props = &pmac_backlight->props;
props->brightness = brightness *
(props->max_brightness + 1) /
(OLD_BACKLIGHT_MAX + 1);
if (props->brightness > props->max_brightness)
props->brightness = props->max_brightness;
else if (props->brightness < 0)
props->brightness = 0;
backlight_update_status(pmac_backlight);
error = 0;
}
mutex_unlock(&pmac_backlight_mutex);
return error;
}
static void pmac_backlight_set_legacy_worker(struct work_struct *work)
{
if (atomic_read(&kernel_backlight_disabled))
return;
__pmac_backlight_set_legacy_brightness(pmac_backlight_set_legacy_queued);
}
/* This function is called in interrupt context */
void pmac_backlight_set_legacy_brightness_pmu(int brightness)
{
if (atomic_read(&kernel_backlight_disabled))
return;
pmac_backlight_set_legacy_queued = brightness;
schedule_work(&pmac_backlight_set_legacy_work);
}
int pmac_backlight_set_legacy_brightness(int brightness)
{
return __pmac_backlight_set_legacy_brightness(brightness);
}
int pmac_backlight_get_legacy_brightness(void)
{
int result = -ENXIO;
mutex_lock(&pmac_backlight_mutex);
if (pmac_backlight) {
struct backlight_properties *props;
props = &pmac_backlight->props;
result = props->brightness *
(OLD_BACKLIGHT_MAX + 1) /
(props->max_brightness + 1);
}
mutex_unlock(&pmac_backlight_mutex);
return result;
}
void pmac_backlight_disable(void)
{
atomic_inc(&kernel_backlight_disabled);
}
void pmac_backlight_enable(void)
{
atomic_dec(&kernel_backlight_disabled);
}
EXPORT_SYMBOL_GPL(pmac_backlight);
EXPORT_SYMBOL_GPL(pmac_backlight_mutex);
EXPORT_SYMBOL_GPL(pmac_has_backlight_type);


@@ -0,0 +1,595 @@
/*
* Early boot support code for BootX bootloader
*
* Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <generated/utsrelease.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/bootx.h>
#include <asm/btext.h>
#include <asm/io.h>
#include <asm/setup.h>
#undef DEBUG
#define SET_BOOT_BAT
#ifdef DEBUG
#define DBG(fmt...) do { bootx_printf(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
extern void __start(unsigned long r3, unsigned long r4, unsigned long r5);
static unsigned long __initdata bootx_dt_strbase;
static unsigned long __initdata bootx_dt_strend;
static unsigned long __initdata bootx_node_chosen;
static boot_infos_t * __initdata bootx_info;
static char __initdata bootx_disp_path[256];
/* Is boot-info compatible ? */
#define BOOT_INFO_IS_COMPATIBLE(bi) \
((bi)->compatible_version <= BOOT_INFO_VERSION)
#define BOOT_INFO_IS_V2_COMPATIBLE(bi) ((bi)->version >= 2)
#define BOOT_INFO_IS_V4_COMPATIBLE(bi) ((bi)->version >= 4)
#ifdef CONFIG_BOOTX_TEXT
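/* Minimal printf over the btext engine, usable before the console is up.
* Only plain text, '\n', %s and %x are handled; for any other conversion
* the '%' is dropped and the following character is printed as-is. */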
static void __init bootx_printf(const char *format, ...)
{
const char *p, *q, *s;
va_list args;
unsigned long v;
va_start(args, format);
for (p = format; *p != 0; p = q) {
for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
;
if (q > p)
btext_drawtext(p, q - p);
if (*q == 0)
break;
if (*q == '\n') {
++q;
btext_flushline();
btext_drawstring("\r\n");
btext_flushline();
continue;
}
++q;
if (*q == 0)
break;
switch (*q) {
case 's':
++q;
s = va_arg(args, const char *);
if (s == NULL)
s = "<NULL>";
btext_drawstring(s);
break;
case 'x':
++q;
v = va_arg(args, unsigned long);
btext_drawhex(v);
break;
}
}
}
#else /* CONFIG_BOOTX_TEXT */
static void __init bootx_printf(const char *format, ...) {}
#endif /* CONFIG_BOOTX_TEXT */
static void * __init bootx_early_getprop(unsigned long base,
unsigned long node,
char *prop)
{
struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
u32 *ppp = &np->properties;
while(*ppp) {
struct bootx_dt_prop *pp =
(struct bootx_dt_prop *)(base + *ppp);
if (strcmp((char *)((unsigned long)pp->name + base),
prop) == 0) {
return (void *)((unsigned long)pp->value + base);
}
ppp = &pp->next;
}
return NULL;
}
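/* Append one 32-bit token to the flattened device-tree blob being built,
* 4-byte aligning the output pointer first. */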
#define dt_push_token(token, mem) \
do { \
*(mem) = _ALIGN_UP(*(mem),4); \
*((u32 *)*(mem)) = token; \
*(mem) += 4; \
} while(0)
static unsigned long __init bootx_dt_find_string(char *str)
{
char *s, *os;
s = os = (char *)bootx_dt_strbase;
s += 4;
while (s < (char *)bootx_dt_strend) {
if (strcmp(s, str) == 0)
return s - os;
s += strlen(s) + 1;
}
return 0;
}
static void __init bootx_dt_add_prop(char *name, void *data, int size,
unsigned long *mem_end)
{
unsigned long soff = bootx_dt_find_string(name);
if (data == NULL)
size = 0;
if (soff == 0) {
bootx_printf("WARNING: Can't find string index for <%s>\n",
name);
return;
}
if (size > 0x20000) {
bootx_printf("WARNING: ignoring large property ");
bootx_printf("%s length 0x%x\n", name, size);
return;
}
dt_push_token(OF_DT_PROP, mem_end);
dt_push_token(size, mem_end);
dt_push_token(soff, mem_end);
/* push property content */
if (size && data) {
memcpy((void *)*mem_end, data, size);
*mem_end = _ALIGN_UP(*mem_end + size, 4);
}
}
static void __init bootx_add_chosen_props(unsigned long base,
unsigned long *mem_end)
{
u32 val;
bootx_dt_add_prop("linux,bootx", NULL, 0, mem_end);
if (bootx_info->kernelParamsOffset) {
char *args = (char *)((unsigned long)bootx_info) +
bootx_info->kernelParamsOffset;
bootx_dt_add_prop("bootargs", args, strlen(args) + 1, mem_end);
}
if (bootx_info->ramDisk) {
val = ((unsigned long)bootx_info) + bootx_info->ramDisk;
bootx_dt_add_prop("linux,initrd-start", &val, 4, mem_end);
val += bootx_info->ramDiskSize;
bootx_dt_add_prop("linux,initrd-end", &val, 4, mem_end);
}
if (strlen(bootx_disp_path))
bootx_dt_add_prop("linux,stdout-path", bootx_disp_path,
strlen(bootx_disp_path) + 1, mem_end);
}
static void __init bootx_add_display_props(unsigned long base,
unsigned long *mem_end,
int has_real_node)
{
boot_infos_t *bi = bootx_info;
u32 tmp;
if (has_real_node) {
bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end);
bootx_dt_add_prop("linux,opened", NULL, 0, mem_end);
} else
bootx_dt_add_prop("linux,bootx-noscreen", NULL, 0, mem_end);
tmp = bi->dispDeviceDepth;
bootx_dt_add_prop("linux,bootx-depth", &tmp, 4, mem_end);
tmp = bi->dispDeviceRect[2] - bi->dispDeviceRect[0];
bootx_dt_add_prop("linux,bootx-width", &tmp, 4, mem_end);
tmp = bi->dispDeviceRect[3] - bi->dispDeviceRect[1];
bootx_dt_add_prop("linux,bootx-height", &tmp, 4, mem_end);
tmp = bi->dispDeviceRowBytes;
bootx_dt_add_prop("linux,bootx-linebytes", &tmp, 4, mem_end);
tmp = (u32)bi->dispDeviceBase;
if (tmp == 0)
tmp = (u32)bi->logicalDisplayBase;
tmp += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;
tmp += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8);
bootx_dt_add_prop("linux,bootx-addr", &tmp, 4, mem_end);
}
static void __init bootx_dt_add_string(char *s, unsigned long *mem_end)
{
unsigned int l = strlen(s) + 1;
memcpy((void *)*mem_end, s, l);
bootx_dt_strend = *mem_end = *mem_end + l;
}
static void __init bootx_scan_dt_build_strings(unsigned long base,
unsigned long node,
unsigned long *mem_end)
{
struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
u32 *cpp, *ppp = &np->properties;
unsigned long soff;
char *namep;
/* Keep refs to known nodes */
namep = np->full_name ? (char *)(base + np->full_name) : NULL;
if (namep == NULL) {
bootx_printf("Node without a full name !\n");
namep = "";
}
DBG("* strings: %s\n", namep);
if (!strcmp(namep, "/chosen")) {
DBG(" detected /chosen ! adding properties names !\n");
bootx_dt_add_string("linux,bootx", mem_end);
bootx_dt_add_string("linux,stdout-path", mem_end);
bootx_dt_add_string("linux,initrd-start", mem_end);
bootx_dt_add_string("linux,initrd-end", mem_end);
bootx_dt_add_string("bootargs", mem_end);
bootx_node_chosen = node;
}
if (node == bootx_info->dispDeviceRegEntryOffset) {
DBG(" detected display ! adding properties names !\n");
bootx_dt_add_string("linux,boot-display", mem_end);
bootx_dt_add_string("linux,opened", mem_end);
strncpy(bootx_disp_path, namep, 255);
}
/* get and store all property names */
while (*ppp) {
struct bootx_dt_prop *pp =
(struct bootx_dt_prop *)(base + *ppp);
namep = pp->name ? (char *)(base + pp->name) : NULL;
if (namep == NULL || strcmp(namep, "name") == 0)
goto next;
/* get/create string entry */
soff = bootx_dt_find_string(namep);
if (soff == 0)
bootx_dt_add_string(namep, mem_end);
next:
ppp = &pp->next;
}
/* do all our children */
cpp = &np->child;
while(*cpp) {
np = (struct bootx_dt_node *)(base + *cpp);
bootx_scan_dt_build_strings(base, *cpp, mem_end);
cpp = &np->sibling;
}
}
static void __init bootx_scan_dt_build_struct(unsigned long base,
unsigned long node,
unsigned long *mem_end)
{
struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node);
u32 *cpp, *ppp = &np->properties;
char *namep, *p, *ep, *lp;
int l;
dt_push_token(OF_DT_BEGIN_NODE, mem_end);
/* get the node's full name */
namep = np->full_name ? (char *)(base + np->full_name) : NULL;
if (namep == NULL)
namep = "";
l = strlen(namep);
DBG("* struct: %s\n", namep);
/* Fixup an Apple bug where they have bogus \0 chars in the
* middle of the path in some properties, and extract
* the unit name (everything after the last '/').
*/
memcpy((void *)*mem_end, namep, l + 1);
namep = (char *)*mem_end;
for (lp = p = namep, ep = namep + l; p < ep; p++) {
if (*p == '/')
lp = namep;
else if (*p != 0)
*lp++ = *p;
}
*lp = 0;
*mem_end = _ALIGN_UP((unsigned long)lp + 1, 4);
/* get and store all properties */
while (*ppp) {
struct bootx_dt_prop *pp =
(struct bootx_dt_prop *)(base + *ppp);
namep = pp->name ? (char *)(base + pp->name) : NULL;
/* Skip "name" */
if (namep == NULL || !strcmp(namep, "name"))
goto next;
/* Skip "bootargs" in /chosen too as we replace it */
if (node == bootx_node_chosen && !strcmp(namep, "bootargs"))
goto next;
/* push property head */
bootx_dt_add_prop(namep,
pp->value ? (void *)(base + pp->value): NULL,
pp->length, mem_end);
next:
ppp = &pp->next;
}
if (node == bootx_node_chosen) {
bootx_add_chosen_props(base, mem_end);
if (bootx_info->dispDeviceRegEntryOffset == 0)
bootx_add_display_props(base, mem_end, 0);
}
else if (node == bootx_info->dispDeviceRegEntryOffset)
bootx_add_display_props(base, mem_end, 1);
/* do all our children */
cpp = &np->child;
while(*cpp) {
np = (struct bootx_dt_node *)(base + *cpp);
bootx_scan_dt_build_struct(base, *cpp, mem_end);
cpp = &np->sibling;
}
dt_push_token(OF_DT_END_NODE, mem_end);
}
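/* Build a flattened device-tree (header, reserve map, strings block and
* structure block) just past the data BootX handed us, and return its
* address so it can be passed on to the kernel proper. */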
static unsigned long __init bootx_flatten_dt(unsigned long start)
{
boot_infos_t *bi = bootx_info;
unsigned long mem_start, mem_end;
struct boot_param_header *hdr;
unsigned long base;
u64 *rsvmap;
/* Start using memory after the big blob passed by BootX, get
* some space for the header
*/
mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4);
DBG("Boot params header at: %x\n", mem_start);
hdr = (struct boot_param_header *)mem_start;
mem_end += sizeof(struct boot_param_header);
rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8));
hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start;
mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64);
/* Get base of tree */
base = ((unsigned long)bi) + bi->deviceTreeOffset;
/* Build string array */
DBG("Building string array at: %x\n", mem_end);
DBG("Device Tree Base=%x\n", base);
bootx_dt_strbase = mem_end;
mem_end += 4;
bootx_dt_strend = mem_end;
bootx_scan_dt_build_strings(base, 4, &mem_end);
/* Add some strings */
bootx_dt_add_string("linux,bootx-noscreen", &mem_end);
bootx_dt_add_string("linux,bootx-depth", &mem_end);
bootx_dt_add_string("linux,bootx-width", &mem_end);
bootx_dt_add_string("linux,bootx-height", &mem_end);
bootx_dt_add_string("linux,bootx-linebytes", &mem_end);
bootx_dt_add_string("linux,bootx-addr", &mem_end);
/* Wrap up strings */
hdr->off_dt_strings = bootx_dt_strbase - mem_start;
hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase;
/* Build structure */
mem_end = _ALIGN(mem_end, 16);
DBG("Building device tree structure at: %x\n", mem_end);
hdr->off_dt_struct = mem_end - mem_start;
bootx_scan_dt_build_struct(base, 4, &mem_end);
dt_push_token(OF_DT_END, &mem_end);
/* Finish header */
hdr->boot_cpuid_phys = 0;
hdr->magic = OF_DT_HEADER;
hdr->totalsize = mem_end - mem_start;
hdr->version = OF_DT_VERSION;
/* Version 16 is not backward compatible */
hdr->last_comp_version = 0x10;
/* Reserve the whole thing and copy the reserve map in, we
* also bump mem_reserve_cnt to cause further reservations to
* fail since it's too late.
*/
mem_end = _ALIGN(mem_end, PAGE_SIZE);
DBG("End of boot params: %x\n", mem_end);
rsvmap[0] = mem_start;
rsvmap[1] = mem_end;
if (bootx_info->ramDisk) {
rsvmap[2] = ((unsigned long)bootx_info) + bootx_info->ramDisk;
rsvmap[3] = rsvmap[2] + bootx_info->ramDiskSize;
rsvmap[4] = 0;
rsvmap[5] = 0;
} else {
rsvmap[2] = 0;
rsvmap[3] = 0;
}
return (unsigned long)hdr;
}
#ifdef CONFIG_BOOTX_TEXT
static void __init btext_welcome(boot_infos_t *bi)
{
unsigned long flags;
unsigned long pvr;
bootx_printf("Welcome to Linux, kernel " UTS_RELEASE "\n");
bootx_printf("\nlinked at : 0x%x", KERNELBASE);
bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
bootx_printf(" (log)");
bootx_printf("\nklimit : 0x%x",(unsigned long)klimit);
bootx_printf("\nboot_info at : 0x%x", bi);
__asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
bootx_printf("\nMSR : 0x%x", flags);
__asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr));
bootx_printf("\nPVR : 0x%x", pvr);
pvr >>= 16;
if (pvr > 1) {
__asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags));
bootx_printf("\nHID0 : 0x%x", flags);
}
if (pvr == 8 || pvr == 12 || pvr == 0x800c) {
__asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags));
bootx_printf("\nICTC : 0x%x", flags);
}
#ifdef DEBUG
bootx_printf("\n\n");
bootx_printf("bi->deviceTreeOffset : 0x%x\n",
bi->deviceTreeOffset);
bootx_printf("bi->deviceTreeSize : 0x%x\n",
bi->deviceTreeSize);
#endif
bootx_printf("\n\n");
}
#endif /* CONFIG_BOOTX_TEXT */
void __init bootx_init(unsigned long r3, unsigned long r4)
{
boot_infos_t *bi = (boot_infos_t *) r4;
unsigned long hdr;
unsigned long space;
unsigned long ptr, x;
char *model;
unsigned long offset = reloc_offset();
reloc_got2(offset);
bootx_info = bi;
/* We haven't cleared any bss at this point, make sure
* what we need is initialized
*/
bootx_dt_strbase = bootx_dt_strend = 0;
bootx_node_chosen = 0;
bootx_disp_path[0] = 0;
if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
bi->logicalDisplayBase = bi->dispDeviceBase;
/* Fixup depth 16 -> 15 as that's what MacOS calls 16bpp */
if (bi->dispDeviceDepth == 16)
bi->dispDeviceDepth = 15;
#ifdef CONFIG_BOOTX_TEXT
ptr = (unsigned long)bi->logicalDisplayBase;
ptr += bi->dispDeviceRect[1] * bi->dispDeviceRowBytes;
ptr += bi->dispDeviceRect[0] * ((bi->dispDeviceDepth + 7) / 8);
btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0],
bi->dispDeviceRect[3] - bi->dispDeviceRect[1],
bi->dispDeviceDepth, bi->dispDeviceRowBytes,
(unsigned long)bi->logicalDisplayBase);
btext_clearscreen();
btext_flushscreen();
#endif /* CONFIG_BOOTX_TEXT */
/*
* Test if boot-info is compatible. Done only when CONFIG_BOOTX_TEXT
* is set, since there is not much we can do with an incompatible
* version, except display a message and eventually hang the
* processor...
*
* I'll try to keep enough of boot-info compatible in the
* future to always allow display of this message.
*/
if (!BOOT_INFO_IS_COMPATIBLE(bi)) {
bootx_printf(" !!! WARNING - Incompatible version"
" of BootX !!!\n\n\n");
for (;;)
;
}
if (bi->architecture != BOOT_ARCH_PCI) {
bootx_printf(" !!! WARNING - Usupported machine"
" architecture !\n");
for (;;)
;
}
#ifdef CONFIG_BOOTX_TEXT
btext_welcome(bi);
#endif
/* New BootX enters the kernel with the MMU off; I/O is not allowed
* here. This hack will have been done by the bootstrap anyway.
*/
if (bi->version < 4) {
/*
* XXX If this is an iMac, turn off the USB controller.
*/
model = (char *) bootx_early_getprop(r4 + bi->deviceTreeOffset,
4, "model");
if (model
&& (strcmp(model, "iMac,1") == 0
|| strcmp(model, "PowerMac1,1") == 0)) {
bootx_printf("iMac,1 detected, shutting down USB\n");
out_le32((unsigned __iomem *)0x80880008, 1); /* XXX */
}
}
/* Get a pointer that points above the device tree, args, ramdisk,
* etc... to use for generating the flattened tree
*/
if (bi->version < 5) {
space = bi->deviceTreeOffset + bi->deviceTreeSize;
if (bi->ramDisk >= space)
space = bi->ramDisk + bi->ramDiskSize;
} else
space = bi->totalParamsSize;
bootx_printf("Total space used by parameters & ramdisk: 0x%x\n", space);
/* New BootX will have flushed all TLBs and enters kernel with
* MMU switched OFF, so this should not be useful anymore.
*/
if (bi->version < 4) {
bootx_printf("Touching pages...\n");
/*
* Touch each page to make sure the PTEs for them
* are in the hash table - the aim is to try to avoid
* getting DSI exceptions while copying the kernel image.
*/
for (ptr = ((unsigned long) &_stext) & PAGE_MASK;
ptr < (unsigned long)bi + space; ptr += PAGE_SIZE)
x = *(volatile unsigned long *)ptr;
}
/* Ok, now we need to generate a flattened device-tree to pass
* to the kernel
*/
bootx_printf("Preparing boot params...\n");
hdr = bootx_flatten_dt(space);
#ifdef CONFIG_BOOTX_TEXT
#ifdef SET_BOOT_BAT
bootx_printf("Preparing BAT...\n");
btext_prepare_BAT();
#else
btext_unmap();
#endif
#endif
reloc_got2(-offset);
__start(hdr, KERNELBASE + offset, 0);
}


@@ -0,0 +1,358 @@
/*
* This file contains low-level cache management functions
* used for sleep and CPU speed changes on Apple machines.
* (In fact the only thing that is Apple-specific is that we assume
* that we can read from ROM at physical address 0xfff00000.)
*
* Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
* Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
/*
* Flush and disable all data caches (dL1, L2, L3). This is used
* when going to sleep, when doing a PMU based cpufreq transition,
* or when "offlining" a CPU on SMP machines. This code is over
* paranoid, but I've had enough issues with various CPU revs and
* bugs that I decided it was worth being overcautious
*/
_GLOBAL(flush_disable_caches)
#ifndef CONFIG_6xx
blr
#else
BEGIN_FTR_SECTION
b flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
b flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
b __flush_disable_L1
/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
mflr r10
/* Turn off EE and DR in MSR */
mfmsr r11
rlwinm r0,r11,0,~MSR_EE
rlwinm r0,r0,0,~MSR_DR
sync
mtmsr r0
isync
/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/* Stop DPM */
mfspr r8,SPRN_HID0 /* Save SPRN_HID0 in r8 */
rlwinm r4,r8,0,12,10 /* Turn off HID0[DPM] */
sync
mtspr SPRN_HID0,r4 /* Disable DPM */
sync
/* Disp-flush L1. We have a weird problem here that I never
* totally figured out. On 750FX, using the ROM for the flush
* results in a non-working flush. We use that workaround for
* now until I finally understand what's going on. --BenH
*/
/* ROM base by default */
lis r4,0xfff0
mfpvr r3
srwi r3,r3,16
cmplwi cr0,r3,0x7000
bne+ 1f
/* RAM base on 750FX */
li r4,0
1: li r4,0x4000
mtctr r4
1: lwz r0,0(r4)
addi r4,r4,32
bdnz 1b
sync
isync
/* Disable / invalidate / enable L1 data */
mfspr r3,SPRN_HID0
rlwinm r3,r3,0,~(HID0_DCE | HID0_ICE)
mtspr SPRN_HID0,r3
sync
isync
ori r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
sync
isync
mtspr SPRN_HID0,r3
xori r3,r3,(HID0_DCI|HID0_ICFI)
mtspr SPRN_HID0,r3
sync
/* Get the current enable bit of the L2CR into r4 */
mfspr r5,SPRN_L2CR
/* Set to data-only (pre-745x bit) */
oris r3,r5,L2CR_L2DO@h
b 2f
/* When disabling L2, code must be in L1 */
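/* The branch hops and the 32-byte alignment keep the mtspr and the
* sync/isync that follow it within a single instruction cache line, and
* bracket the update with sync/isync on the way in and out. */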
.balign 32
1: mtspr SPRN_L2CR,r3
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: /* disp-flush L2. The interesting thing here is that the L2 can be
* up to 2Mb ... so using the ROM, we'll end up wrapping back to memory
* but that is probably fine. We disp-flush over 4Mb to be safe
*/
lis r4,2
mtctr r4
lis r4,0xfff0
1: lwz r0,0(r4)
addi r4,r4,32
bdnz 1b
sync
isync
lis r4,2
mtctr r4
lis r4,0xfff0
1: dcbf 0,r4
addi r4,r4,32
bdnz 1b
sync
isync
/* now disable L2 */
rlwinm r5,r5,0,~L2CR_L2E
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
1: mtspr SPRN_L2CR,r5
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: sync
isync
/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
oris r4,r5,L2CR_L2I@h
mtspr SPRN_L2CR,r4
sync
isync
/* Wait for the invalidation to complete */
1: mfspr r3,SPRN_L2CR
rlwinm. r0,r3,0,31,31
bne 1b
/* Clear L2I */
xoris r4,r4,L2CR_L2I@h
sync
mtspr SPRN_L2CR,r4
sync
/* now disable the L1 data cache */
mfspr r0,SPRN_HID0
rlwinm r0,r0,0,~(HID0_DCE|HID0_ICE)
mtspr SPRN_HID0,r0
sync
isync
/* Restore HID0[DPM] to whatever it was before */
sync
mfspr r0,SPRN_HID0
rlwimi r0,r8,0,11,11 /* Turn back HID0[DPM] */
mtspr SPRN_HID0,r0
sync
/* restore DR and EE */
sync
mtmsr r11
isync
mtlr r10
blr
/* This code is for 745x processors */
flush_disable_745x:
/* Turn off EE and DR in MSR */
mfmsr r11
rlwinm r0,r11,0,~MSR_EE
rlwinm r0,r0,0,~MSR_DR
sync
mtmsr r0
isync
/* Stop prefetch streams */
DSSALL
sync
/* Disable L2 prefetching */
mfspr r0,SPRN_MSSCR0
rlwinm r0,r0,0,0,29
mtspr SPRN_MSSCR0,r0
sync
isync
lis r4,0
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
dcbf 0,r4
/* Due to a bug with the HW flush on some CPU revs, we occasionally
* experience data corruption. I'm adding a displacement flush along
* with a dcbf loop over a few Mb to "help". The problem isn't totally
* fixed by this in theory, but at least, in practice, I couldn't reproduce
* it even with a big hammer...
*/
lis r4,0x0002
mtctr r4
li r4,0
1:
lwz r0,0(r4)
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
isync
/* Now, flush the first 4MB of memory */
lis r4,0x0002
mtctr r4
li r4,0
sync
1:
dcbf 0,r4
addi r4,r4,32 /* Go to start of next cache line */
bdnz 1b
/* Flush and disable the L1 data cache */
mfspr r6,SPRN_LDSTCR
lis r3,0xfff0 /* read from ROM for displacement flush */
li r4,0xfe /* start with only way 0 unlocked */
li r5,128 /* 128 lines in each way */
1: mtctr r5
rlwimi r6,r4,0,24,31
mtspr SPRN_LDSTCR,r6
sync
isync
2: lwz r0,0(r3) /* touch each cache line */
addi r3,r3,32
bdnz 2b
rlwinm r4,r4,1,24,30 /* move on to the next way */
ori r4,r4,1
cmpwi r4,0xff /* all done? */
bne 1b
/* now unlock the L1 data cache */
li r4,0
rlwimi r6,r4,0,24,31
sync
mtspr SPRN_LDSTCR,r6
sync
isync
/* Flush the L2 cache using the hardware assist */
mfspr r3,SPRN_L2CR
cmpwi r3,0 /* check if it is enabled first */
bge 4f
oris r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
b 2f
/* When disabling/locking L2, code must be in L1 */
.balign 32
1: mtspr SPRN_L2CR,r0 /* lock the L2 cache */
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: sync
isync
ori r0,r3,L2CR_L2HWF_745x
sync
mtspr SPRN_L2CR,r0 /* set the hardware flush bit */
3: mfspr r0,SPRN_L2CR /* wait for it to go to 0 */
andi. r0,r0,L2CR_L2HWF_745x
bne 3b
sync
rlwinm r3,r3,0,~L2CR_L2E
b 2f
/* When disabling L2, code must be in L1 */
.balign 32
1: mtspr SPRN_L2CR,r3 /* disable the L2 cache */
3: sync
isync
b 1f
2: b 3f
3: sync
isync
b 1b
1: sync
isync
oris r4,r3,L2CR_L2I@h
mtspr SPRN_L2CR,r4
sync
isync
1: mfspr r4,SPRN_L2CR
andis. r0,r4,L2CR_L2I@h
bne 1b
sync
BEGIN_FTR_SECTION
/* Flush the L3 cache using the hardware assist */
4: mfspr r3,SPRN_L3CR
cmpwi r3,0 /* check if it is enabled */
bge 6f
oris r0,r3,L3CR_L3IO@h
ori r0,r0,L3CR_L3DO
sync
mtspr SPRN_L3CR,r0 /* lock the L3 cache */
sync
isync
ori r0,r0,L3CR_L3HWF
sync
mtspr SPRN_L3CR,r0 /* set the hardware flush bit */
5: mfspr r0,SPRN_L3CR /* wait for it to go to zero */
andi. r0,r0,L3CR_L3HWF
bne 5b
rlwinm r3,r3,0,~L3CR_L3E
sync
mtspr SPRN_L3CR,r3 /* disable the L3 cache */
sync
ori r4,r3,L3CR_L3I
mtspr SPRN_L3CR,r4
1: mfspr r4,SPRN_L3CR
andi. r0,r4,L3CR_L3I
bne 1b
sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
6: mfspr r0,SPRN_HID0 /* now disable the L1 data cache */
rlwinm r0,r0,0,~HID0_DCE
mtspr SPRN_HID0,r0
sync
isync
mtmsr r11 /* restore DR and EE */
isync
blr
#endif /* CONFIG_6xx */

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,652 @@
/*
* Copyright (C) 2002 Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Todo: - add support for the OF persistent properties
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/nvram.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
#include "pmac.h"
#define DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
#define NVRAM_SIZE 0x2000 /* 8kB of non-volatile RAM */
#define CORE99_SIGNATURE 0x5a
#define CORE99_ADLER_START 0x14
/* On Core99, nvram is either a Sharp, a Micron or an AMD flash */
#define SM_FLASH_STATUS_DONE 0x80
#define SM_FLASH_STATUS_ERR 0x38
#define SM_FLASH_CMD_ERASE_CONFIRM 0xd0
#define SM_FLASH_CMD_ERASE_SETUP 0x20
#define SM_FLASH_CMD_RESET 0xff
#define SM_FLASH_CMD_WRITE_SETUP 0x40
#define SM_FLASH_CMD_CLEAR_STATUS 0x50
#define SM_FLASH_CMD_READ_STATUS 0x70
/* CHRP NVRAM header */
struct chrp_header {
u8 signature;
u8 cksum;
u16 len;
char name[12];
u8 data[0];
};
struct core99_header {
struct chrp_header hdr;
u32 adler;
u32 generation;
u32 reserved[2];
};
/*
* Read and write the non-volatile RAM on PowerMacs and CHRP machines.
*/
static int nvram_naddrs;
static volatile unsigned char __iomem *nvram_data;
static int is_core_99;
static int core99_bank = 0;
static int nvram_partitions[3];
// XXX Turn that into a sem
static DEFINE_RAW_SPINLOCK(nv_lock);
static int (*core99_write_bank)(int bank, u8* datas);
static int (*core99_erase_bank)(int bank);
static char *nvram_image;
static unsigned char core99_nvram_read_byte(int addr)
{
if (nvram_image == NULL)
return 0xff;
return nvram_image[addr];
}
static void core99_nvram_write_byte(int addr, unsigned char val)
{
if (nvram_image == NULL)
return;
nvram_image[addr] = val;
}
static ssize_t core99_nvram_read(char *buf, size_t count, loff_t *index)
{
int i;
if (nvram_image == NULL)
return -ENODEV;
if (*index > NVRAM_SIZE)
return 0;
i = *index;
if (i + count > NVRAM_SIZE)
count = NVRAM_SIZE - i;
memcpy(buf, &nvram_image[i], count);
*index = i + count;
return count;
}
static ssize_t core99_nvram_write(char *buf, size_t count, loff_t *index)
{
int i;
if (nvram_image == NULL)
return -ENODEV;
if (*index > NVRAM_SIZE)
return 0;
i = *index;
if (i + count > NVRAM_SIZE)
count = NVRAM_SIZE - i;
memcpy(&nvram_image[i], buf, count);
*index = i + count;
return count;
}
static ssize_t core99_nvram_size(void)
{
if (nvram_image == NULL)
return -ENODEV;
return NVRAM_SIZE;
}
#ifdef CONFIG_PPC32
static volatile unsigned char __iomem *nvram_addr;
static int nvram_mult;
static unsigned char direct_nvram_read_byte(int addr)
{
return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]);
}
static void direct_nvram_write_byte(int addr, unsigned char val)
{
out_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult], val);
}
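/* Indirect access: the upper address bits are latched into a separate
* address register and the low 5 bits select a byte in the data window,
* which is laid out with a 16-byte stride. */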
static unsigned char indirect_nvram_read_byte(int addr)
{
unsigned char val;
unsigned long flags;
raw_spin_lock_irqsave(&nv_lock, flags);
out_8(nvram_addr, addr >> 5);
val = in_8(&nvram_data[(addr & 0x1f) << 4]);
raw_spin_unlock_irqrestore(&nv_lock, flags);
return val;
}
static void indirect_nvram_write_byte(int addr, unsigned char val)
{
unsigned long flags;
raw_spin_lock_irqsave(&nv_lock, flags);
out_8(nvram_addr, addr >> 5);
out_8(&nvram_data[(addr & 0x1f) << 4], val);
raw_spin_unlock_irqrestore(&nv_lock, flags);
}
#ifdef CONFIG_ADB_PMU
static void pmu_nvram_complete(struct adb_request *req)
{
if (req->arg)
complete((struct completion *)req->arg);
}
static unsigned char pmu_nvram_read_byte(int addr)
{
struct adb_request req;
DECLARE_COMPLETION_ONSTACK(req_complete);
req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
if (pmu_request(&req, pmu_nvram_complete, 3, PMU_READ_NVRAM,
(addr >> 8) & 0xff, addr & 0xff))
return 0xff;
if (system_state == SYSTEM_RUNNING)
wait_for_completion(&req_complete);
while (!req.complete)
pmu_poll();
return req.reply[0];
}
static void pmu_nvram_write_byte(int addr, unsigned char val)
{
struct adb_request req;
DECLARE_COMPLETION_ONSTACK(req_complete);
req.arg = system_state == SYSTEM_RUNNING ? &req_complete : NULL;
if (pmu_request(&req, pmu_nvram_complete, 4, PMU_WRITE_NVRAM,
(addr >> 8) & 0xff, addr & 0xff, val))
return;
if (system_state == SYSTEM_RUNNING)
wait_for_completion(&req_complete);
while (!req.complete)
pmu_poll();
}
#endif /* CONFIG_ADB_PMU */
#endif /* CONFIG_PPC32 */
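/* CHRP partition header checksum: sum the signature, length and name
* bytes, folding the carries back in until the result fits in 8 bits. */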
static u8 chrp_checksum(struct chrp_header* hdr)
{
u8 *ptr;
u16 sum = hdr->signature;
for (ptr = (u8 *)&hdr->len; ptr < hdr->data; ptr++)
sum += *ptr;
while (sum > 0xFF)
sum = (sum & 0xFF) + (sum>>8);
return sum;
}
static u32 core99_calc_adler(u8 *buffer)
{
int cnt;
u32 low, high;
buffer += CORE99_ADLER_START;
low = 1;
high = 0;
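/* Adler-32 style checksum over the image past the header. Note that the
* periodic reduction below is applied to 'high' twice where a textbook
* Adler-32 would also reduce 'low'; since the writer and the checker both
* use this routine, the stored value stays self-consistent. */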
for (cnt=0; cnt<(NVRAM_SIZE-CORE99_ADLER_START); cnt++) {
if ((cnt % 5000) == 0) {
high %= 65521UL;
high %= 65521UL;
}
low += buffer[cnt];
high += low;
}
low %= 65521UL;
high %= 65521UL;
return (high << 16) | low;
}
static u32 core99_check(u8* datas)
{
struct core99_header* hdr99 = (struct core99_header*)datas;
if (hdr99->hdr.signature != CORE99_SIGNATURE) {
DBG("Invalid signature\n");
return 0;
}
if (hdr99->hdr.cksum != chrp_checksum(&hdr99->hdr)) {
DBG("Invalid checksum\n");
return 0;
}
if (hdr99->adler != core99_calc_adler(datas)) {
DBG("Invalid adler\n");
return 0;
}
return hdr99->generation;
}
static int sm_erase_bank(int bank)
{
int stat;
unsigned long timeout;
u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: Sharp/Micron Erasing bank %d...\n", bank);
out_8(base, SM_FLASH_CMD_ERASE_SETUP);
out_8(base, SM_FLASH_CMD_ERASE_CONFIRM);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: Sharp/Micron flash erase timeout !\n");
break;
}
out_8(base, SM_FLASH_CMD_READ_STATUS);
stat = in_8(base);
} while (!(stat & SM_FLASH_STATUS_DONE));
out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
out_8(base, SM_FLASH_CMD_RESET);
if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
return -ENXIO;
}
return 0;
}
static int sm_write_bank(int bank, u8* datas)
{
int i, stat = 0;
unsigned long timeout;
u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: Sharp/Micron Writing bank %d...\n", bank);
for (i=0; i<NVRAM_SIZE; i++) {
out_8(base+i, SM_FLASH_CMD_WRITE_SETUP);
udelay(1);
out_8(base+i, datas[i]);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: Sharp/Micron flash write timeout !\n");
break;
}
out_8(base, SM_FLASH_CMD_READ_STATUS);
stat = in_8(base);
} while (!(stat & SM_FLASH_STATUS_DONE));
if (!(stat & SM_FLASH_STATUS_DONE))
break;
}
out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
out_8(base, SM_FLASH_CMD_RESET);
if (memcmp(base, datas, NVRAM_SIZE)) {
printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
return -ENXIO;
}
return 0;
}
static int amd_erase_bank(int bank)
{
int stat = 0;
unsigned long timeout;
u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: AMD Erasing bank %d...\n", bank);
/* Unlock 1 */
out_8(base+0x555, 0xaa);
udelay(1);
/* Unlock 2 */
out_8(base+0x2aa, 0x55);
udelay(1);
/* Sector-Erase */
out_8(base+0x555, 0x80);
udelay(1);
out_8(base+0x555, 0xaa);
udelay(1);
out_8(base+0x2aa, 0x55);
udelay(1);
out_8(base, 0x30);
udelay(1);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: AMD flash erase timeout !\n");
break;
}
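/* DQ6 toggle-bit polling: two successive reads return different data
* until the embedded erase algorithm has finished. */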
stat = in_8(base) ^ in_8(base);
} while (stat != 0);
/* Reset */
out_8(base, 0xf0);
udelay(1);
if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
printk(KERN_ERR "nvram: AMD flash erase failed !\n");
return -ENXIO;
}
return 0;
}
static int amd_write_bank(int bank, u8* datas)
{
int i, stat = 0;
unsigned long timeout;
u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
DBG("nvram: AMD Writing bank %d...\n", bank);
for (i=0; i<NVRAM_SIZE; i++) {
/* Unlock 1 */
out_8(base+0x555, 0xaa);
udelay(1);
/* Unlock 2 */
out_8(base+0x2aa, 0x55);
udelay(1);
/* Write single word */
out_8(base+0x555, 0xa0);
udelay(1);
out_8(base+i, datas[i]);
timeout = 0;
do {
if (++timeout > 1000000) {
printk(KERN_ERR "nvram: AMD flash write timeout !\n");
break;
}
stat = in_8(base) ^ in_8(base);
} while (stat != 0);
if (stat != 0)
break;
}
/* Reset */
out_8(base, 0xf0);
udelay(1);
if (memcmp(base, datas, NVRAM_SIZE)) {
printk(KERN_ERR "nvram: AMD flash write failed !\n");
return -ENXIO;
}
return 0;
}
static void __init lookup_partitions(void)
{
u8 buffer[17];
int i, offset;
struct chrp_header* hdr;
if (pmac_newworld) {
nvram_partitions[pmac_nvram_OF] = -1;
nvram_partitions[pmac_nvram_XPRAM] = -1;
nvram_partitions[pmac_nvram_NR] = -1;
hdr = (struct chrp_header *)buffer;
offset = 0;
buffer[16] = 0;
do {
for (i=0;i<16;i++)
buffer[i] = ppc_md.nvram_read_val(offset+i);
if (!strcmp(hdr->name, "common"))
nvram_partitions[pmac_nvram_OF] = offset + 0x10;
if (!strcmp(hdr->name, "APL,MacOS75")) {
nvram_partitions[pmac_nvram_XPRAM] = offset + 0x10;
nvram_partitions[pmac_nvram_NR] = offset + 0x110;
}
offset += (hdr->len * 0x10);
} while(offset < NVRAM_SIZE);
} else {
nvram_partitions[pmac_nvram_OF] = 0x1800;
nvram_partitions[pmac_nvram_XPRAM] = 0x1300;
nvram_partitions[pmac_nvram_NR] = 0x1400;
}
DBG("nvram: OF partition at 0x%x\n", nvram_partitions[pmac_nvram_OF]);
DBG("nvram: XP partition at 0x%x\n", nvram_partitions[pmac_nvram_XPRAM]);
DBG("nvram: NR partition at 0x%x\n", nvram_partitions[pmac_nvram_NR]);
}
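/* Push the in-memory NVRAM image out to flash: bump the generation count,
* recompute the CHRP and Adler checksums, then erase and rewrite the other
* bank so the previous generation survives a failed write. */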
static void core99_nvram_sync(void)
{
struct core99_header* hdr99;
unsigned long flags;
if (!is_core_99 || !nvram_data || !nvram_image)
return;
raw_spin_lock_irqsave(&nv_lock, flags);
if (!memcmp(nvram_image, (u8*)nvram_data + core99_bank*NVRAM_SIZE,
NVRAM_SIZE))
goto bail;
DBG("Updating nvram...\n");
hdr99 = (struct core99_header*)nvram_image;
hdr99->generation++;
hdr99->hdr.signature = CORE99_SIGNATURE;
hdr99->hdr.cksum = chrp_checksum(&hdr99->hdr);
hdr99->adler = core99_calc_adler(nvram_image);
core99_bank = core99_bank ? 0 : 1;
if (core99_erase_bank)
if (core99_erase_bank(core99_bank)) {
printk("nvram: Error erasing bank %d\n", core99_bank);
goto bail;
}
if (core99_write_bank)
if (core99_write_bank(core99_bank, nvram_image))
printk("nvram: Error writing bank %d\n", core99_bank);
bail:
raw_spin_unlock_irqrestore(&nv_lock, flags);
#ifdef DEBUG
mdelay(2000);
#endif
}
static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
{
int i;
u32 gen_bank0, gen_bank1;
if (nvram_naddrs < 1) {
printk(KERN_ERR "nvram: no address\n");
return -EINVAL;
}
nvram_image = alloc_bootmem(NVRAM_SIZE);
if (nvram_image == NULL) {
printk(KERN_ERR "nvram: can't allocate ram image\n");
return -ENOMEM;
}
nvram_data = ioremap(addr, NVRAM_SIZE*2);
nvram_naddrs = 1; /* Make sure we get the correct case */
DBG("nvram: Checking bank 0...\n");
gen_bank0 = core99_check((u8 *)nvram_data);
gen_bank1 = core99_check((u8 *)nvram_data + NVRAM_SIZE);
core99_bank = (gen_bank0 < gen_bank1) ? 1 : 0;
DBG("nvram: gen0=%d, gen1=%d\n", gen_bank0, gen_bank1);
DBG("nvram: Active bank is: %d\n", core99_bank);
for (i=0; i<NVRAM_SIZE; i++)
nvram_image[i] = nvram_data[i + core99_bank*NVRAM_SIZE];
ppc_md.nvram_read_val = core99_nvram_read_byte;
ppc_md.nvram_write_val = core99_nvram_write_byte;
ppc_md.nvram_read = core99_nvram_read;
ppc_md.nvram_write = core99_nvram_write;
ppc_md.nvram_size = core99_nvram_size;
ppc_md.nvram_sync = core99_nvram_sync;
ppc_md.machine_shutdown = core99_nvram_sync;
/*
* Maybe we could be smarter here, though making an exclusive list
* of known flash chips is a bit nasty as older OF didn't provide us
* with a useful "compatible" entry. A solution would be to really
* identify the chip using flash id commands and base ourselves on
* a list of known chip IDs
*/
if (of_device_is_compatible(dp, "amd-0137")) {
core99_erase_bank = amd_erase_bank;
core99_write_bank = amd_write_bank;
} else {
core99_erase_bank = sm_erase_bank;
core99_write_bank = sm_write_bank;
}
return 0;
}
int __init pmac_nvram_init(void)
{
struct device_node *dp;
struct resource r1, r2;
unsigned int s1 = 0, s2 = 0;
int err = 0;
nvram_naddrs = 0;
dp = of_find_node_by_name(NULL, "nvram");
if (dp == NULL) {
printk(KERN_ERR "Can't find NVRAM device\n");
return -ENODEV;
}
/* Try to obtain an address */
if (of_address_to_resource(dp, 0, &r1) == 0) {
nvram_naddrs = 1;
s1 = resource_size(&r1);
if (of_address_to_resource(dp, 1, &r2) == 0) {
nvram_naddrs = 2;
s2 = resource_size(&r2);
}
}
is_core_99 = of_device_is_compatible(dp, "nvram,flash");
if (is_core_99) {
err = core99_nvram_setup(dp, r1.start);
goto bail;
}
#ifdef CONFIG_PPC32
if (machine_is(chrp) && nvram_naddrs == 1) {
nvram_data = ioremap(r1.start, s1);
nvram_mult = 1;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
} else if (nvram_naddrs == 1) {
nvram_data = ioremap(r1.start, s1);
nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE;
ppc_md.nvram_read_val = direct_nvram_read_byte;
ppc_md.nvram_write_val = direct_nvram_write_byte;
} else if (nvram_naddrs == 2) {
nvram_addr = ioremap(r1.start, s1);
nvram_data = ioremap(r2.start, s2);
ppc_md.nvram_read_val = indirect_nvram_read_byte;
ppc_md.nvram_write_val = indirect_nvram_write_byte;
} else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) {
#ifdef CONFIG_ADB_PMU
nvram_naddrs = -1;
ppc_md.nvram_read_val = pmu_nvram_read_byte;
ppc_md.nvram_write_val = pmu_nvram_write_byte;
#endif /* CONFIG_ADB_PMU */
} else {
printk(KERN_ERR "Incompatible type of NVRAM\n");
err = -ENXIO;
}
#endif /* CONFIG_PPC32 */
bail:
of_node_put(dp);
if (err == 0)
lookup_partitions();
return err;
}
int pmac_get_partition(int partition)
{
return nvram_partitions[partition];
}
u8 pmac_xpram_read(int xpaddr)
{
int offset = pmac_get_partition(pmac_nvram_XPRAM);
if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
return 0xff;
return ppc_md.nvram_read_val(xpaddr + offset);
}
void pmac_xpram_write(int xpaddr, u8 data)
{
int offset = pmac_get_partition(pmac_nvram_XPRAM);
if (offset < 0 || xpaddr < 0 || xpaddr > 0x100)
return;
ppc_md.nvram_write_val(xpaddr + offset, data);
}
EXPORT_SYMBOL(pmac_get_partition);
EXPORT_SYMBOL(pmac_xpram_read);
EXPORT_SYMBOL(pmac_xpram_write);

File diff suppressed because it is too large.


@@ -0,0 +1,410 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/of_irq.h>
#include <asm/pmac_feature.h>
#include <asm/pmac_pfunc.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
static irqreturn_t macio_gpio_irq(int irq, void *data)
{
pmf_do_irq(data);
return IRQ_HANDLED;
}
static int macio_do_gpio_irq_enable(struct pmf_function *func)
{
unsigned int irq = irq_of_parse_and_map(func->node, 0);
if (irq == NO_IRQ)
return -EINVAL;
return request_irq(irq, macio_gpio_irq, 0, func->node->name, func);
}
static int macio_do_gpio_irq_disable(struct pmf_function *func)
{
unsigned int irq = irq_of_parse_and_map(func->node, 0);
if (irq == NO_IRQ)
return -EINVAL;
free_irq(irq, func);
return 0;
}
static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask)
{
u8 __iomem *addr = (u8 __iomem *)func->driver_data;
unsigned long flags;
u8 tmp;
/* Check polarity */
if (args && args->count && !args->u[0].v)
value = ~value;
/* Toggle the GPIO */
raw_spin_lock_irqsave(&feature_lock, flags);
tmp = readb(addr);
tmp = (tmp & ~mask) | (value & mask);
DBG("Do write 0x%02x to GPIO %s (%p)\n",
tmp, func->node->full_name, addr);
writeb(tmp, addr);
raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static int macio_do_gpio_read(PMF_STD_ARGS, u8 mask, int rshift, u8 xor)
{
u8 __iomem *addr = (u8 __iomem *)func->driver_data;
u32 value;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
value = readb(addr);
*args->u[0].p = ((value & mask) >> rshift) ^ xor;
return 0;
}
static int macio_do_delay(PMF_STD_ARGS, u32 duration)
{
/* assume we can sleep ! */
msleep((duration + 999) / 1000);
return 0;
}
static struct pmf_handlers macio_gpio_handlers = {
.irq_enable = macio_do_gpio_irq_enable,
.irq_disable = macio_do_gpio_irq_disable,
.write_gpio = macio_do_gpio_write,
.read_gpio = macio_do_gpio_read,
.delay = macio_do_delay,
};
static void macio_gpio_init_one(struct macio_chip *macio)
{
struct device_node *gparent, *gp;
/*
* Find the "gpio" parent node
*/
for (gparent = NULL;
(gparent = of_get_next_child(macio->of_node, gparent)) != NULL;)
if (strcmp(gparent->name, "gpio") == 0)
break;
if (gparent == NULL)
return;
DBG("Installing GPIO functions for macio %s\n",
macio->of_node->full_name);
/*
* Ok, got one, we don't need anything special to track them down, so
* we just create them all
*/
for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) {
const u32 *reg = of_get_property(gp, "reg", NULL);
unsigned long offset;
if (reg == NULL)
continue;
offset = *reg;
/* Deal with old style device-tree. We can safely hard code the
* offset for now too even if it's a bit gross ...
*/
if (offset < 0x50)
offset += 0x50;
offset += (unsigned long)macio->base;
pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset);
}
DBG("Calling initial GPIO functions for macio %s\n",
macio->of_node->full_name);
/* And now we run all the init ones */
for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;)
pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
/* Note: We do not at this point implement the "at sleep" or "at wake"
* functions. I have yet to find any for GPIOs anyway.
*/
}
static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
raw_spin_lock_irqsave(&feature_lock, flags);
MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask));
raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static int macio_do_read_reg32(PMF_STD_ARGS, u32 offset)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*args->u[0].p = MACIO_IN32(offset);
return 0;
}
static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
raw_spin_lock_irqsave(&feature_lock, flags);
MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask));
raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static int macio_do_read_reg8(PMF_STD_ARGS, u32 offset)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*((u8 *)(args->u[0].p)) = MACIO_IN8(offset);
return 0;
}
static int macio_do_read_reg32_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
u32 shift, u32 xor)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor;
return 0;
}
static int macio_do_read_reg8_msrx(PMF_STD_ARGS, u32 offset, u32 mask,
u32 shift, u32 xor)
{
struct macio_chip *macio = func->driver_data;
/* Check if we have room for reply */
if (args == NULL || args->count == 0 || args->u[0].p == NULL)
return -EINVAL;
*((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor;
return 0;
}
static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift,
u32 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
u32 tmp, val;
/* Check args */
if (args == NULL || args->count == 0)
return -EINVAL;
raw_spin_lock_irqsave(&feature_lock, flags);
tmp = MACIO_IN32(offset);
val = args->u[0].v << shift;
tmp = (tmp & ~mask) | (val & mask);
MACIO_OUT32(offset, tmp);
raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift,
u32 mask)
{
struct macio_chip *macio = func->driver_data;
unsigned long flags;
u32 tmp, val;
/* Check args */
if (args == NULL || args->count == 0)
return -EINVAL;
raw_spin_lock_irqsave(&feature_lock, flags);
tmp = MACIO_IN8(offset);
val = args->u[0].v << shift;
tmp = (tmp & ~mask) | (val & mask);
MACIO_OUT8(offset, tmp);
raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static struct pmf_handlers macio_mmio_handlers = {
.write_reg32 = macio_do_write_reg32,
.read_reg32 = macio_do_read_reg32,
.write_reg8 = macio_do_write_reg8,
.read_reg8 = macio_do_read_reg8,
.read_reg32_msrx = macio_do_read_reg32_msrx,
.read_reg8_msrx = macio_do_read_reg8_msrx,
.write_reg32_slm = macio_do_write_reg32_slm,
.write_reg8_slm = macio_do_write_reg8_slm,
.delay = macio_do_delay,
};
static void macio_mmio_init_one(struct macio_chip *macio)
{
DBG("Installing MMIO functions for macio %s\n",
macio->of_node->full_name);
pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio);
}
static struct device_node *unin_hwclock;
static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask)
{
unsigned long flags;
raw_spin_lock_irqsave(&feature_lock, flags);
/* This is fairly bogus in darwin, but it should work for our needs
* implemented that way:
*/
UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask));
raw_spin_unlock_irqrestore(&feature_lock, flags);
return 0;
}
static struct pmf_handlers unin_mmio_handlers = {
.write_reg32 = unin_do_write_reg32,
.delay = macio_do_delay,
};
static void uninorth_install_pfunc(void)
{
struct device_node *np;
DBG("Installing functions for UniN %s\n",
uninorth_node->full_name);
/*
* Install handlers for the bridge itself
*/
pmf_register_driver(uninorth_node, &unin_mmio_handlers, NULL);
pmf_do_functions(uninorth_node, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
/*
* Install handlers for the hwclock child if any
*/
for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
if (strcmp(np->name, "hw-clock") == 0) {
unin_hwclock = np;
break;
}
if (unin_hwclock) {
DBG("Installing functions for UniN clock %s\n",
unin_hwclock->full_name);
pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL);
pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT,
NULL);
}
}
/* We export this as the SMP code might init us early */
int __init pmac_pfunc_base_install(void)
{
static int pfbase_inited;
int i;
if (pfbase_inited)
return 0;
pfbase_inited = 1;
if (!machine_is(powermac))
return 0;
DBG("Installing base platform functions...\n");
/*
* Locate mac-io chips and install handlers
*/
for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
if (macio_chips[i].of_node) {
macio_mmio_init_one(&macio_chips[i]);
macio_gpio_init_one(&macio_chips[i]);
}
}
/*
* Install handlers for northbridge and direct mapped hwclock
* if any. We do not implement the config space access callback
* which is only ever used for functions that we do not call in
* the current driver (enabling/disabling cells in U2, mostly used
* to restore the PCI settings, we do that differently)
*/
if (uninorth_node && uninorth_base)
uninorth_install_pfunc();
DBG("All base functions installed\n");
return 0;
}
machine_arch_initcall(powermac, pmac_pfunc_base_install);
#ifdef CONFIG_PM
/* Those can be called by pmac_feature. Ultimately, I should use a sysdev
* or a device, but for now, that's good enough until I sort out some
* ordering issues. Also, we do not bother with GPIOs, as so far I have yet
* to see a case where a GPIO function has the on-suspend or on-resume bit
*/
void pmac_pfunc_base_suspend(void)
{
int i;
for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
if (macio_chips[i].of_node)
pmf_do_functions(macio_chips[i].of_node, NULL, 0,
PMF_FLAGS_ON_SLEEP, NULL);
}
if (uninorth_node)
pmf_do_functions(uninorth_node, NULL, 0,
PMF_FLAGS_ON_SLEEP, NULL);
if (unin_hwclock)
pmf_do_functions(unin_hwclock, NULL, 0,
PMF_FLAGS_ON_SLEEP, NULL);
}
void pmac_pfunc_base_resume(void)
{
int i;
if (unin_hwclock)
pmf_do_functions(unin_hwclock, NULL, 0,
PMF_FLAGS_ON_WAKE, NULL);
if (uninorth_node)
pmf_do_functions(uninorth_node, NULL, 0,
PMF_FLAGS_ON_WAKE, NULL);
for (i = 0 ; i < MAX_MACIO_CHIPS; i++) {
if (macio_chips[i].of_node)
pmf_do_functions(macio_chips[i].of_node, NULL, 0,
PMF_FLAGS_ON_WAKE, NULL);
}
}
#endif /* CONFIG_PM */

File diff suppressed because it is too large.


@@ -0,0 +1,656 @@
/*
* Support for the interrupt controllers found on Power Macintosh,
* currently Apple's "Grand Central" interrupt controller in all
* its incarnations. OpenPIC support used on newer machines is
* in a separate file
*
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
* Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
* IBM, Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>
#include <asm/xmon.h>
#include "pmac.h"
#ifdef CONFIG_PPC32
struct pmac_irq_hw {
unsigned int event;
unsigned int enable;
unsigned int ack;
unsigned int level;
};
/* Workaround flags for 32bit powermac machines */
unsigned int of_irq_workarounds;
struct device_node *of_irq_dflt_pic;
/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];
static int max_irqs;
static int max_real_irqs;
static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
/* The max irq number this driver deals with is 128; see max_irqs */
static DECLARE_BITMAP(ppc_lost_interrupts, 128);
static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;
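/* Re-trigger a lost interrupt: remember it in ppc_lost_interrupts (routing
* it through the cascade if it lives on the secondary controller) and force
* an almost-immediate decrementer exception so it gets replayed. */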
static void __pmac_retrigger(unsigned int irq_nr)
{
if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
__set_bit(irq_nr, ppc_lost_interrupts);
irq_nr = pmac_irq_cascade;
mb();
}
if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
atomic_inc(&ppc_n_lost_interrupts);
set_dec(1);
}
}
static void pmac_mask_and_ack_irq(struct irq_data *d)
{
unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
out_le32(&pmac_irq_hw[i]->ack, bit);
do {
/* make sure ack gets to controller before we enable
interrupts */
mb();
} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
!= (ppc_cached_irq_mask[i] & bit));
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void pmac_ack_irq(struct irq_data *d)
{
unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
out_le32(&pmac_irq_hw[i]->ack, bit);
(void)in_le32(&pmac_irq_hw[i]->ack);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
if ((unsigned)irq_nr >= max_irqs)
return;
/* enable unmasked interrupts */
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
do {
/* make sure mask gets to controller before we
return to user */
mb();
} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
!= (ppc_cached_irq_mask[i] & bit));
/*
* Unfortunately, setting the bit in the enable register
* when the device interrupt is already on *doesn't* set
* the bit in the flag register or request another interrupt.
*/
if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
__pmac_retrigger(irq_nr);
}
/* When an irq gets requested for the first client, if it's an
* edge interrupt, we clear any previous one on the controller
*/
static unsigned int pmac_startup_irq(struct irq_data *d)
{
unsigned long flags;
unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
if (!irqd_is_level_type(d))
out_le32(&pmac_irq_hw[i]->ack, bit);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 0;
}
static void pmac_mask_irq(struct irq_data *d)
{
unsigned long flags;
unsigned int src = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 1);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void pmac_unmask_irq(struct irq_data *d)
{
unsigned long flags;
unsigned int src = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static int pmac_retrigger(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__pmac_retrigger(irqd_to_hwirq(d));
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 1;
}
static struct irq_chip pmac_pic = {
.name = "PMAC-PIC",
.irq_startup = pmac_startup_irq,
.irq_mask = pmac_mask_irq,
.irq_ack = pmac_ack_irq,
.irq_mask_ack = pmac_mask_and_ack_irq,
.irq_unmask = pmac_unmask_irq,
.irq_retrigger = pmac_retrigger,
};
static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
unsigned long flags;
int irq, bits;
int rc = IRQ_NONE;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
bits |= in_le32(&pmac_irq_hw[i]->level);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
irq += __ilog2(bits);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
generic_handle_irq(irq);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
rc = IRQ_HANDLED;
}
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return rc;
}
static unsigned int pmac_pic_get_irq(void)
{
int irq;
unsigned long bits = 0;
unsigned long flags;
#ifdef CONFIG_PPC_PMAC32_PSURGE
/* IPIs are a hack on the powersurge -- Cort */
if (smp_processor_id() != 0) {
return psurge_secondary_virq;
}
#endif /* CONFIG_PPC_PMAC32_PSURGE */
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
bits |= in_le32(&pmac_irq_hw[i]->level);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
irq += __ilog2(bits);
break;
}
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
if (unlikely(irq < 0))
return NO_IRQ;
return irq_linear_revmap(pmac_pic_host, irq);
}
#ifdef CONFIG_XMON
static struct irqaction xmon_action = {
.handler = xmon_irq,
.flags = 0,
.name = "NMI - XMON"
};
#endif
static struct irqaction gatwick_cascade_action = {
.handler = gatwick_action,
.name = "cascade",
};
static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
{
/* We match all, we don't always have a node anyway */
return 1;
}
static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
if (hw >= max_irqs)
return -EINVAL;
/* Mark level interrupts, set delayed disable for edge ones and set
* handlers
*/
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops pmac_pic_host_ops = {
.match = pmac_pic_host_match,
.map = pmac_pic_host_map,
.xlate = irq_domain_xlate_onecell,
};
static void __init pmac_pic_probe_oldstyle(void)
{
int i;
struct device_node *master = NULL;
struct device_node *slave = NULL;
u8 __iomem *addr;
struct resource r;
/* Set our get_irq function */
ppc_md.get_irq = pmac_pic_get_irq;
/*
* Find the interrupt controller type & node
*/
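/* "gc" is Grand Central (32 irqs), "ohare" is OHare (32 irqs, 64 with a
 * second cascaded ohare) and "mac-io" is typically Heathrow/Paddington
 * (64 irqs, 128 when a cascaded Gatwick is present).
 */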
if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
max_irqs = max_real_irqs = 32;
} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
max_irqs = max_real_irqs = 32;
/* We might have a second cascaded ohare */
slave = of_find_node_by_name(NULL, "pci106b,7");
if (slave)
max_irqs = 64;
} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
max_irqs = max_real_irqs = 64;
/* We might have a second cascaded heathrow */
slave = of_find_node_by_name(master, "mac-io");
/* Check ordering of master & slave */
if (of_device_is_compatible(master, "gatwick")) {
struct device_node *tmp;
BUG_ON(slave == NULL);
tmp = master;
master = slave;
slave = tmp;
}
/* We found a slave */
if (slave)
max_irqs = 128;
}
BUG_ON(master == NULL);
/*
* Allocate an irq host
*/
pmac_pic_host = irq_domain_add_linear(master, max_irqs,
&pmac_pic_host_ops, NULL);
BUG_ON(pmac_pic_host == NULL);
irq_set_default_host(pmac_pic_host);
/* Get addresses of first controller if we have a node for it */
BUG_ON(of_address_to_resource(master, 0, &r));
/* Map interrupts of primary controller */
addr = (u8 __iomem *) ioremap(r.start, 0x40);
i = 0;
pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
(addr + 0x20);
if (max_real_irqs > 32)
pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
(addr + 0x10);
of_node_put(master);
printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
master->full_name, max_real_irqs);
/* Map interrupts of cascaded controller */
if (slave && !of_address_to_resource(slave, 0, &r)) {
addr = (u8 __iomem *)ioremap(r.start, 0x40);
pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
(addr + 0x20);
if (max_irqs > 64)
pmac_irq_hw[i++] =
(volatile struct pmac_irq_hw __iomem *)
(addr + 0x10);
pmac_irq_cascade = irq_of_parse_and_map(slave, 0);
printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
" cascade: %d\n", slave->full_name,
max_irqs - max_real_irqs, pmac_irq_cascade);
}
of_node_put(slave);
/* Disable all interrupts in all controllers */
for (i = 0; i * 32 < max_irqs; ++i)
out_le32(&pmac_irq_hw[i]->enable, 0);
/* Hookup cascade irq */
if (slave && pmac_irq_cascade != NO_IRQ)
setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
#endif
}
int of_irq_parse_oldworld(struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
const u32 *ints = NULL;
int intlen;
/*
* Old machines just have a list of interrupt numbers
* and no interrupt-controller nodes. We also have dodgy
 * cases where the AAPL,interrupts property is completely
* missing behind pci-pci bridges and we have to get it
* from the parent (the bridge itself, as apple just wired
* everything together on these)
*/
while (device) {
ints = of_get_property(device, "AAPL,interrupts", &intlen);
if (ints != NULL)
break;
device = device->parent;
if (device && strcmp(device->type, "pci") != 0)
break;
}
if (ints == NULL)
return -EINVAL;
intlen /= sizeof(u32);
if (index >= intlen)
return -EINVAL;
out_irq->np = NULL;
out_irq->args[0] = ints[index];
out_irq->args_count = 1;
return 0;
}
#endif /* CONFIG_PPC32 */
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
struct device_node* pswitch;
int nmi_irq;
pswitch = of_find_node_by_name(NULL, "programmer-switch");
if (pswitch) {
nmi_irq = irq_of_parse_and_map(pswitch, 0);
if (nmi_irq != NO_IRQ) {
mpic_irq_set_priority(nmi_irq, 9);
setup_irq(nmi_irq, &xmon_action);
}
of_node_put(pswitch);
}
#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}
static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
int master)
{
const char *name = master ? " MPIC 1 " : " MPIC 2 ";
struct mpic *mpic;
unsigned int flags = master ? 0 : MPIC_SECONDARY;
pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
if (of_get_property(np, "big-endian", NULL))
flags |= MPIC_BIG_ENDIAN;
/* Primary Big Endian means HT interrupts. This is quite dodgy
* but works until I find a better way
*/
if (master && (flags & MPIC_BIG_ENDIAN))
flags |= MPIC_U3_HT_IRQS;
mpic = mpic_alloc(np, 0, flags, 0, 0, name);
if (mpic == NULL)
return NULL;
mpic_init(mpic);
return mpic;
}
static int __init pmac_pic_probe_mpic(void)
{
struct mpic *mpic1, *mpic2;
struct device_node *np, *master = NULL, *slave = NULL;
/* We can have up to 2 MPICs cascaded */
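/* The master MPIC has no "interrupts" property; a cascaded (slave) MPIC
 * is itself wired to the master and therefore carries one.
 */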
for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
!= NULL;) {
if (master == NULL &&
of_get_property(np, "interrupts", NULL) == NULL)
master = of_node_get(np);
else if (slave == NULL)
slave = of_node_get(np);
if (master && slave)
break;
}
/* Check for bogus setups */
if (master == NULL && slave != NULL) {
master = slave;
slave = NULL;
}
/* Not found, default to good old pmac pic */
if (master == NULL)
return -ENODEV;
/* Set master handler */
ppc_md.get_irq = mpic_get_irq;
/* Setup master */
mpic1 = pmac_setup_one_mpic(master, 1);
BUG_ON(mpic1 == NULL);
/* Install NMI if any */
pmac_pic_setup_mpic_nmi(mpic1);
of_node_put(master);
/* Set up a cascaded controller, if present */
if (slave) {
mpic2 = pmac_setup_one_mpic(slave, 0);
if (mpic2 == NULL)
printk(KERN_ERR "Failed to setup slave MPIC\n");
of_node_put(slave);
}
return 0;
}
void __init pmac_pic_init(void)
{
/* We configure the OF parsing based on our oldworld vs. newworld
* platform type and whether we were booted by BootX.
*/
#ifdef CONFIG_PPC32
if (!pmac_newworld)
of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC;
if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
of_irq_workarounds |= OF_IMAP_NO_PHANDLE;
/* If we don't have phandles on a newworld, then try to locate a
* default interrupt controller (happens when booting with BootX).
 * We take the first match here and hope that this only ever happens on
 * machines with one controller.
*/
if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) {
struct device_node *np;
for_each_node_with_property(np, "interrupt-controller") {
/* Skip /chosen/interrupt-controller */
if (strcmp(np->name, "chosen") == 0)
continue;
/* It seems like at least one person wants
* to use BootX on a machine with an AppleKiwi
* controller which happens to pretend to be an
* interrupt controller too. */
if (strcmp(np->name, "AppleKiwi") == 0)
continue;
/* I think we found one ! */
of_irq_dflt_pic = np;
break;
}
}
#endif /* CONFIG_PPC32 */
/* We first try to detect Apple's new Core99 chipset, since mac-io
* is quite different on those machines and contains an IBM MPIC2.
*/
if (pmac_pic_probe_mpic() == 0)
return;
#ifdef CONFIG_PPC32
pmac_pic_probe_oldstyle();
#endif
}
#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/*
* These procedures are used in implementing sleep on the powerbooks.
* sleep_save_intrs() saves the states of all interrupt enables
* and disables all interrupts except for the nominated one.
* sleep_restore_intrs() restores the states of all interrupt enables.
*/
unsigned long sleep_save_mask[2];
/* This used to be passed by the PMU driver but that link got
* broken with the new driver model. We use this tweak for now...
* We really want to do things differently though...
*/
static int pmacpic_find_viaint(void)
{
int viaint = -1;
#ifdef CONFIG_ADB_PMU
struct device_node *np;
if (pmu_get_model() != PMU_OHARE_BASED)
goto not_found;
np = of_find_node_by_name(NULL, "via-pmu");
if (np == NULL)
goto not_found;
viaint = irq_of_parse_and_map(np, 0);
not_found:
#endif /* CONFIG_ADB_PMU */
return viaint;
}
static int pmacpic_suspend(void)
{
int viaint = pmacpic_find_viaint();
sleep_save_mask[0] = ppc_cached_irq_mask[0];
sleep_save_mask[1] = ppc_cached_irq_mask[1];
ppc_cached_irq_mask[0] = 0;
ppc_cached_irq_mask[1] = 0;
if (viaint > 0)
set_bit(viaint, ppc_cached_irq_mask);
out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
if (max_real_irqs > 32)
out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
(void)in_le32(&pmac_irq_hw[0]->event);
/* make sure mask gets to controller before we return to caller */
mb();
(void)in_le32(&pmac_irq_hw[0]->enable);
return 0;
}
static void pmacpic_resume(void)
{
int i;
out_le32(&pmac_irq_hw[0]->enable, 0);
if (max_real_irqs > 32)
out_le32(&pmac_irq_hw[1]->enable, 0);
mb();
for (i = 0; i < max_real_irqs; ++i)
if (test_bit(i, sleep_save_mask))
pmac_unmask_irq(irq_get_irq_data(i));
}
static struct syscore_ops pmacpic_syscore_ops = {
.suspend = pmacpic_suspend,
.resume = pmacpic_resume,
};
static int __init init_pmacpic_syscore(void)
{
if (pmac_irq_hw[0])
register_syscore_ops(&pmacpic_syscore_ops);
return 0;
}
machine_subsys_initcall(powermac, init_pmacpic_syscore);
#endif /* CONFIG_PM && CONFIG_PPC32 */

View file

@@ -0,0 +1,42 @@
#ifndef __PMAC_H__
#define __PMAC_H__
#include <linux/pci.h>
#include <linux/irq.h>
/*
* Declaration for the various functions exported by the
* pmac_* files. Mostly for use by pmac_setup
*/
struct rtc_time;
extern int pmac_newworld;
extern long pmac_time_init(void);
extern unsigned long pmac_get_boot_time(void);
extern void pmac_get_rtc_time(struct rtc_time *);
extern int pmac_set_rtc_time(struct rtc_time *);
extern void pmac_read_rtc_time(void);
extern void pmac_calibrate_decr(void);
extern void pmac_pci_irq_fixup(struct pci_dev *);
extern void pmac_pci_init(void);
extern void pmac_nvram_update(void);
extern unsigned char pmac_nvram_read_byte(int addr);
extern void pmac_nvram_write_byte(int addr, unsigned char val);
extern int pmac_pci_enable_device_hook(struct pci_dev *dev);
extern void pmac_pcibios_after_init(void);
extern int of_show_percpuinfo(struct seq_file *m, int i);
extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void);
extern int psurge_secondary_virq;
extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
extern void pmac_pic_init(void);
#endif /* __PMAC_H__ */

View file

@@ -0,0 +1,685 @@
/*
* Powermac setup and early boot code plus other random bits.
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
*
* Derived from "arch/alpha/kernel/setup.c"
* Copyright (C) 1995 Linus Torvalds
*
* Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
/*
* bootup setup stuff..
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/initrd.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/bitops.h>
#include <linux/suspend.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/cputable.h>
#include <asm/btext.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/smu.h>
#include <asm/pmc.h>
#include <asm/udbg.h>
#include "pmac.h"
#undef SHOW_GATWICK_IRQS
int ppc_override_l2cr = 0;
int ppc_override_l2cr_value;
int has_l2cache = 0;
int pmac_newworld;
static int current_root_goodness = -1;
extern struct machdep_calls pmac_md;
#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
#ifdef CONFIG_PPC64
int sccdbg;
#endif
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);
#ifdef CONFIG_PMAC_SMU
unsigned long smu_cmdbuf_abs;
EXPORT_SYMBOL(smu_cmdbuf_abs);
#endif
static void pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
const char *pp;
int plen;
int mbmodel;
unsigned int mbflags;
char* mbname;
mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
PMAC_MB_INFO_MODEL, 0);
mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
PMAC_MB_INFO_FLAGS, 0);
if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
(long) &mbname) != 0)
mbname = "Unknown";
/* find motherboard type */
seq_printf(m, "machine\t\t: ");
np = of_find_node_by_path("/");
if (np != NULL) {
pp = of_get_property(np, "model", NULL);
if (pp != NULL)
seq_printf(m, "%s\n", pp);
else
seq_printf(m, "PowerMac\n");
pp = of_get_property(np, "compatible", &plen);
if (pp != NULL) {
seq_printf(m, "motherboard\t:");
while (plen > 0) {
int l = strlen(pp) + 1;
seq_printf(m, " %s", pp);
plen -= l;
pp += l;
}
seq_printf(m, "\n");
}
of_node_put(np);
} else
seq_printf(m, "PowerMac\n");
/* print parsed model */
seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
seq_printf(m, "pmac flags\t: %08x\n", mbflags);
/* find l2 cache info */
np = of_find_node_by_name(NULL, "l2-cache");
if (np == NULL)
np = of_find_node_by_type(NULL, "cache");
if (np != NULL) {
const unsigned int *ic =
of_get_property(np, "i-cache-size", NULL);
const unsigned int *dc =
of_get_property(np, "d-cache-size", NULL);
seq_printf(m, "L2 cache\t:");
has_l2cache = 1;
if (of_get_property(np, "cache-unified", NULL) != 0 && dc) {
seq_printf(m, " %dK unified", *dc / 1024);
} else {
if (ic)
seq_printf(m, " %dK instruction", *ic / 1024);
if (dc)
seq_printf(m, "%s %dK data",
(ic? " +": ""), *dc / 1024);
}
pp = of_get_property(np, "ram-type", NULL);
if (pp)
seq_printf(m, " %s", pp);
seq_printf(m, "\n");
of_node_put(np);
}
/* Indicate newworld/oldworld */
seq_printf(m, "pmac-generation\t: %s\n",
pmac_newworld ? "NewWorld" : "OldWorld");
}
#ifndef CONFIG_ADB_CUDA
int find_via_cuda(void)
{
struct device_node *dn = of_find_node_by_name(NULL, "via-cuda");
if (!dn)
return 0;
of_node_put(dn);
printk("WARNING ! Your machine is CUDA-based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n");
return 0;
}
#endif
#ifndef CONFIG_ADB_PMU
int find_via_pmu(void)
{
struct device_node *dn = of_find_node_by_name(NULL, "via-pmu");
if (!dn)
return 0;
of_node_put(dn);
printk("WARNING ! Your machine is PMU-based but your kernel\n");
printk(" wasn't compiled with CONFIG_ADB_PMU option !\n");
return 0;
}
#endif
#ifndef CONFIG_PMAC_SMU
int smu_init(void)
{
/* should check and warn if SMU is present */
return 0;
}
#endif
#ifdef CONFIG_PPC32
static volatile u32 *sysctrl_regs;
static void __init ohare_init(void)
{
struct device_node *dn;
/* this area has the CPU identification register
and some registers used by smp boards */
sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
/*
* Turn on the L2 cache.
* We assume that we have a PSX memory controller iff
* we have an ohare I/O controller.
*/
dn = of_find_node_by_name(NULL, "ohare");
if (dn) {
of_node_put(dn);
if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
if (sysctrl_regs[4] & 0x10)
sysctrl_regs[4] |= 0x04000020;
else
sysctrl_regs[4] |= 0x04000000;
if(has_l2cache)
printk(KERN_INFO "Level 2 cache enabled\n");
}
}
}
static void __init l2cr_init(void)
{
/* Checks "l2cr-value" property in the registry */
if (cpu_has_feature(CPU_FTR_L2CR)) {
struct device_node *np = of_find_node_by_name(NULL, "cpus");
if (np == 0)
np = of_find_node_by_type(NULL, "cpu");
if (np != 0) {
const unsigned int *l2cr =
of_get_property(np, "l2cr-value", NULL);
if (l2cr != 0) {
ppc_override_l2cr = 1;
ppc_override_l2cr_value = *l2cr;
_set_L2CR(0);
_set_L2CR(ppc_override_l2cr_value);
}
of_node_put(np);
}
}
if (ppc_override_l2cr)
printk(KERN_INFO "L2CR overridden (0x%x), "
"backside cache is %s\n",
ppc_override_l2cr_value,
(ppc_override_l2cr_value & 0x80000000)
? "enabled" : "disabled");
}
#endif
static void __init pmac_setup_arch(void)
{
struct device_node *cpu, *ic;
const int *fp;
unsigned long pvr;
pvr = PVR_VER(mfspr(SPRN_PVR));
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
loops_per_jiffy = 50000000 / HZ;
cpu = of_find_node_by_type(NULL, "cpu");
if (cpu != NULL) {
fp = of_get_property(cpu, "clock-frequency", NULL);
if (fp != NULL) {
if (pvr >= 0x30 && pvr < 0x80)
/* PPC970 etc. */
loops_per_jiffy = *fp / (3 * HZ);
else if (pvr == 4 || pvr >= 8)
/* 604, G3, G4 etc. */
loops_per_jiffy = *fp / HZ;
else
/* 601, 603, etc. */
loops_per_jiffy = *fp / (2 * HZ);
}
of_node_put(cpu);
}
/* See if newworld or oldworld */
ic = of_find_node_with_property(NULL, "interrupt-controller");
if (ic) {
pmac_newworld = 1;
of_node_put(ic);
}
/* Lookup PCI hosts */
pmac_pci_init();
#ifdef CONFIG_PPC32
ohare_init();
l2cr_init();
#endif /* CONFIG_PPC32 */
find_via_cuda();
find_via_pmu();
smu_init();
#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \
defined(CONFIG_PPC64)
pmac_nvram_init();
#endif
#ifdef CONFIG_PPC32
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
ROOT_DEV = Root_RAM0;
else
#endif
ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif
#ifdef CONFIG_ADB
if (strstr(boot_command_line, "adb_sync")) {
extern int __adb_probe_sync;
__adb_probe_sync = 1;
}
#endif /* CONFIG_ADB */
}
#ifdef CONFIG_SCSI
void note_scsi_host(struct device_node *node, void *host)
{
}
EXPORT_SYMBOL(note_scsi_host);
#endif
static int initializing = 1;
static int pmac_late_init(void)
{
initializing = 0;
return 0;
}
machine_late_initcall(powermac, pmac_late_init);
/*
* This is __init_refok because we check for "initializing" before
* touching any of the __init sensitive things and "initializing"
* will be false after __init time. This can't be __init because it
* can be called whenever a disk is first accessed.
*/
void __init_refok note_bootable_part(dev_t dev, int part, int goodness)
{
char *p;
if (!initializing)
return;
if ((goodness <= current_root_goodness) &&
ROOT_DEV != DEFAULT_ROOT_DEVICE)
return;
p = strstr(boot_command_line, "root=");
if (p != NULL && (p == boot_command_line || p[-1] == ' '))
return;
ROOT_DEV = dev + part;
current_root_goodness = goodness;
}
#ifdef CONFIG_ADB_CUDA
static void cuda_restart(void)
{
struct adb_request req;
cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
for (;;)
cuda_poll();
}
static void cuda_shutdown(void)
{
struct adb_request req;
cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
for (;;)
cuda_poll();
}
#else
#define cuda_restart()
#define cuda_shutdown()
#endif
#ifndef CONFIG_ADB_PMU
#define pmu_restart()
#define pmu_shutdown()
#endif
#ifndef CONFIG_PMAC_SMU
#define smu_restart()
#define smu_shutdown()
#endif
static void pmac_restart(char *cmd)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
cuda_restart();
break;
case SYS_CTRLER_PMU:
pmu_restart();
break;
case SYS_CTRLER_SMU:
smu_restart();
break;
default: ;
}
}
static void pmac_power_off(void)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
cuda_shutdown();
break;
case SYS_CTRLER_PMU:
pmu_shutdown();
break;
case SYS_CTRLER_SMU:
smu_shutdown();
break;
default: ;
}
}
static void
pmac_halt(void)
{
pmac_power_off();
}
/*
* Early initialization.
*/
static void __init pmac_init_early(void)
{
/* Enable early btext debug if requested */
if (strstr(boot_command_line, "btextdbg")) {
udbg_adb_init_early();
register_early_udbg_console();
}
/* Probe motherboard chipset */
pmac_feature_init();
/* Initialize debug stuff */
udbg_scc_init(!!strstr(boot_command_line, "sccdbg"));
udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
#ifdef CONFIG_PPC64
iommu_init_early_dart();
#endif
/* SMP Init has to be done early as we need to patch up
* cpu_possible_mask before interrupt stacks are allocated
* or kaboom...
*/
#ifdef CONFIG_SMP
pmac_setup_smp();
#endif
}
static int __init pmac_declare_of_platform_devices(void)
{
struct device_node *np;
if (machine_is(chrp))
return -1;
np = of_find_node_by_name(NULL, "valkyrie");
if (np) {
of_platform_device_create(np, "valkyrie", NULL);
of_node_put(np);
}
np = of_find_node_by_name(NULL, "platinum");
if (np) {
of_platform_device_create(np, "platinum", NULL);
of_node_put(np);
}
np = of_find_node_by_type(NULL, "smu");
if (np) {
of_platform_device_create(np, "smu", NULL);
of_node_put(np);
}
np = of_find_node_by_type(NULL, "fcu");
if (np == NULL) {
/* Some machines have strangely broken device-tree */
np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
}
if (np) {
of_platform_device_create(np, "temperature", NULL);
of_node_put(np);
}
return 0;
}
machine_device_initcall(powermac, pmac_declare_of_platform_devices);
#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
/*
* This is called very early, as part of console_init() (typically just after
 * time_init()). This function is responsible for trying to find a good
* default console on serial ports. It tries to match the open firmware
* default output with one of the available serial console drivers.
*/
static int __init check_pmac_serial_console(void)
{
struct device_node *prom_stdout = NULL;
int offset = 0;
const char *name;
#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
char *devname = "ttyS";
#else
char *devname = "ttyPZ";
#endif
pr_debug(" -> check_pmac_serial_console()\n");
/* The user has requested a console so this is already set up. */
if (strstr(boot_command_line, "console=")) {
pr_debug(" console was specified !\n");
return -EBUSY;
}
if (!of_chosen) {
pr_debug(" of_chosen is NULL !\n");
return -ENODEV;
}
/* We are getting a weird phandle from OF ... */
/* ... So use the full path instead */
name = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (name == NULL) {
pr_debug(" no linux,stdout-path !\n");
return -ENODEV;
}
prom_stdout = of_find_node_by_path(name);
if (!prom_stdout) {
pr_debug(" can't find stdout package %s !\n", name);
return -ENODEV;
}
pr_debug("stdout is %s\n", prom_stdout->full_name);
name = of_get_property(prom_stdout, "name", NULL);
if (!name) {
pr_debug(" stdout package has no name !\n");
goto not_found;
}
if (strcmp(name, "ch-a") == 0)
offset = 0;
else if (strcmp(name, "ch-b") == 0)
offset = 1;
else
goto not_found;
of_node_put(prom_stdout);
pr_debug("Found serial console at %s%d\n", devname, offset);
return add_preferred_console(devname, offset, NULL);
not_found:
pr_debug("No preferred console found !\n");
of_node_put(prom_stdout);
return -ENODEV;
}
console_initcall(check_pmac_serial_console);
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init pmac_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "Power Macintosh") &&
!of_flat_dt_is_compatible(root, "MacRISC"))
return 0;
#ifdef CONFIG_PPC64
/*
* On U3, the DART (iommu) must be allocated now since it
* has an impact on htab_initialize (due to the large page it
* occupies having to be broken up so the DART itself is not
 * part of the cacheable linear mapping)
*/
alloc_dart_table();
hpte_init_native();
#endif
#ifdef CONFIG_PPC32
/* isa_io_base gets set in pmac_pci_init */
ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 1;
DMA_MODE_WRITE = 2;
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PMAC_SMU
/*
* SMU based G5s need some memory below 2Gb, at least the current
* driver needs that. We have to allocate it now. We allocate 4k
* (1 small page) for now.
*/
smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
#endif /* CONFIG_PMAC_SMU */
return 1;
}
#ifdef CONFIG_PPC64
/* Move that to pci.c */
static int pmac_pci_probe_mode(struct pci_bus *bus)
{
struct device_node *node = pci_bus_to_OF_node(bus);
/* We need to use normal PCI probing for the AGP bus,
* since the device for the AGP bridge isn't in the tree.
* Same for the PCIe host on U4 and the HT host bridge.
*/
if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
of_device_is_compatible(node, "u4-pcie") ||
of_device_is_compatible(node, "u3-ht")))
return PCI_PROBE_NORMAL;
return PCI_PROBE_DEVTREE;
}
#endif /* CONFIG_PPC64 */
define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
.setup_arch = pmac_setup_arch,
.init_early = pmac_init_early,
.show_cpuinfo = pmac_show_cpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = NULL, /* changed later */
.pci_irq_fixup = pmac_pci_irq_fixup,
.restart = pmac_restart,
.power_off = pmac_power_off,
.halt = pmac_halt,
.time_init = pmac_time_init,
.get_boot_time = pmac_get_boot_time,
.set_rtc_time = pmac_set_rtc_time,
.get_rtc_time = pmac_get_rtc_time,
.calibrate_decr = pmac_calibrate_decr,
.feature_call = pmac_do_feature_call,
.progress = udbg_progress,
#ifdef CONFIG_PPC64
.pci_probe_mode = pmac_pci_probe_mode,
.power_save = power4_idle,
.enable_pmcs = power4_enable_pmcs,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
.pcibios_enable_device_hook = pmac_pci_enable_device_hook,
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
};

View file

@@ -0,0 +1,397 @@
/*
* This file contains sleep low-level functions for PowerBook G3.
* Copyright (C) 1999 Benjamin Herrenschmidt (benh@kernel.crashing.org)
* and Paul Mackerras (paulus@samba.org).
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
#define MAGIC 0x4c617273 /* 'Lars' */
/*
* Structure for storing CPU registers on the stack.
*/
#define SL_SP 0
#define SL_PC 4
#define SL_MSR 8
#define SL_SDR1 0xc
#define SL_SPRG0 0x10 /* 4 sprg's */
#define SL_DBAT0 0x20
#define SL_IBAT0 0x28
#define SL_DBAT1 0x30
#define SL_IBAT1 0x38
#define SL_DBAT2 0x40
#define SL_IBAT2 0x48
#define SL_DBAT3 0x50
#define SL_IBAT3 0x58
#define SL_TB 0x60
#define SL_R2 0x68
#define SL_CR 0x6c
#define SL_R12 0x70 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
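/* SL_SIZE: the 80 bytes on top of SL_R12 cover the 20 saved registers r12..r31 */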
.section .text
.align 5
#if defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ_PMAC) || \
(defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32))
/* This gets called by via-pmu.c late during the sleep process.
 * The PMU has already been sent the sleep command and will shut us down
 * soon. We need to save all that is needed and set up the wakeup
* vector that will be called by the ROM on wakeup
*/
_GLOBAL(low_sleep_handler)
#ifndef CONFIG_6xx
blr
#else
mflr r0
stw r0,4(r1)
stwu r1,-SL_SIZE(r1)
mfcr r0
stw r0,SL_CR(r1)
stw r2,SL_R2(r1)
stmw r12,SL_R12(r1)
/* Save MSR & SDR1 */
mfmsr r4
stw r4,SL_MSR(r1)
mfsdr1 r4
stw r4,SL_SDR1(r1)
/* Get a stable timebase and save it */
1: mftbu r4
stw r4,SL_TB(r1)
mftb r5
stw r5,SL_TB+4(r1)
mftbu r3
cmpw r3,r4
bne 1b
/* Save SPRGs */
mfsprg r4,0
stw r4,SL_SPRG0(r1)
mfsprg r4,1
stw r4,SL_SPRG0+4(r1)
mfsprg r4,2
stw r4,SL_SPRG0+8(r1)
mfsprg r4,3
stw r4,SL_SPRG0+12(r1)
/* Save BATs */
mfdbatu r4,0
stw r4,SL_DBAT0(r1)
mfdbatl r4,0
stw r4,SL_DBAT0+4(r1)
mfdbatu r4,1
stw r4,SL_DBAT1(r1)
mfdbatl r4,1
stw r4,SL_DBAT1+4(r1)
mfdbatu r4,2
stw r4,SL_DBAT2(r1)
mfdbatl r4,2
stw r4,SL_DBAT2+4(r1)
mfdbatu r4,3
stw r4,SL_DBAT3(r1)
mfdbatl r4,3
stw r4,SL_DBAT3+4(r1)
mfibatu r4,0
stw r4,SL_IBAT0(r1)
mfibatl r4,0
stw r4,SL_IBAT0+4(r1)
mfibatu r4,1
stw r4,SL_IBAT1(r1)
mfibatl r4,1
stw r4,SL_IBAT1+4(r1)
mfibatu r4,2
stw r4,SL_IBAT2(r1)
mfibatl r4,2
stw r4,SL_IBAT2+4(r1)
mfibatu r4,3
stw r4,SL_IBAT3(r1)
mfibatl r4,3
stw r4,SL_IBAT3+4(r1)
/* Back up various CPU configuration registers */
bl __save_cpu_setup
/* The ROM can wake us up via 2 different vectors:
* - On wallstreet & lombard, we must write a magic
* value 'Lars' at address 4 and a pointer to a
* memory location containing the PC to resume from
* at address 0.
* - On Core99, we must store the wakeup vector at
 * address 0x80 and optionally its parameters
 * at address 0x84. I've had some trouble with those
 * parameters however and I no longer use them.
*/
lis r5,grackle_wake_up@ha
addi r5,r5,grackle_wake_up@l
tophys(r5,r5)
stw r5,SL_PC(r1)
lis r4,KERNELBASE@h
tophys(r5,r1)
addi r5,r5,SL_PC
lis r6,MAGIC@ha
addi r6,r6,MAGIC@l
stw r5,0(r4)
stw r6,4(r4)
/* Set up the 0x80-0x84 wakeup area for Core99 */
lis r3,core99_wake_up@ha
addi r3,r3,core99_wake_up@l
tophys(r3,r3)
stw r3,0x80(r4)
stw r5,0x84(r4)
/* Store a pointer to our backup storage into
* a kernel global
*/
lis r3,sleep_storage@ha
addi r3,r3,sleep_storage@l
stw r5,0(r3)
.globl low_cpu_die
low_cpu_die:
/* Flush & disable all caches */
bl flush_disable_caches
/* Turn off data relocation. */
mfmsr r3 /* Get current MSR */
rlwinm r3,r3,0,28,26 /* Turn off DR bit */
sync
mtmsr r3
isync
BEGIN_FTR_SECTION
/* Flush any pending L2 data prefetches to work around HW bug */
sync
lis r3,0xfff0
lwz r0,0(r3) /* perform cache-inhibited load to ROM */
sync /* (caches are disabled at this point) */
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
/*
* Set the HID0 and MSR for sleep.
*/
mfspr r2,SPRN_HID0
rlwinm r2,r2,0,10,7 /* clear doze, nap */
oris r2,r2,HID0_SLEEP@h
sync
isync
mtspr SPRN_HID0,r2
sync
/* This loop puts us back to sleep in case we have a spurious
* wakeup so that the host bridge properly stays asleep. The
* CPU will be turned off, either after a known time (about 1
* second) on wallstreet & lombard, or as soon as the CPU enters
* SLEEP mode on core99
*/
mfmsr r2
oris r2,r2,MSR_POW@h
1: sync
mtmsr r2
isync
b 1b
/*
* Here is the resume code.
*/
/*
* Core99 machines resume here
* r4 has the physical address of SL_PC(sp) (unused)
*/
_GLOBAL(core99_wake_up)
/* Make sure HID0 no longer contains any sleep bit and that data cache
* is disabled
*/
mfspr r3,SPRN_HID0
rlwinm r3,r3,0,11,7 /* clear SLEEP, NAP, DOZE bits */
rlwinm r3,r3,0,18,15 /* clear DCE, ICE */
mtspr SPRN_HID0,r3
sync
isync
/* sanitize MSR */
mfmsr r3
ori r3,r3,MSR_EE|MSR_IP
xori r3,r3,MSR_EE|MSR_IP
sync
isync
mtmsr r3
sync
isync
/* Recover sleep storage */
lis r3,sleep_storage@ha
addi r3,r3,sleep_storage@l
tophys(r3,r3)
lwz r1,0(r3)
/* Pass thru to older resume code ... */
/*
* Here is the resume code for older machines.
* r1 has the physical address of SL_PC(sp).
*/
grackle_wake_up:
/* Restore the kernel's segment registers before
* we do any r1 memory access as we are not sure they
* are in a sane state above the first 256Mb region
*/
li r0,16 /* load up segment register values */
mtctr r0 /* for context 0 */
lis r3,0x2000 /* Ku = 1, VSID = 0 */
li r4,0
3: mtsrin r3,r4
addi r3,r3,0x111 /* increment VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
sync
isync
subi r1,r1,SL_PC
/* Restore various CPU configuration registers */
bl __restore_cpu_setup
/* Make sure all FPRs have been initialized */
bl reloc_offset
bl __init_fpu_registers
/* Invalidate & enable L1 cache, we don't care about
* whatever the ROM may have tried to write to memory
*/
bl __inval_enable_L1
/* Restore the BATs, and SDR1. Then we can turn on the MMU. */
lwz r4,SL_SDR1(r1)
mtsdr1 r4
lwz r4,SL_SPRG0(r1)
mtsprg 0,r4
lwz r4,SL_SPRG0+4(r1)
mtsprg 1,r4
lwz r4,SL_SPRG0+8(r1)
mtsprg 2,r4
lwz r4,SL_SPRG0+12(r1)
mtsprg 3,r4
lwz r4,SL_DBAT0(r1)
mtdbatu 0,r4
lwz r4,SL_DBAT0+4(r1)
mtdbatl 0,r4
lwz r4,SL_DBAT1(r1)
mtdbatu 1,r4
lwz r4,SL_DBAT1+4(r1)
mtdbatl 1,r4
lwz r4,SL_DBAT2(r1)
mtdbatu 2,r4
lwz r4,SL_DBAT2+4(r1)
mtdbatl 2,r4
lwz r4,SL_DBAT3(r1)
mtdbatu 3,r4
lwz r4,SL_DBAT3+4(r1)
mtdbatl 3,r4
lwz r4,SL_IBAT0(r1)
mtibatu 0,r4
lwz r4,SL_IBAT0+4(r1)
mtibatl 0,r4
lwz r4,SL_IBAT1(r1)
mtibatu 1,r4
lwz r4,SL_IBAT1+4(r1)
mtibatl 1,r4
lwz r4,SL_IBAT2(r1)
mtibatu 2,r4
lwz r4,SL_IBAT2+4(r1)
mtibatl 2,r4
lwz r4,SL_IBAT3(r1)
mtibatu 3,r4
lwz r4,SL_IBAT3+4(r1)
mtibatl 3,r4
BEGIN_MMU_FTR_SECTION
li r4,0
mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4
mtspr SPRN_DBAT5U,r4
mtspr SPRN_DBAT5L,r4
mtspr SPRN_DBAT6U,r4
mtspr SPRN_DBAT6L,r4
mtspr SPRN_DBAT7U,r4
mtspr SPRN_DBAT7L,r4
mtspr SPRN_IBAT4U,r4
mtspr SPRN_IBAT4L,r4
mtspr SPRN_IBAT5U,r4
mtspr SPRN_IBAT5L,r4
mtspr SPRN_IBAT6U,r4
mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Flush all TLBs */
lis r4,0x1000
1: addic. r4,r4,-0x1000
tlbie r4
blt 1b
sync
/* restore the MSR and turn on the MMU */
lwz r3,SL_MSR(r1)
bl turn_on_mmu
/* get back the stack pointer */
tovirt(r1,r1)
/* Restore TB */
li r3,0
mttbl r3
lwz r3,SL_TB(r1)
lwz r4,SL_TB+4(r1)
mttbu r3
mttbl r4
/* Restore the callee-saved registers and return */
lwz r0,SL_CR(r1)
mtcr r0
lwz r2,SL_R2(r1)
lmw r12,SL_R12(r1)
addi r1,r1,SL_SIZE
lwz r0,4(r1)
mtlr r0
blr
turn_on_mmu:
mflr r4
tovirt(r4,r4)
mtsrr0 r4
mtsrr1 r3
sync
isync
rfi
#endif /* CONFIG_PM || CONFIG_CPU_FREQ_PMAC || (HOTPLUG_CPU && PPC32) */
.section .data
.balign L1_CACHE_BYTES
sleep_storage:
.long 0
.balign L1_CACHE_BYTES, 0
#endif /* CONFIG_6xx */
.section .text

File diff suppressed because it is too large

View file

@@ -0,0 +1,334 @@
/*
* Support for periodic interrupts (100 per second) and for getting
* the current time from the RTC on Power Macintoshes.
*
* We use the decrementer register for our periodic interrupts.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996 Paul Mackerras.
* Copyright (C) 2003-2005 Benjamin Herrenschmidt.
*
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <linux/rtc.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/smu.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/* Apparently the RTC stores seconds since 1 Jan 1904 */
#define RTC_OFFSET 2082844800
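/* 2082844800 = (66 * 365 + 17) * 86400: the number of seconds between
 * 1 Jan 1904 and the Unix epoch (1 Jan 1970), 17 of those 66 years
 * being leap years.
 */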
/*
* Calibrate the decrementer frequency with the VIA timer 1.
*/
#define VIA_TIMER_FREQ_6 4700000 /* timer 1 frequency * 6 */
/* VIA registers */
#define RS 0x200 /* skip between registers */
#define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */
#define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */
#define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */
#define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */
#define ACR (11*RS) /* Auxiliary control register */
#define IFR (13*RS) /* Interrupt flag register */
/* Bits in ACR */
#define T1MODE 0xc0 /* Timer 1 mode */
#define T1MODE_CONT 0x40 /* continuous interrupts */
/* Bits in IFR and IER */
#define T1_INT 0x40 /* Timer 1 interrupt */
long __init pmac_time_init(void)
{
s32 delta = 0;
#ifdef CONFIG_NVRAM
int dst;
delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16;
delta |= ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xa)) << 8;
delta |= pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0xb);
if (delta & 0x00800000UL)
delta |= 0xFF000000UL;
dst = ((pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x8) & 0x80) != 0);
printk("GMT Delta read from XPRAM: %d minutes, DST: %s\n", delta/60,
dst ? "on" : "off");
#endif
return delta;
}
#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU)
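/* struct rtc_time counts years from 1900 and months from 0, while the
 * CUDA/PMU hardware deals in absolute seconds, hence the adjustments in
 * the helpers below.
 */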
static void to_rtc_time(unsigned long now, struct rtc_time *tm)
{
to_tm(now, tm);
tm->tm_year -= 1900;
tm->tm_mon -= 1;
}
#endif
#if defined(CONFIG_ADB_CUDA) || defined(CONFIG_ADB_PMU) || \
defined(CONFIG_PMAC_SMU)
static unsigned long from_rtc_time(struct rtc_time *tm)
{
return mktime(tm->tm_year+1900, tm->tm_mon+1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
}
#endif
#ifdef CONFIG_ADB_CUDA
static unsigned long cuda_get_time(void)
{
struct adb_request req;
unsigned int now;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
while (!req.complete)
cuda_poll();
if (req.reply_len != 7)
printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
req.reply_len);
now = (req.reply[3] << 24) + (req.reply[4] << 16)
+ (req.reply[5] << 8) + req.reply[6];
return ((unsigned long)now) - RTC_OFFSET;
}
#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
static int cuda_set_rtc_time(struct rtc_time *tm)
{
unsigned int nowtime;
struct adb_request req;
nowtime = from_rtc_time(tm) + RTC_OFFSET;
if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
nowtime >> 24, nowtime >> 16, nowtime >> 8,
nowtime) < 0)
return -ENXIO;
while (!req.complete)
cuda_poll();
if ((req.reply_len != 3) && (req.reply_len != 7))
printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
req.reply_len);
return 0;
}
#else
#define cuda_get_time() 0
#define cuda_get_rtc_time(tm)
#define cuda_set_rtc_time(tm) 0
#endif
#ifdef CONFIG_ADB_PMU
static unsigned long pmu_get_time(void)
{
struct adb_request req;
unsigned int now;
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
pmu_wait_complete(&req);
if (req.reply_len != 4)
printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
req.reply_len);
now = (req.reply[0] << 24) + (req.reply[1] << 16)
+ (req.reply[2] << 8) + req.reply[3];
return ((unsigned long)now) - RTC_OFFSET;
}
#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
static int pmu_set_rtc_time(struct rtc_time *tm)
{
unsigned int nowtime;
struct adb_request req;
nowtime = from_rtc_time(tm) + RTC_OFFSET;
if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
nowtime >> 16, nowtime >> 8, nowtime) < 0)
return -ENXIO;
pmu_wait_complete(&req);
if (req.reply_len != 0)
printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
req.reply_len);
return 0;
}
#else
#define pmu_get_time() 0
#define pmu_get_rtc_time(tm)
#define pmu_set_rtc_time(tm) 0
#endif
#ifdef CONFIG_PMAC_SMU
static unsigned long smu_get_time(void)
{
struct rtc_time tm;
if (smu_get_rtc_time(&tm, 1))
return 0;
return from_rtc_time(&tm);
}
#else
#define smu_get_time() 0
#define smu_get_rtc_time(tm, spin)
#define smu_set_rtc_time(tm, spin) 0
#endif
/* Can't be __init, it's called when suspending and resuming */
unsigned long pmac_get_boot_time(void)
{
/* Get the time from the RTC, used only at boot time */
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
return cuda_get_time();
case SYS_CTRLER_PMU:
return pmu_get_time();
case SYS_CTRLER_SMU:
return smu_get_time();
default:
return 0;
}
}
void pmac_get_rtc_time(struct rtc_time *tm)
{
/* Get the time from the RTC, used only at boot time */
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
cuda_get_rtc_time(tm);
break;
case SYS_CTRLER_PMU:
pmu_get_rtc_time(tm);
break;
case SYS_CTRLER_SMU:
smu_get_rtc_time(tm, 1);
break;
default:
;
}
}
int pmac_set_rtc_time(struct rtc_time *tm)
{
switch (sys_ctrler) {
case SYS_CTRLER_CUDA:
return cuda_set_rtc_time(tm);
case SYS_CTRLER_PMU:
return pmu_set_rtc_time(tm);
case SYS_CTRLER_SMU:
return smu_set_rtc_time(tm, 1);
default:
return -ENODEV;
}
}
#ifdef CONFIG_PPC32
/*
* Calibrate the decrementer register using VIA timer 1.
* This is used both on powermacs and CHRP machines.
*/
int __init via_calibrate_decr(void)
{
struct device_node *vias;
volatile unsigned char __iomem *via;
int count = VIA_TIMER_FREQ_6 / 100;
unsigned int dstart, dend;
struct resource rsrc;
vias = of_find_node_by_name(NULL, "via-cuda");
if (vias == NULL)
vias = of_find_node_by_name(NULL, "via-pmu");
if (vias == NULL)
vias = of_find_node_by_name(NULL, "via");
if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) {
of_node_put(vias);
return 0;
}
of_node_put(vias);
via = ioremap(rsrc.start, resource_size(&rsrc));
if (via == NULL) {
printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
return 0;
}
/* set timer 1 for continuous interrupts */
out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT);
/* set the counter to a small value */
out_8(&via[T1CH], 2);
/* set the latch to `count' */
out_8(&via[T1LL], count);
out_8(&via[T1LH], count >> 8);
/* wait until it hits 0 */
while ((in_8(&via[IFR]) & T1_INT) == 0)
;
dstart = get_dec();
/* clear the interrupt & wait until it hits 0 again */
in_8(&via[T1CL]);
while ((in_8(&via[IFR]) & T1_INT) == 0)
;
dend = get_dec();
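/* Timer 1 counted down `count' ticks at VIA_TIMER_FREQ_6 / 6 Hz, i.e.
 * for 6 * count / VIA_TIMER_FREQ_6 = 6/100 of a second (count being
 * VIA_TIMER_FREQ_6 / 100). The decrementer dropped by (dstart - dend)
 * over that interval, giving the timebase frequency below.
 */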
ppc_tb_freq = (dstart - dend) * 100 / 6;
iounmap(via);
return 1;
}
#endif
/*
* Query the OF and get the decr frequency.
*/
void __init pmac_calibrate_decr(void)
{
generic_calibrate_decr();
#ifdef CONFIG_PPC32
/* We assume MacRISC2 machines have correct device-tree
* calibration. That's better since the VIA itself seems
* to be slightly off. --BenH
*/
if (!of_machine_is_compatible("MacRISC2") &&
!of_machine_is_compatible("MacRISC3") &&
!of_machine_is_compatible("MacRISC4"))
if (via_calibrate_decr())
return;
/* Special case: QuickSilver G4s seem to have a badly calibrated
* timebase-frequency in OF, VIA is much better on these. We should
* probably implement calibration based on the KL timer on these
* machines anyway... -BenH
*/
if (of_machine_is_compatible("PowerMac3,5"))
if (via_calibrate_decr())
return;
#endif
}

View file

@@ -0,0 +1,219 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/cuda.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/bootx.h>
#include <asm/errno.h>
#include <asm/pmac_feature.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/btext.h>
#include <asm/time.h>
#include <asm/udbg.h>
/*
* This implementation is "special", it can "patch" the current
* udbg implementation and work on top of it. It must thus be
* initialized last
*/
static void (*udbg_adb_old_putc)(char c);
static int (*udbg_adb_old_getc)(void);
static int (*udbg_adb_old_getc_poll)(void);
static enum {
input_adb_none,
input_adb_pmu,
input_adb_cuda,
} input_type = input_adb_none;
int xmon_wants_key, xmon_adb_keycode;
static inline void udbg_adb_poll(void)
{
#ifdef CONFIG_ADB_PMU
if (input_type == input_adb_pmu)
pmu_poll_adb();
#endif /* CONFIG_ADB_PMU */
#ifdef CONFIG_ADB_CUDA
if (input_type == input_adb_cuda)
cuda_poll();
#endif /* CONFIG_ADB_CUDA */
}
#ifdef CONFIG_BOOTX_TEXT
static int udbg_adb_use_btext;
static int xmon_adb_shiftstate;
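/* Raw ADB keycode to ASCII tables; bit 0x80 of a keycode marks a key
 * release (see the up-transition check in udbg_adb_local_getc() below).
 */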
static unsigned char xmon_keytab[128] =
"asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */
"yt123465=97-80]o" /* 0x10 - 0x1f */
"u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */
"\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
"\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
"\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
static unsigned char xmon_shift_keytab[128] =
"ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */
"YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */
"U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */
"\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */
"\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
"\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
static int udbg_adb_local_getc(void)
{
int k, t, on;
xmon_wants_key = 1;
for (;;) {
xmon_adb_keycode = -1;
t = 0;
on = 0;
k = -1;
do {
if (--t < 0) {
on = 1 - on;
btext_drawchar(on? 0xdb: 0x20);
btext_drawchar('\b');
t = 200000;
}
udbg_adb_poll();
if (udbg_adb_old_getc_poll)
k = udbg_adb_old_getc_poll();
} while (k == -1 && xmon_adb_keycode == -1);
if (on)
btext_drawstring(" \b");
if (k != -1)
return k;
k = xmon_adb_keycode;
/* test for shift keys */
if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) {
xmon_adb_shiftstate = (k & 0x80) == 0;
continue;
}
if (k >= 0x80)
continue; /* ignore up transitions */
k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k];
if (k != 0)
break;
}
xmon_wants_key = 0;
return k;
}
#endif /* CONFIG_BOOTX_TEXT */
static int udbg_adb_getc(void)
{
#ifdef CONFIG_BOOTX_TEXT
if (udbg_adb_use_btext && input_type != input_adb_none)
return udbg_adb_local_getc();
#endif
if (udbg_adb_old_getc)
return udbg_adb_old_getc();
return -1;
}
/* getc_poll() is not really used, unless you have the xmon-over-modem
 * hack, which doesn't quite concern us here; we just poll the low level
* ADB driver to prevent it from timing out and call back the original poll
* routine.
*/
static int udbg_adb_getc_poll(void)
{
udbg_adb_poll();
if (udbg_adb_old_getc_poll)
return udbg_adb_old_getc_poll();
return -1;
}
static void udbg_adb_putc(char c)
{
#ifdef CONFIG_BOOTX_TEXT
if (udbg_adb_use_btext)
btext_drawchar(c);
#endif
if (udbg_adb_old_putc)
return udbg_adb_old_putc(c);
}
void __init udbg_adb_init_early(void)
{
#ifdef CONFIG_BOOTX_TEXT
if (btext_find_display(1) == 0) {
udbg_adb_use_btext = 1;
udbg_putc = udbg_adb_putc;
}
#endif
}
int __init udbg_adb_init(int force_btext)
{
struct device_node *np;
/* Capture existing callbacks */
udbg_adb_old_putc = udbg_putc;
udbg_adb_old_getc = udbg_getc;
udbg_adb_old_getc_poll = udbg_getc_poll;
/* Check if our early init was already called */
if (udbg_adb_old_putc == udbg_adb_putc)
udbg_adb_old_putc = NULL;
#ifdef CONFIG_BOOTX_TEXT
if (udbg_adb_old_putc == btext_drawchar)
udbg_adb_old_putc = NULL;
#endif
/* Set ours as output */
udbg_putc = udbg_adb_putc;
udbg_getc = udbg_adb_getc;
udbg_getc_poll = udbg_adb_getc_poll;
#ifdef CONFIG_BOOTX_TEXT
/* Check if we should use btext output */
if (btext_find_display(force_btext) == 0)
udbg_adb_use_btext = 1;
#endif
/* See if there is a keyboard in the device tree with a parent
* of type "adb". If not, we return a failure, but we keep the
 * btext output set for now
*/
for_each_node_by_name(np, "keyboard") {
struct device_node *parent = of_get_parent(np);
int found = (parent && strcmp(parent->type, "adb") == 0);
of_node_put(parent);
if (found)
break;
}
if (np == NULL)
return -ENODEV;
of_node_put(np);
#ifdef CONFIG_ADB_PMU
if (find_via_pmu())
input_type = input_adb_pmu;
#endif
#ifdef CONFIG_ADB_CUDA
if (find_via_cuda())
input_type = input_adb_cuda;
#endif
/* Same as above: nothing found, keep btext set for output */
if (input_type == input_adb_none)
return -ENODEV;
return 0;
}

View file

@@ -0,0 +1,184 @@
/*
* udbg for zilog scc ports as found on Apple PowerMacs
*
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <asm/udbg.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pmac_feature.h>
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
#define SCC_TXRDY 4
#define SCC_RXRDY 1
static volatile u8 __iomem *sccc;
static volatile u8 __iomem *sccd;
static void udbg_scc_putc(char c)
{
if (sccc) {
while ((in_8(sccc) & SCC_TXRDY) == 0)
;
out_8(sccd, c);
if (c == '\n')
udbg_scc_putc('\r');
}
}
static int udbg_scc_getc_poll(void)
{
if (sccc) {
if ((in_8(sccc) & SCC_RXRDY) != 0)
return in_8(sccd);
else
return -1;
}
return -1;
}
static int udbg_scc_getc(void)
{
if (sccc) {
while ((in_8(sccc) & SCC_RXRDY) == 0)
;
return in_8(sccd);
}
return -1;
}
static unsigned char scc_inittab[] = {
13, 0, /* set baud rate divisor */
12, 0,
14, 1, /* baud rate gen enable, src=rtxc */
11, 0x50, /* clocks = br gen */
5, 0xea, /* tx 8 bits, assert DTR & RTS */
4, 0x46, /* x16 clock, 1 stop */
3, 0xc1, /* rx enable, 8 bits */
};
void udbg_scc_init(int force_scc)
{
const u32 *reg;
unsigned long addr;
struct device_node *stdout = NULL, *escc = NULL, *macio = NULL;
struct device_node *ch, *ch_def = NULL, *ch_a = NULL;
const char *path;
int i, x;
escc = of_find_node_by_name(NULL, "escc");
if (escc == NULL)
goto bail;
macio = of_get_parent(escc);
if (macio == NULL)
goto bail;
path = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (path != NULL)
stdout = of_find_node_by_path(path);
for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
if (ch == stdout)
ch_def = of_node_get(ch);
if (strcmp(ch->name, "ch-a") == 0)
ch_a = of_node_get(ch);
}
if (ch_def == NULL && !force_scc)
goto bail;
ch = ch_def ? ch_def : ch_a;
/* Get address within mac-io ASIC */
reg = of_get_property(escc, "reg", NULL);
if (reg == NULL)
goto bail;
addr = reg[0];
/* Get address of mac-io PCI itself */
reg = of_get_property(macio, "assigned-addresses", NULL);
if (reg == NULL)
goto bail;
addr += reg[2];
/* Lock the serial port */
pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
if (ch == ch_a)
addr += 0x20;
sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
sccc += addr & ~PAGE_MASK;
sccd = sccc + 0x10;
mb();
for (i = 20000; i != 0; --i)
x = in_8(sccc);
out_8(sccc, 0x09); /* reset A or B side */
out_8(sccc, 0xc0);
/* If the SCC was the OF output port, read back the BRG value; otherwise
 * set up for 38400 or 57600 8N1 depending on the machine
*/
if (ch_def != NULL) {
out_8(sccc, 13);
scc_inittab[1] = in_8(sccc);
out_8(sccc, 12);
scc_inittab[3] = in_8(sccc);
} else if (of_machine_is_compatible("RackMac1,1")
|| of_machine_is_compatible("RackMac1,2")
|| of_machine_is_compatible("MacRISC4")) {
/* Xserves and G5s default to 57600 */
scc_inittab[1] = 0;
scc_inittab[3] = 0;
} else {
/* Others default to 38400 */
scc_inittab[1] = 0;
scc_inittab[3] = 1;
}
for (i = 0; i < sizeof(scc_inittab); ++i)
out_8(sccc, scc_inittab[i]);
udbg_putc = udbg_scc_putc;
udbg_getc = udbg_scc_getc;
udbg_getc_poll = udbg_scc_getc_poll;
udbg_puts("Hello World !\n");
bail:
of_node_put(macio);
of_node_put(escc);
of_node_put(stdout);
of_node_put(ch_def);
of_node_put(ch_a);
}
#ifdef CONFIG_PPC64
static void udbg_real_scc_putc(char c)
{
while ((real_readb(sccc) & SCC_TXRDY) == 0)
;
real_writeb(c, sccd);
if (c == '\n')
udbg_real_scc_putc('\r');
}
void __init udbg_init_pmac_realmode(void)
{
sccc = (volatile u8 __iomem *)0x80013020ul;
sccd = (volatile u8 __iomem *)0x80013030ul;
udbg_putc = udbg_real_scc_putc;
udbg_getc = NULL;
udbg_getc_poll = NULL;
}
#endif /* CONFIG_PPC64 */