Fixed MTP to work with TWRP

awab228 committed on 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

arch/arm/common/Kconfig Normal file

@@ -0,0 +1,26 @@
config ICST
bool
config SA1111
bool
select DMABOUNCE if !ARCH_PXA
config DMABOUNCE
bool
select ZONE_DMA
config SHARP_LOCOMO
bool
config SHARP_PARAM
bool
config SHARP_SCOOP
bool
config TI_PRIV_EDMA
bool
config FIQ_GLUE
bool
select FIQ

arch/arm/common/Makefile Normal file

@@ -0,0 +1,22 @@
#
# Makefile for the linux kernel.
#
obj-y += firmware.o
obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
obj-$(CONFIG_ICST) += icst.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
CFLAGS_REMOVE_mcpm_entry.o = -pg
AFLAGS_mcpm_head.o := -march=armv7-a
AFLAGS_vlock.o := -march=armv7-a
obj-$(CONFIG_TI_PRIV_EDMA) += edma.o
obj-$(CONFIG_BL_SWITCHER) += bL_switcher.o
obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o

arch/arm/common/bL_switcher.c Normal file

@@ -0,0 +1,814 @@
/*
* arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
*
* Created by: Nicolas Pitre, March 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>
#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>
#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>
/*
* Use our own MPIDR accessors as the generic ones in asm/cputype.h have
* __attribute_const__ and we don't want the compiler to assume any
* constness here as the value _does_ change along some code paths.
*/
static int read_mpidr(void)
{
unsigned int id;
asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
return id & MPIDR_HWID_BITMASK;
}
/*
* bL switcher core code.
*/
static void bL_do_switch(void *_arg)
{
unsigned ib_mpidr, ib_cpu, ib_cluster;
long volatile handshake, **handshake_ptr = _arg;
pr_debug("%s\n", __func__);
ib_mpidr = cpu_logical_map(smp_processor_id());
ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
/* Advertise our handshake location */
if (handshake_ptr) {
handshake = 0;
*handshake_ptr = &handshake;
} else
handshake = -1;
/*
* Our state has been saved at this point. Let's release our
* inbound CPU.
*/
mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
sev();
/*
* From this point, we must assume that our counterpart CPU might
* have taken over in its parallel world already, as if execution
* just returned from cpu_suspend(). It is therefore important to
* be very careful not to make any change the other guy is not
* expecting. This is why we need stack isolation.
*
* Fancy under cover tasks could be performed here. For now
* we have none.
*/
/*
* Let's wait until our inbound is alive.
*/
while (!handshake) {
wfe();
smp_mb();
}
/* Let's put ourself down. */
mcpm_cpu_power_down();
/* should never get here */
BUG();
}
/*
* Stack isolation. To ensure 'current' remains valid, we just use another
* piece of our thread's stack space which should be fairly lightly used.
* The selected area starts just above the thread_info structure located
* at the very bottom of the stack, aligned to a cache line, and indexed
* with the cluster number.
*/
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
unsigned int mpidr = read_mpidr();
unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
void *stack = current_thread_info() + 1;
stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
stack += clusterid * STACK_SIZE + STACK_SIZE;
call_with_stack(bL_do_switch, (void *)_arg, stack);
BUG();
}
/*
* Generic switcher interface
*/
static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];
/*
* bL_switch_to - Switch to a specific cluster for the current CPU
* @new_cluster_id: the ID of the cluster to switch to.
*
* This function must be called on the CPU to be switched.
* Returns 0 on success, else a negative status code.
*/
static int bL_switch_to(unsigned int new_cluster_id)
{
unsigned int mpidr, this_cpu, that_cpu;
unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
struct completion inbound_alive;
struct tick_device *tdev;
enum clock_event_mode tdev_mode;
long volatile *handshake_ptr;
int ipi_nr, ret;
this_cpu = smp_processor_id();
ob_mpidr = read_mpidr();
ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
if (new_cluster_id == ob_cluster)
return 0;
that_cpu = bL_switcher_cpu_pairing[this_cpu];
ib_mpidr = cpu_logical_map(that_cpu);
ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
this_cpu, ob_mpidr, ib_mpidr);
this_cpu = smp_processor_id();
/* Close the gate for our entry vectors */
mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
/* Install our "inbound alive" notifier. */
init_completion(&inbound_alive);
ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
/*
* Let's wake up the inbound CPU now in case it requires some delay
* to come online, but leave it gated in our entry vector code.
*/
ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
if (ret) {
pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
return ret;
}
/*
* Raise a SGI on the inbound CPU to make sure it doesn't stall
* in a possible WFI, such as in bL_power_down().
*/
gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
/*
* Wait for the inbound to come up. This allows for other
* tasks to be scheduled in the mean time.
*/
wait_for_completion(&inbound_alive);
mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
/*
* From this point we are entering the switch critical zone
* and can't take any interrupts anymore.
*/
local_irq_disable();
local_fiq_disable();
trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);
/* redirect GIC's SGIs to our counterpart */
gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
tdev = tick_get_device(this_cpu);
if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
tdev = NULL;
if (tdev) {
tdev_mode = tdev->evtdev->mode;
clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
}
ret = cpu_pm_enter();
/* we can not tolerate errors at this point */
if (ret)
panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
/* Swap the physical CPUs in the logical map for this logical CPU. */
cpu_logical_map(this_cpu) = ib_mpidr;
cpu_logical_map(that_cpu) = ob_mpidr;
/* Let's do the actual CPU switch. */
ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
if (ret > 0)
panic("%s: cpu_suspend() returned %d\n", __func__, ret);
/* We are executing on the inbound CPU at this point */
mpidr = read_mpidr();
pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
BUG_ON(mpidr != ib_mpidr);
mcpm_cpu_powered_up();
ret = cpu_pm_exit();
if (tdev) {
clockevents_set_mode(tdev->evtdev, tdev_mode);
clockevents_program_event(tdev->evtdev,
tdev->evtdev->next_event, 1);
}
trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
local_fiq_enable();
local_irq_enable();
*handshake_ptr = 1;
dsb_sev();
if (ret)
pr_err("%s exiting with error %d\n", __func__, ret);
return ret;
}
struct bL_thread {
spinlock_t lock;
struct task_struct *task;
wait_queue_head_t wq;
int wanted_cluster;
struct completion started;
bL_switch_completion_handler completer;
void *completer_cookie;
};
static struct bL_thread bL_threads[NR_CPUS];
static int bL_switcher_thread(void *arg)
{
struct bL_thread *t = arg;
struct sched_param param = { .sched_priority = 1 };
int cluster;
bL_switch_completion_handler completer;
void *completer_cookie;
sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
complete(&t->started);
do {
if (signal_pending(current))
flush_signals(current);
wait_event_interruptible(t->wq,
t->wanted_cluster != -1 ||
kthread_should_stop());
spin_lock(&t->lock);
cluster = t->wanted_cluster;
completer = t->completer;
completer_cookie = t->completer_cookie;
t->wanted_cluster = -1;
t->completer = NULL;
spin_unlock(&t->lock);
if (cluster != -1) {
bL_switch_to(cluster);
if (completer)
completer(completer_cookie);
}
} while (!kthread_should_stop());
return 0;
}
static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
struct task_struct *task;
task = kthread_create_on_node(bL_switcher_thread, arg,
cpu_to_node(cpu), "kswitcher_%d", cpu);
if (!IS_ERR(task)) {
kthread_bind(task, cpu);
wake_up_process(task);
} else
pr_err("%s failed for CPU %d\n", __func__, cpu);
return task;
}
/*
* bL_switch_request_cb - Switch to a specific cluster for the given CPU,
* with completion notification via a callback
*
* @cpu: the CPU to switch
* @new_cluster_id: the ID of the cluster to switch to.
* @completer: switch completion callback. if non-NULL,
* @completer(@completer_cookie) will be called on completion of
* the switch, in non-atomic context.
* @completer_cookie: opaque context argument for @completer.
*
* This function causes a cluster switch on the given CPU by waking up
* the appropriate switcher thread. This function may or may not return
* before the switch has occurred.
*
* If a @completer callback function is supplied, it will be called when
* the switch is complete. This can be used to determine asynchronously
* when the switch is complete, regardless of when bL_switch_request()
* returns. When @completer is supplied, no new switch request is permitted
* for the affected CPU until after the switch is complete, and @completer
* has returned.
*/
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
bL_switch_completion_handler completer,
void *completer_cookie)
{
struct bL_thread *t;
if (cpu >= ARRAY_SIZE(bL_threads)) {
pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
return -EINVAL;
}
t = &bL_threads[cpu];
if (IS_ERR(t->task))
return PTR_ERR(t->task);
if (!t->task)
return -ESRCH;
spin_lock(&t->lock);
if (t->completer) {
spin_unlock(&t->lock);
return -EBUSY;
}
t->completer = completer;
t->completer_cookie = completer_cookie;
t->wanted_cluster = new_cluster_id;
spin_unlock(&t->lock);
wake_up(&t->wq);
return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
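/*
 * A hypothetical caller sketch (illustrative only; the my_* names are
 * not part of this commit): request that logical CPU 0 move to
 * cluster 1 and block until the completion callback fires.
 */
static void my_switch_done(void *cookie)
{
	complete(cookie);	/* called in non-atomic context, per above */
}

static int my_switch_cpu0_to_cluster1(void)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = bL_switch_request_cb(0, 1, my_switch_done, &done);
	if (ret)
		return ret;	/* -EBUSY, -ESRCH, -EINVAL, ... */
	wait_for_completion(&done);
	return 0;
}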
/*
* Activation and configuration code.
*/
static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;
int bL_switcher_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
static int bL_activation_notify(unsigned long val)
{
int ret;
ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
if (ret & NOTIFY_STOP_MASK)
pr_err("%s: notifier chain failed with status 0x%x\n",
__func__, ret);
return notifier_to_errno(ret);
}
static void bL_switcher_restore_cpus(void)
{
int i;
for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
struct device *cpu_dev = get_cpu_device(i);
int ret = device_online(cpu_dev);
if (ret)
dev_err(cpu_dev, "switcher: unable to restore CPU\n");
}
}
static int bL_switcher_halve_cpus(void)
{
int i, j, cluster_0, gic_id, ret;
unsigned int cpu, cluster, mask;
cpumask_t available_cpus;
/* First pass to validate what we have */
mask = 0;
for_each_online_cpu(i) {
cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
if (cluster >= 2) {
pr_err("%s: only dual cluster systems are supported\n", __func__);
return -EINVAL;
}
if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
return -EINVAL;
mask |= (1 << cluster);
}
if (mask != 3) {
pr_err("%s: no CPU pairing possible\n", __func__);
return -EINVAL;
}
/*
* Now let's do the pairing. We match each CPU with another CPU
* from a different cluster. To get a uniform scheduling behavior
* without fiddling with CPU topology and compute capacity data,
* we'll use logical CPUs initially belonging to the same cluster.
*/
memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
cpumask_copy(&available_cpus, cpu_online_mask);
cluster_0 = -1;
for_each_cpu(i, &available_cpus) {
int match = -1;
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
if (cluster_0 == -1)
cluster_0 = cluster;
if (cluster != cluster_0)
continue;
cpumask_clear_cpu(i, &available_cpus);
for_each_cpu(j, &available_cpus) {
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
/*
* Let's remember the last match to create "odd"
* pairings on purpose in order for other code not
* to assume any relation between physical and
* logical CPU numbers.
*/
if (cluster != cluster_0)
match = j;
}
if (match != -1) {
bL_switcher_cpu_pairing[i] = match;
cpumask_clear_cpu(match, &available_cpus);
pr_info("CPU%d paired with CPU%d\n", i, match);
}
}
/*
* Now we disable the unwanted CPUs i.e. everything that has no
* pairing information (that includes the pairing counterparts).
*/
cpumask_clear(&bL_switcher_removed_logical_cpus);
for_each_online_cpu(i) {
cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
/* Let's take note of the GIC ID for this CPU */
gic_id = gic_get_cpu_id(i);
if (gic_id < 0) {
pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
bL_switcher_restore_cpus();
return -EINVAL;
}
bL_gic_id[cpu][cluster] = gic_id;
pr_info("GIC ID for CPU %u cluster %u is %u\n",
cpu, cluster, gic_id);
if (bL_switcher_cpu_pairing[i] != -1) {
bL_switcher_cpu_original_cluster[i] = cluster;
continue;
}
ret = device_offline(get_cpu_device(i));
if (ret) {
bL_switcher_restore_cpus();
return ret;
}
cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
}
return 0;
}
/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
int cpu;
if (!bL_switcher_active)
return -EUNATCH;
mpidr &= MPIDR_HWID_BITMASK;
for_each_online_cpu(cpu) {
int pairing = bL_switcher_cpu_pairing[cpu];
if (pairing == -1)
continue;
if ((mpidr == cpu_logical_map(cpu)) ||
(mpidr == cpu_logical_map(pairing)))
return cpu;
}
return -EINVAL;
}
static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}
int bL_switcher_trace_trigger(void)
{
int ret;
preempt_disable();
bL_switcher_trace_trigger_cpu(NULL);
ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
static int bL_switcher_enable(void)
{
int cpu, ret;
mutex_lock(&bL_switcher_activation_lock);
lock_device_hotplug();
if (bL_switcher_active) {
unlock_device_hotplug();
mutex_unlock(&bL_switcher_activation_lock);
return 0;
}
pr_info("big.LITTLE switcher initializing\n");
ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
if (ret)
goto error;
ret = bL_switcher_halve_cpus();
if (ret)
goto error;
bL_switcher_trace_trigger();
for_each_online_cpu(cpu) {
struct bL_thread *t = &bL_threads[cpu];
spin_lock_init(&t->lock);
init_waitqueue_head(&t->wq);
init_completion(&t->started);
t->wanted_cluster = -1;
t->task = bL_switcher_thread_create(cpu, t);
}
bL_switcher_active = 1;
bL_activation_notify(BL_NOTIFY_POST_ENABLE);
pr_info("big.LITTLE switcher initialized\n");
goto out;
error:
pr_warn("big.LITTLE switcher initialization failed\n");
bL_activation_notify(BL_NOTIFY_POST_DISABLE);
out:
unlock_device_hotplug();
mutex_unlock(&bL_switcher_activation_lock);
return ret;
}
#ifdef CONFIG_SYSFS
static void bL_switcher_disable(void)
{
unsigned int cpu, cluster;
struct bL_thread *t;
struct task_struct *task;
mutex_lock(&bL_switcher_activation_lock);
lock_device_hotplug();
if (!bL_switcher_active)
goto out;
if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
bL_activation_notify(BL_NOTIFY_POST_ENABLE);
goto out;
}
bL_switcher_active = 0;
/*
* To deactivate the switcher, we must shut down the switcher
* threads to prevent any other requests from being accepted.
* Then, if the final cluster for given logical CPU is not the
* same as the original one, we'll recreate a switcher thread
* just for the purpose of switching the CPU back without any
* possibility for interference from external requests.
*/
for_each_online_cpu(cpu) {
t = &bL_threads[cpu];
task = t->task;
t->task = NULL;
if (!task || IS_ERR(task))
continue;
kthread_stop(task);
/* no more switch may happen on this CPU at this point */
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
if (cluster == bL_switcher_cpu_original_cluster[cpu])
continue;
init_completion(&t->started);
t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
task = bL_switcher_thread_create(cpu, t);
if (!IS_ERR(task)) {
wait_for_completion(&t->started);
kthread_stop(task);
cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
if (cluster == bL_switcher_cpu_original_cluster[cpu])
continue;
}
/* If execution gets here, we're in trouble. */
pr_crit("%s: unable to restore original cluster for CPU %d\n",
__func__, cpu);
pr_crit("%s: CPU %d can't be restored\n",
__func__, bL_switcher_cpu_pairing[cpu]);
cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
&bL_switcher_removed_logical_cpus);
}
bL_switcher_restore_cpus();
bL_switcher_trace_trigger();
bL_activation_notify(BL_NOTIFY_POST_DISABLE);
out:
unlock_device_hotplug();
mutex_unlock(&bL_switcher_activation_lock);
}
static ssize_t bL_switcher_active_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", bL_switcher_active);
}
static ssize_t bL_switcher_active_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret;
switch (buf[0]) {
case '0':
bL_switcher_disable();
ret = 0;
break;
case '1':
ret = bL_switcher_enable();
break;
default:
ret = -EINVAL;
}
return (ret >= 0) ? count : ret;
}
static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int ret = bL_switcher_trace_trigger();
return ret ? ret : count;
}
static struct kobj_attribute bL_switcher_active_attr =
__ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
static struct kobj_attribute bL_switcher_trace_trigger_attr =
__ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
static struct attribute *bL_switcher_attrs[] = {
&bL_switcher_active_attr.attr,
&bL_switcher_trace_trigger_attr.attr,
NULL,
};
static struct attribute_group bL_switcher_attr_group = {
.attrs = bL_switcher_attrs,
};
static struct kobject *bL_switcher_kobj;
static int __init bL_switcher_sysfs_init(void)
{
int ret;
bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
if (!bL_switcher_kobj)
return -ENOMEM;
ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
if (ret)
kobject_put(bL_switcher_kobj);
return ret;
}
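/*
 * With the default sysfs layout, the group above should appear as
 * /sys/kernel/bL_switcher/active (read-write) and
 * /sys/kernel/bL_switcher/trace_trigger (write-only); writing '0' or
 * '1' to "active" tears down or brings up the switcher as implemented
 * in the store handlers above.
 */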
#endif /* CONFIG_SYSFS */
bool bL_switcher_get_enabled(void)
{
mutex_lock(&bL_switcher_activation_lock);
return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
void bL_switcher_put_enabled(void)
{
mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
/*
* Veto any CPU hotplug operation on those CPUs we've removed
* while the switcher is active.
* We're just not ready to deal with that given the trickery involved.
*/
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
if (bL_switcher_active) {
int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
switch (action & 0xf) {
case CPU_UP_PREPARE:
case CPU_DOWN_PREPARE:
if (pairing == -1)
return NOTIFY_BAD;
}
}
return NOTIFY_DONE;
}
static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
static int __init bL_switcher_init(void)
{
int ret;
if (!mcpm_is_available())
return -ENODEV;
cpu_notifier(bL_switcher_hotplug_callback, 0);
if (!no_bL_switcher) {
ret = bL_switcher_enable();
if (ret)
return ret;
}
#ifdef CONFIG_SYSFS
ret = bL_switcher_sysfs_init();
if (ret)
pr_err("%s: unable to create sysfs entry\n", __func__);
#endif
return 0;
}
late_initcall(bL_switcher_init);
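/*
 * Boot-time control: core_param() registers a bare (prefix-less)
 * parameter, so booting with "no_bL_switcher=1" on the kernel command
 * line should leave the switcher disabled until it is enabled through
 * the sysfs "active" attribute above.
 */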

arch/arm/common/bL_switcher_dummy_if.c Normal file

@@ -0,0 +1,71 @@
/*
* arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
*
* Created by: Nicolas Pitre, November 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* Dummy interface to user space for debugging purpose only.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
#include <asm/bL_switcher.h>
static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
size_t len, loff_t *pos)
{
unsigned char val[3];
unsigned int cpu, cluster;
int ret;
pr_debug("%s\n", __func__);
if (len < 3)
return -EINVAL;
if (copy_from_user(val, buf, 3))
return -EFAULT;
/* format: <cpu#>,<cluster#> */
if (val[0] < '0' || val[0] > '9' ||
val[1] != ',' ||
val[2] < '0' || val[2] > '1')
return -EINVAL;
cpu = val[0] - '0';
cluster = val[2] - '0';
ret = bL_switch_request(cpu, cluster);
return ret ? : len;
}
static const struct file_operations bL_switcher_fops = {
.write = bL_switcher_write,
.owner = THIS_MODULE,
};
static struct miscdevice bL_switcher_device = {
MISC_DYNAMIC_MINOR,
"b.L_switcher",
&bL_switcher_fops
};
static int __init bL_switcher_dummy_if_init(void)
{
return misc_register(&bL_switcher_device);
}
static void __exit bL_switcher_dummy_if_exit(void)
{
misc_deregister(&bL_switcher_device);
}
module_init(bL_switcher_dummy_if_init);
module_exit(bL_switcher_dummy_if_exit);
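A minimal user-space test for the interface above (a sketch only: it assumes the misc device node shows up as /dev/b.L_switcher, matching the name registered with misc_register(), and the main/fd logic is illustrative):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* "<cpu#>,<cluster#>", as parsed by bL_switcher_write() */
	int fd = open("/dev/b.L_switcher", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "0,1", 3) != 3) {	/* move CPU 0 to cluster 1 */
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}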

arch/arm/common/dmabounce.c Normal file

@@ -0,0 +1,579 @@
/*
* arch/arm/common/dmabounce.c
*
* Special dma_{map/unmap/dma_sync}_* routines for systems that have
* limited DMA windows. These functions utilize bounce buffers to
* copy data to/from buffers located outside the DMA region. This
* only works for systems in which DMA memory is at the bottom of
* RAM, the remainder of memory is at the top and the DMA memory
* can be marked as ZONE_DMA. Anything beyond that such as discontiguous
* DMA windows will require custom implementations that reserve memory
* areas at early bootup.
*
* Original version by Brad Parker (brad@heeltoe.com)
* Re-written by Christopher Hoover <ch@murgatroid.com>
* Made generic by Deepak Saxena <dsaxena@plexity.net>
*
* Copyright (C) 2002 Hewlett Packard Company.
* Copyright (C) 2004 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#undef STATS
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
/* ************************************************** */
struct safe_buffer {
struct list_head node;
/* original request */
void *ptr;
size_t size;
int direction;
/* safe buffer info */
struct dmabounce_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
};
struct dmabounce_pool {
unsigned long size;
struct dma_pool *pool;
#ifdef STATS
unsigned long allocs;
#endif
};
struct dmabounce_device_info {
struct device *dev;
struct list_head safe_buffers;
#ifdef STATS
unsigned long total_allocs;
unsigned long map_op_count;
unsigned long bounce_count;
int attr_res;
#endif
struct dmabounce_pool small;
struct dmabounce_pool large;
rwlock_t lock;
int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};
#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
device_info->small.allocs,
device_info->large.allocs,
device_info->total_allocs - device_info->small.allocs -
device_info->large.allocs,
device_info->total_allocs,
device_info->map_op_count,
device_info->bounce_count);
}
static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf;
struct dmabounce_pool *pool;
struct device *dev = device_info->dev;
unsigned long flags;
dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
__func__, ptr, size, dir);
if (size <= device_info->small.size) {
pool = &device_info->small;
} else if (size <= device_info->large.size) {
pool = &device_info->large;
} else {
pool = NULL;
}
buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
if (buf == NULL) {
dev_warn(dev, "%s: kmalloc failed\n", __func__);
return NULL;
}
buf->ptr = ptr;
buf->size = size;
buf->direction = dir;
buf->pool = pool;
if (pool) {
buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
&buf->safe_dma_addr);
} else {
buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
GFP_ATOMIC);
}
if (buf->safe == NULL) {
dev_warn(dev,
"%s: could not alloc dma memory (size=%d)\n",
__func__, size);
kfree(buf);
return NULL;
}
#ifdef STATS
if (pool)
pool->allocs++;
device_info->total_allocs++;
#endif
write_lock_irqsave(&device_info->lock, flags);
list_add(&buf->node, &device_info->safe_buffers);
write_unlock_irqrestore(&device_info->lock, flags);
return buf;
}
/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
struct safe_buffer *b, *rb = NULL;
unsigned long flags;
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
if (b->safe_dma_addr <= safe_dma_addr &&
b->safe_dma_addr + b->size > safe_dma_addr) {
rb = b;
break;
}
read_unlock_irqrestore(&device_info->lock, flags);
return rb;
}
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
unsigned long flags;
dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);
write_lock_irqsave(&device_info->lock, flags);
list_del(&buf->node);
write_unlock_irqrestore(&device_info->lock, flags);
if (buf->pool)
dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
else
dma_free_coherent(device_info->dev, buf->size, buf->safe,
buf->safe_dma_addr);
kfree(buf);
}
/* ************************************************** */
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
dma_addr_t dma_addr, const char *where)
{
if (!dev || !dev->archdata.dmabounce)
return NULL;
if (dma_mapping_error(dev, dma_addr)) {
dev_err(dev, "Trying to %s invalid mapping\n", where);
return NULL;
}
return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
if (!dev || !dev->archdata.dmabounce)
return 0;
if (dev->dma_mask) {
unsigned long limit, mask = *dev->dma_mask;
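/*
 * For a contiguous mask of the form 2^n - 1 this yields 2^n, i.e.
 * the largest size a single mapping may span; if the mask covers
 * the whole address space, mask + 1 wraps to 0, limit becomes 0
 * and the size check below is skipped.
 */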
limit = (mask + 1) & ~mask;
if (limit && size > limit) {
dev_err(dev, "DMA mapping too big (requested %#x "
"mask %#Lx)\n", size, *dev->dma_mask);
return -E2BIG;
}
/* Figure out if we need to bounce from the DMA mask. */
if ((dma_addr | (dma_addr + size - 1)) & ~mask)
return 1;
}
return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
struct safe_buffer *buf;
if (device_info)
DO_STATS ( device_info->map_op_count++ );
buf = alloc_safe_buffer(device_info, ptr, size, dir);
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
return DMA_ERROR_CODE;
}
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
}
return buf->safe_dma_addr;
}
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
size_t size, enum dma_data_direction dir)
{
BUG_ON(buf->size != size);
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
void *ptr = buf->ptr;
dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
__func__, buf->safe, ptr, size);
memcpy(ptr, buf->safe, size);
/*
* Since we may have written to a page cache page,
* we need to ensure that the data will be coherent
* with user mappings.
*/
__cpuc_flush_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
}
/* ************************************************** */
/*
* see if a buffer address is in an 'unsafe' range. if it is
* allocate a 'safe' buffer and copy the unsafe buffer into it.
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
dma_addr_t dma_addr;
int ret;
dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
__func__, page, offset, size, dir);
dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
return DMA_ERROR_CODE;
if (ret == 0) {
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
return dma_addr;
}
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
return DMA_ERROR_CODE;
}
return map_single(dev, page_address(page) + offset, size, dir);
}
/*
* see if a mapped address was really a "safe" buffer and if so, copy
* the data from the safe buffer back to the unsafe buffer and free up
* the safe buffer. (basically return things back to the way they
* should be)
*/
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct safe_buffer *buf;
dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
__func__, dma_addr, size, dir);
buf = find_safe_buffer_dev(dev, dma_addr, __func__);
if (!buf) {
arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
return;
}
unmap_single(dev, buf, size, dir);
}
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
unsigned long off;
dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
__func__, addr, sz, dir);
buf = find_safe_buffer_dev(dev, addr, __func__);
if (!buf)
return 1;
off = addr - buf->safe_dma_addr;
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
__func__, buf->safe + off, buf->ptr + off, sz);
memcpy(buf->ptr + off, buf->safe + off, sz);
}
return 0;
}
static void dmabounce_sync_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
return;
arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}
static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
unsigned long off;
dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
__func__, addr, sz, dir);
buf = find_safe_buffer_dev(dev, addr, __func__);
if (!buf)
return 1;
off = addr - buf->safe_dma_addr;
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
buf->safe, buf->safe_dma_addr);
DO_STATS(dev->archdata.dmabounce->bounce_count++);
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
__func__, buf->ptr + off, buf->safe + off, sz);
memcpy(buf->safe + off, buf->ptr + off, sz);
}
return 0;
}
static void dmabounce_sync_for_device(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
if (!__dmabounce_sync_for_device(dev, handle, size, dir))
return;
arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}
static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
if (dev->archdata.dmabounce)
return 0;
return arm_dma_ops.set_dma_mask(dev, dma_mask);
}
static struct dma_map_ops dmabounce_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = dmabounce_map_page,
.unmap_page = dmabounce_unmap_page,
.sync_single_for_cpu = dmabounce_sync_for_cpu,
.sync_single_for_device = dmabounce_sync_for_device,
.map_sg = arm_dma_map_sg,
.unmap_sg = arm_dma_unmap_sg,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
.set_dma_mask = dmabounce_set_mask,
};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
const char *name, unsigned long size)
{
pool->size = size;
DO_STATS(pool->allocs = 0);
pool->pool = dma_pool_create(name, dev, size,
0 /* byte alignment */,
0 /* no page-crossing issues */);
return pool->pool ? 0 : -ENOMEM;
}
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
unsigned long large_buffer_size,
int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
struct dmabounce_device_info *device_info;
int ret;
device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
if (!device_info) {
dev_err(dev,
"Could not allocated dmabounce_device_info\n");
return -ENOMEM;
}
ret = dmabounce_init_pool(&device_info->small, dev,
"small_dmabounce_pool", small_buffer_size);
if (ret) {
dev_err(dev,
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
small_buffer_size);
goto err_free;
}
if (large_buffer_size) {
ret = dmabounce_init_pool(&device_info->large, dev,
"large_dmabounce_pool",
large_buffer_size);
if (ret) {
dev_err(dev,
"dmabounce: could not allocate DMA pool for %ld byte objects\n",
large_buffer_size);
goto err_destroy;
}
}
device_info->dev = dev;
INIT_LIST_HEAD(&device_info->safe_buffers);
rwlock_init(&device_info->lock);
device_info->needs_bounce = needs_bounce_fn;
#ifdef STATS
device_info->total_allocs = 0;
device_info->map_op_count = 0;
device_info->bounce_count = 0;
device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif
dev->archdata.dmabounce = device_info;
set_dma_ops(dev, &dmabounce_ops);
dev_info(dev, "dmabounce: registered device\n");
return 0;
err_destroy:
dma_pool_destroy(device_info->small.pool);
err_free:
kfree(device_info);
return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
void dmabounce_unregister_dev(struct device *dev)
{
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dev->archdata.dmabounce = NULL;
set_dma_ops(dev, NULL);
if (!device_info) {
dev_warn(dev,
"Never registered with dmabounce but attempting"
"to unregister!\n");
return;
}
if (!list_empty(&device_info->safe_buffers)) {
dev_err(dev,
"Removing from dmabounce with pending buffers!\n");
BUG();
}
if (device_info->small.pool)
dma_pool_destroy(device_info->small.pool);
if (device_info->large.pool)
dma_pool_destroy(device_info->large.pool);
#ifdef STATS
if (device_info->attr_res == 0)
device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif
kfree(device_info);
dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");

arch/arm/common/edma.c Normal file

(Diff suppressed: 1823 added lines, too large to display.)

arch/arm/common/fiq_glue.S Normal file

@@ -0,0 +1,118 @@
/*
* Copyright (C) 2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
.global fiq_glue_end
/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
ENTRY(fiq_glue)
/* store pc, cpsr from previous mode, reserve space for spsr */
mrs r12, spsr
sub lr, lr, #4
subs r10, #1
bne nested_fiq
str r12, [sp, #-8]!
str lr, [sp, #-4]!
/* store r8-r14 from previous mode */
sub sp, sp, #(7 * 4)
stmia sp, {r8-r14}^
nop
/* store r0-r7 from previous mode */
stmfd sp!, {r0-r7}
/* setup func(data,regs) arguments */
mov r0, r9
mov r1, sp
mov r3, r8
mov r7, sp
/* Get sp and lr from non-user modes */
and r4, r12, #MODE_MASK
cmp r4, #USR_MODE
beq fiq_from_usr_mode
mov r7, sp
orr r4, r4, #(PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, r4
str sp, [r7, #(4 * 13)]
str lr, [r7, #(4 * 14)]
mrs r5, spsr
str r5, [r7, #(4 * 17)]
cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
/* use fiq stack if we reenter this mode */
subne sp, r7, #(4 * 3)
fiq_from_usr_mode:
msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
mov r2, sp
sub sp, r7, #12
stmfd sp!, {r2, ip, lr}
/* call func(data,regs) */
blx r3
ldmfd sp, {r2, ip, lr}
mov sp, r2
/* restore/discard saved state */
cmp r4, #USR_MODE
beq fiq_from_usr_mode_exit
msr cpsr_c, r4
ldr sp, [r7, #(4 * 13)]
ldr lr, [r7, #(4 * 14)]
msr spsr_cxsf, r5
fiq_from_usr_mode_exit:
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
ldmfd sp!, {r0-r7}
ldr lr, [sp, #(4 * 7)]
ldr r12, [sp, #(4 * 8)]
add sp, sp, #(10 * 4)
exit_fiq:
msr spsr_cxsf, r12
add r10, #1
cmp r11, #0
moveqs pc, lr
bx r11 /* jump to custom fiq return function */
nested_fiq:
orr r12, r12, #(PSR_F_BIT)
b exit_fiq
fiq_glue_end:
ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
stmfd sp!, {r4}
mrs r4, cpsr
msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
movs r8, r0
mov r9, r1
mov sp, r2
mov r11, r3
moveq r10, #0
movne r10, #1
msr cpsr_c, r4
ldmfd sp!, {r4}
bx lr

arch/arm/common/fiq_glue_setup.c Normal file

@@ -0,0 +1,147 @@
/*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/fiq.h>
#include <asm/fiq_glue.h>
extern unsigned char fiq_glue, fiq_glue_end;
extern void fiq_glue_setup(void *func, void *data, void *sp,
fiq_return_handler_t fiq_return_handler);
static struct fiq_handler fiq_debugger_fiq_handler = {
.name = "fiq_glue",
};
DEFINE_PER_CPU(void *, fiq_stack);
static struct fiq_glue_handler *current_handler;
static fiq_return_handler_t fiq_return_handler;
static DEFINE_MUTEX(fiq_glue_lock);
static void fiq_glue_setup_helper(void *info)
{
struct fiq_glue_handler *handler = info;
fiq_glue_setup(handler->fiq, handler,
__get_cpu_var(fiq_stack) + THREAD_START_SP,
fiq_return_handler);
}
int fiq_glue_register_handler(struct fiq_glue_handler *handler)
{
int ret;
int cpu;
if (!handler || !handler->fiq)
return -EINVAL;
mutex_lock(&fiq_glue_lock);
if (fiq_stack) {
ret = -EBUSY;
goto err_busy;
}
for_each_possible_cpu(cpu) {
void *stack;
stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
if (WARN_ON(!stack)) {
ret = -ENOMEM;
goto err_alloc_fiq_stack;
}
per_cpu(fiq_stack, cpu) = stack;
}
ret = claim_fiq(&fiq_debbuger_fiq_handler);
if (WARN_ON(ret))
goto err_claim_fiq;
current_handler = handler;
on_each_cpu(fiq_glue_setup_helper, handler, true);
set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
mutex_unlock(&fiq_glue_lock);
return 0;
err_claim_fiq:
err_alloc_fiq_stack:
for_each_possible_cpu(cpu) {
free_pages((unsigned long)per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
per_cpu(fiq_stack, cpu) = NULL;
}
err_busy:
mutex_unlock(&fiq_glue_lock);
return ret;
}
static void fiq_glue_update_return_handler(void (*fiq_return)(void))
{
fiq_return_handler = fiq_return;
if (current_handler)
on_each_cpu(fiq_glue_setup_helper, current_handler, true);
}
int fiq_glue_set_return_handler(void (*fiq_return)(void))
{
int ret;
mutex_lock(&fiq_glue_lock);
if (fiq_return_handler) {
ret = -EBUSY;
goto err_busy;
}
fiq_glue_update_return_handler(fiq_return);
ret = 0;
err_busy:
mutex_unlock(&fiq_glue_lock);
return ret;
}
EXPORT_SYMBOL(fiq_glue_set_return_handler);
int fiq_glue_clear_return_handler(void (*fiq_return)(void))
{
int ret;
mutex_lock(&fiq_glue_lock);
if (WARN_ON(fiq_return_handler != fiq_return)) {
ret = -EINVAL;
goto err_inval;
}
fiq_glue_update_return_handler(NULL);
ret = 0;
err_inval:
mutex_unlock(&fiq_glue_lock);
return ret;
}
EXPORT_SYMBOL(fiq_glue_clear_return_handler);
/**
* fiq_glue_resume - Restore fiqs after suspend or low power idle states
*
* This must be called before calling local_fiq_enable after returning from a
* power state where the fiq mode registers were lost. If a driver provided
* a resume hook when it registered the handler it will be called.
*/
void fiq_glue_resume(void)
{
if (!current_handler)
return;
fiq_glue_setup(current_handler->fiq, current_handler,
__get_cpu_var(fiq_stack) + THREAD_START_SP,
fiq_return_handler);
if (current_handler->resume)
current_handler->resume(current_handler);
}
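/*
 * A minimal registration sketch (illustrative; it assumes the usual
 * fiq_glue_handler layout from asm/fiq_glue.h, which is not shown in
 * this excerpt, and the my_* names are hypothetical):
 */
static void my_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	/* FIQ context: no locks, no allocations, no sleeping */
}

static struct fiq_glue_handler my_handler = {
	.fiq = my_fiq,
};

static int __init my_fiq_init(void)
{
	/* allocates per-cpu FIQ stacks and claims the FIQ, as above */
	return fiq_glue_register_handler(&my_handler);
}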

arch/arm/common/firmware.c Normal file

@@ -0,0 +1,18 @@
/*
* Copyright (C) 2012 Samsung Electronics.
* Kyungmin Park <kyungmin.park@samsung.com>
* Tomasz Figa <t.figa@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <asm/firmware.h>
static const struct firmware_ops default_firmware_ops;
const struct firmware_ops *firmware_ops = &default_firmware_ops;

arch/arm/common/icst.c Normal file

@@ -0,0 +1,100 @@
/*
* linux/arch/arm/common/icst307.c
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Support functions for calculating clocks/divisors for the ICST307
* clock generators. See http://www.idt.com/ for more information
* on these devices.
*
* This is an almost identical implementation to the ICST525 clock generator.
* Only the s2div and idx2s tables differ.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/hardware/icst.h>
/*
* Divisors for each OD setting.
*/
const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
EXPORT_SYMBOL(icst307_s2div);
EXPORT_SYMBOL(icst525_s2div);
unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
{
return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
}
EXPORT_SYMBOL(icst_hz);
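/*
 * Worked example (illustrative numbers): with a 24 MHz reference,
 * v = 92, r = 22 and an OD setting whose divisor s2div[s] is 2,
 *
 *	icst_hz = 24000000 * 2 * (92 + 8) / ((22 + 2) * 2)
 *	        = 4800000000 / 48
 *	        = 100 MHz
 */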
/*
* Ascending divisor S values.
*/
const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
EXPORT_SYMBOL(icst307_idx2s);
EXPORT_SYMBOL(icst525_idx2s);
struct icst_vco
icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
{
struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
unsigned long f;
unsigned int i = 0, rd, best = (unsigned int)-1;
/*
* First, find the PLL output divisor such
* that the PLL output is within spec.
*/
do {
f = freq * p->s2div[p->idx2s[i]];
if (f > p->vco_min && f <= p->vco_max)
break;
i++;
} while (i < 8);
if (i >= 8)
return vco;
vco.s = p->idx2s[i];
/*
* Now find the closest divisor combination
* which gives a PLL output of 'f'.
*/
for (rd = p->rd_min; rd <= p->rd_max; rd++) {
unsigned long fref_div, f_pll;
unsigned int vd;
int f_diff;
fref_div = (2 * p->ref) / rd;
vd = (f + fref_div / 2) / fref_div;
if (vd < p->vd_min || vd > p->vd_max)
continue;
f_pll = fref_div * vd;
f_diff = f_pll - f;
if (f_diff < 0)
f_diff = -f_diff;
if ((unsigned)f_diff < best) {
vco.v = vd - 8;
vco.r = rd - 2;
if (f_diff == 0)
break;
best = f_diff;
}
}
return vco;
}
EXPORT_SYMBOL(icst_hz_to_vco);

arch/arm/common/it8152.c Normal file

@@ -0,0 +1,355 @@
/*
* linux/arch/arm/common/it8152.c
*
* Copyright Compulab Ltd, 2002-2007
* Mike Rapoport <mike@compulab.co.il>
*
* The DMA bouncing part is taken from arch/arm/mach-ixp4xx/common-pci.c
* (see this file for respective copyrights)
*
* Thanks to Guennadi Liakhovetski <gl@dsa-ac.de> for IRQ enumeration
* and demux code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/mach/pci.h>
#include <asm/hardware/it8152.h>
#define MAX_SLOTS 21
static void it8152_mask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq >= IT8152_LD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) |
(1 << (irq - IT8152_LD_IRQ(0)))),
IT8152_INTC_LDCNIMR);
} else if (irq >= IT8152_LP_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) |
(1 << (irq - IT8152_LP_IRQ(0)))),
IT8152_INTC_LPCNIMR);
} else if (irq >= IT8152_PD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) |
(1 << (irq - IT8152_PD_IRQ(0)))),
IT8152_INTC_PDCNIMR);
}
}
static void it8152_unmask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
if (irq >= IT8152_LD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) &
~(1 << (irq - IT8152_LD_IRQ(0)))),
IT8152_INTC_LDCNIMR);
} else if (irq >= IT8152_LP_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_LPCNIMR) &
~(1 << (irq - IT8152_LP_IRQ(0)))),
IT8152_INTC_LPCNIMR);
} else if (irq >= IT8152_PD_IRQ(0)) {
__raw_writel((__raw_readl(IT8152_INTC_PDCNIMR) &
~(1 << (irq - IT8152_PD_IRQ(0)))),
IT8152_INTC_PDCNIMR);
}
}
static struct irq_chip it8152_irq_chip = {
.name = "it8152",
.irq_ack = it8152_mask_irq,
.irq_mask = it8152_mask_irq,
.irq_unmask = it8152_unmask_irq,
};
void it8152_init_irq(void)
{
int irq;
__raw_writel((0xffff), IT8152_INTC_PDCNIMR);
__raw_writel((0), IT8152_INTC_PDCNIRR);
__raw_writel((0xffff), IT8152_INTC_LPCNIMR);
__raw_writel((0), IT8152_INTC_LPCNIRR);
__raw_writel((0xffff), IT8152_INTC_LDCNIMR);
__raw_writel((0), IT8152_INTC_LDCNIRR);
for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
irq_set_chip_and_handler(irq, &it8152_irq_chip,
handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
void it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
{
int bits_pd, bits_lp, bits_ld;
int i;
while (1) {
/* Read all */
bits_pd = __raw_readl(IT8152_INTC_PDCNIRR);
bits_lp = __raw_readl(IT8152_INTC_LPCNIRR);
bits_ld = __raw_readl(IT8152_INTC_LDCNIRR);
/* Ack */
__raw_writel((~bits_pd), IT8152_INTC_PDCNIRR);
__raw_writel((~bits_lp), IT8152_INTC_LPCNIRR);
__raw_writel((~bits_ld), IT8152_INTC_LDCNIRR);
if (!(bits_ld | bits_lp | bits_pd)) {
/* Re-read to guarantee that there was a moment in
time when all three were 0. */
bits_pd = __raw_readl(IT8152_INTC_PDCNIRR);
bits_lp = __raw_readl(IT8152_INTC_LPCNIRR);
bits_ld = __raw_readl(IT8152_INTC_LDCNIRR);
if (!(bits_ld | bits_lp | bits_pd))
return;
}
bits_pd &= ((1 << IT8152_PD_IRQ_COUNT) - 1);
while (bits_pd) {
i = __ffs(bits_pd);
generic_handle_irq(IT8152_PD_IRQ(i));
bits_pd &= ~(1 << i);
}
bits_lp &= ((1 << IT8152_LP_IRQ_COUNT) - 1);
while (bits_lp) {
i = __ffs(bits_lp);
generic_handle_irq(IT8152_LP_IRQ(i));
bits_lp &= ~(1 << i);
}
bits_ld &= ((1 << IT8152_LD_IRQ_COUNT) - 1);
while (bits_ld) {
i = __ffs(bits_ld);
generic_handle_irq(IT8152_LD_IRQ(i));
bits_ld &= ~(1 << i);
}
}
}
/* mapping for on-chip devices */
int __init it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if ((dev->vendor == PCI_VENDOR_ID_ITE) &&
(dev->device == PCI_DEVICE_ID_ITE_8152)) {
if ((dev->class >> 8) == PCI_CLASS_MULTIMEDIA_AUDIO)
return IT8152_AUDIO_INT;
if ((dev->class >> 8) == PCI_CLASS_SERIAL_USB)
return IT8152_USB_INT;
if ((dev->class >> 8) == PCI_CLASS_SYSTEM_DMA)
return IT8152_CDMA_INT;
}
return 0;
}
static unsigned long it8152_pci_dev_base_address(struct pci_bus *bus,
unsigned int devfn)
{
unsigned long addr = 0;
if (bus->number == 0) {
if (devfn < PCI_DEVFN(MAX_SLOTS, 0))
addr = (devfn << 8);
} else
addr = (bus->number << 16) | (devfn << 8);
return addr;
}
static int it8152_pci_read_config(struct pci_bus *bus,
unsigned int devfn, int where,
int size, u32 *value)
{
unsigned long addr = it8152_pci_dev_base_address(bus, devfn);
u32 v;
int shift;
shift = (where & 3);
__raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
v = (__raw_readl(IT8152_PCI_CFG_DATA) >> (8 * (shift)));
*value = v;
return PCIBIOS_SUCCESSFUL;
}
static int it8152_pci_write_config(struct pci_bus *bus,
unsigned int devfn, int where,
int size, u32 value)
{
unsigned long addr = it8152_pci_dev_base_address(bus, devfn);
u32 v, vtemp, mask = 0;
int shift;
if (size == 1)
mask = 0xff;
if (size == 2)
mask = 0xffff;
shift = (where & 3);
__raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
vtemp = __raw_readl(IT8152_PCI_CFG_DATA);
if (mask)
vtemp &= ~(mask << (8 * shift));
else
vtemp = 0;
v = (value << (8 * shift));
__raw_writel((addr + where), IT8152_PCI_CFG_ADDR);
__raw_writel((v | vtemp), IT8152_PCI_CFG_DATA);
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops it8152_ops = {
.read = it8152_pci_read_config,
.write = it8152_pci_write_config,
};
static struct resource it8152_io = {
.name = "IT8152 PCI I/O region",
.flags = IORESOURCE_IO,
};
static struct resource it8152_mem = {
.name = "IT8152 PCI memory region",
.start = 0x10000000,
.end = 0x13e00000,
.flags = IORESOURCE_MEM,
};
/*
* The following functions are needed for DMA bouncing.
* The ITE8152 chip can address up to 64MB, so all the devices
* connected to it (PCI and USB) must use a limited DMA window.
*/
static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
__func__, dma_addr, size);
return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
}
/*
* Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
* other devices.
*/
static int it8152_pci_platform_notify(struct device *dev)
{
if (dev_is_pci(dev)) {
if (dev->dma_mask)
*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
}
return 0;
}
static int it8152_pci_platform_notify_remove(struct device *dev)
{
if (dev_is_pci(dev))
dmabounce_unregister_dev(dev);
return 0;
}
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= PHYS_OFFSET + SZ_64M - 1)
return 0;
return -EIO;
}
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
/*
* FIXME: use pci_ioremap_io to remap the IO space here and
* move over to the generic io.h implementation.
* This requires solving the same problem for PXA PCMCIA
* support.
*/
it8152_io.start = (unsigned long)IT8152_IO_BASE + 0x12000;
it8152_io.end = (unsigned long)IT8152_IO_BASE + 0x12000 + 0x100000;
sys->mem_offset = 0x10000000;
sys->io_offset = (unsigned long)IT8152_IO_BASE;
if (request_resource(&ioport_resource, &it8152_io)) {
printk(KERN_ERR "PCI: unable to allocate IO region\n");
goto err0;
}
if (request_resource(&iomem_resource, &it8152_mem)) {
printk(KERN_ERR "PCI: unable to allocate memory region\n");
goto err1;
}
pci_add_resource_offset(&sys->resources, &it8152_io, sys->io_offset);
pci_add_resource_offset(&sys->resources, &it8152_mem, sys->mem_offset);
if (platform_notify || platform_notify_remove) {
printk(KERN_ERR "PCI: Can't use platform_notify\n");
goto err2;
}
platform_notify = it8152_pci_platform_notify;
platform_notify_remove = it8152_pci_platform_notify_remove;
return 1;
err2:
release_resource(&it8152_io);
err1:
release_resource(&it8152_mem);
err0:
return -EBUSY;
}
/* The ITE bridge requires setting the latency timer to avoid early
bus access termination by PCI bus master devices
*/
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
/* no need to update on-chip OHCI controller */
if ((dev->vendor == PCI_VENDOR_ID_ITE) &&
(dev->device == PCI_DEVICE_ID_ITE_8152) &&
((dev->class >> 8) == PCI_CLASS_SERIAL_USB))
return;
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
printk(KERN_DEBUG "PCI: Setting latency timer of device %s to %d\n",
pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
EXPORT_SYMBOL(dma_set_coherent_mask);

arch/arm/common/locomo.c Normal file

@@ -0,0 +1,914 @@
/*
* linux/arch/arm/common/locomo.c
*
* Sharp LoCoMo support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains all generic LoCoMo support.
*
* All initialization functions provided here are intended to be called
* from machine specific code with proper arguments when required.
*
* Based on sa1111.c
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/hardware/locomo.h>
/* LoCoMo Interrupts */
#define IRQ_LOCOMO_KEY (0)
#define IRQ_LOCOMO_GPIO (1)
#define IRQ_LOCOMO_LT (2)
#define IRQ_LOCOMO_SPI (3)
/* M62332 output channel selection */
#define M62332_EVR_CH 1 /* M62332 volume channel number */
/* 0 : CH.1 , 1 : CH. 2 */
/* DAC send data */
#define M62332_SLAVE_ADDR 0x4e /* Slave address */
#define M62332_W_BIT 0x00 /* W bit (0 only) */
#define M62332_SUB_ADDR 0x00 /* Sub address */
#define M62332_A_BIT 0x00 /* A bit (0 only) */
/* DAC setup and hold times (expressed in us) */
#define DAC_BUS_FREE_TIME 5 /* 4.7 us */
#define DAC_START_SETUP_TIME 5 /* 4.7 us */
#define DAC_STOP_SETUP_TIME 4 /* 4.0 us */
#define DAC_START_HOLD_TIME 5 /* 4.7 us */
#define DAC_SCL_LOW_HOLD_TIME 5 /* 4.7 us */
#define DAC_SCL_HIGH_HOLD_TIME 4 /* 4.0 us */
#define DAC_DATA_SETUP_TIME 1 /* 250 ns */
#define DAC_DATA_HOLD_TIME 1 /* 300 ns */
#define DAC_LOW_SETUP_TIME 1 /* 300 ns */
#define DAC_HIGH_SETUP_TIME 1 /* 1000 ns */
/* the following is the overall data for the locomo chip */
struct locomo {
struct device *dev;
unsigned long phys;
unsigned int irq;
int irq_base;
spinlock_t lock;
void __iomem *base;
#ifdef CONFIG_PM
void *saved_state;
#endif
};
struct locomo_dev_info {
unsigned long offset;
unsigned long length;
unsigned int devid;
unsigned int irq[1];
const char *name;
};
/* All the locomo devices. If offset is non-zero, the mapbase for the
* locomo_dev will be set to the chip base plus offset. If offset is
* zero, then the mapbase for the locomo_dev will be set to zero. An
* offset of zero means the device only uses GPIOs or other helper
* functions inside this file */
static struct locomo_dev_info locomo_devices[] = {
{
.devid = LOCOMO_DEVID_KEYBOARD,
.irq = { IRQ_LOCOMO_KEY },
.name = "locomo-keyboard",
.offset = LOCOMO_KEYBOARD,
.length = 16,
},
{
.devid = LOCOMO_DEVID_FRONTLIGHT,
.irq = {},
.name = "locomo-frontlight",
.offset = LOCOMO_FRONTLIGHT,
.length = 8,
},
{
.devid = LOCOMO_DEVID_BACKLIGHT,
.irq = {},
.name = "locomo-backlight",
.offset = LOCOMO_BACKLIGHT,
.length = 8,
},
{
.devid = LOCOMO_DEVID_AUDIO,
.irq = {},
.name = "locomo-audio",
.offset = LOCOMO_AUDIO,
.length = 4,
},
{
.devid = LOCOMO_DEVID_LED,
.irq = {},
.name = "locomo-led",
.offset = LOCOMO_LED,
.length = 8,
},
{
.devid = LOCOMO_DEVID_UART,
.irq = {},
.name = "locomo-uart",
.offset = 0,
.length = 0,
},
{
.devid = LOCOMO_DEVID_SPI,
.irq = {},
.name = "locomo-spi",
.offset = LOCOMO_SPI,
.length = 0x30,
},
};
static void locomo_handler(unsigned int irq, struct irq_desc *desc)
{
struct locomo *lchip = irq_get_chip_data(irq);
int req, i;
/* Acknowledge the parent IRQ */
desc->irq_data.chip->irq_ack(&desc->irq_data);
/* check why this interrupt was generated */
req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00;
if (req) {
/* generate the next interrupt(s) */
irq = lchip->irq_base;
for (i = 0; i <= 3; i++, irq++) {
if (req & (0x0100 << i)) {
generic_handle_irq(irq);
}
}
}
}
static void locomo_ack_irq(struct irq_data *d)
{
}
static void locomo_mask_irq(struct irq_data *d)
{
struct locomo *lchip = irq_data_get_irq_chip_data(d);
unsigned int r;
r = locomo_readl(lchip->base + LOCOMO_ICR);
r &= ~(0x0010 << (d->irq - lchip->irq_base));
locomo_writel(r, lchip->base + LOCOMO_ICR);
}
static void locomo_unmask_irq(struct irq_data *d)
{
struct locomo *lchip = irq_data_get_irq_chip_data(d);
unsigned int r;
r = locomo_readl(lchip->base + LOCOMO_ICR);
r |= (0x0010 << (d->irq - lchip->irq_base));
locomo_writel(r, lchip->base + LOCOMO_ICR);
}
static struct irq_chip locomo_chip = {
.name = "LOCOMO",
.irq_ack = locomo_ack_irq,
.irq_mask = locomo_mask_irq,
.irq_unmask = locomo_unmask_irq,
};
static void locomo_setup_irq(struct locomo *lchip)
{
int irq = lchip->irq_base;
/*
* Install handler for IRQ_LOCOMO_HW.
*/
irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
irq_set_chip_data(lchip->irq, lchip);
irq_set_chained_handler(lchip->irq, locomo_handler);
/* Install handlers for IRQ_LOCOMO_* */
for ( ; irq <= lchip->irq_base + 3; irq++) {
irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq);
irq_set_chip_data(irq, lchip);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
}
static void locomo_dev_release(struct device *_dev)
{
struct locomo_dev *dev = LOCOMO_DEV(_dev);
kfree(dev);
}
static int
locomo_init_one_child(struct locomo *lchip, struct locomo_dev_info *info)
{
struct locomo_dev *dev;
int ret;
dev = kzalloc(sizeof(struct locomo_dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto out;
}
/*
* If the parent device has a DMA mask associated with it,
* propagate it down to the children.
*/
if (lchip->dev->dma_mask) {
dev->dma_mask = *lchip->dev->dma_mask;
dev->dev.dma_mask = &dev->dma_mask;
}
dev_set_name(&dev->dev, "%s", info->name);
dev->devid = info->devid;
dev->dev.parent = lchip->dev;
dev->dev.bus = &locomo_bus_type;
dev->dev.release = locomo_dev_release;
dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask;
if (info->offset)
dev->mapbase = lchip->base + info->offset;
else
dev->mapbase = 0;
dev->length = info->length;
dev->irq[0] = (lchip->irq_base == NO_IRQ) ?
NO_IRQ : lchip->irq_base + info->irq[0];
ret = device_register(&dev->dev);
if (ret) {
out:
kfree(dev);
}
return ret;
}
#ifdef CONFIG_PM
struct locomo_save_data {
u16 LCM_GPO;
u16 LCM_SPICT;
u16 LCM_GPE;
u16 LCM_ASD;
u16 LCM_SPIMD;
};
static int locomo_suspend(struct platform_device *dev, pm_message_t state)
{
struct locomo *lchip = platform_get_drvdata(dev);
struct locomo_save_data *save;
unsigned long flags;
save = kmalloc(sizeof(struct locomo_save_data), GFP_KERNEL);
if (!save)
return -ENOMEM;
lchip->saved_state = save;
spin_lock_irqsave(&lchip->lock, flags);
save->LCM_GPO = locomo_readl(lchip->base + LOCOMO_GPO); /* GPIO */
locomo_writel(0x00, lchip->base + LOCOMO_GPO);
save->LCM_SPICT = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPICT); /* SPI */
locomo_writel(0x40, lchip->base + LOCOMO_SPI + LOCOMO_SPICT);
save->LCM_GPE = locomo_readl(lchip->base + LOCOMO_GPE); /* GPIO */
locomo_writel(0x00, lchip->base + LOCOMO_GPE);
save->LCM_ASD = locomo_readl(lchip->base + LOCOMO_ASD); /* ADSTART */
locomo_writel(0x00, lchip->base + LOCOMO_ASD);
save->LCM_SPIMD = locomo_readl(lchip->base + LOCOMO_SPI + LOCOMO_SPIMD); /* SPI */
locomo_writel(0x3C14, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD);
locomo_writel(0x00, lchip->base + LOCOMO_PAIF);
locomo_writel(0x00, lchip->base + LOCOMO_DAC);
locomo_writel(0x00, lchip->base + LOCOMO_BACKLIGHT + LOCOMO_TC);
if ((locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT0) & 0x88) && (locomo_readl(lchip->base + LOCOMO_LED + LOCOMO_LPT1) & 0x88))
locomo_writel(0x00, lchip->base + LOCOMO_C32K); /* CLK32 off */
else
/* 18MHz already enabled, so no wait */
locomo_writel(0xc1, lchip->base + LOCOMO_C32K); /* CLK32 on */
locomo_writel(0x00, lchip->base + LOCOMO_TADC); /* 18MHz clock off*/
locomo_writel(0x00, lchip->base + LOCOMO_AUDIO + LOCOMO_ACC); /* 22MHz/24MHz clock off */
locomo_writel(0x00, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS); /* FL */
spin_unlock_irqrestore(&lchip->lock, flags);
return 0;
}
static int locomo_resume(struct platform_device *dev)
{
struct locomo *lchip = platform_get_drvdata(dev);
struct locomo_save_data *save;
unsigned long r;
unsigned long flags;
save = lchip->saved_state;
if (!save)
return 0;
spin_lock_irqsave(&lchip->lock, flags);
locomo_writel(save->LCM_GPO, lchip->base + LOCOMO_GPO);
locomo_writel(save->LCM_SPICT, lchip->base + LOCOMO_SPI + LOCOMO_SPICT);
locomo_writel(save->LCM_GPE, lchip->base + LOCOMO_GPE);
locomo_writel(save->LCM_ASD, lchip->base + LOCOMO_ASD);
locomo_writel(save->LCM_SPIMD, lchip->base + LOCOMO_SPI + LOCOMO_SPIMD);
locomo_writel(0x00, lchip->base + LOCOMO_C32K);
locomo_writel(0x90, lchip->base + LOCOMO_TADC);
locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KSC);
r = locomo_readl(lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC);
r &= 0xFEFF;
locomo_writel(r, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC);
locomo_writel(0x1, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KCMD);
spin_unlock_irqrestore(&lchip->lock, flags);
lchip->saved_state = NULL;
kfree(save);
return 0;
}
#endif
/**
* locomo_probe - probe for a single LoCoMo chip.
 * @me: parent device
 * @mem: memory resource of the chip
 * @irq: parent interrupt
*
* Probe for a LoCoMo chip. This must be called
* before any other locomo-specific code.
*
* Returns:
* %-ENODEV device not found.
* %-EBUSY physical address already marked in-use.
* %0 successful.
*/
static int
__locomo_probe(struct device *me, struct resource *mem, int irq)
{
struct locomo_platform_data *pdata = me->platform_data;
struct locomo *lchip;
unsigned long r;
int i, ret = -ENODEV;
lchip = kzalloc(sizeof(struct locomo), GFP_KERNEL);
if (!lchip)
return -ENOMEM;
spin_lock_init(&lchip->lock);
lchip->dev = me;
dev_set_drvdata(lchip->dev, lchip);
lchip->phys = mem->start;
lchip->irq = irq;
lchip->irq_base = (pdata) ? pdata->irq_base : NO_IRQ;
/*
* Map the whole region. This also maps the
* registers for our children.
*/
lchip->base = ioremap(mem->start, PAGE_SIZE);
if (!lchip->base) {
ret = -ENOMEM;
goto out;
}
/* locomo initialize */
locomo_writel(0, lchip->base + LOCOMO_ICR);
/* KEYBOARD */
locomo_writel(0, lchip->base + LOCOMO_KEYBOARD + LOCOMO_KIC);
/* GPIO */
locomo_writel(0, lchip->base + LOCOMO_GPO);
locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14))
, lchip->base + LOCOMO_GPE);
locomo_writel((LOCOMO_GPIO(1) | LOCOMO_GPIO(2) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14))
, lchip->base + LOCOMO_GPD);
locomo_writel(0, lchip->base + LOCOMO_GIE);
/* Frontlight */
locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
locomo_writel(0, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
/* Longtime timer */
locomo_writel(0, lchip->base + LOCOMO_LTINT);
/* SPI */
locomo_writel(0, lchip->base + LOCOMO_SPI + LOCOMO_SPIIE);
locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD);
r = locomo_readl(lchip->base + LOCOMO_ASD);
r |= 0x8000;
locomo_writel(r, lchip->base + LOCOMO_ASD);
locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD);
r = locomo_readl(lchip->base + LOCOMO_HSD);
r |= 0x8000;
locomo_writel(r, lchip->base + LOCOMO_HSD);
locomo_writel(128 / 8, lchip->base + LOCOMO_HSC);
/* XON */
locomo_writel(0x80, lchip->base + LOCOMO_TADC);
udelay(1000);
/* CLK9MEN */
r = locomo_readl(lchip->base + LOCOMO_TADC);
r |= 0x10;
locomo_writel(r, lchip->base + LOCOMO_TADC);
udelay(100);
/* init DAC */
r = locomo_readl(lchip->base + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB;
locomo_writel(r, lchip->base + LOCOMO_DAC);
r = locomo_readl(lchip->base + LOCOMO_VER);
printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff));
/*
* The interrupt controller must be initialised before any
* other device to ensure that the interrupts are available.
*/
if (lchip->irq != NO_IRQ && lchip->irq_base != NO_IRQ)
locomo_setup_irq(lchip);
for (i = 0; i < ARRAY_SIZE(locomo_devices); i++)
locomo_init_one_child(lchip, &locomo_devices[i]);
return 0;
out:
kfree(lchip);
return ret;
}
static int locomo_remove_child(struct device *dev, void *data)
{
device_unregister(dev);
return 0;
}
static void __locomo_remove(struct locomo *lchip)
{
device_for_each_child(lchip->dev, NULL, locomo_remove_child);
if (lchip->irq != NO_IRQ) {
irq_set_chained_handler(lchip->irq, NULL);
irq_set_handler_data(lchip->irq, NULL);
}
iounmap(lchip->base);
kfree(lchip);
}
static int locomo_probe(struct platform_device *dev)
{
struct resource *mem;
int irq;
mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!mem)
return -EINVAL;
irq = platform_get_irq(dev, 0);
if (irq < 0)
return -ENXIO;
return __locomo_probe(&dev->dev, mem, irq);
}
static int locomo_remove(struct platform_device *dev)
{
struct locomo *lchip = platform_get_drvdata(dev);
if (lchip) {
__locomo_remove(lchip);
platform_set_drvdata(dev, NULL);
}
return 0;
}
/*
* Not sure if this should be on the system bus or not yet.
* We really want some way to register a system device at
* the per-machine level, and then have this driver pick
* up the registered devices.
*/
static struct platform_driver locomo_device_driver = {
.probe = locomo_probe,
.remove = locomo_remove,
#ifdef CONFIG_PM
.suspend = locomo_suspend,
.resume = locomo_resume,
#endif
.driver = {
.name = "locomo",
},
};
/*
* Get the parent device driver (us) structure
* from a child function device
*/
static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev)
{
return (struct locomo *)dev_get_drvdata(ldev->dev.parent);
}
void locomo_gpio_set_dir(struct device *dev, unsigned int bits, unsigned int dir)
{
struct locomo *lchip = dev_get_drvdata(dev);
unsigned long flags;
unsigned int r;
if (!lchip)
return;
spin_lock_irqsave(&lchip->lock, flags);
r = locomo_readl(lchip->base + LOCOMO_GPD);
if (dir)
r |= bits;
else
r &= ~bits;
locomo_writel(r, lchip->base + LOCOMO_GPD);
r = locomo_readl(lchip->base + LOCOMO_GPE);
if (dir)
r |= bits;
else
r &= ~bits;
locomo_writel(r, lchip->base + LOCOMO_GPE);
spin_unlock_irqrestore(&lchip->lock, flags);
}
EXPORT_SYMBOL(locomo_gpio_set_dir);
int locomo_gpio_read_level(struct device *dev, unsigned int bits)
{
struct locomo *lchip = dev_get_drvdata(dev);
unsigned long flags;
unsigned int ret;
if (!lchip)
return -ENODEV;
spin_lock_irqsave(&lchip->lock, flags);
ret = locomo_readl(lchip->base + LOCOMO_GPL);
spin_unlock_irqrestore(&lchip->lock, flags);
ret &= bits;
return ret;
}
EXPORT_SYMBOL(locomo_gpio_read_level);
int locomo_gpio_read_output(struct device *dev, unsigned int bits)
{
struct locomo *lchip = dev_get_drvdata(dev);
unsigned long flags;
unsigned int ret;
if (!lchip)
return -ENODEV;
spin_lock_irqsave(&lchip->lock, flags);
ret = locomo_readl(lchip->base + LOCOMO_GPO);
spin_unlock_irqrestore(&lchip->lock, flags);
ret &= bits;
return ret;
}
EXPORT_SYMBOL(locomo_gpio_read_output);
void locomo_gpio_write(struct device *dev, unsigned int bits, unsigned int set)
{
struct locomo *lchip = dev_get_drvdata(dev);
unsigned long flags;
unsigned int r;
if (!lchip)
return;
spin_lock_irqsave(&lchip->lock, flags);
r = locomo_readl(lchip->base + LOCOMO_GPO);
if (set)
r |= bits;
else
r &= ~bits;
locomo_writel(r, lchip->base + LOCOMO_GPO);
spin_unlock_irqrestore(&lchip->lock, flags);
}
EXPORT_SYMBOL(locomo_gpio_write);
static void locomo_m62332_sendbit(void *mapbase, int bit)
{
unsigned int r;
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SCLOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SCLOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
if (bit & 1) {
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SDAOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
} else {
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SDAOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
}
udelay(DAC_DATA_SETUP_TIME); /* 250 nsec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */
}
void locomo_m62332_senddata(struct locomo_dev *ldev, unsigned int dac_data, int channel)
{
struct locomo *lchip = locomo_chip_driver(ldev);
int i;
unsigned char data;
unsigned int r;
void *mapbase = lchip->base;
unsigned long flags;
spin_lock_irqsave(&lchip->lock, flags);
/* Start */
udelay(DAC_BUS_FREE_TIME); /* 5.0 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.0 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SDAOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_START_HOLD_TIME); /* 5.0 usec */
udelay(DAC_DATA_HOLD_TIME); /* 300 nsec */
/* Send slave address and W bit (LSB is W bit) */
data = (M62332_SLAVE_ADDR << 1) | M62332_W_BIT;
for (i = 1; i <= 8; i++) {
locomo_m62332_sendbit(mapbase, data >> (8 - i));
}
/* Check A bit */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SCLOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SDAOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
printk(KERN_WARNING "locomo: m62332_senddata Error 1\n");
goto out;
}
/* Send Sub address (LSB is channel select) */
/* channel = 0 : ch1 select */
/* = 1 : ch2 select */
data = M62332_SUB_ADDR + channel;
for (i = 1; i <= 8; i++) {
locomo_m62332_sendbit(mapbase, data >> (8 - i));
}
/* Check A bit */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SCLOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SDAOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
printk(KERN_WARNING "locomo: m62332_senddata Error 2\n");
goto out;
}
/* Send DAC data */
for (i = 1; i <= 8; i++) {
locomo_m62332_sendbit(mapbase, dac_data >> (8 - i));
}
/* Check A bit */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SCLOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SDAOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4.7 usec */
if (locomo_readl(mapbase + LOCOMO_DAC) & LOCOMO_DAC_SDAOEB) { /* High is error */
printk(KERN_WARNING "locomo: m62332_senddata Error 3\n");
}
out:
/* stop */
r = locomo_readl(mapbase + LOCOMO_DAC);
r &= ~(LOCOMO_DAC_SCLOEB);
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SDAOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_HIGH_SETUP_TIME); /* 1000 nsec */
udelay(DAC_SCL_HIGH_HOLD_TIME); /* 4 usec */
r = locomo_readl(mapbase + LOCOMO_DAC);
r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB;
locomo_writel(r, mapbase + LOCOMO_DAC);
udelay(DAC_LOW_SETUP_TIME); /* 300 nsec */
udelay(DAC_SCL_LOW_HOLD_TIME); /* 4.7 usec */
spin_unlock_irqrestore(&lchip->lock, flags);
}
EXPORT_SYMBOL(locomo_m62332_senddata);
/*
* Frontlight control
*/
void locomo_frontlight_set(struct locomo_dev *dev, int duty, int vr, int bpwf)
{
unsigned long flags;
struct locomo *lchip = locomo_chip_driver(dev);
if (vr)
locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 1);
else
locomo_gpio_write(dev->dev.parent, LOCOMO_GPIO_FL_VR, 0);
spin_lock_irqsave(&lchip->lock, flags);
locomo_writel(bpwf, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
udelay(100);
locomo_writel(duty, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALD);
locomo_writel(bpwf | LOCOMO_ALC_EN, lchip->base + LOCOMO_FRONTLIGHT + LOCOMO_ALS);
spin_unlock_irqrestore(&lchip->lock, flags);
}
EXPORT_SYMBOL(locomo_frontlight_set);
/*
* LoCoMo "Register Access Bus."
*
* We model this as a regular bus type, and hang devices directly
* off this.
*/
static int locomo_match(struct device *_dev, struct device_driver *_drv)
{
struct locomo_dev *dev = LOCOMO_DEV(_dev);
struct locomo_driver *drv = LOCOMO_DRV(_drv);
return dev->devid == drv->devid;
}
static int locomo_bus_suspend(struct device *dev, pm_message_t state)
{
struct locomo_dev *ldev = LOCOMO_DEV(dev);
struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
int ret = 0;
if (drv && drv->suspend)
ret = drv->suspend(ldev, state);
return ret;
}
static int locomo_bus_resume(struct device *dev)
{
struct locomo_dev *ldev = LOCOMO_DEV(dev);
struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
int ret = 0;
if (drv && drv->resume)
ret = drv->resume(ldev);
return ret;
}
static int locomo_bus_probe(struct device *dev)
{
struct locomo_dev *ldev = LOCOMO_DEV(dev);
struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
int ret = -ENODEV;
if (drv->probe)
ret = drv->probe(ldev);
return ret;
}
static int locomo_bus_remove(struct device *dev)
{
struct locomo_dev *ldev = LOCOMO_DEV(dev);
struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
int ret = 0;
if (drv->remove)
ret = drv->remove(ldev);
return ret;
}
struct bus_type locomo_bus_type = {
.name = "locomo-bus",
.match = locomo_match,
.probe = locomo_bus_probe,
.remove = locomo_bus_remove,
.suspend = locomo_bus_suspend,
.resume = locomo_bus_resume,
};
int locomo_driver_register(struct locomo_driver *driver)
{
driver->drv.bus = &locomo_bus_type;
return driver_register(&driver->drv);
}
EXPORT_SYMBOL(locomo_driver_register);
void locomo_driver_unregister(struct locomo_driver *driver)
{
driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(locomo_driver_unregister);
static int __init locomo_init(void)
{
int ret = bus_register(&locomo_bus_type);
if (ret == 0)
platform_driver_register(&locomo_device_driver);
return ret;
}
static void __exit locomo_exit(void)
{
platform_driver_unregister(&locomo_device_driver);
bus_unregister(&locomo_bus_type);
}
module_init(locomo_init);
module_exit(locomo_exit);
MODULE_DESCRIPTION("Sharp LoCoMo core driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Lenz <lenz@cs.wisc.edu>");

348
arch/arm/common/mcpm_entry.c Normal file

@ -0,0 +1,348 @@
/*
* arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
*
* Created by: Nicolas Pitre, March 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>
#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
unsigned long val = ptr ? virt_to_phys(ptr) : 0;
mcpm_entry_vectors[cluster][cpu] = val;
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
unsigned long poke_phys_addr, unsigned long poke_val)
{
unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
poke[0] = poke_phys_addr;
poke[1] = poke_val;
__sync_cache_range_w(poke, 2 * sizeof(*poke));
}
static const struct mcpm_platform_ops *platform_ops;
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
if (platform_ops)
return -EBUSY;
platform_ops = ops;
return 0;
}
bool mcpm_is_available(void)
{
return (platform_ops) ? true : false;
}
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
if (!platform_ops)
return -EUNATCH; /* try not to shadow power_up errors */
might_sleep();
return platform_ops->power_up(cpu, cluster);
}
typedef void (*phys_reset_t)(unsigned long);
void mcpm_cpu_power_down(void)
{
phys_reset_t phys_reset;
if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
return;
BUG_ON(!irqs_disabled());
/*
* Do this before calling into the power_down method,
* as it might not always be safe to do afterwards.
*/
setup_mm_for_reboot();
platform_ops->power_down();
/*
* It is possible for a power_up request to happen concurrently
* with a power_down request for the same CPU. In this case the
* power_down method might not be able to actually enter a
* powered down state with the WFI instruction if the power_up
* method has removed the required reset condition. The
* power_down method is then allowed to return. We must perform
* a re-entry in the kernel as if the power_up method just had
* deasserted reset on the CPU.
*
* To simplify race issues, the platform specific implementation
* must accommodate for the possibility of unordered calls to
* power_down and power_up with a usage count. Therefore, if a
* call to power_up is issued for a CPU that is not down, then
* the next call to power_down must not attempt a full shutdown
* but only do the minimum (normally disabling L1 cache and CPU
* coherency) and return just as if a concurrent power_up request
* had happened as described above.
*/
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
phys_reset(virt_to_phys(mcpm_entry_point));
/* should never get here */
BUG();
}
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
int ret;
if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
return -EUNATCH;
ret = platform_ops->wait_for_powerdown(cpu, cluster);
if (ret)
pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
__func__, cpu, cluster, ret);
return ret;
}
void mcpm_cpu_suspend(u64 expected_residency)
{
phys_reset_t phys_reset;
if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
return;
BUG_ON(!irqs_disabled());
/* Very similar to mcpm_cpu_power_down() */
setup_mm_for_reboot();
platform_ops->suspend(expected_residency);
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
phys_reset(virt_to_phys(mcpm_entry_point));
BUG();
}
int mcpm_cpu_powered_up(void)
{
if (!platform_ops)
return -EUNATCH;
if (platform_ops->powered_up)
platform_ops->powered_up();
return 0;
}
#ifdef CONFIG_ARM_CPU_SUSPEND
static int __init nocache_trampoline(unsigned long _arg)
{
void (*cache_disable)(void) = (void *)_arg;
unsigned int mpidr = read_cpuid_mpidr();
unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
phys_reset_t phys_reset;
mcpm_set_entry_vector(cpu, cluster, cpu_resume);
setup_mm_for_reboot();
__mcpm_cpu_going_down(cpu, cluster);
BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
cache_disable();
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
__mcpm_cpu_down(cpu, cluster);
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
phys_reset(virt_to_phys(mcpm_entry_point));
BUG();
}
int __init mcpm_loopback(void (*cache_disable)(void))
{
int ret;
/*
* We're going to soft-restart the current CPU through the
* low-level MCPM code by leveraging the suspend/resume
* infrastructure. Let's play it safe by using cpu_pm_enter()
* in case the CPU init code path resets the VFP or similar.
*/
local_irq_disable();
local_fiq_disable();
ret = cpu_pm_enter();
if (!ret) {
ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
cpu_pm_exit();
}
local_fiq_enable();
local_irq_enable();
if (ret)
pr_err("%s returned %d\n", __func__, ret);
return ret;
}
#endif
struct sync_struct mcpm_sync;
/*
* __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
* This must be called at the point of committing to teardown of a CPU.
 * The CPU cache (SCTLR.C bit) is expected to still be active.
*/
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}
/*
* __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
* cluster can be torn down without disrupting this CPU.
* To avoid deadlocks, this must be called before a CPU is powered down.
 * The CPU cache (SCTLR.C bit) is expected to be off.
* However L2 cache might or might not be active.
*/
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
dmb();
mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
sev();
}
/*
* __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
* @state: the final state of the cluster:
* CLUSTER_UP: no destructive teardown was done and the cluster has been
* restored to the previous state (CPU cache still active); or
* CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
* (CPU cache disabled, L2 cache either enabled or disabled).
*/
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
dmb();
mcpm_sync.clusters[cluster].cluster = state;
sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
sev();
}
/*
* __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
* This function should be called by the last man, after local CPU teardown
* is complete. CPU cache expected to be active.
*
* Returns:
* false: the critical section was not entered because an inbound CPU was
* observed, or the cluster is already being set up;
* true: the critical section was entered: it is now safe to tear down the
* cluster.
*/
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
unsigned int i;
struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
/* Warn inbound CPUs that the cluster is being torn down: */
c->cluster = CLUSTER_GOING_DOWN;
sync_cache_w(&c->cluster);
/* Back out if the inbound cluster is already in the critical region: */
sync_cache_r(&c->inbound);
if (c->inbound == INBOUND_COMING_UP)
goto abort;
/*
* Wait for all CPUs to get out of the GOING_DOWN state, so that local
* teardown is complete on each CPU before tearing down the cluster.
*
* If any CPU has been woken up again from the DOWN state, then we
* shouldn't be taking the cluster down at all: abort in that case.
*/
sync_cache_r(&c->cpus);
for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
int cpustate;
if (i == cpu)
continue;
while (1) {
cpustate = c->cpus[i].cpu;
if (cpustate != CPU_GOING_DOWN)
break;
wfe();
sync_cache_r(&c->cpus[i].cpu);
}
switch (cpustate) {
case CPU_DOWN:
continue;
default:
goto abort;
}
}
return true;
abort:
__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
return false;
}
int __mcpm_cluster_state(unsigned int cluster)
{
sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
return mcpm_sync.clusters[cluster].cluster;
}
extern unsigned long mcpm_power_up_setup_phys;
int __init mcpm_sync_init(
void (*power_up_setup)(unsigned int affinity_level))
{
unsigned int i, j, mpidr, this_cluster;
BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
/*
* Set initial CPU and cluster states.
* Only one cluster is assumed to be active at this point.
*/
for (i = 0; i < MAX_NR_CLUSTERS; i++) {
mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
}
mpidr = read_cpuid_mpidr();
this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
for_each_online_cpu(i)
mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
sync_cache_w(&mcpm_sync);
if (power_up_setup) {
mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
sync_cache_w(&mcpm_power_up_setup_phys);
}
return 0;
}
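A platform plugs into this layer by registering a struct mcpm_platform_ops
early in boot; mcpm_cpu_power_up(), mcpm_cpu_power_down() and friends above
then dispatch through it. A hedged sketch in which every myplat_* name is
hypothetical; only the mcpm_* calls come from this file:

static int myplat_power_up(unsigned int cpu, unsigned int cluster)
{
	/* assert the power/reset controls so this CPU will boot */
	return 0;
}

static void myplat_power_down(void)
{
	/* disable caching/coherency, then WFI; may return (see above) */
}

static const struct mcpm_platform_ops myplat_pm_ops = {
	.power_up	= myplat_power_up,
	.power_down	= myplat_power_down,
};

static int __init myplat_mcpm_init(void)
{
	int ret = mcpm_sync_init(NULL);	/* no power_up_setup hook */
	if (!ret)
		ret = mcpm_platform_register(&myplat_pm_ops);
	return ret;
}
early_initcall(myplat_mcpm_init);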

233
arch/arm/common/mcpm_head.S Normal file

@ -0,0 +1,233 @@
/*
* arch/arm/common/mcpm_head.S -- kernel entry point for multi-cluster PM
*
* Created by: Nicolas Pitre, March 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* Refer to Documentation/arm/cluster-pm-race-avoidance.txt
* for details of the synchronisation algorithms used here.
*/
#include <linux/linkage.h>
#include <asm/mcpm.h>
#include <asm/assembler.h>
#include "vlock.h"
.if MCPM_SYNC_CLUSTER_CPUS
.error "cpus must be the first member of struct mcpm_sync_struct"
.endif
.macro pr_dbg string
#if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
b 1901f
1902: .asciz "CPU"
1903: .asciz " cluster"
1904: .asciz ": \string"
.align
1901: adr r0, 1902b
bl printascii
mov r0, r9
bl printhex2
adr r0, 1903b
bl printascii
mov r0, r10
bl printhex2
adr r0, 1904b
bl printascii
#endif
.endm
.arm
.align
ENTRY(mcpm_entry_point)
ARM_BE8(setend be)
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
THUMB( .thumb )
1:
mrc p15, 0, r0, c0, c0, 5 @ MPIDR
ubfx r9, r0, #0, #8 @ r9 = cpu
ubfx r10, r0, #8, #8 @ r10 = cluster
mov r3, #MAX_CPUS_PER_CLUSTER
mla r4, r3, r10, r9 @ r4 = canonical CPU index
cmp r4, #(MAX_CPUS_PER_CLUSTER * MAX_NR_CLUSTERS)
blo 2f
/* We didn't expect this CPU. Try to cheaply make it quiet. */
1: wfi
wfe
b 1b
2: pr_dbg "kernel mcpm_entry_point\n"
/*
* MMU is off so we need to get to various variables in a
* position independent way.
*/
adr r5, 3f
ldmia r5, {r0, r6, r7, r8, r11}
add r0, r5, r0 @ r0 = mcpm_entry_early_pokes
add r6, r5, r6 @ r6 = mcpm_entry_vectors
ldr r7, [r5, r7] @ r7 = mcpm_power_up_setup_phys
add r8, r5, r8 @ r8 = mcpm_sync
add r11, r5, r11 @ r11 = first_man_locks
@ Perform an early poke, if any
add r0, r0, r4, lsl #3
ldmia r0, {r0, r1}
teq r0, #0
strne r1, [r0]
mov r0, #MCPM_SYNC_CLUSTER_SIZE
mla r8, r0, r10, r8 @ r8 = sync cluster base
@ Signal that this CPU is coming UP:
mov r0, #CPU_COMING_UP
mov r5, #MCPM_SYNC_CPU_SIZE
mla r5, r9, r5, r8 @ r5 = sync cpu address
strb r0, [r5]
@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
@ state, because there is at least one active CPU (this CPU).
mov r0, #VLOCK_SIZE
mla r11, r0, r10, r11 @ r11 = cluster first man lock
mov r0, r11
mov r1, r9 @ cpu
bl vlock_trylock @ implies DMB
cmp r0, #0 @ failed to get the lock?
bne mcpm_setup_wait @ wait for cluster setup if so
ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
cmp r0, #CLUSTER_UP @ cluster already up?
bne mcpm_setup @ if not, set up the cluster
@ Otherwise, release the first man lock and skip setup:
mov r0, r11
bl vlock_unlock
b mcpm_setup_complete
mcpm_setup:
@ Control dependency implies strb not observable before previous ldrb.
@ Signal that the cluster is being brought up:
mov r0, #INBOUND_COMING_UP
strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
dmb
@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
@ point onwards will observe INBOUND_COMING_UP and abort.
@ Wait for any previously-pending cluster teardown operations to abort
@ or complete:
mcpm_teardown_wait:
ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
cmp r0, #CLUSTER_GOING_DOWN
bne first_man_setup
wfe
b mcpm_teardown_wait
first_man_setup:
dmb
@ If the outbound gave up before teardown started, skip cluster setup:
cmp r0, #CLUSTER_UP
beq mcpm_setup_leave
@ power_up_setup is now responsible for setting up the cluster:
cmp r7, #0
mov r0, #1 @ second (cluster) affinity level
blxne r7 @ Call power_up_setup if defined
dmb
mov r0, #CLUSTER_UP
strb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
dmb
mcpm_setup_leave:
@ Leave the cluster setup critical section:
mov r0, #INBOUND_NOT_COMING_UP
strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
dsb st
sev
mov r0, r11
bl vlock_unlock @ implies DMB
b mcpm_setup_complete
@ In the contended case, non-first men wait here for cluster setup
@ to complete:
mcpm_setup_wait:
ldrb r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
cmp r0, #CLUSTER_UP
wfene
bne mcpm_setup_wait
dmb
mcpm_setup_complete:
@ If a platform-specific CPU setup hook is needed, it is
@ called from here.
cmp r7, #0
mov r0, #0 @ first (CPU) affinity level
blxne r7 @ Call power_up_setup if defined
dmb
@ Mark the CPU as up:
mov r0, #CPU_UP
strb r0, [r5]
@ Observability order of CPU_UP and opening of the gate does not matter.
mcpm_entry_gated:
ldr r5, [r6, r4, lsl #2] @ r5 = CPU entry vector
cmp r5, #0
wfeeq
beq mcpm_entry_gated
dmb
pr_dbg "released\n"
bx r5
.align 2
3: .word mcpm_entry_early_pokes - .
.word mcpm_entry_vectors - 3b
.word mcpm_power_up_setup_phys - 3b
.word mcpm_sync - 3b
.word first_man_locks - 3b
ENDPROC(mcpm_entry_point)
.bss
.align CACHE_WRITEBACK_ORDER
.type first_man_locks, #object
first_man_locks:
.space VLOCK_SIZE * MAX_NR_CLUSTERS
.align CACHE_WRITEBACK_ORDER
.type mcpm_entry_vectors, #object
ENTRY(mcpm_entry_vectors)
.space 4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
.type mcpm_entry_early_pokes, #object
ENTRY(mcpm_entry_early_pokes)
.space 8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
.type mcpm_power_up_setup_phys, #object
ENTRY(mcpm_power_up_setup_phys)
.space 4 @ set by mcpm_sync_init()
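Stripped of barriers and cache detail, the gate above reduces to the
following non-normative C restatement; every stub_* helper is an illustrative
stand-in for the assembly, not a kernel API:

static void mcpm_entry_point_in_c(unsigned int cpu, unsigned int cluster)
{
	void (*vector)(void);

	if (stub_vlock_trylock(&first_man_locks[cluster], cpu) == 0) {
		/* first man: bring the cluster up if it is not already */
		if (stub_cluster_state(cluster) != CLUSTER_UP)
			stub_cluster_setup(cluster);	/* power_up_setup(1) */
		stub_vlock_unlock(&first_man_locks[cluster]);
	} else {
		while (stub_cluster_state(cluster) != CLUSTER_UP)
			wfe();
	}
	stub_cpu_setup(cpu);			/* power_up_setup(0) */
	stub_mark_cpu(cpu, cluster, CPU_UP);
	while (!(vector = stub_entry_vector(cpu, cluster)))
		wfe();				/* gated until the vector opens */
	vector();
}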

103
arch/arm/common/mcpm_platsmp.c Normal file

@ -0,0 +1,103 @@
/*
* linux/arch/arm/mach-vexpress/mcpm_platsmp.c
*
* Created by: Nicolas Pitre, November 2012
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Code to handle secondary CPU bringup and hotplug for the cluster power API.
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <asm/mcpm.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
static void cpu_to_pcpu(unsigned int cpu,
unsigned int *pcpu, unsigned int *pcluster)
{
unsigned int mpidr;
mpidr = cpu_logical_map(cpu);
*pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
*pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
}
static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
unsigned int pcpu, pcluster, ret;
extern void secondary_startup(void);
cpu_to_pcpu(cpu, &pcpu, &pcluster);
pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
__func__, cpu, pcpu, pcluster);
mcpm_set_entry_vector(pcpu, pcluster, NULL);
ret = mcpm_cpu_power_up(pcpu, pcluster);
if (ret)
return ret;
mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
dsb_sev();
return 0;
}
static void mcpm_secondary_init(unsigned int cpu)
{
mcpm_cpu_powered_up();
}
#ifdef CONFIG_HOTPLUG_CPU
static int mcpm_cpu_kill(unsigned int cpu)
{
unsigned int pcpu, pcluster;
cpu_to_pcpu(cpu, &pcpu, &pcluster);
return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
}
static int mcpm_cpu_disable(unsigned int cpu)
{
/*
* We assume all CPUs may be shut down.
* This would be the hook to use for eventual Secure
* OS migration requests as described in the PSCI spec.
*/
return 0;
}
static void mcpm_cpu_die(unsigned int cpu)
{
unsigned int mpidr, pcpu, pcluster;
mpidr = read_cpuid_mpidr();
pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
mcpm_set_entry_vector(pcpu, pcluster, NULL);
mcpm_cpu_power_down();
}
#endif
static struct smp_operations __initdata mcpm_smp_ops = {
.smp_boot_secondary = mcpm_boot_secondary,
.smp_secondary_init = mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = mcpm_cpu_kill,
.cpu_disable = mcpm_cpu_disable,
.cpu_die = mcpm_cpu_die,
#endif
};
void __init mcpm_smp_set_ops(void)
{
smp_set_ops(&mcpm_smp_ops);
}
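The pcpu/pcluster pair is simply the two low affinity fields of the logical
CPU's MPIDR. A worked example with an illustrative value:

/* a logical CPU whose cpu_logical_map() entry is 0x101 (illustrative) */
unsigned int mpidr    = 0x101;
unsigned int pcpu     = MPIDR_AFFINITY_LEVEL(mpidr, 0);	/* 0x01: CPU 1     */
unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);	/* 0x01: cluster 1 */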

1458
arch/arm/common/sa1111.c Normal file

File diff suppressed because it is too large

275
arch/arm/common/scoop.c Normal file

@ -0,0 +1,275 @@
/*
* Support code for the SCOOP interface found on various Sharp PDAs
*
* Copyright (c) 2004 Richard Purdie
*
* Based on code written by Sharp/Lineo for 2.4 kernels
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/hardware/scoop.h>
/* PCMCIA to Scoop linkage
   There is no easy way to link multiple scoop devices into one
   single entity for the pxa2xx_pcmcia device, so this structure,
   set up by the platform code, is used instead.
   This file is never modular, so this symbol is always
   accessible to the board support files.
*/
struct scoop_pcmcia_config *platform_scoop_config;
EXPORT_SYMBOL(platform_scoop_config);
struct scoop_dev {
void __iomem *base;
struct gpio_chip gpio;
spinlock_t scoop_lock;
unsigned short suspend_clr;
unsigned short suspend_set;
u32 scoop_gpwr;
};
void reset_scoop(struct device *dev)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
iowrite16(0x0100, sdev->base + SCOOP_MCR); /* 00 */
iowrite16(0x0000, sdev->base + SCOOP_CDR); /* 04 */
iowrite16(0x0000, sdev->base + SCOOP_CCR); /* 10 */
iowrite16(0x0000, sdev->base + SCOOP_IMR); /* 18 */
iowrite16(0x00FF, sdev->base + SCOOP_IRM); /* 14 */
iowrite16(0x0000, sdev->base + SCOOP_ISR); /* 1C */
iowrite16(0x0000, sdev->base + SCOOP_IRM);
}
static void __scoop_gpio_set(struct scoop_dev *sdev,
unsigned offset, int value)
{
unsigned short gpwr;
gpwr = ioread16(sdev->base + SCOOP_GPWR);
if (value)
gpwr |= 1 << (offset + 1);
else
gpwr &= ~(1 << (offset + 1));
iowrite16(gpwr, sdev->base + SCOOP_GPWR);
}
static void scoop_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
unsigned long flags;
spin_lock_irqsave(&sdev->scoop_lock, flags);
__scoop_gpio_set(sdev, offset, value);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
}
static int scoop_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
/* XXX: I'm unsure, but it seems so */
return ioread16(sdev->base + SCOOP_GPRR) & (1 << (offset + 1));
}
static int scoop_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
unsigned long flags;
unsigned short gpcr;
spin_lock_irqsave(&sdev->scoop_lock, flags);
gpcr = ioread16(sdev->base + SCOOP_GPCR);
gpcr &= ~(1 << (offset + 1));
iowrite16(gpcr, sdev->base + SCOOP_GPCR);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
return 0;
}
static int scoop_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct scoop_dev *sdev = container_of(chip, struct scoop_dev, gpio);
unsigned long flags;
unsigned short gpcr;
spin_lock_irqsave(&sdev->scoop_lock, flags);
__scoop_gpio_set(sdev, offset, value);
gpcr = ioread16(sdev->base + SCOOP_GPCR);
gpcr |= 1 << (offset + 1);
iowrite16(gpcr, sdev->base + SCOOP_GPCR);
spin_unlock_irqrestore(&sdev->scoop_lock, flags);
return 0;
}
unsigned short read_scoop_reg(struct device *dev, unsigned short reg)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
return ioread16(sdev->base + reg);
}
void write_scoop_reg(struct device *dev, unsigned short reg, unsigned short data)
{
struct scoop_dev *sdev = dev_get_drvdata(dev);
iowrite16(data, sdev->base + reg);
}
EXPORT_SYMBOL(reset_scoop);
EXPORT_SYMBOL(read_scoop_reg);
EXPORT_SYMBOL(write_scoop_reg);
#ifdef CONFIG_PM
static void check_scoop_reg(struct scoop_dev *sdev)
{
unsigned short mcr;
mcr = ioread16(sdev->base + SCOOP_MCR);
if ((mcr & 0x100) == 0)
iowrite16(0x0101, sdev->base + SCOOP_MCR);
}
static int scoop_suspend(struct platform_device *dev, pm_message_t state)
{
struct scoop_dev *sdev = platform_get_drvdata(dev);
check_scoop_reg(sdev);
sdev->scoop_gpwr = ioread16(sdev->base + SCOOP_GPWR);
iowrite16((sdev->scoop_gpwr & ~sdev->suspend_clr) | sdev->suspend_set, sdev->base + SCOOP_GPWR);
return 0;
}
static int scoop_resume(struct platform_device *dev)
{
struct scoop_dev *sdev = platform_get_drvdata(dev);
check_scoop_reg(sdev);
iowrite16(sdev->scoop_gpwr, sdev->base + SCOOP_GPWR);
return 0;
}
#else
#define scoop_suspend NULL
#define scoop_resume NULL
#endif
static int scoop_probe(struct platform_device *pdev)
{
struct scoop_dev *devptr;
struct scoop_config *inf;
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
int ret;
if (!mem)
return -EINVAL;
devptr = kzalloc(sizeof(struct scoop_dev), GFP_KERNEL);
if (!devptr)
return -ENOMEM;
spin_lock_init(&devptr->scoop_lock);
inf = pdev->dev.platform_data;
devptr->base = ioremap(mem->start, resource_size(mem));
if (!devptr->base) {
ret = -ENOMEM;
goto err_ioremap;
}
platform_set_drvdata(pdev, devptr);
printk("Sharp Scoop Device found at 0x%08x -> 0x%8p\n",(unsigned int)mem->start, devptr->base);
iowrite16(0x0140, devptr->base + SCOOP_MCR);
reset_scoop(&pdev->dev);
iowrite16(0x0000, devptr->base + SCOOP_CPR);
iowrite16(inf->io_dir & 0xffff, devptr->base + SCOOP_GPCR);
iowrite16(inf->io_out & 0xffff, devptr->base + SCOOP_GPWR);
devptr->suspend_clr = inf->suspend_clr;
devptr->suspend_set = inf->suspend_set;
devptr->gpio.base = -1;
if (inf->gpio_base != 0) {
devptr->gpio.label = dev_name(&pdev->dev);
devptr->gpio.base = inf->gpio_base;
devptr->gpio.ngpio = 12; /* PA11 = 0, PA12 = 1, etc. up to PA22 = 11 */
devptr->gpio.set = scoop_gpio_set;
devptr->gpio.get = scoop_gpio_get;
devptr->gpio.direction_input = scoop_gpio_direction_input;
devptr->gpio.direction_output = scoop_gpio_direction_output;
ret = gpiochip_add(&devptr->gpio);
if (ret)
goto err_gpio;
}
return 0;
err_gpio:
platform_set_drvdata(pdev, NULL);
err_ioremap:
iounmap(devptr->base);
kfree(devptr);
return ret;
}
static int scoop_remove(struct platform_device *pdev)
{
struct scoop_dev *sdev = platform_get_drvdata(pdev);
if (!sdev)
return -EINVAL;
if (sdev->gpio.base != -1)
gpiochip_remove(&sdev->gpio);
platform_set_drvdata(pdev, NULL);
iounmap(sdev->base);
kfree(sdev);
return 0;
}
static struct platform_driver scoop_driver = {
.probe = scoop_probe,
.remove = scoop_remove,
.suspend = scoop_suspend,
.resume = scoop_resume,
.driver = {
.name = "sharp-scoop",
},
};
static int __init scoop_init(void)
{
return platform_driver_register(&scoop_driver);
}
subsys_initcall(scoop_init);
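Boards hand this driver its configuration through platform data:
scoop_probe() above reads io_dir, io_out, the suspend masks and gpio_base
from a struct scoop_config. A hedged sketch of such board code (all
addresses and masks are illustrative placeholders):

static struct resource example_scoop_resources[] = {
	[0] = {
		.start	= 0x10800000,	/* placeholder chip-select address */
		.end	= 0x10800fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct scoop_config example_scoop_setup = {
	.io_dir		= 0x01c0,	/* placeholder direction mask */
	.io_out		= 0x0040,	/* placeholder initial outputs */
	.suspend_clr	= 0x0040,
	.gpio_base	= 100,		/* exposes the 12 GPIOs at this base */
};

static struct platform_device example_scoop_device = {
	.name		= "sharp-scoop",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_scoop_setup,
	},
	.num_resources	= ARRAY_SIZE(example_scoop_resources),
	.resource	= example_scoop_resources,
};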

65
arch/arm/common/sharpsl_param.c Normal file

@ -0,0 +1,65 @@
/*
* Hardware parameter area specific to Sharp SL series devices
*
* Copyright (c) 2005 Richard Purdie
*
* Based on Sharp's 2.4 kernel patches
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mach/sharpsl_param.h>
#include <asm/memory.h>
/*
 * Certain hardware parameters, determined at the time of device manufacture
 * and typically including LCD parameters, are loaded by the bootloader at
 * the address PARAM_BASE. As the kernel will overwrite them, we need to store
* them early in the boot process, then pass them to the appropriate drivers.
* Not all devices use all parameters but the format is common to all.
*/
#ifdef CONFIG_ARCH_SA1100
#define PARAM_BASE 0xe8ffc000
#define param_start(x) (void *)(x)
#else
#define PARAM_BASE 0xa0000a00
#define param_start(x) __va(x)
#endif
#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
#define COMADJ_MAGIC MAGIC_CHG('C','M','A','D')
#define UUID_MAGIC MAGIC_CHG('U','U','I','D')
#define TOUCH_MAGIC MAGIC_CHG('T','U','C','H')
#define AD_MAGIC MAGIC_CHG('B','V','A','D')
#define PHAD_MAGIC MAGIC_CHG('P','H','A','D')
struct sharpsl_param_info sharpsl_param;
EXPORT_SYMBOL(sharpsl_param);
void sharpsl_save_param(void)
{
memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
sharpsl_param.comadj=-1;
if (sharpsl_param.phad_keyword != PHAD_MAGIC)
sharpsl_param.phadadj=-1;
if (sharpsl_param.uuid_keyword != UUID_MAGIC)
sharpsl_param.uuid[0]=-1;
if (sharpsl_param.touch_keyword != TOUCH_MAGIC)
sharpsl_param.touch_xp=-1;
if (sharpsl_param.adadj_keyword != AD_MAGIC)
sharpsl_param.adadj=-1;
}
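Consumers test the sentinel written above: any field whose magic keyword was
missing reads back as -1. A hypothetical consumer sketch (the fallback value
is illustrative):

static int example_get_comadj(void)
{
	if (sharpsl_param.comadj == -1)	/* COMADJ magic was absent */
		return 128;		/* illustrative board default */
	return sharpsl_param.comadj;
}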

304
arch/arm/common/timer-sp.c Normal file

@ -0,0 +1,304 @@
/*
* linux/arch/arm/common/timer-sp.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/timer-sp.h>
static long __init sp804_get_clock_rate(struct clk *clk)
{
long rate;
int err;
err = clk_prepare(clk);
if (err) {
pr_err("sp804: clock failed to prepare: %d\n", err);
clk_put(clk);
return err;
}
err = clk_enable(clk);
if (err) {
pr_err("sp804: clock failed to enable: %d\n", err);
clk_unprepare(clk);
clk_put(clk);
return err;
}
rate = clk_get_rate(clk);
if (rate < 0) {
pr_err("sp804: clock failed to get rate: %ld\n", rate);
clk_disable(clk);
clk_unprepare(clk);
clk_put(clk);
}
return rate;
}
static void __iomem *sched_clock_base;
static u64 notrace sp804_read(void)
{
return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
}
void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
int use_sched_clock)
{
long rate;
if (!clk) {
clk = clk_get_sys("sp804", name);
if (IS_ERR(clk)) {
pr_err("sp804: clock not found: %d\n",
(int)PTR_ERR(clk));
return;
}
}
rate = sp804_get_clock_rate(clk);
if (rate < 0)
return;
/* setup timer 0 as free-running clocksource */
writel(0, base + TIMER_CTRL);
writel(0xffffffff, base + TIMER_LOAD);
writel(0xffffffff, base + TIMER_VALUE);
writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
base + TIMER_CTRL);
clocksource_mmio_init(base + TIMER_VALUE, name,
rate, 200, 32, clocksource_mmio_readl_down);
if (use_sched_clock) {
sched_clock_base = base;
sched_clock_register(sp804_read, 32, rate);
}
}
static void __iomem *clkevt_base;
static unsigned long clkevt_reload;
/*
* IRQ handler for the timer
*/
static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
/* clear the interrupt */
writel(1, clkevt_base + TIMER_INTCLR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static void sp804_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
writel(ctrl, clkevt_base + TIMER_CTRL);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
writel(clkevt_reload, clkevt_base + TIMER_LOAD);
ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
break;
case CLOCK_EVT_MODE_ONESHOT:
/* period set, and timer enabled in 'next_event' hook */
ctrl |= TIMER_CTRL_ONESHOT;
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
default:
break;
}
writel(ctrl, clkevt_base + TIMER_CTRL);
}
static int sp804_set_next_event(unsigned long next,
struct clock_event_device *evt)
{
unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
writel(next, clkevt_base + TIMER_LOAD);
writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
return 0;
}
static struct clock_event_device sp804_clockevent = {
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_DYNIRQ,
.set_mode = sp804_set_mode,
.set_next_event = sp804_set_next_event,
.rating = 300,
};
static struct irqaction sp804_timer_irq = {
.name = "timer",
.flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = sp804_timer_interrupt,
.dev_id = &sp804_clockevent,
};
void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
{
struct clock_event_device *evt = &sp804_clockevent;
long rate;
if (!clk)
clk = clk_get_sys("sp804", name);
if (IS_ERR(clk)) {
pr_err("sp804: %s clock not found: %d\n", name,
(int)PTR_ERR(clk));
return;
}
rate = sp804_get_clock_rate(clk);
if (rate < 0)
return;
clkevt_base = base;
clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
evt->name = name;
evt->irq = irq;
evt->cpumask = cpu_possible_mask;
writel(0, base + TIMER_CTRL);
setup_irq(irq, &sp804_timer_irq);
clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
}
static void __init sp804_of_init(struct device_node *np)
{
static bool initialized = false;
void __iomem *base;
int irq;
u32 irq_num = 0;
struct clk *clk1, *clk2;
const char *name = of_get_property(np, "compatible", NULL);
base = of_iomap(np, 0);
if (WARN_ON(!base))
return;
/* Ensure timers are disabled */
writel(0, base + TIMER_CTRL);
writel(0, base + TIMER_2_BASE + TIMER_CTRL);
if (initialized || !of_device_is_available(np))
goto err;
clk1 = of_clk_get(np, 0);
if (IS_ERR(clk1))
clk1 = NULL;
/* Get the 2nd clock if the timer has 3 timer clocks */
if (of_count_phandle_with_args(np, "clocks", "#clock-cells") == 3) {
clk2 = of_clk_get(np, 1);
if (IS_ERR(clk2)) {
pr_err("sp804: %s clock not found: %d\n", np->name,
(int)PTR_ERR(clk2));
clk2 = NULL;
}
} else
clk2 = clk1;
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
if (irq_num == 2) {
__sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
__sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
} else {
__sp804_clockevents_init(base, irq, clk1, name);
__sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
name, clk2, 1);
}
initialized = true;
return;
err:
iounmap(base);
}
CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
static void __init integrator_cp_of_init(struct device_node *np)
{
static int init_count = 0;
void __iomem *base;
int irq;
const char *name = of_get_property(np, "compatible", NULL);
struct clk *clk;
base = of_iomap(np, 0);
if (WARN_ON(!base))
return;
clk = of_clk_get(np, 0);
if (WARN_ON(IS_ERR(clk)))
return;
/* Ensure timer is disabled */
writel(0, base + TIMER_CTRL);
if (init_count == 2 || !of_device_is_available(np))
goto err;
if (!init_count)
__sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
else {
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0)
goto err;
__sp804_clockevents_init(base, irq, clk, name);
}
init_count++;
return;
err:
iounmap(base);
}
CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
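A non-DT board can reuse the same code by mapping the timer pair itself and
calling the two init functions directly; with a NULL clk they fall back to
clk_get_sys("sp804", name). A hedged sketch (the base address and IRQ number
are placeholders):

static void __init example_sp804_init(void)
{
	void __iomem *base = ioremap(0x10011000, SZ_4K);	/* placeholder */

	if (!base)
		return;

	/* timer 1: clocksource + sched_clock; timer 2: clockevents */
	__sp804_clocksource_and_sched_clock_init(base, "timer0", NULL, 1);
	__sp804_clockevents_init(base + TIMER_2_BASE,
				 34 /* placeholder IRQ */, NULL, "timer1");
}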

108
arch/arm/common/vlock.S Normal file

@ -0,0 +1,108 @@
/*
* vlock.S - simple voting lock implementation for ARM
*
* Created by: Dave Martin, 2012-08-16
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* This algorithm is described in more detail in
* Documentation/arm/vlocks.txt.
*/
#include <linux/linkage.h>
#include "vlock.h"
/* Select different code if voting flags can fit in a single word. */
#if VLOCK_VOTING_SIZE > 4
#define FEW(x...)
#define MANY(x...) x
#else
#define FEW(x...) x
#define MANY(x...)
#endif
@ voting lock for first-man coordination
.macro voting_begin rbase:req, rcpu:req, rscratch:req
mov \rscratch, #1
strb \rscratch, [\rbase, \rcpu]
dmb
.endm
.macro voting_end rbase:req, rcpu:req, rscratch:req
dmb
mov \rscratch, #0
strb \rscratch, [\rbase, \rcpu]
dsb st
sev
.endm
/*
* The vlock structure must reside in Strongly-Ordered or Device memory.
* This implementation deliberately eliminates most of the barriers which
* would be required for other memory types, and assumes that independent
* writes to neighbouring locations within a cacheline do not interfere
* with one another.
*/
@ r0: lock structure base
@ r1: CPU ID (0-based index within cluster)
ENTRY(vlock_trylock)
add r1, r1, #VLOCK_VOTING_OFFSET
voting_begin r0, r1, r2
ldrb r2, [r0, #VLOCK_OWNER_OFFSET] @ check whether lock is held
cmp r2, #VLOCK_OWNER_NONE
bne trylock_fail @ fail if so
@ Control dependency implies strb not observable before previous ldrb.
strb r1, [r0, #VLOCK_OWNER_OFFSET] @ submit my vote
voting_end r0, r1, r2 @ implies DMB
@ Wait for the current round of voting to finish:
MANY( mov r3, #VLOCK_VOTING_OFFSET )
0:
MANY( ldr r2, [r0, r3] )
FEW( ldr r2, [r0, #VLOCK_VOTING_OFFSET] )
cmp r2, #0
wfene
bne 0b
MANY( add r3, r3, #4 )
MANY( cmp r3, #VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE )
MANY( bne 0b )
@ Check who won:
dmb
ldrb r2, [r0, #VLOCK_OWNER_OFFSET]
eor r0, r1, r2 @ zero if I won, else nonzero
bx lr
trylock_fail:
voting_end r0, r1, r2
mov r0, #1 @ nonzero indicates that I lost
bx lr
ENDPROC(vlock_trylock)
@ r0: lock structure base
ENTRY(vlock_unlock)
dmb
mov r1, #VLOCK_OWNER_NONE
strb r1, [r0, #VLOCK_OWNER_OFFSET]
dsb st
sev
bx lr
ENDPROC(vlock_unlock)
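Restated in C over the layout in vlock.h below, the algorithm is: raise your
voting flag, bail out if the lock already has an owner, submit yourself as
owner, drop your flag, wait for the round of voting to finish, then check
whether your vote survived. A non-normative sketch; the real code depends on
Strongly-Ordered/Device memory and explicit barriers that are elided here:

static int vlock_trylock_in_c(volatile u8 *lock, u8 cpu)
{
	u8 me = cpu + VLOCK_VOTING_OFFSET;	/* nonzero id, as in the asm */
	unsigned int i;

	lock[me] = 1;				/* voting_begin */
	if (lock[VLOCK_OWNER_OFFSET] != VLOCK_OWNER_NONE) {
		lock[me] = 0;			/* voting_end */
		return 1;			/* lost: already owned */
	}
	lock[VLOCK_OWNER_OFFSET] = me;		/* submit my vote */
	lock[me] = 0;				/* voting_end */
	for (i = 0; i < VLOCK_VOTING_SIZE; i++)	/* wait out this round */
		while (lock[VLOCK_VOTING_OFFSET + i])
			;
	return lock[VLOCK_OWNER_OFFSET] != me;	/* 0 iff I won */
}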

29
arch/arm/common/vlock.h Normal file

@ -0,0 +1,29 @@
/*
* vlock.h - simple voting lock implementation
*
* Created by: Dave Martin, 2012-08-16
* Copyright: (C) 2012-2013 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __VLOCK_H
#define __VLOCK_H
#include <asm/mcpm.h>
/* Offsets and sizes are rounded to a word (4 bytes) */
#define VLOCK_OWNER_OFFSET 0
#define VLOCK_VOTING_OFFSET 4
#define VLOCK_VOTING_SIZE ((MAX_CPUS_PER_CLUSTER + 3) / 4 * 4)
#define VLOCK_SIZE (VLOCK_VOTING_OFFSET + VLOCK_VOTING_SIZE)
#define VLOCK_OWNER_NONE 0
#endif /* ! __VLOCK_H */
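The voting area is one flag byte per CPU, rounded up to a whole word. Worked
examples: with MAX_CPUS_PER_CLUSTER = 4, VLOCK_VOTING_SIZE = (4 + 3) / 4 * 4
= 4, so VLOCK_SIZE = 4 + 4 = 8 bytes; with MAX_CPUS_PER_CLUSTER = 6 the
voting area rounds up to (6 + 3) / 4 * 4 = 8, giving VLOCK_SIZE = 12 bytes.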