mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-09 01:28:05 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
88
kernel/irq/Kconfig
Normal file
88
kernel/irq/Kconfig
Normal file
|
@ -0,0 +1,88 @@
|
|||
menu "IRQ subsystem"
|
||||
# Options selectable by the architecture code
|
||||
|
||||
# Make sparse irq Kconfig switch below available
|
||||
config MAY_HAVE_SPARSE_IRQ
|
||||
bool
|
||||
|
||||
# Legacy support, required for itanic
|
||||
config GENERIC_IRQ_LEGACY
|
||||
bool
|
||||
|
||||
# Enable the generic irq autoprobe mechanism
|
||||
config GENERIC_IRQ_PROBE
|
||||
bool
|
||||
|
||||
# Use the generic /proc/interrupts implementation
|
||||
config GENERIC_IRQ_SHOW
|
||||
bool
|
||||
|
||||
# Print level/edge extra information
|
||||
config GENERIC_IRQ_SHOW_LEVEL
|
||||
bool
|
||||
|
||||
# Facility to allocate a hardware interrupt. This is legacy support
|
||||
# and should not be used in new code. Use irq domains instead.
|
||||
config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
|
||||
bool
|
||||
|
||||
# Support for delayed migration from interrupt context
|
||||
config GENERIC_PENDING_IRQ
|
||||
bool
|
||||
|
||||
# Alpha specific irq affinity mechanism
|
||||
config AUTO_IRQ_AFFINITY
|
||||
bool
|
||||
|
||||
# Tasklet based software resend for pending interrupts on enable_irq()
|
||||
config HARDIRQS_SW_RESEND
|
||||
bool
|
||||
|
||||
# Preflow handler support for fasteoi (sparc64)
|
||||
config IRQ_PREFLOW_FASTEOI
|
||||
bool
|
||||
|
||||
# Edge style eoi based handler (cell)
|
||||
config IRQ_EDGE_EOI_HANDLER
|
||||
bool
|
||||
|
||||
# Generic configurable interrupt chip implementation
|
||||
config GENERIC_IRQ_CHIP
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
|
||||
# Generic irq_domain hw <--> linux irq number translation
|
||||
config IRQ_DOMAIN
|
||||
bool
|
||||
|
||||
config HANDLE_DOMAIN_IRQ
|
||||
bool
|
||||
|
||||
config IRQ_DOMAIN_DEBUG
|
||||
bool "Expose hardware/virtual IRQ mapping via debugfs"
|
||||
depends on IRQ_DOMAIN && DEBUG_FS
|
||||
help
|
||||
This option will show the mapping relationship between hardware irq
|
||||
numbers and Linux irq numbers. The mapping is exposed via debugfs
|
||||
in the file "irq_domain_mapping".
|
||||
|
||||
If you don't know what this means you don't need it.
|
||||
|
||||
# Support forced irq threading
|
||||
config IRQ_FORCED_THREADING
|
||||
bool
|
||||
|
||||
config SPARSE_IRQ
|
||||
bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
|
||||
---help---
|
||||
|
||||
Sparse irq numbering is useful for distro kernels that want
|
||||
to define a high CONFIG_NR_CPUS value but still want to have
|
||||
low kernel memory footprint on smaller machines.
|
||||
|
||||
( Sparse irqs can also be beneficial on NUMA boxes, as they spread
|
||||
out the interrupt descriptors in a more NUMA-friendly way. )
|
||||
|
||||
If you don't know what to do here, say N.
|
||||
|
||||
endmenu
|
8
kernel/irq/Makefile
Normal file
8
kernel/irq/Makefile
Normal file
|
@ -0,0 +1,8 @@
|
|||
|
||||
obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
|
||||
obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
|
||||
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
|
||||
obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
|
||||
obj-$(CONFIG_PROC_FS) += proc.o
|
||||
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
|
||||
obj-$(CONFIG_PM_SLEEP) += pm.o
|
185
kernel/irq/autoprobe.c
Normal file
185
kernel/irq/autoprobe.c
Normal file
|
@ -0,0 +1,185 @@
|
|||
/*
|
||||
* linux/kernel/irq/autoprobe.c
|
||||
*
|
||||
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
|
||||
*
|
||||
* This file contains the interrupt probing code and driver APIs.
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/async.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
/*
|
||||
* Autodetection depends on the fact that any interrupt that
|
||||
* comes in on to an unassigned handler will get stuck with
|
||||
* "IRQS_WAITING" cleared and the interrupt disabled.
|
||||
*/
|
||||
static DEFINE_MUTEX(probing_active);
|
||||
|
||||
/**
|
||||
* probe_irq_on - begin an interrupt autodetect
|
||||
*
|
||||
* Commence probing for an interrupt. The interrupts are scanned
|
||||
* and a mask of potential interrupt lines is returned.
|
||||
*
|
||||
*/
|
||||
unsigned long probe_irq_on(void)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
unsigned long mask = 0;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* quiesce the kernel, or at least the asynchronous portion
|
||||
*/
|
||||
async_synchronize_full();
|
||||
mutex_lock(&probing_active);
|
||||
/*
|
||||
* something may have generated an irq long ago and we want to
|
||||
* flush such a longstanding irq before considering it as spurious.
|
||||
*/
|
||||
for_each_irq_desc_reverse(i, desc) {
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
if (!desc->action && irq_settings_can_probe(desc)) {
|
||||
/*
|
||||
* Some chips need to know about probing in
|
||||
* progress:
|
||||
*/
|
||||
if (desc->irq_data.chip->irq_set_type)
|
||||
desc->irq_data.chip->irq_set_type(&desc->irq_data,
|
||||
IRQ_TYPE_PROBE);
|
||||
irq_startup(desc, false);
|
||||
}
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
||||
/* Wait for longstanding interrupts to trigger. */
|
||||
msleep(20);
|
||||
|
||||
/*
|
||||
* enable any unassigned irqs
|
||||
* (we must startup again here because if a longstanding irq
|
||||
* happened in the previous stage, it may have masked itself)
|
||||
*/
|
||||
for_each_irq_desc_reverse(i, desc) {
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
if (!desc->action && irq_settings_can_probe(desc)) {
|
||||
desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
|
||||
if (irq_startup(desc, false))
|
||||
desc->istate |= IRQS_PENDING;
|
||||
}
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for spurious interrupts to trigger
|
||||
*/
|
||||
msleep(100);
|
||||
|
||||
/*
|
||||
* Now filter out any obviously spurious interrupts
|
||||
*/
|
||||
for_each_irq_desc(i, desc) {
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
|
||||
if (desc->istate & IRQS_AUTODETECT) {
|
||||
/* It triggered already - consider it spurious. */
|
||||
if (!(desc->istate & IRQS_WAITING)) {
|
||||
desc->istate &= ~IRQS_AUTODETECT;
|
||||
irq_shutdown(desc);
|
||||
} else
|
||||
if (i < 32)
|
||||
mask |= 1 << i;
|
||||
}
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
||||
return mask;
|
||||
}
|
||||
EXPORT_SYMBOL(probe_irq_on);
|
||||
|
||||
/**
|
||||
* probe_irq_mask - scan a bitmap of interrupt lines
|
||||
* @val: mask of interrupts to consider
|
||||
*
|
||||
* Scan the interrupt lines and return a bitmap of active
|
||||
* autodetect interrupts. The interrupt probe logic state
|
||||
* is then returned to its previous value.
|
||||
*
|
||||
* Note: we need to scan all the irq's even though we will
|
||||
* only return autodetect irq numbers - just so that we reset
|
||||
* them all to a known state.
|
||||
*/
|
||||
unsigned int probe_irq_mask(unsigned long val)
|
||||
{
|
||||
unsigned int mask = 0;
|
||||
struct irq_desc *desc;
|
||||
int i;
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
if (desc->istate & IRQS_AUTODETECT) {
|
||||
if (i < 16 && !(desc->istate & IRQS_WAITING))
|
||||
mask |= 1 << i;
|
||||
|
||||
desc->istate &= ~IRQS_AUTODETECT;
|
||||
irq_shutdown(desc);
|
||||
}
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
mutex_unlock(&probing_active);
|
||||
|
||||
return mask & val;
|
||||
}
|
||||
EXPORT_SYMBOL(probe_irq_mask);
|
||||
|
||||
/**
|
||||
* probe_irq_off - end an interrupt autodetect
|
||||
* @val: mask of potential interrupts (unused)
|
||||
*
|
||||
* Scans the unused interrupt lines and returns the line which
|
||||
* appears to have triggered the interrupt. If no interrupt was
|
||||
* found then zero is returned. If more than one interrupt is
|
||||
* found then minus the first candidate is returned to indicate
|
||||
* their is doubt.
|
||||
*
|
||||
* The interrupt probe logic state is returned to its previous
|
||||
* value.
|
||||
*
|
||||
* BUGS: When used in a module (which arguably shouldn't happen)
|
||||
* nothing prevents two IRQ probe callers from overlapping. The
|
||||
* results of this are non-optimal.
|
||||
*/
|
||||
int probe_irq_off(unsigned long val)
|
||||
{
|
||||
int i, irq_found = 0, nr_of_irqs = 0;
|
||||
struct irq_desc *desc;
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
|
||||
if (desc->istate & IRQS_AUTODETECT) {
|
||||
if (!(desc->istate & IRQS_WAITING)) {
|
||||
if (!nr_of_irqs)
|
||||
irq_found = i;
|
||||
nr_of_irqs++;
|
||||
}
|
||||
desc->istate &= ~IRQS_AUTODETECT;
|
||||
irq_shutdown(desc);
|
||||
}
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
mutex_unlock(&probing_active);
|
||||
|
||||
if (nr_of_irqs > 1)
|
||||
irq_found = -irq_found;
|
||||
|
||||
return irq_found;
|
||||
}
|
||||
EXPORT_SYMBOL(probe_irq_off);
|
||||
|
849
kernel/irq/chip.c
Normal file
849
kernel/irq/chip.c
Normal file
|
@ -0,0 +1,849 @@
|
|||
/*
|
||||
* linux/kernel/irq/chip.c
|
||||
*
|
||||
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
|
||||
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
|
||||
*
|
||||
* This file contains the core interrupt handling code, for irq-chip
|
||||
* based architectures.
|
||||
*
|
||||
* Detailed information is available in Documentation/DocBook/genericirq
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
|
||||
#include <trace/events/irq.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
/**
|
||||
* irq_set_chip - set the irq chip for an irq
|
||||
* @irq: irq number
|
||||
* @chip: pointer to irq chip description structure
|
||||
*/
|
||||
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
|
||||
if (!chip)
|
||||
chip = &no_irq_chip;
|
||||
|
||||
desc->irq_data.chip = chip;
|
||||
irq_put_desc_unlock(desc, flags);
|
||||
/*
|
||||
* For !CONFIG_SPARSE_IRQ make the irq show up in
|
||||
* allocated_irqs.
|
||||
*/
|
||||
irq_mark_irq(irq);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(irq_set_chip);
|
||||
|
||||
/**
|
||||
* irq_set_type - set the irq trigger type for an irq
|
||||
* @irq: irq number
|
||||
* @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
|
||||
*/
|
||||
int irq_set_irq_type(unsigned int irq, unsigned int type)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
|
||||
int ret = 0;
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
|
||||
type &= IRQ_TYPE_SENSE_MASK;
|
||||
ret = __irq_set_trigger(desc, irq, type);
|
||||
irq_put_desc_busunlock(desc, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(irq_set_irq_type);
|
||||
|
||||
/**
|
||||
* irq_set_handler_data - set irq handler data for an irq
|
||||
* @irq: Interrupt number
|
||||
* @data: Pointer to interrupt specific data
|
||||
*
|
||||
* Set the hardware irq controller data for an irq
|
||||
*/
|
||||
int irq_set_handler_data(unsigned int irq, void *data)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
desc->irq_data.handler_data = data;
|
||||
irq_put_desc_unlock(desc, flags);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(irq_set_handler_data);
|
||||
|
||||
/**
|
||||
* irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
|
||||
* @irq_base: Interrupt number base
|
||||
* @irq_offset: Interrupt number offset
|
||||
* @entry: Pointer to MSI descriptor data
|
||||
*
|
||||
* Set the MSI descriptor entry for an irq at offset
|
||||
*/
|
||||
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
|
||||
struct msi_desc *entry)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
desc->irq_data.msi_desc = entry;
|
||||
if (entry && !irq_offset)
|
||||
entry->irq = irq_base;
|
||||
irq_put_desc_unlock(desc, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_set_msi_desc - set MSI descriptor data for an irq
|
||||
* @irq: Interrupt number
|
||||
* @entry: Pointer to MSI descriptor data
|
||||
*
|
||||
* Set the MSI descriptor entry for an irq
|
||||
*/
|
||||
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
|
||||
{
|
||||
return irq_set_msi_desc_off(irq, 0, entry);
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_set_chip_data - set irq chip data for an irq
|
||||
* @irq: Interrupt number
|
||||
* @data: Pointer to chip specific data
|
||||
*
|
||||
* Set the hardware irq chip data for an irq
|
||||
*/
|
||||
int irq_set_chip_data(unsigned int irq, void *data)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
desc->irq_data.chip_data = data;
|
||||
irq_put_desc_unlock(desc, flags);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(irq_set_chip_data);
|
||||
|
||||
struct irq_data *irq_get_irq_data(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
return desc ? &desc->irq_data : NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_get_irq_data);
|
||||
|
||||
static void irq_state_clr_disabled(struct irq_desc *desc)
|
||||
{
|
||||
irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
|
||||
}
|
||||
|
||||
static void irq_state_set_disabled(struct irq_desc *desc)
|
||||
{
|
||||
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
|
||||
}
|
||||
|
||||
static void irq_state_clr_masked(struct irq_desc *desc)
|
||||
{
|
||||
irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
|
||||
}
|
||||
|
||||
static void irq_state_set_masked(struct irq_desc *desc)
|
||||
{
|
||||
irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
|
||||
}
|
||||
|
||||
int irq_startup(struct irq_desc *desc, bool resend)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
irq_state_clr_disabled(desc);
|
||||
desc->depth = 0;
|
||||
|
||||
if (desc->irq_data.chip->irq_startup) {
|
||||
ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
|
||||
irq_state_clr_masked(desc);
|
||||
} else {
|
||||
irq_enable(desc);
|
||||
}
|
||||
if (resend)
|
||||
check_irq_resend(desc, desc->irq_data.irq);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void irq_shutdown(struct irq_desc *desc)
|
||||
{
|
||||
irq_state_set_disabled(desc);
|
||||
desc->depth = 1;
|
||||
if (desc->irq_data.chip->irq_shutdown)
|
||||
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
|
||||
else if (desc->irq_data.chip->irq_disable)
|
||||
desc->irq_data.chip->irq_disable(&desc->irq_data);
|
||||
else
|
||||
desc->irq_data.chip->irq_mask(&desc->irq_data);
|
||||
irq_state_set_masked(desc);
|
||||
}
|
||||
|
||||
void irq_enable(struct irq_desc *desc)
|
||||
{
|
||||
irq_state_clr_disabled(desc);
|
||||
if (desc->irq_data.chip->irq_enable)
|
||||
desc->irq_data.chip->irq_enable(&desc->irq_data);
|
||||
else
|
||||
desc->irq_data.chip->irq_unmask(&desc->irq_data);
|
||||
irq_state_clr_masked(desc);
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_disable - Mark interrupt disabled
|
||||
* @desc: irq descriptor which should be disabled
|
||||
*
|
||||
* If the chip does not implement the irq_disable callback, we
|
||||
* use a lazy disable approach. That means we mark the interrupt
|
||||
* disabled, but leave the hardware unmasked. That's an
|
||||
* optimization because we avoid the hardware access for the
|
||||
* common case where no interrupt happens after we marked it
|
||||
* disabled. If an interrupt happens, then the interrupt flow
|
||||
* handler masks the line at the hardware level and marks it
|
||||
* pending.
|
||||
*/
|
||||
void irq_disable(struct irq_desc *desc)
|
||||
{
|
||||
irq_state_set_disabled(desc);
|
||||
if (desc->irq_data.chip->irq_disable) {
|
||||
desc->irq_data.chip->irq_disable(&desc->irq_data);
|
||||
irq_state_set_masked(desc);
|
||||
}
|
||||
}
|
||||
|
||||
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
|
||||
{
|
||||
if (desc->irq_data.chip->irq_enable)
|
||||
desc->irq_data.chip->irq_enable(&desc->irq_data);
|
||||
else
|
||||
desc->irq_data.chip->irq_unmask(&desc->irq_data);
|
||||
cpumask_set_cpu(cpu, desc->percpu_enabled);
|
||||
}
|
||||
|
||||
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
|
||||
{
|
||||
if (desc->irq_data.chip->irq_disable)
|
||||
desc->irq_data.chip->irq_disable(&desc->irq_data);
|
||||
else
|
||||
desc->irq_data.chip->irq_mask(&desc->irq_data);
|
||||
cpumask_clear_cpu(cpu, desc->percpu_enabled);
|
||||
}
|
||||
|
||||
static inline void mask_ack_irq(struct irq_desc *desc)
|
||||
{
|
||||
if (desc->irq_data.chip->irq_mask_ack)
|
||||
desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
|
||||
else {
|
||||
desc->irq_data.chip->irq_mask(&desc->irq_data);
|
||||
if (desc->irq_data.chip->irq_ack)
|
||||
desc->irq_data.chip->irq_ack(&desc->irq_data);
|
||||
}
|
||||
irq_state_set_masked(desc);
|
||||
}
|
||||
|
||||
void mask_irq(struct irq_desc *desc)
|
||||
{
|
||||
if (desc->irq_data.chip->irq_mask) {
|
||||
desc->irq_data.chip->irq_mask(&desc->irq_data);
|
||||
irq_state_set_masked(desc);
|
||||
}
|
||||
}
|
||||
|
||||
void unmask_irq(struct irq_desc *desc)
|
||||
{
|
||||
if (desc->irq_data.chip->irq_unmask) {
|
||||
desc->irq_data.chip->irq_unmask(&desc->irq_data);
|
||||
irq_state_clr_masked(desc);
|
||||
}
|
||||
}
|
||||
|
||||
void unmask_threaded_irq(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = desc->irq_data.chip;
|
||||
|
||||
if (chip->flags & IRQCHIP_EOI_THREADED)
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
|
||||
if (chip->irq_unmask) {
|
||||
chip->irq_unmask(&desc->irq_data);
|
||||
irq_state_clr_masked(desc);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* handle_nested_irq - Handle a nested irq from a irq thread
|
||||
* @irq: the interrupt number
|
||||
*
|
||||
* Handle interrupts which are nested into a threaded interrupt
|
||||
* handler. The handler function is called inside the calling
|
||||
* threads context.
|
||||
*/
|
||||
void handle_nested_irq(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irqaction *action;
|
||||
irqreturn_t action_ret;
|
||||
|
||||
might_sleep();
|
||||
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
|
||||
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
action = desc->action;
|
||||
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
|
||||
action_ret = action->thread_fn(action->irq, action->dev_id);
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, action_ret);
|
||||
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
|
||||
|
||||
out_unlock:
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_nested_irq);
|
||||
|
||||
static bool irq_check_poll(struct irq_desc *desc)
|
||||
{
|
||||
if (!(desc->istate & IRQS_POLL_INPROGRESS))
|
||||
return false;
|
||||
return irq_wait_for_poll(desc);
|
||||
}
|
||||
|
||||
static bool irq_may_run(struct irq_desc *desc)
|
||||
{
|
||||
unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
|
||||
|
||||
/*
|
||||
* If the interrupt is not in progress and is not an armed
|
||||
* wakeup interrupt, proceed.
|
||||
*/
|
||||
if (!irqd_has_set(&desc->irq_data, mask))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* If the interrupt is an armed wakeup source, mark it pending
|
||||
* and suspended, disable it and notify the pm core about the
|
||||
* event.
|
||||
*/
|
||||
if (irq_pm_check_wakeup(desc))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Handle a potential concurrent poll on a different core.
|
||||
*/
|
||||
return irq_check_poll(desc);
|
||||
}
|
||||
|
||||
/**
|
||||
* handle_simple_irq - Simple and software-decoded IRQs.
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Simple interrupts are either sent from a demultiplexing interrupt
|
||||
* handler or come from hardware, where no interrupt hardware control
|
||||
* is necessary.
|
||||
*
|
||||
* Note: The caller is expected to handle the ack, clear, mask and
|
||||
* unmask issues if necessary.
|
||||
*/
|
||||
void
|
||||
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
if (!irq_may_run(desc))
|
||||
goto out_unlock;
|
||||
|
||||
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
handle_irq_event(desc);
|
||||
|
||||
out_unlock:
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_simple_irq);
|
||||
|
||||
/*
|
||||
* Called unconditionally from handle_level_irq() and only for oneshot
|
||||
* interrupts from handle_fasteoi_irq()
|
||||
*/
|
||||
static void cond_unmask_irq(struct irq_desc *desc)
|
||||
{
|
||||
/*
|
||||
* We need to unmask in the following cases:
|
||||
* - Standard level irq (IRQF_ONESHOT is not set)
|
||||
* - Oneshot irq which did not wake the thread (caused by a
|
||||
* spurious interrupt or a primary handler handling it
|
||||
* completely).
|
||||
*/
|
||||
if (!irqd_irq_disabled(&desc->irq_data) &&
|
||||
irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
|
||||
unmask_irq(desc);
|
||||
}
|
||||
|
||||
/**
|
||||
* handle_level_irq - Level type irq handler
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Level type interrupts are active as long as the hardware line has
|
||||
* the active level. This may require to mask the interrupt and unmask
|
||||
* it after the associated handler has acknowledged the device, so the
|
||||
* interrupt line is back to inactive.
|
||||
*/
|
||||
void
|
||||
handle_level_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
raw_spin_lock(&desc->lock);
|
||||
mask_ack_irq(desc);
|
||||
|
||||
if (!irq_may_run(desc))
|
||||
goto out_unlock;
|
||||
|
||||
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
/*
|
||||
* If its disabled or no action available
|
||||
* keep it masked and get out of here
|
||||
*/
|
||||
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
handle_irq_event(desc);
|
||||
|
||||
cond_unmask_irq(desc);
|
||||
|
||||
out_unlock:
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_level_irq);
|
||||
|
||||
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
|
||||
static inline void preflow_handler(struct irq_desc *desc)
|
||||
{
|
||||
if (desc->preflow_handler)
|
||||
desc->preflow_handler(&desc->irq_data);
|
||||
}
|
||||
#else
|
||||
static inline void preflow_handler(struct irq_desc *desc) { }
|
||||
#endif
|
||||
|
||||
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
|
||||
{
|
||||
if (!(desc->istate & IRQS_ONESHOT)) {
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* We need to unmask in the following cases:
|
||||
* - Oneshot irq which did not wake the thread (caused by a
|
||||
* spurious interrupt or a primary handler handling it
|
||||
* completely).
|
||||
*/
|
||||
if (!irqd_irq_disabled(&desc->irq_data) &&
|
||||
irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
unmask_irq(desc);
|
||||
} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* handle_fasteoi_irq - irq handler for transparent controllers
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Only a single callback will be issued to the chip: an ->eoi()
|
||||
* call when the interrupt has been serviced. This enables support
|
||||
* for modern forms of interrupt handlers, which handle the flow
|
||||
* details in hardware, transparently.
|
||||
*/
|
||||
void
|
||||
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = desc->irq_data.chip;
|
||||
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
if (!irq_may_run(desc))
|
||||
goto out;
|
||||
|
||||
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
/*
|
||||
* If its disabled or no action available
|
||||
* then mask it and get out of here:
|
||||
*/
|
||||
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
mask_irq(desc);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (desc->istate & IRQS_ONESHOT)
|
||||
mask_irq(desc);
|
||||
|
||||
preflow_handler(desc);
|
||||
handle_irq_event(desc);
|
||||
|
||||
cond_unmask_eoi_irq(desc, chip);
|
||||
|
||||
raw_spin_unlock(&desc->lock);
|
||||
return;
|
||||
out:
|
||||
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
|
||||
|
||||
/**
|
||||
* handle_edge_irq - edge type IRQ handler
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Interrupt occures on the falling and/or rising edge of a hardware
|
||||
* signal. The occurrence is latched into the irq controller hardware
|
||||
* and must be acked in order to be reenabled. After the ack another
|
||||
* interrupt can happen on the same source even before the first one
|
||||
* is handled by the associated event handler. If this happens it
|
||||
* might be necessary to disable (mask) the interrupt depending on the
|
||||
* controller hardware. This requires to reenable the interrupt inside
|
||||
* of the loop which handles the interrupts which have arrived while
|
||||
* the handler was running. If all pending interrupts are handled, the
|
||||
* loop is left.
|
||||
*/
|
||||
void
|
||||
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
|
||||
|
||||
if (!irq_may_run(desc)) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
mask_ack_irq(desc);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* If its disabled or no action available then mask it and get
|
||||
* out of here.
|
||||
*/
|
||||
if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
mask_ack_irq(desc);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
/* Start handling the irq */
|
||||
desc->irq_data.chip->irq_ack(&desc->irq_data);
|
||||
|
||||
do {
|
||||
if (unlikely(!desc->action)) {
|
||||
mask_irq(desc);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* When another irq arrived while we were handling
|
||||
* one, we could have masked the irq.
|
||||
* Renable it, if it was not disabled in meantime.
|
||||
*/
|
||||
if (unlikely(desc->istate & IRQS_PENDING)) {
|
||||
if (!irqd_irq_disabled(&desc->irq_data) &&
|
||||
irqd_irq_masked(&desc->irq_data))
|
||||
unmask_irq(desc);
|
||||
}
|
||||
|
||||
handle_irq_event(desc);
|
||||
|
||||
} while ((desc->istate & IRQS_PENDING) &&
|
||||
!irqd_irq_disabled(&desc->irq_data));
|
||||
|
||||
out_unlock:
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
EXPORT_SYMBOL(handle_edge_irq);
|
||||
|
||||
#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
|
||||
/**
|
||||
* handle_edge_eoi_irq - edge eoi type IRQ handler
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Similar as the above handle_edge_irq, but using eoi and w/o the
|
||||
* mask/unmask logic.
|
||||
*/
|
||||
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
|
||||
|
||||
if (!irq_may_run(desc)) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
goto out_eoi;
|
||||
}
|
||||
|
||||
/*
|
||||
* If its disabled or no action available then mask it and get
|
||||
* out of here.
|
||||
*/
|
||||
if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
|
||||
desc->istate |= IRQS_PENDING;
|
||||
goto out_eoi;
|
||||
}
|
||||
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
do {
|
||||
if (unlikely(!desc->action))
|
||||
goto out_eoi;
|
||||
|
||||
handle_irq_event(desc);
|
||||
|
||||
} while ((desc->istate & IRQS_PENDING) &&
|
||||
!irqd_irq_disabled(&desc->irq_data));
|
||||
|
||||
out_eoi:
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* handle_percpu_irq - Per CPU local irq handler
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Per CPU interrupts on SMP machines without locking requirements
|
||||
*/
|
||||
void
|
||||
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
if (chip->irq_ack)
|
||||
chip->irq_ack(&desc->irq_data);
|
||||
|
||||
handle_irq_event_percpu(desc, desc->action);
|
||||
|
||||
if (chip->irq_eoi)
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
|
||||
/**
|
||||
* handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* Per CPU interrupts on SMP machines without locking requirements. Same as
|
||||
* handle_percpu_irq() above but with the following extras:
|
||||
*
|
||||
* action->percpu_dev_id is a pointer to percpu variables which
|
||||
* contain the real device id for the cpu on which this handler is
|
||||
* called
|
||||
*/
|
||||
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
struct irqaction *action = desc->action;
|
||||
void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
|
||||
irqreturn_t res;
|
||||
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
if (chip->irq_ack)
|
||||
chip->irq_ack(&desc->irq_data);
|
||||
|
||||
trace_irq_handler_entry(irq, action);
|
||||
res = action->handler(irq, dev_id);
|
||||
trace_irq_handler_exit(irq, action, res);
|
||||
|
||||
if (chip->irq_eoi)
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
}
|
||||
|
||||
void
|
||||
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
|
||||
const char *name)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
|
||||
|
||||
if (!desc)
|
||||
return;
|
||||
|
||||
if (!handle) {
|
||||
handle = handle_bad_irq;
|
||||
} else {
|
||||
if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Uninstall? */
|
||||
if (handle == handle_bad_irq) {
|
||||
if (desc->irq_data.chip != &no_irq_chip)
|
||||
mask_ack_irq(desc);
|
||||
irq_state_set_disabled(desc);
|
||||
desc->depth = 1;
|
||||
}
|
||||
desc->handle_irq = handle;
|
||||
desc->name = name;
|
||||
|
||||
if (handle != handle_bad_irq && is_chained) {
|
||||
irq_settings_set_noprobe(desc);
|
||||
irq_settings_set_norequest(desc);
|
||||
irq_settings_set_nothread(desc);
|
||||
irq_startup(desc, true);
|
||||
}
|
||||
out:
|
||||
irq_put_desc_busunlock(desc, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__irq_set_handler);
|
||||
|
||||
void
|
||||
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
|
||||
irq_flow_handler_t handle, const char *name)
|
||||
{
|
||||
irq_set_chip(irq, chip);
|
||||
__irq_set_handler(irq, handle, 0, name);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
|
||||
|
||||
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
|
||||
|
||||
if (!desc)
|
||||
return;
|
||||
irq_settings_clr_and_set(desc, clr, set);
|
||||
|
||||
irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
|
||||
IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
|
||||
if (irq_settings_has_no_balance_set(desc))
|
||||
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
|
||||
if (irq_settings_is_per_cpu(desc))
|
||||
irqd_set(&desc->irq_data, IRQD_PER_CPU);
|
||||
if (irq_settings_can_move_pcntxt(desc))
|
||||
irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
|
||||
if (irq_settings_is_level(desc))
|
||||
irqd_set(&desc->irq_data, IRQD_LEVEL);
|
||||
|
||||
irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
|
||||
|
||||
irq_put_desc_unlock(desc, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_modify_status);
|
||||
|
||||
/**
|
||||
* irq_cpu_online - Invoke all irq_cpu_online functions.
|
||||
*
|
||||
* Iterate through all irqs and invoke the chip.irq_cpu_online()
|
||||
* for each.
|
||||
*/
|
||||
void irq_cpu_online(void)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
struct irq_chip *chip;
|
||||
unsigned long flags;
|
||||
unsigned int irq;
|
||||
|
||||
for_each_active_irq(irq) {
|
||||
desc = irq_to_desc(irq);
|
||||
if (!desc)
|
||||
continue;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
|
||||
chip = irq_data_get_irq_chip(&desc->irq_data);
|
||||
if (chip && chip->irq_cpu_online &&
|
||||
(!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
|
||||
!irqd_irq_disabled(&desc->irq_data)))
|
||||
chip->irq_cpu_online(&desc->irq_data);
|
||||
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_cpu_offline - Invoke all irq_cpu_offline functions.
|
||||
*
|
||||
* Iterate through all irqs and invoke the chip.irq_cpu_offline()
|
||||
* for each.
|
||||
*/
|
||||
void irq_cpu_offline(void)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
struct irq_chip *chip;
|
||||
unsigned long flags;
|
||||
unsigned int irq;
|
||||
|
||||
for_each_active_irq(irq) {
|
||||
desc = irq_to_desc(irq);
|
||||
if (!desc)
|
||||
continue;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
|
||||
chip = irq_data_get_irq_chip(&desc->irq_data);
|
||||
if (chip && chip->irq_cpu_offline &&
|
||||
(!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
|
||||
!irqd_irq_disabled(&desc->irq_data)))
|
||||
chip->irq_cpu_offline(&desc->irq_data);
|
||||
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
}
|
45
kernel/irq/debug.h
Normal file
45
kernel/irq/debug.h
Normal file
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
* Debugging printout:
|
||||
*/
|
||||
|
||||
#include <linux/kallsyms.h>
|
||||
|
||||
#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
|
||||
#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
|
||||
/* FIXME */
|
||||
#define ___PD(f) do { } while (0)
|
||||
|
||||
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
|
||||
irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
|
||||
printk("->handle_irq(): %p, ", desc->handle_irq);
|
||||
print_symbol("%s\n", (unsigned long)desc->handle_irq);
|
||||
printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
|
||||
print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
|
||||
printk("->action(): %p\n", desc->action);
|
||||
if (desc->action) {
|
||||
printk("->action->handler(): %p, ", desc->action->handler);
|
||||
print_symbol("%s\n", (unsigned long)desc->action->handler);
|
||||
}
|
||||
|
||||
___P(IRQ_LEVEL);
|
||||
___P(IRQ_PER_CPU);
|
||||
___P(IRQ_NOPROBE);
|
||||
___P(IRQ_NOREQUEST);
|
||||
___P(IRQ_NOTHREAD);
|
||||
___P(IRQ_NOAUTOEN);
|
||||
|
||||
___PS(IRQS_AUTODETECT);
|
||||
___PS(IRQS_REPLAY);
|
||||
___PS(IRQS_WAITING);
|
||||
___PS(IRQS_PENDING);
|
||||
|
||||
___PD(IRQS_INPROGRESS);
|
||||
___PD(IRQS_DISABLED);
|
||||
___PD(IRQS_MASKED);
|
||||
}
|
||||
|
||||
#undef ___P
|
||||
#undef ___PS
|
||||
#undef ___PD
|
139
kernel/irq/devres.c
Normal file
139
kernel/irq/devres.c
Normal file
|
@ -0,0 +1,139 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/gfp.h>
|
||||
|
||||
/*
|
||||
* Device resource management aware IRQ request/free implementation.
|
||||
*/
|
||||
struct irq_devres {
|
||||
unsigned int irq;
|
||||
void *dev_id;
|
||||
};
|
||||
|
||||
static void devm_irq_release(struct device *dev, void *res)
|
||||
{
|
||||
struct irq_devres *this = res;
|
||||
|
||||
free_irq(this->irq, this->dev_id);
|
||||
}
|
||||
|
||||
static int devm_irq_match(struct device *dev, void *res, void *data)
|
||||
{
|
||||
struct irq_devres *this = res, *match = data;
|
||||
|
||||
return this->irq == match->irq && this->dev_id == match->dev_id;
|
||||
}
|
||||
|
||||
/**
|
||||
* devm_request_threaded_irq - allocate an interrupt line for a managed device
|
||||
* @dev: device to request interrupt for
|
||||
* @irq: Interrupt line to allocate
|
||||
* @handler: Function to be called when the IRQ occurs
|
||||
* @thread_fn: function to be called in a threaded interrupt context. NULL
|
||||
* for devices which handle everything in @handler
|
||||
* @irqflags: Interrupt type flags
|
||||
* @devname: An ascii name for the claiming device
|
||||
* @dev_id: A cookie passed back to the handler function
|
||||
*
|
||||
* Except for the extra @dev argument, this function takes the
|
||||
* same arguments and performs the same function as
|
||||
* request_threaded_irq(). IRQs requested with this function will be
|
||||
* automatically freed on driver detach.
|
||||
*
|
||||
* If an IRQ allocated with this function needs to be freed
|
||||
* separately, devm_free_irq() must be used.
|
||||
*/
|
||||
int devm_request_threaded_irq(struct device *dev, unsigned int irq,
|
||||
irq_handler_t handler, irq_handler_t thread_fn,
|
||||
unsigned long irqflags, const char *devname,
|
||||
void *dev_id)
|
||||
{
|
||||
struct irq_devres *dr;
|
||||
int rc;
|
||||
|
||||
dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
|
||||
GFP_KERNEL);
|
||||
if (!dr)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
|
||||
dev_id);
|
||||
if (rc) {
|
||||
devres_free(dr);
|
||||
return rc;
|
||||
}
|
||||
|
||||
dr->irq = irq;
|
||||
dr->dev_id = dev_id;
|
||||
devres_add(dev, dr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(devm_request_threaded_irq);
|
||||
|
||||
/**
|
||||
* devm_request_any_context_irq - allocate an interrupt line for a managed device
|
||||
* @dev: device to request interrupt for
|
||||
* @irq: Interrupt line to allocate
|
||||
* @handler: Function to be called when the IRQ occurs
|
||||
* @thread_fn: function to be called in a threaded interrupt context. NULL
|
||||
* for devices which handle everything in @handler
|
||||
* @irqflags: Interrupt type flags
|
||||
* @devname: An ascii name for the claiming device
|
||||
* @dev_id: A cookie passed back to the handler function
|
||||
*
|
||||
* Except for the extra @dev argument, this function takes the
|
||||
* same arguments and performs the same function as
|
||||
* request_any_context_irq(). IRQs requested with this function will be
|
||||
* automatically freed on driver detach.
|
||||
*
|
||||
* If an IRQ allocated with this function needs to be freed
|
||||
* separately, devm_free_irq() must be used.
|
||||
*/
|
||||
int devm_request_any_context_irq(struct device *dev, unsigned int irq,
|
||||
irq_handler_t handler, unsigned long irqflags,
|
||||
const char *devname, void *dev_id)
|
||||
{
|
||||
struct irq_devres *dr;
|
||||
int rc;
|
||||
|
||||
dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
|
||||
GFP_KERNEL);
|
||||
if (!dr)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
|
||||
if (rc) {
|
||||
devres_free(dr);
|
||||
return rc;
|
||||
}
|
||||
|
||||
dr->irq = irq;
|
||||
dr->dev_id = dev_id;
|
||||
devres_add(dev, dr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(devm_request_any_context_irq);
|
||||
|
||||
/**
|
||||
* devm_free_irq - free an interrupt
|
||||
* @dev: device to free interrupt for
|
||||
* @irq: Interrupt line to free
|
||||
* @dev_id: Device identity to free
|
||||
*
|
||||
* Except for the extra @dev argument, this function takes the
|
||||
* same arguments and performs the same function as free_irq().
|
||||
* This function instead of free_irq() should be used to manually
|
||||
* free IRQs allocated with devm_request_irq().
|
||||
*/
|
||||
void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
|
||||
{
|
||||
struct irq_devres match_data = { irq, dev_id };
|
||||
|
||||
WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
|
||||
&match_data));
|
||||
free_irq(irq, dev_id);
|
||||
}
|
||||
EXPORT_SYMBOL(devm_free_irq);
|
61
kernel/irq/dummychip.c
Normal file
61
kernel/irq/dummychip.c
Normal file
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
|
||||
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
|
||||
*
|
||||
* This file contains the dummy interrupt chip implementation
|
||||
*/
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
/*
|
||||
* What should we do if we get a hw irq event on an illegal vector?
|
||||
* Each architecture has to answer this themself.
|
||||
*/
|
||||
static void ack_bad(struct irq_data *data)
|
||||
{
|
||||
struct irq_desc *desc = irq_data_to_desc(data);
|
||||
|
||||
print_irq_desc(data->irq, desc);
|
||||
ack_bad_irq(data->irq);
|
||||
}
|
||||
|
||||
/*
|
||||
* NOP functions
|
||||
*/
|
||||
static void noop(struct irq_data *data) { }
|
||||
|
||||
static unsigned int noop_ret(struct irq_data *data)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Generic no controller implementation
|
||||
*/
|
||||
struct irq_chip no_irq_chip = {
|
||||
.name = "none",
|
||||
.irq_startup = noop_ret,
|
||||
.irq_shutdown = noop,
|
||||
.irq_enable = noop,
|
||||
.irq_disable = noop,
|
||||
.irq_ack = ack_bad,
|
||||
};
|
||||
|
||||
/*
|
||||
* Generic dummy implementation which can be used for
|
||||
* real dumb interrupt sources
|
||||
*/
|
||||
struct irq_chip dummy_irq_chip = {
|
||||
.name = "dummy",
|
||||
.irq_startup = noop_ret,
|
||||
.irq_shutdown = noop,
|
||||
.irq_enable = noop,
|
||||
.irq_disable = noop,
|
||||
.irq_ack = noop,
|
||||
.irq_mask = noop,
|
||||
.irq_unmask = noop,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(dummy_irq_chip);
|
592
kernel/irq/generic-chip.c
Normal file
592
kernel/irq/generic-chip.c
Normal file
|
@ -0,0 +1,592 @@
|
|||
/*
|
||||
* Library implementing the most common irq chip callback functions
|
||||
*
|
||||
* Copyright (C) 2011, Thomas Gleixner
|
||||
*/
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
static LIST_HEAD(gc_list);
|
||||
static DEFINE_RAW_SPINLOCK(gc_lock);
|
||||
|
||||
/**
|
||||
* irq_gc_noop - NOOP function
|
||||
* @d: irq_data
|
||||
*/
|
||||
void irq_gc_noop(struct irq_data *d)
|
||||
{
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_gc_mask_disable_reg - Mask chip via disable register
|
||||
* @d: irq_data
|
||||
*
|
||||
* Chip has separate enable/disable registers instead of a single mask
|
||||
* register.
|
||||
*/
|
||||
void irq_gc_mask_disable_reg(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(mask, gc->reg_base + ct->regs.disable);
|
||||
*ct->mask_cache &= ~mask;
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_gc_mask_set_bit - Mask chip via setting bit in mask register
|
||||
* @d: irq_data
|
||||
*
|
||||
* Chip has a single mask register. Values of this register are cached
|
||||
* and protected by gc->lock
|
||||
*/
|
||||
void irq_gc_mask_set_bit(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
*ct->mask_cache |= mask;
|
||||
irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
|
||||
|
||||
/**
|
||||
* irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
|
||||
* @d: irq_data
|
||||
*
|
||||
* Chip has a single mask register. Values of this register are cached
|
||||
* and protected by gc->lock
|
||||
*/
|
||||
void irq_gc_mask_clr_bit(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
*ct->mask_cache &= ~mask;
|
||||
irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
|
||||
|
||||
/**
|
||||
* irq_gc_unmask_enable_reg - Unmask chip via enable register
|
||||
* @d: irq_data
|
||||
*
|
||||
* Chip has separate enable/disable registers instead of a single mask
|
||||
* register.
|
||||
*/
|
||||
void irq_gc_unmask_enable_reg(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(mask, gc->reg_base + ct->regs.enable);
|
||||
*ct->mask_cache |= mask;
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_gc_ack_set_bit - Ack pending interrupt via setting bit
|
||||
* @d: irq_data
|
||||
*/
|
||||
void irq_gc_ack_set_bit(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
|
||||
|
||||
/**
|
||||
* irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
|
||||
* @d: irq_data
|
||||
*/
|
||||
void irq_gc_ack_clr_bit(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = ~d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
|
||||
* @d: irq_data
|
||||
*/
|
||||
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(mask, gc->reg_base + ct->regs.mask);
|
||||
irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_gc_eoi - EOI interrupt
|
||||
* @d: irq_data
|
||||
*/
|
||||
void irq_gc_eoi(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(mask, gc->reg_base + ct->regs.eoi);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_gc_set_wake - Set/clr wake bit for an interrupt
|
||||
* @d: irq_data
|
||||
* @on: Indicates whether the wake bit should be set or cleared
|
||||
*
|
||||
* For chips where the wake from suspend functionality is not
|
||||
* configured in a separate register and the wakeup active state is
|
||||
* just stored in a bitmask.
|
||||
*/
|
||||
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
u32 mask = d->mask;
|
||||
|
||||
if (!(mask & gc->wake_enabled))
|
||||
return -EINVAL;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
if (on)
|
||||
gc->wake_active |= mask;
|
||||
else
|
||||
gc->wake_active &= ~mask;
|
||||
irq_gc_unlock(gc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
|
||||
int num_ct, unsigned int irq_base,
|
||||
void __iomem *reg_base, irq_flow_handler_t handler)
|
||||
{
|
||||
raw_spin_lock_init(&gc->lock);
|
||||
gc->num_ct = num_ct;
|
||||
gc->irq_base = irq_base;
|
||||
gc->reg_base = reg_base;
|
||||
gc->chip_types->chip.name = name;
|
||||
gc->chip_types->handler = handler;
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_alloc_generic_chip - Allocate a generic chip and initialize it
|
||||
* @name: Name of the irq chip
|
||||
* @num_ct: Number of irq_chip_type instances associated with this
|
||||
* @irq_base: Interrupt base nr for this chip
|
||||
* @reg_base: Register base address (virtual)
|
||||
* @handler: Default flow handler associated with this chip
|
||||
*
|
||||
* Returns an initialized irq_chip_generic structure. The chip defaults
|
||||
* to the primary (index 0) irq_chip_type and @handler
|
||||
*/
|
||||
struct irq_chip_generic *
|
||||
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
|
||||
void __iomem *reg_base, irq_flow_handler_t handler)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
|
||||
|
||||
gc = kzalloc(sz, GFP_KERNEL);
|
||||
if (gc) {
|
||||
irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
|
||||
handler);
|
||||
}
|
||||
return gc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
|
||||
|
||||
static void
|
||||
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
|
||||
{
|
||||
struct irq_chip_type *ct = gc->chip_types;
|
||||
u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < gc->num_ct; i++) {
|
||||
if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
|
||||
mskptr = &ct[i].mask_cache_priv;
|
||||
mskreg = ct[i].regs.mask;
|
||||
}
|
||||
ct[i].mask_cache = mskptr;
|
||||
if (flags & IRQ_GC_INIT_MASK_CACHE)
|
||||
*mskptr = irq_reg_readl(gc->reg_base + mskreg);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain
|
||||
* @d: irq domain for which to allocate chips
|
||||
* @irqs_per_chip: Number of interrupts each chip handles
|
||||
* @num_ct: Number of irq_chip_type instances associated with this
|
||||
* @name: Name of the irq chip
|
||||
* @handler: Default flow handler associated with these chips
|
||||
* @clr: IRQ_* bits to clear in the mapping function
|
||||
* @set: IRQ_* bits to set in the mapping function
|
||||
* @gcflags: Generic chip specific setup flags
|
||||
*/
|
||||
int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
|
||||
int num_ct, const char *name,
|
||||
irq_flow_handler_t handler,
|
||||
unsigned int clr, unsigned int set,
|
||||
enum irq_gc_flags gcflags)
|
||||
{
|
||||
struct irq_domain_chip_generic *dgc;
|
||||
struct irq_chip_generic *gc;
|
||||
int numchips, sz, i;
|
||||
unsigned long flags;
|
||||
void *tmp;
|
||||
|
||||
if (d->gc)
|
||||
return -EBUSY;
|
||||
|
||||
numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
|
||||
if (!numchips)
|
||||
return -EINVAL;
|
||||
|
||||
/* Allocate a pointer, generic chip and chiptypes for each chip */
|
||||
sz = sizeof(*dgc) + numchips * sizeof(gc);
|
||||
sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
|
||||
|
||||
tmp = dgc = kzalloc(sz, GFP_KERNEL);
|
||||
if (!dgc)
|
||||
return -ENOMEM;
|
||||
dgc->irqs_per_chip = irqs_per_chip;
|
||||
dgc->num_chips = numchips;
|
||||
dgc->irq_flags_to_set = set;
|
||||
dgc->irq_flags_to_clear = clr;
|
||||
dgc->gc_flags = gcflags;
|
||||
d->gc = dgc;
|
||||
|
||||
/* Calc pointer to the first generic chip */
|
||||
tmp += sizeof(*dgc) + numchips * sizeof(gc);
|
||||
for (i = 0; i < numchips; i++) {
|
||||
/* Store the pointer to the generic chip */
|
||||
dgc->gc[i] = gc = tmp;
|
||||
irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
|
||||
NULL, handler);
|
||||
gc->domain = d;
|
||||
raw_spin_lock_irqsave(&gc_lock, flags);
|
||||
list_add_tail(&gc->list, &gc_list);
|
||||
raw_spin_unlock_irqrestore(&gc_lock, flags);
|
||||
/* Calc pointer to the next generic chip */
|
||||
tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
|
||||
}
|
||||
d->name = name;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
|
||||
|
||||
/**
|
||||
* irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
|
||||
* @d: irq domain pointer
|
||||
* @hw_irq: Hardware interrupt number
|
||||
*/
|
||||
struct irq_chip_generic *
|
||||
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
|
||||
{
|
||||
struct irq_domain_chip_generic *dgc = d->gc;
|
||||
int idx;
|
||||
|
||||
if (!dgc)
|
||||
return NULL;
|
||||
idx = hw_irq / dgc->irqs_per_chip;
|
||||
if (idx >= dgc->num_chips)
|
||||
return NULL;
|
||||
return dgc->gc[idx];
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
|
||||
|
||||
/*
|
||||
* Separate lockdep class for interrupt chip which can nest irq_desc
|
||||
* lock.
|
||||
*/
|
||||
static struct lock_class_key irq_nested_lock_class;
|
||||
|
||||
/*
|
||||
* irq_map_generic_chip - Map a generic chip for an irq domain
|
||||
*/
|
||||
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
|
||||
irq_hw_number_t hw_irq)
|
||||
{
|
||||
struct irq_data *data = irq_get_irq_data(virq);
|
||||
struct irq_domain_chip_generic *dgc = d->gc;
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_chip_type *ct;
|
||||
struct irq_chip *chip;
|
||||
unsigned long flags;
|
||||
int idx;
|
||||
|
||||
if (!d->gc)
|
||||
return -ENODEV;
|
||||
|
||||
idx = hw_irq / dgc->irqs_per_chip;
|
||||
if (idx >= dgc->num_chips)
|
||||
return -EINVAL;
|
||||
gc = dgc->gc[idx];
|
||||
|
||||
idx = hw_irq % dgc->irqs_per_chip;
|
||||
|
||||
if (test_bit(idx, &gc->unused))
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (test_bit(idx, &gc->installed))
|
||||
return -EBUSY;
|
||||
|
||||
ct = gc->chip_types;
|
||||
chip = &ct->chip;
|
||||
|
||||
/* We only init the cache for the first mapping of a generic chip */
|
||||
if (!gc->installed) {
|
||||
raw_spin_lock_irqsave(&gc->lock, flags);
|
||||
irq_gc_init_mask_cache(gc, dgc->gc_flags);
|
||||
raw_spin_unlock_irqrestore(&gc->lock, flags);
|
||||
}
|
||||
|
||||
/* Mark the interrupt as installed */
|
||||
set_bit(idx, &gc->installed);
|
||||
|
||||
if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
|
||||
irq_set_lockdep_class(virq, &irq_nested_lock_class);
|
||||
|
||||
if (chip->irq_calc_mask)
|
||||
chip->irq_calc_mask(data);
|
||||
else
|
||||
data->mask = 1 << idx;
|
||||
|
||||
irq_set_chip_and_handler(virq, chip, ct->handler);
|
||||
irq_set_chip_data(virq, gc);
|
||||
irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_map_generic_chip);
|
||||
|
||||
struct irq_domain_ops irq_generic_chip_ops = {
|
||||
.map = irq_map_generic_chip,
|
||||
.xlate = irq_domain_xlate_onetwocell,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
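/*
 * Illustrative sketch (editor's example, not from this file): a typical
 * irqchip driver pairs irq_generic_chip_ops with the domain-chip allocator
 * above and then fills in register offsets on the first generic chip. The
 * "foo" names, the 32-interrupt size and the 0x04/0x08 offsets are
 * hypothetical; <linux/irq.h>, <linux/irqdomain.h> and <linux/of.h> are
 * assumed to be included.
 */
static int __init foo_init_irq_domain(struct device_node *np, void __iomem *base)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret;

	domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENOMEM;

	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "foo-intc",
					     handle_level_irq, IRQ_NOREQUEST,
					     0, IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		return ret;

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = base;
	gc->chip_types[0].regs.mask = 0x04;	/* hypothetical mask register */
	gc->chip_types[0].regs.ack = 0x08;	/* hypothetical ack register */
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
	return 0;
}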
|
||||
|
||||
/**
|
||||
* irq_setup_generic_chip - Setup a range of interrupts with a generic chip
|
||||
* @gc: Generic irq chip holding all data
|
||||
* @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
|
||||
* @flags: Flags for initialization
|
||||
* @clr: IRQ_* bits to clear
|
||||
* @set: IRQ_* bits to set
|
||||
*
|
||||
* Set up max. 32 interrupts starting from gc->irq_base. Note, this
|
||||
* initializes all interrupts to the primary irq_chip_type and its
|
||||
* associated handler.
|
||||
*/
|
||||
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
|
||||
enum irq_gc_flags flags, unsigned int clr,
|
||||
unsigned int set)
|
||||
{
|
||||
struct irq_chip_type *ct = gc->chip_types;
|
||||
struct irq_chip *chip = &ct->chip;
|
||||
unsigned int i;
|
||||
|
||||
raw_spin_lock(&gc_lock);
|
||||
list_add_tail(&gc->list, &gc_list);
|
||||
raw_spin_unlock(&gc_lock);
|
||||
|
||||
irq_gc_init_mask_cache(gc, flags);
|
||||
|
||||
for (i = gc->irq_base; msk; msk >>= 1, i++) {
|
||||
if (!(msk & 0x01))
|
||||
continue;
|
||||
|
||||
if (flags & IRQ_GC_INIT_NESTED_LOCK)
|
||||
irq_set_lockdep_class(i, &irq_nested_lock_class);
|
||||
|
||||
if (!(flags & IRQ_GC_NO_MASK)) {
|
||||
struct irq_data *d = irq_get_irq_data(i);
|
||||
|
||||
if (chip->irq_calc_mask)
|
||||
chip->irq_calc_mask(d);
|
||||
else
|
||||
d->mask = 1 << (i - gc->irq_base);
|
||||
}
|
||||
irq_set_chip_and_handler(i, chip, ct->handler);
|
||||
irq_set_chip_data(i, gc);
|
||||
irq_modify_status(i, clr, set);
|
||||
}
|
||||
gc->irq_cnt = i - gc->irq_base;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
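/*
 * Illustrative sketch (editor's example, not from this file): legacy,
 * non-domain users allocate a generic chip for a fixed linux irq range with
 * irq_alloc_generic_chip() and install it with irq_setup_generic_chip().
 * The "foo" names and the 0x10 register offset are hypothetical.
 */
static void __init foo_legacy_irq_init(unsigned int irq_base, void __iomem *base)
{
	struct irq_chip_generic *gc;

	gc = irq_alloc_generic_chip("foo-intc", 1, irq_base, base,
				    handle_level_irq);
	if (!gc)
		return;

	gc->chip_types[0].regs.mask = 0x10;	/* hypothetical mask register */
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;

	/* Install all 32 interrupts and clear IRQ_NOREQUEST so they can be requested */
	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}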
|
||||
|
||||
/**
|
||||
* irq_setup_alt_chip - Switch to alternative chip
|
||||
* @d: irq_data for this interrupt
|
||||
* @type: Flow type to be initialized
|
||||
*
|
||||
* Only to be called from chip->irq_set_type() callbacks.
|
||||
*/
|
||||
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip_type *ct = gc->chip_types;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < gc->num_ct; i++, ct++) {
|
||||
if (ct->type & type) {
|
||||
d->chip = &ct->chip;
|
||||
irq_data_to_desc(d)->handle_irq = ct->handler;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
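/*
 * Illustrative sketch (editor's example, not from this file): a driver with
 * separate level and edge irq_chip_types switches between them from its
 * irq_set_type() callback via irq_setup_alt_chip(). "foo" is hypothetical.
 */
static int foo_irq_set_type(struct irq_data *d, unsigned int type)
{
	/* Pick the chip_type whose ->type mask matches the requested flow */
	int ret = irq_setup_alt_chip(d, type);

	if (ret)
		return ret;

	/* Program the (hypothetical) trigger configuration register here */
	return 0;
}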
|
||||
|
||||
/**
|
||||
* irq_remove_generic_chip - Remove a chip
|
||||
* @gc: Generic irq chip holding all data
|
||||
* @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
|
||||
* @clr: IRQ_* bits to clear
|
||||
* @set: IRQ_* bits to set
|
||||
*
|
||||
* Remove up to 32 interrupts starting from gc->irq_base.
|
||||
*/
|
||||
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
|
||||
unsigned int clr, unsigned int set)
|
||||
{
|
||||
unsigned int i = gc->irq_base;
|
||||
|
||||
raw_spin_lock(&gc_lock);
|
||||
list_del(&gc->list);
|
||||
raw_spin_unlock(&gc_lock);
|
||||
|
||||
for (; msk; msk >>= 1, i++) {
|
||||
if (!(msk & 0x01))
|
||||
continue;
|
||||
|
||||
/* Remove handler first. That will mask the irq line */
|
||||
irq_set_handler(i, NULL);
|
||||
irq_set_chip(i, &no_irq_chip);
|
||||
irq_set_chip_data(i, NULL);
|
||||
irq_modify_status(i, clr, set);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
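/*
 * Illustrative sketch (editor's example, not from this file): tearing down a
 * chip installed like the irq_alloc_generic_chip() sketch earlier. The mask
 * mirrors the setup call and IRQ_NOREQUEST is set again on the freed range.
 */
static void foo_legacy_irq_exit(struct irq_chip_generic *gc)
{
	irq_remove_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST);
	kfree(gc);
}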
|
||||
|
||||
static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
|
||||
{
|
||||
unsigned int virq;
|
||||
|
||||
if (!gc->domain)
|
||||
return irq_get_irq_data(gc->irq_base);
|
||||
|
||||
/*
|
||||
* We don't know which of the irqs has been actually
|
||||
* installed. Use the first one.
|
||||
*/
|
||||
if (!gc->installed)
|
||||
return NULL;
|
||||
|
||||
virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
|
||||
return virq ? irq_get_irq_data(virq) : NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static int irq_gc_suspend(void)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
|
||||
list_for_each_entry(gc, &gc_list, list) {
|
||||
struct irq_chip_type *ct = gc->chip_types;
|
||||
|
||||
if (ct->chip.irq_suspend) {
|
||||
struct irq_data *data = irq_gc_get_irq_data(gc);
|
||||
|
||||
if (data)
|
||||
ct->chip.irq_suspend(data);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void irq_gc_resume(void)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
|
||||
list_for_each_entry(gc, &gc_list, list) {
|
||||
struct irq_chip_type *ct = gc->chip_types;
|
||||
|
||||
if (ct->chip.irq_resume) {
|
||||
struct irq_data *data = irq_gc_get_irq_data(gc);
|
||||
|
||||
if (data)
|
||||
ct->chip.irq_resume(data);
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
#define irq_gc_suspend NULL
|
||||
#define irq_gc_resume NULL
|
||||
#endif
|
||||
|
||||
static void irq_gc_shutdown(void)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
|
||||
list_for_each_entry(gc, &gc_list, list) {
|
||||
struct irq_chip_type *ct = gc->chip_types;
|
||||
|
||||
if (ct->chip.irq_pm_shutdown) {
|
||||
struct irq_data *data = irq_gc_get_irq_data(gc);
|
||||
|
||||
if (data)
|
||||
ct->chip.irq_pm_shutdown(data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct syscore_ops irq_gc_syscore_ops = {
|
||||
.suspend = irq_gc_suspend,
|
||||
.resume = irq_gc_resume,
|
||||
.shutdown = irq_gc_shutdown,
|
||||
};
|
||||
|
||||
static int __init irq_gc_init_ops(void)
|
||||
{
|
||||
register_syscore_ops(&irq_gc_syscore_ops);
|
||||
return 0;
|
||||
}
|
||||
device_initcall(irq_gc_init_ops);
|
199
kernel/irq/handle.c
Normal file
|
@@ -0,0 +1,199 @@
|
|||
/*
|
||||
* linux/kernel/irq/handle.c
|
||||
*
|
||||
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
|
||||
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
|
||||
*
|
||||
* This file contains the core interrupt handling code.
|
||||
*
|
||||
* Detailed information is available in Documentation/DocBook/genericirq
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
|
||||
#include <trace/events/irq.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
/**
|
||||
* handle_bad_irq - handle spurious and unhandled irqs
|
||||
* @irq: the interrupt number
|
||||
* @desc: description of the interrupt
|
||||
*
|
||||
* Handles spurious and unhandled IRQs. It also prints a debug message.
|
||||
*/
|
||||
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
print_irq_desc(irq, desc);
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
ack_bad_irq(irq);
|
||||
}
|
||||
|
||||
/*
|
||||
* Special, empty irq handler:
|
||||
*/
|
||||
irqreturn_t no_action(int cpl, void *dev_id)
|
||||
{
|
||||
return IRQ_NONE;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(no_action);
|
||||
|
||||
static void warn_no_thread(unsigned int irq, struct irqaction *action)
|
||||
{
|
||||
if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
|
||||
return;
|
||||
|
||||
printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
|
||||
"but no thread function available.", irq, action->name);
|
||||
}
|
||||
|
||||
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
|
||||
{
|
||||
/*
|
||||
* In case the thread crashed and was killed we just pretend that
|
||||
* we handled the interrupt. The hardirq handler has disabled the
|
||||
* device interrupt, so no irq storm is lurking.
|
||||
*/
|
||||
if (action->thread->flags & PF_EXITING)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Wake up the handler thread for this action. If the
|
||||
* RUNTHREAD bit is already set, nothing to do.
|
||||
*/
|
||||
if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
|
||||
return;
|
||||
|
||||
/*
|
||||
* It's safe to OR the mask lockless here. We have only two
|
||||
* places which write to threads_oneshot: This code and the
|
||||
* irq thread.
|
||||
*
|
||||
* This code is the hard irq context and can never run on two
|
||||
* cpus in parallel. If it ever does we have more serious
|
||||
* problems than this bitmask.
|
||||
*
|
||||
* The irq threads of this irq which clear their "running" bit
|
||||
* in threads_oneshot are serialized via desc->lock against
|
||||
* each other and they are serialized against this code by
|
||||
* IRQS_INPROGRESS.
|
||||
*
|
||||
* Hard irq handler:
|
||||
*
|
||||
* spin_lock(desc->lock);
|
||||
* desc->state |= IRQS_INPROGRESS;
|
||||
* spin_unlock(desc->lock);
|
||||
* set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
|
||||
* desc->threads_oneshot |= mask;
|
||||
* spin_lock(desc->lock);
|
||||
* desc->state &= ~IRQS_INPROGRESS;
|
||||
* spin_unlock(desc->lock);
|
||||
*
|
||||
* irq thread:
|
||||
*
|
||||
* again:
|
||||
* spin_lock(desc->lock);
|
||||
* if (desc->state & IRQS_INPROGRESS) {
|
||||
* spin_unlock(desc->lock);
|
||||
* while(desc->state & IRQS_INPROGRESS)
|
||||
* cpu_relax();
|
||||
* goto again;
|
||||
* }
|
||||
* if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
|
||||
* desc->threads_oneshot &= ~mask;
|
||||
* spin_unlock(desc->lock);
|
||||
*
|
||||
* So either the thread waits for us to clear IRQS_INPROGRESS
|
||||
* or we are waiting in the flow handler for desc->lock to be
|
||||
* released before we reach this point. The thread also checks
|
||||
* IRQTF_RUNTHREAD under desc->lock. If set it leaves
|
||||
* threads_oneshot untouched and runs the thread another time.
|
||||
*/
|
||||
desc->threads_oneshot |= action->thread_mask;
|
||||
|
||||
/*
|
||||
* We increment the threads_active counter in case we wake up
|
||||
* the irq thread. The irq thread decrements the counter when
|
||||
* it returns from the handler or in the exit path and wakes
|
||||
* up waiters which are stuck in synchronize_irq() when the
|
||||
* active count becomes zero. synchronize_irq() is serialized
|
||||
* against this code (hard irq handler) via IRQS_INPROGRESS
|
||||
* like the finalize_oneshot() code. See comment above.
|
||||
*/
|
||||
atomic_inc(&desc->threads_active);
|
||||
|
||||
wake_up_process(action->thread);
|
||||
}
|
||||
|
||||
irqreturn_t
|
||||
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
|
||||
{
|
||||
irqreturn_t retval = IRQ_NONE;
|
||||
unsigned int flags = 0, irq = desc->irq_data.irq;
|
||||
|
||||
do {
|
||||
irqreturn_t res;
|
||||
|
||||
trace_irq_handler_entry(irq, action);
|
||||
exynos_ss_irq(irq, (void *)action->handler, (int)irqs_disabled(), ESS_FLAG_IN);
|
||||
res = action->handler(irq, action->dev_id);
|
||||
exynos_ss_irq(irq, (void *)action->handler, (int)irqs_disabled(), ESS_FLAG_OUT);
|
||||
trace_irq_handler_exit(irq, action, res);
|
||||
|
||||
if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n",
|
||||
irq, action->handler))
|
||||
local_irq_disable();
|
||||
|
||||
switch (res) {
|
||||
case IRQ_WAKE_THREAD:
|
||||
/*
|
||||
* Catch drivers which return WAKE_THREAD but
|
||||
* did not set up a thread function
|
||||
*/
|
||||
if (unlikely(!action->thread_fn)) {
|
||||
warn_no_thread(irq, action);
|
||||
break;
|
||||
}
|
||||
|
||||
__irq_wake_thread(desc, action);
|
||||
|
||||
/* Fall through to add to randomness */
|
||||
case IRQ_HANDLED:
|
||||
flags |= action->flags;
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
retval |= res;
|
||||
action = action->next;
|
||||
} while (action);
|
||||
|
||||
add_interrupt_randomness(irq, flags);
|
||||
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, retval);
|
||||
return retval;
|
||||
}
|
||||
|
||||
irqreturn_t handle_irq_event(struct irq_desc *desc)
|
||||
{
|
||||
struct irqaction *action = desc->action;
|
||||
irqreturn_t ret;
|
||||
|
||||
desc->istate &= ~IRQS_PENDING;
|
||||
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
ret = handle_irq_event_percpu(desc, action);
|
||||
|
||||
raw_spin_lock(&desc->lock);
|
||||
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
|
||||
return ret;
|
||||
}
|
212
kernel/irq/internals.h
Normal file
|
@@ -0,0 +1,212 @@
|
|||
/*
|
||||
* IRQ subsystem internal functions and variables:
|
||||
*
|
||||
* Do not ever include this file from anything else than
|
||||
* kernel/irq/. Do not even think about using any information outside
|
||||
* of this file for your non core code.
|
||||
*/
|
||||
#include <linux/irqdesc.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
|
||||
#else
|
||||
# define IRQ_BITMAP_BITS NR_IRQS
|
||||
#endif
|
||||
|
||||
#define istate core_internal_state__do_not_mess_with_it
|
||||
|
||||
extern bool noirqdebug;
|
||||
|
||||
/*
|
||||
* Bits used by threaded handlers:
|
||||
* IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
|
||||
* IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
|
||||
* IRQTF_AFFINITY - irq thread is requested to adjust affinity
|
||||
* IRQTF_FORCED_THREAD - irq action is force threaded
|
||||
*/
|
||||
enum {
|
||||
IRQTF_RUNTHREAD,
|
||||
IRQTF_WARNED,
|
||||
IRQTF_AFFINITY,
|
||||
IRQTF_FORCED_THREAD,
|
||||
};
|
||||
|
||||
/*
|
||||
* Bit masks for desc->core_internal_state__do_not_mess_with_it
|
||||
*
|
||||
* IRQS_AUTODETECT - autodetection in progress
|
||||
* IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt
|
||||
* detection
|
||||
* IRQS_POLL_INPROGRESS - polling in progress
|
||||
* IRQS_ONESHOT - irq is not unmasked in primary handler
|
||||
* IRQS_REPLAY - irq is replayed
|
||||
* IRQS_WAITING - irq is waiting
|
||||
* IRQS_PENDING - irq is pending and replayed later
|
||||
* IRQS_SUSPENDED - irq is suspended
|
||||
*/
|
||||
enum {
|
||||
IRQS_AUTODETECT = 0x00000001,
|
||||
IRQS_SPURIOUS_DISABLED = 0x00000002,
|
||||
IRQS_POLL_INPROGRESS = 0x00000008,
|
||||
IRQS_ONESHOT = 0x00000020,
|
||||
IRQS_REPLAY = 0x00000040,
|
||||
IRQS_WAITING = 0x00000080,
|
||||
IRQS_PENDING = 0x00000200,
|
||||
IRQS_SUSPENDED = 0x00000800,
|
||||
};
|
||||
|
||||
#include "debug.h"
|
||||
#include "settings.h"
|
||||
|
||||
#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
|
||||
|
||||
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
|
||||
unsigned long flags);
|
||||
extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
|
||||
extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
|
||||
|
||||
extern int irq_startup(struct irq_desc *desc, bool resend);
|
||||
extern void irq_shutdown(struct irq_desc *desc);
|
||||
extern void irq_enable(struct irq_desc *desc);
|
||||
extern void irq_disable(struct irq_desc *desc);
|
||||
extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
|
||||
extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
|
||||
extern void mask_irq(struct irq_desc *desc);
|
||||
extern void unmask_irq(struct irq_desc *desc);
|
||||
extern void unmask_threaded_irq(struct irq_desc *desc);
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
static inline void irq_mark_irq(unsigned int irq) { }
|
||||
extern void irq_lock_sparse(void);
|
||||
extern void irq_unlock_sparse(void);
|
||||
#else
|
||||
extern void irq_mark_irq(unsigned int irq);
|
||||
static inline void irq_lock_sparse(void) { }
|
||||
static inline void irq_unlock_sparse(void) { }
|
||||
#endif
|
||||
|
||||
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
|
||||
|
||||
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action);
|
||||
irqreturn_t handle_irq_event(struct irq_desc *desc);
|
||||
|
||||
/* Resending of interrupts: */
|
||||
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
|
||||
bool irq_wait_for_poll(struct irq_desc *desc);
|
||||
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
|
||||
extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
|
||||
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
|
||||
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
|
||||
#else
|
||||
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
|
||||
static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
|
||||
static inline void register_handler_proc(unsigned int irq,
|
||||
struct irqaction *action) { }
|
||||
static inline void unregister_handler_proc(unsigned int irq,
|
||||
struct irqaction *action) { }
|
||||
#endif
|
||||
|
||||
extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
|
||||
|
||||
extern void irq_set_thread_affinity(struct irq_desc *desc);
|
||||
|
||||
extern int irq_do_set_affinity(struct irq_data *data,
|
||||
const struct cpumask *dest, bool force);
|
||||
|
||||
/* Inline functions for support of irq chips on slow busses */
|
||||
static inline void chip_bus_lock(struct irq_desc *desc)
|
||||
{
|
||||
if (unlikely(desc->irq_data.chip->irq_bus_lock))
|
||||
desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
|
||||
}
|
||||
|
||||
static inline void chip_bus_sync_unlock(struct irq_desc *desc)
|
||||
{
|
||||
if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
|
||||
desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
|
||||
}
|
||||
|
||||
#define _IRQ_DESC_CHECK (1 << 0)
|
||||
#define _IRQ_DESC_PERCPU (1 << 1)
|
||||
|
||||
#define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK)
|
||||
#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
|
||||
|
||||
struct irq_desc *
|
||||
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
|
||||
unsigned int check);
|
||||
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
|
||||
|
||||
static inline struct irq_desc *
|
||||
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
|
||||
{
|
||||
return __irq_get_desc_lock(irq, flags, true, check);
|
||||
}
|
||||
|
||||
static inline void
|
||||
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
|
||||
{
|
||||
__irq_put_desc_unlock(desc, flags, true);
|
||||
}
|
||||
|
||||
static inline struct irq_desc *
|
||||
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
|
||||
{
|
||||
return __irq_get_desc_lock(irq, flags, false, check);
|
||||
}
|
||||
|
||||
static inline void
|
||||
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
|
||||
{
|
||||
__irq_put_desc_unlock(desc, flags, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* Manipulation functions for irq_data.state
|
||||
*/
|
||||
static inline void irqd_set_move_pending(struct irq_data *d)
|
||||
{
|
||||
d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
|
||||
}
|
||||
|
||||
static inline void irqd_clr_move_pending(struct irq_data *d)
|
||||
{
|
||||
d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
|
||||
}
|
||||
|
||||
static inline void irqd_clear(struct irq_data *d, unsigned int mask)
|
||||
{
|
||||
d->state_use_accessors &= ~mask;
|
||||
}
|
||||
|
||||
static inline void irqd_set(struct irq_data *d, unsigned int mask)
|
||||
{
|
||||
d->state_use_accessors |= mask;
|
||||
}
|
||||
|
||||
static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
|
||||
{
|
||||
return d->state_use_accessors & mask;
|
||||
}
|
||||
|
||||
static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
__this_cpu_inc(*desc->kstat_irqs);
|
||||
__this_cpu_inc(kstat.irqs_sum);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
bool irq_pm_check_wakeup(struct irq_desc *desc);
|
||||
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
|
||||
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
|
||||
#else
|
||||
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
|
||||
static inline void
|
||||
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
|
||||
static inline void
|
||||
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
|
||||
#endif
|
659
kernel/irq/irqdesc.c
Normal file
|
@@ -0,0 +1,659 @@
|
|||
/*
|
||||
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
|
||||
* Copyright (C) 2005-2006, Thomas Gleixner, Russell King
|
||||
*
|
||||
* This file contains the interrupt descriptor management code
|
||||
*
|
||||
* Detailed information is available in Documentation/DocBook/genericirq
|
||||
*
|
||||
*/
|
||||
#include <linux/irq.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/radix-tree.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/exynos-ss.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
/*
|
||||
* lockdep: we want to handle all irq_desc locks as a single lock-class:
|
||||
*/
|
||||
static struct lock_class_key irq_desc_lock_class;
|
||||
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
extern struct cpumask hmp_slow_cpu_mask;
|
||||
#endif
|
||||
#if defined(CONFIG_SMP)
|
||||
static void __init init_irq_default_affinity(void)
|
||||
{
|
||||
alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
cpumask_copy(irq_default_affinity, &hmp_slow_cpu_mask);
|
||||
#else
|
||||
cpumask_setall(irq_default_affinity);
|
||||
#endif
|
||||
}
|
||||
#else
|
||||
static void __init init_irq_default_affinity(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
|
||||
{
|
||||
if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
|
||||
return -ENOMEM;
|
||||
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
|
||||
free_cpumask_var(desc->irq_data.affinity);
|
||||
return -ENOMEM;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void desc_smp_init(struct irq_desc *desc, int node)
|
||||
{
|
||||
desc->irq_data.node = node;
|
||||
cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
cpumask_clear(desc->pending_mask);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int desc_node(struct irq_desc *desc)
|
||||
{
|
||||
return desc->irq_data.node;
|
||||
}
|
||||
|
||||
#else
|
||||
static inline int
|
||||
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
|
||||
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
|
||||
static inline int desc_node(struct irq_desc *desc) { return 0; }
|
||||
#endif
|
||||
|
||||
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
|
||||
struct module *owner)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
desc->irq_data.irq = irq;
|
||||
desc->irq_data.chip = &no_irq_chip;
|
||||
desc->irq_data.chip_data = NULL;
|
||||
desc->irq_data.handler_data = NULL;
|
||||
desc->irq_data.msi_desc = NULL;
|
||||
irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
|
||||
irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
|
||||
desc->handle_irq = handle_bad_irq;
|
||||
desc->depth = 1;
|
||||
desc->irq_count = 0;
|
||||
desc->irqs_unhandled = 0;
|
||||
desc->name = NULL;
|
||||
desc->owner = owner;
|
||||
for_each_possible_cpu(cpu)
|
||||
*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
|
||||
desc_smp_init(desc, node);
|
||||
}
|
||||
|
||||
int nr_irqs = NR_IRQS;
|
||||
EXPORT_SYMBOL_GPL(nr_irqs);
|
||||
|
||||
static DEFINE_MUTEX(sparse_irq_lock);
|
||||
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
|
||||
static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
|
||||
|
||||
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
radix_tree_insert(&irq_desc_tree, irq, desc);
|
||||
}
|
||||
|
||||
struct irq_desc *irq_to_desc(unsigned int irq)
|
||||
{
|
||||
return radix_tree_lookup(&irq_desc_tree, irq);
|
||||
}
|
||||
EXPORT_SYMBOL(irq_to_desc);
|
||||
|
||||
static void delete_irq_desc(unsigned int irq)
|
||||
{
|
||||
radix_tree_delete(&irq_desc_tree, irq);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void free_masks(struct irq_desc *desc)
|
||||
{
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
free_cpumask_var(desc->pending_mask);
|
||||
#endif
|
||||
free_cpumask_var(desc->irq_data.affinity);
|
||||
}
|
||||
#else
|
||||
static inline void free_masks(struct irq_desc *desc) { }
|
||||
#endif
|
||||
|
||||
void irq_lock_sparse(void)
|
||||
{
|
||||
mutex_lock(&sparse_irq_lock);
|
||||
}
|
||||
|
||||
void irq_unlock_sparse(void)
|
||||
{
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
}
|
||||
|
||||
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
gfp_t gfp = GFP_KERNEL;
|
||||
|
||||
desc = kzalloc_node(sizeof(*desc), gfp, node);
|
||||
if (!desc)
|
||||
return NULL;
|
||||
/* allocate based on nr_cpu_ids */
|
||||
desc->kstat_irqs = alloc_percpu(unsigned int);
|
||||
if (!desc->kstat_irqs)
|
||||
goto err_desc;
|
||||
|
||||
if (alloc_masks(desc, gfp, node))
|
||||
goto err_kstat;
|
||||
|
||||
raw_spin_lock_init(&desc->lock);
|
||||
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
|
||||
|
||||
desc_set_defaults(irq, desc, node, owner);
|
||||
|
||||
return desc;
|
||||
|
||||
err_kstat:
|
||||
free_percpu(desc->kstat_irqs);
|
||||
err_desc:
|
||||
kfree(desc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void free_desc(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
unregister_irq_proc(irq, desc);
|
||||
|
||||
/*
|
||||
* sparse_irq_lock protects also show_interrupts() and
|
||||
* kstat_irq_usr(). Once we deleted the descriptor from the
|
||||
* sparse tree we can free it. Access in proc will fail to
|
||||
* lookup the descriptor.
|
||||
*/
|
||||
mutex_lock(&sparse_irq_lock);
|
||||
delete_irq_desc(irq);
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
|
||||
free_masks(desc);
|
||||
free_percpu(desc->kstat_irqs);
|
||||
kfree(desc);
|
||||
}
|
||||
|
||||
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
|
||||
struct module *owner)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
desc = alloc_desc(start + i, node, owner);
|
||||
if (!desc)
|
||||
goto err;
|
||||
mutex_lock(&sparse_irq_lock);
|
||||
irq_insert_desc(start + i, desc);
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
}
|
||||
return start;
|
||||
|
||||
err:
|
||||
for (i--; i >= 0; i--)
|
||||
free_desc(start + i);
|
||||
|
||||
mutex_lock(&sparse_irq_lock);
|
||||
bitmap_clear(allocated_irqs, start, cnt);
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int irq_expand_nr_irqs(unsigned int nr)
|
||||
{
|
||||
if (nr > IRQ_BITMAP_BITS)
|
||||
return -ENOMEM;
|
||||
nr_irqs = nr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __init early_irq_init(void)
|
||||
{
|
||||
int i, initcnt, node = first_online_node;
|
||||
struct irq_desc *desc;
|
||||
|
||||
init_irq_default_affinity();
|
||||
|
||||
/* Let arch update nr_irqs and return the nr of preallocated irqs */
|
||||
initcnt = arch_probe_nr_irqs();
|
||||
printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
|
||||
|
||||
if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
|
||||
nr_irqs = IRQ_BITMAP_BITS;
|
||||
|
||||
if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
|
||||
initcnt = IRQ_BITMAP_BITS;
|
||||
|
||||
if (initcnt > nr_irqs)
|
||||
nr_irqs = initcnt;
|
||||
|
||||
for (i = 0; i < initcnt; i++) {
|
||||
desc = alloc_desc(i, node, NULL);
|
||||
set_bit(i, allocated_irqs);
|
||||
irq_insert_desc(i, desc);
|
||||
}
|
||||
return arch_early_irq_init();
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SPARSE_IRQ */
|
||||
|
||||
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
|
||||
[0 ... NR_IRQS-1] = {
|
||||
.handle_irq = handle_bad_irq,
|
||||
.depth = 1,
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
|
||||
}
|
||||
};
|
||||
|
||||
int __init early_irq_init(void)
|
||||
{
|
||||
int count, i, node = first_online_node;
|
||||
struct irq_desc *desc;
|
||||
|
||||
init_irq_default_affinity();
|
||||
|
||||
printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
|
||||
|
||||
desc = irq_desc;
|
||||
count = ARRAY_SIZE(irq_desc);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
desc[i].kstat_irqs = alloc_percpu(unsigned int);
|
||||
alloc_masks(&desc[i], GFP_KERNEL, node);
|
||||
raw_spin_lock_init(&desc[i].lock);
|
||||
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
|
||||
desc_set_defaults(i, &desc[i], node, NULL);
|
||||
}
|
||||
return arch_early_irq_init();
|
||||
}
|
||||
|
||||
struct irq_desc *irq_to_desc(unsigned int irq)
|
||||
{
|
||||
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(irq_to_desc);
|
||||
|
||||
static void free_desc(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
unsigned long flags;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
desc_set_defaults(irq, desc, desc_node(desc), NULL);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
|
||||
struct module *owner)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
struct irq_desc *desc = irq_to_desc(start + i);
|
||||
|
||||
desc->owner = owner;
|
||||
}
|
||||
return start;
|
||||
}
|
||||
|
||||
static int irq_expand_nr_irqs(unsigned int nr)
|
||||
{
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
void irq_mark_irq(unsigned int irq)
|
||||
{
|
||||
mutex_lock(&sparse_irq_lock);
|
||||
bitmap_set(allocated_irqs, irq, 1);
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_GENERIC_IRQ_LEGACY
|
||||
void irq_init_desc(unsigned int irq)
|
||||
{
|
||||
free_desc(irq);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* !CONFIG_SPARSE_IRQ */
|
||||
|
||||
/**
|
||||
* generic_handle_irq - Invoke the handler for a particular irq
|
||||
* @irq: The irq number to handle
|
||||
*
|
||||
*/
|
||||
int generic_handle_irq(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
generic_handle_irq_desc(irq, desc);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(generic_handle_irq);
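/*
 * Illustrative sketch (editor's example, not from this file): a chained
 * handler for a secondary interrupt controller demultiplexes its pending
 * register into generic_handle_irq() calls. foo_base, foo_domain and the
 * FOO_PENDING register are hypothetical.
 */
static void __iomem *foo_base;			/* hypothetical controller regs */
static struct irq_domain *foo_demux_domain;	/* hypothetical domain */
#define FOO_PENDING	0x00			/* hypothetical pending register */

static void foo_irq_demux(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 pending = readl_relaxed(foo_base + FOO_PENDING);

	chained_irq_enter(chip, desc);
	while (pending) {
		unsigned int hwirq = __ffs(pending);

		generic_handle_irq(irq_find_mapping(foo_demux_domain, hwirq));
		pending &= ~BIT(hwirq);
	}
	chained_irq_exit(chip, desc);
}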
|
||||
|
||||
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
|
||||
/**
|
||||
* __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
|
||||
* @domain: The domain where to perform the lookup
|
||||
* @hwirq: The HW irq number to convert to a logical one
|
||||
* @lookup: Whether to perform the domain lookup or not
|
||||
* @regs: Register file coming from the low-level handling code
|
||||
*
|
||||
* Returns: 0 on success, or -EINVAL if conversion has failed
|
||||
*/
|
||||
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
|
||||
bool lookup, struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
unsigned long long start_time;
|
||||
unsigned int irq = hwirq;
|
||||
int ret = 0;
|
||||
|
||||
exynos_ss_irq_exit_var(start_time);
|
||||
irq_enter();
|
||||
|
||||
#ifdef CONFIG_IRQ_DOMAIN
|
||||
if (lookup)
|
||||
irq = irq_find_mapping(domain, hwirq);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Some hardware gives randomly wrong interrupts. Rather
|
||||
* than crashing, do something sensible.
|
||||
*/
|
||||
if (unlikely(!irq || irq >= nr_irqs)) {
|
||||
ack_bad_irq(irq);
|
||||
ret = -EINVAL;
|
||||
} else {
|
||||
generic_handle_irq(irq);
|
||||
}
|
||||
|
||||
irq_exit();
|
||||
exynos_ss_irq_exit(irq, start_time);
|
||||
set_irq_regs(old_regs);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Dynamic interrupt handling */
|
||||
|
||||
/**
|
||||
* irq_free_descs - free irq descriptors
|
||||
* @from: Start of descriptor range
|
||||
* @cnt: Number of consecutive irqs to free
|
||||
*/
|
||||
void irq_free_descs(unsigned int from, unsigned int cnt)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (from >= nr_irqs || (from + cnt) > nr_irqs)
|
||||
return;
|
||||
|
||||
for (i = 0; i < cnt; i++)
|
||||
free_desc(from + i);
|
||||
|
||||
mutex_lock(&sparse_irq_lock);
|
||||
bitmap_clear(allocated_irqs, from, cnt);
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_free_descs);
|
||||
|
||||
/**
|
||||
* irq_alloc_descs - allocate and initialize a range of irq descriptors
|
||||
* @irq: Allocate for specific irq number if irq >= 0
|
||||
* @from: Start the search from this irq number
|
||||
* @cnt: Number of consecutive irqs to allocate.
|
||||
* @node: Preferred node on which the irq descriptor should be allocated
|
||||
* @owner: Owning module (can be NULL)
|
||||
*
|
||||
* Returns the first irq number or error code
|
||||
*/
|
||||
int __ref
|
||||
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
|
||||
struct module *owner)
|
||||
{
|
||||
int start, ret;
|
||||
|
||||
if (!cnt)
|
||||
return -EINVAL;
|
||||
|
||||
if (irq >= 0) {
|
||||
if (from > irq)
|
||||
return -EINVAL;
|
||||
from = irq;
|
||||
} else {
|
||||
/*
|
||||
* For interrupts which are freely allocated the
|
||||
* architecture can force a lower bound to the @from
|
||||
* argument. x86 uses this to exclude the GSI space.
|
||||
*/
|
||||
from = arch_dynirq_lower_bound(from);
|
||||
}
|
||||
|
||||
mutex_lock(&sparse_irq_lock);
|
||||
|
||||
start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
|
||||
from, cnt, 0);
|
||||
ret = -EEXIST;
|
||||
if (irq >= 0 && start != irq)
|
||||
goto err;
|
||||
|
||||
if (start + cnt > nr_irqs) {
|
||||
ret = irq_expand_nr_irqs(start + cnt);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
bitmap_set(allocated_irqs, start, cnt);
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
return alloc_descs(start, cnt, node, owner);
|
||||
|
||||
err:
|
||||
mutex_unlock(&sparse_irq_lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
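/*
 * Illustrative sketch (editor's example, not from this file): callers
 * normally use the irq_alloc_descs()/irq_alloc_desc*() wrapper macros rather
 * than __irq_alloc_descs() directly. "foo" is hypothetical.
 */
static int foo_grab_irq_range(void)
{
	/* Allocate 4 dynamically numbered descriptors on the local node */
	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());

	if (base < 0)
		return base;		/* -EEXIST, -ENOMEM, ... */

	/* ... set up chips and handlers for base..base+3 here ... */

	irq_free_descs(base, 4);	/* release the range again when done */
	return 0;
}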
|
||||
|
||||
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
|
||||
/**
|
||||
* irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
|
||||
* @cnt: number of interrupts to allocate
|
||||
* @node: node on which to allocate
|
||||
*
|
||||
* Returns an interrupt number > 0, or 0 if the allocation fails.
|
||||
*/
|
||||
unsigned int irq_alloc_hwirqs(int cnt, int node)
|
||||
{
|
||||
int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);
|
||||
|
||||
if (irq < 0)
|
||||
return 0;
|
||||
|
||||
for (i = irq; cnt > 0; i++, cnt--) {
|
||||
if (arch_setup_hwirq(i, node))
|
||||
goto err;
|
||||
irq_clear_status_flags(i, _IRQ_NOREQUEST);
|
||||
}
|
||||
return irq;
|
||||
|
||||
err:
|
||||
for (i--; i >= irq; i--) {
|
||||
irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
|
||||
arch_teardown_hwirq(i);
|
||||
}
|
||||
irq_free_descs(irq, cnt);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
|
||||
|
||||
/**
|
||||
* irq_free_hwirqs - Free irq descriptor and cleanup the hardware
|
||||
* @from: Free from irq number
|
||||
* @cnt: number of interrupts to free
|
||||
*
|
||||
*/
|
||||
void irq_free_hwirqs(unsigned int from, int cnt)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = from, j = cnt; j > 0; i++, j--) {
|
||||
irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
|
||||
arch_teardown_hwirq(i);
|
||||
}
|
||||
irq_free_descs(from, cnt);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* irq_get_next_irq - get next allocated irq number
|
||||
* @offset: where to start the search
|
||||
*
|
||||
* Returns next irq number after offset or nr_irqs if none is found.
|
||||
*/
|
||||
unsigned int irq_get_next_irq(unsigned int offset)
|
||||
{
|
||||
return find_next_bit(allocated_irqs, nr_irqs, offset);
|
||||
}
|
||||
|
||||
struct irq_desc *
|
||||
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
|
||||
unsigned int check)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (desc) {
|
||||
if (check & _IRQ_DESC_CHECK) {
|
||||
if ((check & _IRQ_DESC_PERCPU) &&
|
||||
!irq_settings_is_per_cpu_devid(desc))
|
||||
return NULL;
|
||||
|
||||
if (!(check & _IRQ_DESC_PERCPU) &&
|
||||
irq_settings_is_per_cpu_devid(desc))
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (bus)
|
||||
chip_bus_lock(desc);
|
||||
raw_spin_lock_irqsave(&desc->lock, *flags);
|
||||
}
|
||||
return desc;
|
||||
}
|
||||
|
||||
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
|
||||
{
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
if (bus)
|
||||
chip_bus_sync_unlock(desc);
|
||||
}
|
||||
|
||||
int irq_set_percpu_devid(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (!desc)
|
||||
return -EINVAL;
|
||||
|
||||
if (desc->percpu_enabled)
|
||||
return -EINVAL;
|
||||
|
||||
desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
|
||||
|
||||
if (!desc->percpu_enabled)
|
||||
return -ENOMEM;
|
||||
|
||||
irq_set_percpu_devid_flags(irq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kstat_incr_irq_this_cpu(unsigned int irq)
|
||||
{
|
||||
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
|
||||
}
|
||||
|
||||
/**
|
||||
* kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
|
||||
* @irq: The interrupt number
|
||||
* @cpu: The cpu number
|
||||
*
|
||||
* Returns the sum of interrupt counts on @cpu since boot for
|
||||
* @irq. The caller must ensure that the interrupt is not removed
|
||||
* concurrently.
|
||||
*/
|
||||
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
return desc && desc->kstat_irqs ?
|
||||
*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kstat_irqs - Get the statistics for an interrupt
|
||||
* @irq: The interrupt number
|
||||
*
|
||||
* Returns the sum of interrupt counts on all cpus since boot for
|
||||
* @irq. The caller must ensure that the interrupt is not removed
|
||||
* concurrently.
|
||||
*/
|
||||
unsigned int kstat_irqs(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
int cpu;
|
||||
int sum = 0;
|
||||
|
||||
if (!desc || !desc->kstat_irqs)
|
||||
return 0;
|
||||
for_each_possible_cpu(cpu)
|
||||
sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
|
||||
return sum;
|
||||
}
|
||||
|
||||
/**
|
||||
* kstat_irqs_usr - Get the statistics for an interrupt
|
||||
* @irq: The interrupt number
|
||||
*
|
||||
* Returns the sum of interrupt counts on all cpus since boot for
|
||||
* @irq. Contrary to kstat_irqs() this can be called from any
|
||||
* preemptible context. It's protected against concurrent removal of
|
||||
* an interrupt descriptor when sparse irqs are enabled.
|
||||
*/
|
||||
unsigned int kstat_irqs_usr(unsigned int irq)
|
||||
{
|
||||
int sum;
|
||||
|
||||
irq_lock_sparse();
|
||||
sum = kstat_irqs(irq);
|
||||
irq_unlock_sparse();
|
||||
return sum;
|
||||
}
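/*
 * Illustrative sketch (editor's example, not from this file): a debugfs or
 * proc style dump that sums per-cpu counts via the helpers above. The "foo"
 * naming is hypothetical and <linux/seq_file.h> is assumed.
 */
static void foo_dump_irq_counts(struct seq_file *m, unsigned int irq)
{
	int cpu;

	seq_printf(m, "irq %u: total %u\n", irq, kstat_irqs_usr(irq));
	for_each_online_cpu(cpu)
		seq_printf(m, "  cpu%d: %u\n", cpu, kstat_irqs_cpu(irq, cpu));
}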
|
711
kernel/irq/irqdomain.c
Normal file
|
@@ -0,0 +1,711 @@
|
|||
#define pr_fmt(fmt) "irq: " fmt
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdesc.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/topology.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/fs.h>
|
||||
|
||||
static LIST_HEAD(irq_domain_list);
|
||||
static DEFINE_MUTEX(irq_domain_mutex);
|
||||
|
||||
static DEFINE_MUTEX(revmap_trees_mutex);
|
||||
static struct irq_domain *irq_default_domain;
|
||||
|
||||
/**
|
||||
* __irq_domain_add() - Allocate a new irq_domain data structure
|
||||
* @of_node: optional device-tree node of the interrupt controller
|
||||
* @size: Size of linear map; 0 for radix mapping only
|
||||
* @hwirq_max: Maximum number of interrupts supported by controller
|
||||
* @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
|
||||
* direct mapping
|
||||
* @ops: map/unmap domain callbacks
|
||||
* @host_data: Controller private data pointer
|
||||
*
|
||||
* Allocates and initializes an irq_domain structure.
|
||||
* Returns pointer to IRQ domain, or NULL on failure.
|
||||
*/
|
||||
struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
|
||||
irq_hw_number_t hwirq_max, int direct_max,
|
||||
const struct irq_domain_ops *ops,
|
||||
void *host_data)
|
||||
{
|
||||
struct irq_domain *domain;
|
||||
|
||||
domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
|
||||
GFP_KERNEL, of_node_to_nid(of_node));
|
||||
if (WARN_ON(!domain))
|
||||
return NULL;
|
||||
|
||||
/* Fill structure */
|
||||
INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
|
||||
domain->ops = ops;
|
||||
domain->host_data = host_data;
|
||||
domain->of_node = of_node_get(of_node);
|
||||
domain->hwirq_max = hwirq_max;
|
||||
domain->revmap_size = size;
|
||||
domain->revmap_direct_max_irq = direct_max;
|
||||
|
||||
mutex_lock(&irq_domain_mutex);
|
||||
list_add(&domain->link, &irq_domain_list);
|
||||
mutex_unlock(&irq_domain_mutex);
|
||||
|
||||
pr_debug("Added domain %s\n", domain->name);
|
||||
return domain;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__irq_domain_add);
|
||||
|
||||
/**
|
||||
* irq_domain_remove() - Remove an irq domain.
|
||||
* @domain: domain to remove
|
||||
*
|
||||
* This routine is used to remove an irq domain. The caller must ensure
|
||||
* that all mappings within the domain have been disposed of prior to
|
||||
* use, depending on the revmap type.
|
||||
*/
|
||||
void irq_domain_remove(struct irq_domain *domain)
|
||||
{
|
||||
mutex_lock(&irq_domain_mutex);
|
||||
|
||||
/*
|
||||
* radix_tree_delete() takes care of destroying the root
|
||||
* node when all entries are removed. Shout if there are
|
||||
* any mappings left.
|
||||
*/
|
||||
WARN_ON(domain->revmap_tree.height);
|
||||
|
||||
list_del(&domain->link);
|
||||
|
||||
/*
|
||||
* If the going away domain is the default one, reset it.
|
||||
*/
|
||||
if (unlikely(irq_default_domain == domain))
|
||||
irq_set_default_host(NULL);
|
||||
|
||||
mutex_unlock(&irq_domain_mutex);
|
||||
|
||||
pr_debug("Removed domain %s\n", domain->name);
|
||||
|
||||
of_node_put(domain->of_node);
|
||||
kfree(domain);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_remove);
|
||||
|
||||
/**
|
||||
* irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
|
||||
* @of_node: pointer to interrupt controller's device tree node.
|
||||
* @size: total number of irqs in mapping
|
||||
* @first_irq: first number of irq block assigned to the domain,
|
||||
* pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
|
||||
* pre-map all of the irqs in the domain to virqs starting at first_irq.
|
||||
* @ops: map/unmap domain callbacks
|
||||
* @host_data: Controller private data pointer
|
||||
*
|
||||
* Allocates an irq_domain, and optionally if first_irq is positive then also
|
||||
* allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
|
||||
*
|
||||
* This is intended to implement the expected behaviour for most
|
||||
* interrupt controllers. If device tree is used, then first_irq will be 0 and
|
||||
* irqs get mapped dynamically on the fly. However, if the controller requires
|
||||
* static virq assignments (non-DT boot) then it will set that up correctly.
|
||||
*/
|
||||
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
|
||||
unsigned int size,
|
||||
unsigned int first_irq,
|
||||
const struct irq_domain_ops *ops,
|
||||
void *host_data)
|
||||
{
|
||||
struct irq_domain *domain;
|
||||
|
||||
domain = __irq_domain_add(of_node, size, size, 0, ops, host_data);
|
||||
if (!domain)
|
||||
return NULL;
|
||||
|
||||
if (first_irq > 0) {
|
||||
if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
|
||||
/* attempt to allocate irq_descs */
|
||||
int rc = irq_alloc_descs(first_irq, first_irq, size,
|
||||
of_node_to_nid(of_node));
|
||||
if (rc < 0)
|
||||
pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
|
||||
first_irq);
|
||||
}
|
||||
irq_domain_associate_many(domain, first_irq, 0, size);
|
||||
}
|
||||
|
||||
return domain;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
|
||||
|
||||
/**
|
||||
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
|
||||
* @of_node: pointer to interrupt controller's device tree node.
|
||||
* @size: total number of irqs in legacy mapping
|
||||
* @first_irq: first number of irq block assigned to the domain
|
||||
* @first_hwirq: first hwirq number to use for the translation. Should normally
|
||||
* be '0', but a positive integer can be used if the effective
|
||||
* hwirqs numbering does not begin at zero.
|
||||
* @ops: map/unmap domain callbacks
|
||||
* @host_data: Controller private data pointer
|
||||
*
|
||||
* Note: the map() callback will be called before this function returns
|
||||
* for all legacy interrupts except 0 (which is always the invalid irq for
|
||||
* a legacy controller).
|
||||
*/
|
||||
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
|
||||
unsigned int size,
|
||||
unsigned int first_irq,
|
||||
irq_hw_number_t first_hwirq,
|
||||
const struct irq_domain_ops *ops,
|
||||
void *host_data)
|
||||
{
|
||||
struct irq_domain *domain;
|
||||
|
||||
domain = __irq_domain_add(of_node, first_hwirq + size,
|
||||
first_hwirq + size, 0, ops, host_data);
|
||||
if (!domain)
|
||||
return NULL;
|
||||
|
||||
irq_domain_associate_many(domain, first_irq, first_hwirq, size);
|
||||
|
||||
return domain;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
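/*
 * Illustrative sketch (editor's example, not from this file): a board file
 * with fixed virq numbering registers a legacy domain so hwirqs 0..31 land
 * on a known linux irq base. FOO_IRQ_BASE and "foo" are hypothetical;
 * irq_domain_simple_ops is the stock no-op ops structure.
 */
#define FOO_IRQ_BASE	64	/* hypothetical fixed irq base */

static struct irq_domain * __init foo_register_legacy_domain(struct device_node *np)
{
	return irq_domain_add_legacy(np, 32, FOO_IRQ_BASE, 0,
				     &irq_domain_simple_ops, NULL);
}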
|
||||
|
||||
/**
|
||||
* irq_find_host() - Locates a domain for a given device node
|
||||
* @node: device-tree node of the interrupt controller
|
||||
*/
|
||||
struct irq_domain *irq_find_host(struct device_node *node)
|
||||
{
|
||||
struct irq_domain *h, *found = NULL;
|
||||
int rc;
|
||||
|
||||
/* We might want to match the legacy controller last since
|
||||
* it might potentially be set to match all interrupts in
|
||||
* the absence of a device node. This isn't a problem so far
|
||||
* yet though...
|
||||
*/
|
||||
mutex_lock(&irq_domain_mutex);
|
||||
list_for_each_entry(h, &irq_domain_list, link) {
|
||||
if (h->ops->match)
|
||||
rc = h->ops->match(h, node);
|
||||
else
|
||||
rc = (h->of_node != NULL) && (h->of_node == node);
|
||||
|
||||
if (rc) {
|
||||
found = h;
|
||||
break;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&irq_domain_mutex);
|
||||
return found;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_find_host);
|
||||
|
||||
/**
|
||||
* irq_set_default_host() - Set a "default" irq domain
|
||||
* @domain: default domain pointer
|
||||
*
|
||||
* For convenience, it's possible to set a "default" domain that will be used
|
||||
* whenever NULL is passed to irq_create_mapping(). It makes life easier for
|
||||
* platforms that want to manipulate a few hard coded interrupt numbers that
|
||||
* aren't properly represented in the device-tree.
|
||||
*/
|
||||
void irq_set_default_host(struct irq_domain *domain)
|
||||
{
|
||||
pr_debug("Default domain set to @0x%p\n", domain);
|
||||
|
||||
irq_default_domain = domain;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_set_default_host);
|
||||
|
||||
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
|
||||
{
|
||||
struct irq_data *irq_data = irq_get_irq_data(irq);
|
||||
irq_hw_number_t hwirq;
|
||||
|
||||
if (WARN(!irq_data || irq_data->domain != domain,
|
||||
"virq%i doesn't exist; cannot disassociate\n", irq))
|
||||
return;
|
||||
|
||||
hwirq = irq_data->hwirq;
|
||||
irq_set_status_flags(irq, IRQ_NOREQUEST);
|
||||
|
||||
/* remove chip and handler */
|
||||
irq_set_chip_and_handler(irq, NULL, NULL);
|
||||
|
||||
/* Make sure it's completed */
|
||||
synchronize_irq(irq);
|
||||
|
||||
/* Tell the PIC about it */
|
||||
if (domain->ops->unmap)
|
||||
domain->ops->unmap(domain, irq);
|
||||
smp_mb();
|
||||
|
||||
irq_data->domain = NULL;
|
||||
irq_data->hwirq = 0;
|
||||
|
||||
/* Clear reverse map for this hwirq */
|
||||
if (hwirq < domain->revmap_size) {
|
||||
domain->linear_revmap[hwirq] = 0;
|
||||
} else {
|
||||
mutex_lock(&revmap_trees_mutex);
|
||||
radix_tree_delete(&domain->revmap_tree, hwirq);
|
||||
mutex_unlock(&revmap_trees_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
struct irq_data *irq_data = irq_get_irq_data(virq);
|
||||
int ret;
|
||||
|
||||
if (WARN(hwirq >= domain->hwirq_max,
|
||||
"error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
|
||||
return -EINVAL;
|
||||
if (WARN(!irq_data, "error: virq%i is not allocated", virq))
|
||||
return -EINVAL;
|
||||
if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&irq_domain_mutex);
|
||||
irq_data->hwirq = hwirq;
|
||||
irq_data->domain = domain;
|
||||
if (domain->ops->map) {
|
||||
ret = domain->ops->map(domain, virq, hwirq);
|
||||
if (ret != 0) {
|
||||
/*
|
||||
* If map() returns -EPERM, this interrupt is protected
|
||||
* by the firmware or some other service and shall not
|
||||
* be mapped. Don't bother telling the user about it.
|
||||
*/
|
||||
if (ret != -EPERM) {
|
||||
pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
|
||||
domain->name, hwirq, virq, ret);
|
||||
}
|
||||
irq_data->domain = NULL;
|
||||
irq_data->hwirq = 0;
|
||||
mutex_unlock(&irq_domain_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* If not already assigned, give the domain the chip's name */
|
||||
if (!domain->name && irq_data->chip)
|
||||
domain->name = irq_data->chip->name;
|
||||
}
|
||||
|
||||
if (hwirq < domain->revmap_size) {
|
||||
domain->linear_revmap[hwirq] = virq;
|
||||
} else {
|
||||
mutex_lock(&revmap_trees_mutex);
|
||||
radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
|
||||
mutex_unlock(&revmap_trees_mutex);
|
||||
}
|
||||
mutex_unlock(&irq_domain_mutex);
|
||||
|
||||
irq_clear_status_flags(virq, IRQ_NOREQUEST);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_associate);
|
||||
|
||||
void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
|
||||
irq_hw_number_t hwirq_base, int count)
|
||||
{
|
||||
int i;
|
||||
|
||||
pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
|
||||
of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
irq_domain_associate(domain, irq_base + i, hwirq_base + i);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_associate_many);
|
||||
|
||||
/**
|
||||
* irq_create_direct_mapping() - Allocate an irq for direct mapping
|
||||
* @domain: domain to allocate the irq for or NULL for default domain
|
||||
*
|
||||
* This routine is used for irq controllers which can choose the hardware
|
||||
* interrupt numbers they generate. In such a case it's simplest to use
|
||||
* the linux irq as the hardware interrupt number. It still uses the linear
|
||||
* or radix tree to store the mapping, but the irq controller can optimize
|
||||
* the revmap path by using the hwirq directly.
|
||||
*/
|
||||
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
|
||||
{
|
||||
unsigned int virq;
|
||||
|
||||
if (domain == NULL)
|
||||
domain = irq_default_domain;
|
||||
|
||||
virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
|
||||
if (!virq) {
|
||||
pr_debug("create_direct virq allocation failed\n");
|
||||
return 0;
|
||||
}
|
||||
if (virq >= domain->revmap_direct_max_irq) {
|
||||
pr_err("ERROR: no free irqs available below %i maximum\n",
|
||||
domain->revmap_direct_max_irq);
|
||||
irq_free_desc(virq);
|
||||
return 0;
|
||||
}
|
||||
pr_debug("create_direct obtained virq %d\n", virq);
|
||||
|
||||
if (irq_domain_associate(domain, virq, virq)) {
|
||||
irq_free_desc(virq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return virq;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
|
||||
|
||||
/**
|
||||
* irq_create_mapping() - Map a hardware interrupt into linux irq space
|
||||
* @domain: domain owning this hardware interrupt or NULL for default domain
|
||||
* @hwirq: hardware irq number in that domain space
|
||||
*
|
||||
* Only one mapping per hardware interrupt is permitted. Returns a linux
|
||||
* irq number.
|
||||
* If the sense/trigger is to be specified, set_irq_type() should be called
|
||||
* on the number returned from that call.
|
||||
*/
|
||||
unsigned int irq_create_mapping(struct irq_domain *domain,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
unsigned int hint;
|
||||
int virq;
|
||||
|
||||
pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
|
||||
|
||||
/* Look for default domain if necessary */
|
||||
if (domain == NULL)
|
||||
domain = irq_default_domain;
|
||||
if (domain == NULL) {
|
||||
WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
|
||||
return 0;
|
||||
}
|
||||
pr_debug("-> using domain @%p\n", domain);
|
||||
|
||||
/* Check if mapping already exists */
|
||||
virq = irq_find_mapping(domain, hwirq);
|
||||
if (virq) {
|
||||
pr_debug("-> existing mapping on virq %d\n", virq);
|
||||
return virq;
|
||||
}
|
||||
|
||||
/* Allocate a virtual interrupt number */
|
||||
hint = hwirq % nr_irqs;
|
||||
if (hint == 0)
|
||||
hint++;
|
||||
virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
|
||||
if (virq <= 0)
|
||||
virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
|
||||
if (virq <= 0) {
|
||||
pr_debug("-> virq allocation failed\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (irq_domain_associate(domain, virq, hwirq)) {
|
||||
irq_free_desc(virq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
|
||||
hwirq, of_node_full_name(domain->of_node), virq);
|
||||
|
||||
return virq;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_create_mapping);
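/*
 * Illustrative sketch (editor's example, not from this file): platform code
 * that knows a raw hwirq number turns it into a linux irq with
 * irq_create_mapping() before requesting it. foo_domain and foo_isr are
 * hypothetical.
 */
static struct irq_domain *foo_domain;	/* hypothetical domain */

static irqreturn_t foo_isr(int irq, void *dev)
{
	/* hypothetical device handling */
	return IRQ_HANDLED;
}

static int foo_hook_hwirq(unsigned int hwirq, void *dev)
{
	unsigned int virq = irq_create_mapping(foo_domain, hwirq);

	if (!virq)
		return -EINVAL;

	irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
	return request_irq(virq, foo_isr, 0, "foo", dev);
}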
|
||||
|
||||
/**
|
||||
* irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
|
||||
* @domain: domain owning the interrupt range
|
||||
* @irq_base: beginning of linux IRQ range
|
||||
* @hwirq_base: beginning of hardware IRQ range
|
||||
* @count: Number of interrupts to map
|
||||
*
|
||||
* This routine is used for allocating and mapping a range of hardware
|
||||
* irqs to linux irqs where the linux irq numbers are at pre-defined
|
||||
* locations. For use by controllers that already have static mappings
|
||||
* to insert in to the domain.
|
||||
*
|
||||
* Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
|
||||
* domain insertion.
|
||||
*
|
||||
* 0 is returned upon success, while any failure to establish a static
|
||||
* mapping is treated as an error.
|
||||
*/
|
||||
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
|
||||
irq_hw_number_t hwirq_base, int count)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = irq_alloc_descs(irq_base, irq_base, count,
|
||||
of_node_to_nid(domain->of_node));
|
||||
if (unlikely(ret < 0))
|
||||
return ret;
|
||||
|
||||
irq_domain_associate_many(domain, irq_base, hwirq_base, count);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
|
||||
|
||||
unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
|
||||
{
|
||||
struct irq_domain *domain;
|
||||
irq_hw_number_t hwirq;
|
||||
unsigned int type = IRQ_TYPE_NONE;
|
||||
unsigned int virq;
|
||||
|
||||
domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
|
||||
if (!domain) {
|
||||
pr_warn("no irq domain found for %s !\n",
|
||||
of_node_full_name(irq_data->np));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* If domain has no translation, then we assume interrupt line */
|
||||
if (domain->ops->xlate == NULL)
|
||||
hwirq = irq_data->args[0];
|
||||
else {
|
||||
if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
|
||||
irq_data->args_count, &hwirq, &type))
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Create mapping */
|
||||
virq = irq_create_mapping(domain, hwirq);
|
||||
if (!virq)
|
||||
return virq;
|
||||
|
||||
/* Set type if specified and different than the current one */
|
||||
if (type != IRQ_TYPE_NONE &&
|
||||
type != irq_get_trigger_type(virq))
|
||||
irq_set_irq_type(virq, type);
|
||||
return virq;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
|
||||
|
||||
/**
|
||||
* irq_dispose_mapping() - Unmap an interrupt
|
||||
* @virq: linux irq number of the interrupt to unmap
|
||||
*/
|
||||
void irq_dispose_mapping(unsigned int virq)
|
||||
{
|
||||
struct irq_data *irq_data = irq_get_irq_data(virq);
|
||||
struct irq_domain *domain;
|
||||
|
||||
if (!virq || !irq_data)
|
||||
return;
|
||||
|
||||
domain = irq_data->domain;
|
||||
if (WARN_ON(domain == NULL))
|
||||
return;
|
||||
|
||||
irq_domain_disassociate(domain, virq);
|
||||
irq_free_desc(virq);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
|
||||
|
||||
/**
|
||||
* irq_find_mapping() - Find a linux irq from an hw irq number.
|
||||
* @domain: domain owning this hardware interrupt
|
||||
* @hwirq: hardware irq number in that domain space
|
||||
*/
|
||||
unsigned int irq_find_mapping(struct irq_domain *domain,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
struct irq_data *data;
|
||||
|
||||
/* Look for default domain if necessary */
|
||||
if (domain == NULL)
|
||||
domain = irq_default_domain;
|
||||
if (domain == NULL)
|
||||
return 0;
|
||||
|
||||
if (hwirq < domain->revmap_direct_max_irq) {
|
||||
data = irq_get_irq_data(hwirq);
|
||||
if (data && (data->domain == domain) && (data->hwirq == hwirq))
|
||||
return hwirq;
|
||||
}
|
||||
|
||||
/* Check if the hwirq is in the linear revmap. */
|
||||
if (hwirq < domain->revmap_size)
|
||||
return domain->linear_revmap[hwirq];
|
||||
|
||||
rcu_read_lock();
|
||||
data = radix_tree_lookup(&domain->revmap_tree, hwirq);
|
||||
rcu_read_unlock();
|
||||
return data ? data->irq : 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_find_mapping);
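/*
 * Illustrative sketch only: a hypothetical chained flow handler for a
 * secondary interrupt controller. It reads a made-up pending register,
 * translates each set bit back to a linux irq with irq_find_mapping()
 * and hands it to the core. The register, the global domain pointer and
 * the 32-line width are assumptions for the example.
 */
static struct irq_domain *example_domain;
static void __iomem *example_pending_reg;

static void example_chained_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned long pending = readl(example_pending_reg);
	int bit;

	for_each_set_bit(bit, &pending, 32) {
		unsigned int virq = irq_find_mapping(example_domain, bit);

		if (virq)		/* 0 means "no mapping exists" */
			generic_handle_irq(virq);
	}
}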
|
||||
|
||||
#ifdef CONFIG_IRQ_DOMAIN_DEBUG
|
||||
static int virq_debug_show(struct seq_file *m, void *private)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc;
|
||||
struct irq_domain *domain;
|
||||
struct radix_tree_iter iter;
|
||||
void *data, **slot;
|
||||
int i;
|
||||
|
||||
seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
|
||||
"name", "mapped", "linear-max", "direct-max", "devtree-node");
|
||||
mutex_lock(&irq_domain_mutex);
|
||||
list_for_each_entry(domain, &irq_domain_list, link) {
|
||||
int count = 0;
|
||||
radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
|
||||
count++;
|
||||
seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
|
||||
domain == irq_default_domain ? '*' : ' ', domain->name,
|
||||
domain->revmap_size + count, domain->revmap_size,
|
||||
domain->revmap_direct_max_irq,
|
||||
domain->of_node ? of_node_full_name(domain->of_node) : "");
|
||||
}
|
||||
mutex_unlock(&irq_domain_mutex);
|
||||
|
||||
seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
|
||||
"chip name", (int)(2 * sizeof(void *) + 2), "chip data",
|
||||
"active", "type", "domain");
|
||||
|
||||
for (i = 1; i < nr_irqs; i++) {
|
||||
desc = irq_to_desc(i);
|
||||
if (!desc)
|
||||
continue;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
domain = desc->irq_data.domain;
|
||||
|
||||
if (domain) {
|
||||
struct irq_chip *chip;
|
||||
int hwirq = desc->irq_data.hwirq;
|
||||
bool direct;
|
||||
|
||||
seq_printf(m, "%5d ", i);
|
||||
seq_printf(m, "0x%05x ", hwirq);
|
||||
|
||||
chip = irq_desc_get_chip(desc);
|
||||
seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");
|
||||
|
||||
data = irq_desc_get_chip_data(desc);
|
||||
seq_printf(m, data ? "0x%p " : " %p ", data);
|
||||
|
||||
seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
|
||||
direct = (i == hwirq) && (i < domain->revmap_direct_max_irq);
|
||||
seq_printf(m, "%6s%-8s ",
|
||||
(hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
|
||||
direct ? "(DIRECT)" : "");
|
||||
seq_printf(m, "%s\n", desc->irq_data.domain->name);
|
||||
}
|
||||
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int virq_debug_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, virq_debug_show, inode->i_private);
|
||||
}
|
||||
|
||||
static const struct file_operations virq_debug_fops = {
|
||||
.open = virq_debug_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static int __init irq_debugfs_init(void)
|
||||
{
|
||||
if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
|
||||
NULL, &virq_debug_fops) == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
__initcall(irq_debugfs_init);
|
||||
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
|
||||
|
||||
/**
|
||||
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
|
||||
*
|
||||
* Device Tree IRQ specifier translation function which works with one cell
|
||||
* bindings where the cell value maps directly to the hwirq number.
|
||||
*/
|
||||
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq, unsigned int *out_type)
|
||||
{
|
||||
if (WARN_ON(intsize < 1))
|
||||
return -EINVAL;
|
||||
*out_hwirq = intspec[0];
|
||||
*out_type = IRQ_TYPE_NONE;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
|
||||
|
||||
/**
|
||||
* irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
|
||||
*
|
||||
* Device Tree IRQ specifier translation function which works with two cell
|
||||
* bindings where the cell values map directly to the hwirq number
|
||||
* and linux irq flags.
|
||||
*/
|
||||
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
irq_hw_number_t *out_hwirq, unsigned int *out_type)
|
||||
{
|
||||
if (WARN_ON(intsize < 2))
|
||||
return -EINVAL;
|
||||
*out_hwirq = intspec[0];
|
||||
*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
|
||||
|
||||
/**
|
||||
* irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
|
||||
*
|
||||
* Device Tree IRQ specifier translation function which works with either one
|
||||
* or two cell bindings where the cell values map directly to the hwirq number
|
||||
* and linux irq flags.
|
||||
*
|
||||
* Note: don't use this function unless your interrupt controller explicitly
|
||||
* supports both one and two cell bindings. For the majority of controllers
|
||||
* the _onecell() or _twocell() variants above should be used.
|
||||
*/
|
||||
int irq_domain_xlate_onetwocell(struct irq_domain *d,
|
||||
struct device_node *ctrlr,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq, unsigned int *out_type)
|
||||
{
|
||||
if (WARN_ON(intsize < 1))
|
||||
return -EINVAL;
|
||||
*out_hwirq = intspec[0];
|
||||
*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
|
||||
|
||||
const struct irq_domain_ops irq_domain_simple_ops = {
|
||||
.xlate = irq_domain_xlate_onetwocell,
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
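/*
 * Illustrative sketch only: a hypothetical driver wiring the generic
 * two-cell translator above into its own irq_domain_ops and registering
 * a linear domain for 32 hardware lines. example_irq_map(), the 32-line
 * size and the device node come from the imaginary driver, not from
 * this file; dummy_irq_chip is only used to keep the sketch short.
 */
static int example_irq_map(struct irq_domain *d, unsigned int virq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops example_domain_ops = {
	.map	= example_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

static struct irq_domain *example_register_domain(struct device_node *np)
{
	return irq_domain_add_linear(np, 32, &example_domain_ops, NULL);
}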
1761
kernel/irq/manage.c
Normal file
File diff suppressed because it is too large
72
kernel/irq/migration.c
Normal file
@ -0,0 +1,72 @@
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
void irq_move_masked_irq(struct irq_data *idata)
|
||||
{
|
||||
struct irq_desc *desc = irq_data_to_desc(idata);
|
||||
struct irq_chip *chip = idata->chip;
|
||||
|
||||
if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
|
||||
*/
|
||||
if (!irqd_can_balance(&desc->irq_data)) {
|
||||
WARN_ON(1);
|
||||
return;
|
||||
}
|
||||
|
||||
irqd_clr_move_pending(&desc->irq_data);
|
||||
|
||||
if (unlikely(cpumask_empty(desc->pending_mask)))
|
||||
return;
|
||||
|
||||
if (!chip->irq_set_affinity)
|
||||
return;
|
||||
|
||||
assert_raw_spin_locked(&desc->lock);
|
||||
|
||||
/*
|
||||
* If there was a valid mask to work with, please
|
||||
* do the disable, re-program, enable sequence.
|
||||
* This is *not* particularly important for level triggered
|
||||
* but in an edge trigger case, we might be setting rte
|
||||
* when an active trigger is coming in. This could
|
||||
* cause some ioapics to malfunction.
|
||||
* Being paranoid, I guess!
|
||||
*
|
||||
* For correct operation this depends on the caller
|
||||
* masking the irqs.
|
||||
*/
|
||||
if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
|
||||
irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
|
||||
|
||||
cpumask_clear(desc->pending_mask);
|
||||
}
|
||||
|
||||
void irq_move_irq(struct irq_data *idata)
|
||||
{
|
||||
bool masked;
|
||||
|
||||
if (likely(!irqd_is_setaffinity_pending(idata)))
|
||||
return;
|
||||
|
||||
if (unlikely(irqd_irq_disabled(idata)))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Be careful vs. already masked interrupts. If this is a
|
||||
* threaded interrupt with ONESHOT set, we can end up with an
|
||||
* interrupt storm.
|
||||
*/
|
||||
masked = irqd_irq_masked(idata);
|
||||
if (!masked)
|
||||
idata->chip->irq_mask(idata);
|
||||
irq_move_masked_irq(idata);
|
||||
if (!masked)
|
||||
idata->chip->irq_unmask(idata);
|
||||
}
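/*
 * Illustrative sketch only: a hypothetical irq_chip ack callback that
 * lets a pending affinity change take effect while the interrupt is
 * being acknowledged, the typical call site for irq_move_irq(). The
 * hardware acknowledge step is left as a placeholder comment.
 */
static void example_ack_irq(struct irq_data *data)
{
	irq_move_irq(data);	/* apply a deferred affinity change, if any */
	/* ... acknowledge the interrupt in hardware here ... */
}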
201
kernel/irq/pm.c
Normal file
@ -0,0 +1,201 @@
/*
|
||||
* linux/kernel/irq/pm.c
|
||||
*
|
||||
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
|
||||
*
|
||||
* This file contains power management functions related to interrupts.
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
bool irq_pm_check_wakeup(struct irq_desc *desc)
|
||||
{
|
||||
if (irqd_is_wakeup_armed(&desc->irq_data)) {
|
||||
irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
|
||||
desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
|
||||
desc->depth++;
|
||||
irq_disable(desc);
|
||||
pm_system_wakeup();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from __setup_irq() with desc->lock held after @action has
|
||||
* been installed in the action chain.
|
||||
*/
|
||||
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
|
||||
{
|
||||
desc->nr_actions++;
|
||||
|
||||
if (action->flags & IRQF_FORCE_RESUME)
|
||||
desc->force_resume_depth++;
|
||||
|
||||
WARN_ON_ONCE(desc->force_resume_depth &&
|
||||
desc->force_resume_depth != desc->nr_actions);
|
||||
|
||||
if (action->flags & IRQF_NO_SUSPEND)
|
||||
desc->no_suspend_depth++;
|
||||
|
||||
WARN_ON_ONCE(desc->no_suspend_depth &&
|
||||
desc->no_suspend_depth != desc->nr_actions);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from __free_irq() with desc->lock held after @action has
|
||||
* been removed from the action chain.
|
||||
*/
|
||||
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
|
||||
{
|
||||
desc->nr_actions--;
|
||||
|
||||
if (action->flags & IRQF_FORCE_RESUME)
|
||||
desc->force_resume_depth--;
|
||||
|
||||
if (action->flags & IRQF_NO_SUSPEND)
|
||||
desc->no_suspend_depth--;
|
||||
}
|
||||
|
||||
static bool suspend_device_irq(struct irq_desc *desc, int irq)
|
||||
{
|
||||
if (!desc->action || desc->no_suspend_depth)
|
||||
return false;
|
||||
|
||||
if (irqd_is_wakeup_set(&desc->irq_data)) {
|
||||
irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
|
||||
/*
|
||||
* We return true here to force the caller to issue
|
||||
* synchronize_irq(). We need to make sure that the
|
||||
* IRQD_WAKEUP_ARMED is visible before we return from
|
||||
* suspend_device_irqs().
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
desc->istate |= IRQS_SUSPENDED;
|
||||
__disable_irq(desc, irq);
|
||||
|
||||
/*
|
||||
* Hardware which has no wakeup source configuration facility
|
||||
* requires that the non wakeup interrupts are masked at the
|
||||
* chip level. The chip implementation indicates that with
|
||||
* IRQCHIP_MASK_ON_SUSPEND.
|
||||
*/
|
||||
if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
|
||||
mask_irq(desc);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* suspend_device_irqs - disable all currently enabled interrupt lines
|
||||
*
|
||||
* During system-wide suspend or hibernation device drivers need to be
|
||||
* prevented from receiving interrupts and this function is provided
|
||||
* for this purpose.
|
||||
*
|
||||
* So we disable all interrupts and mark them IRQS_SUSPENDED except
|
||||
* for those which are unused, those which are marked as not
|
||||
* suspendable via an interrupt request with the flag IRQF_NO_SUSPEND
|
||||
* set and those which are marked as active wakeup sources.
|
||||
*
|
||||
* The active wakeup sources are handled by the flow handler entry
|
||||
* code which checks for the IRQD_WAKEUP_ARMED flag, suspends the
|
||||
* interrupt and notifies the pm core about the wakeup.
|
||||
*/
|
||||
void suspend_device_irqs(void)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
int irq;
|
||||
|
||||
for_each_irq_desc(irq, desc) {
|
||||
unsigned long flags;
|
||||
bool sync;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
sync = suspend_device_irq(desc, irq);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
if (sync)
|
||||
synchronize_irq(irq);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(suspend_device_irqs);
|
||||
|
||||
static void resume_irq(struct irq_desc *desc, int irq)
|
||||
{
|
||||
irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
|
||||
|
||||
if (desc->istate & IRQS_SUSPENDED)
|
||||
goto resume;
|
||||
|
||||
/* Force resume the interrupt? */
|
||||
if (!desc->force_resume_depth)
|
||||
return;
|
||||
|
||||
/* Pretend that it got disabled ! */
|
||||
desc->depth++;
|
||||
resume:
|
||||
desc->istate &= ~IRQS_SUSPENDED;
|
||||
__enable_irq(desc, irq);
|
||||
}
|
||||
|
||||
static void resume_irqs(bool want_early)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
int irq;
|
||||
|
||||
for_each_irq_desc(irq, desc) {
|
||||
unsigned long flags;
|
||||
bool is_early = desc->action &&
|
||||
desc->action->flags & IRQF_EARLY_RESUME;
|
||||
|
||||
if (!is_early && want_early)
|
||||
continue;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
resume_irq(desc, irq);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* irq_pm_syscore_ops - enable interrupt lines early
|
||||
*
|
||||
* Enable all interrupt lines with %IRQF_EARLY_RESUME set.
|
||||
*/
|
||||
static void irq_pm_syscore_resume(void)
|
||||
{
|
||||
resume_irqs(true);
|
||||
}
|
||||
|
||||
static struct syscore_ops irq_pm_syscore_ops = {
|
||||
.resume = irq_pm_syscore_resume,
|
||||
};
|
||||
|
||||
static int __init irq_pm_init_ops(void)
|
||||
{
|
||||
register_syscore_ops(&irq_pm_syscore_ops);
|
||||
return 0;
|
||||
}
|
||||
|
||||
device_initcall(irq_pm_init_ops);
|
||||
|
||||
/**
|
||||
* resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
|
||||
*
|
||||
* Enable all non-%IRQF_EARLY_RESUME interrupt lines previously
|
||||
* disabled by suspend_device_irqs() that have the IRQS_SUSPENDED flag
|
||||
* set as well as those with %IRQF_FORCE_RESUME.
|
||||
*/
|
||||
void resume_device_irqs(void)
|
||||
{
|
||||
resume_irqs(false);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(resume_device_irqs);
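/*
 * Illustrative sketch only: roughly where a platform suspend sequence
 * pairs the two helpers above. The real callers live in the PM core,
 * not in a driver; this only shows the intended ordering.
 */
static int example_enter_suspend(void)
{
	suspend_device_irqs();		/* quiesce device interrupts */

	/* ... put the SoC into its low power state here ... */

	resume_device_irqs();		/* re-enable what was disabled above */
	return 0;
}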
504
kernel/irq/proc.c
Normal file
@ -0,0 +1,504 @@
/*
|
||||
* linux/kernel/irq/proc.c
|
||||
*
|
||||
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
|
||||
*
|
||||
* This file contains the /proc/irq/ handling code.
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
/*
|
||||
* Access rules:
|
||||
*
|
||||
* procfs protects read/write of /proc/irq/N/ files against a
|
||||
* concurrent free of the interrupt descriptor. remove_proc_entry()
|
||||
* immediately prevents new read/writes to happen and waits for
|
||||
* already running read/write functions to complete.
|
||||
*
|
||||
* We remove the proc entries first and then delete the interrupt
|
||||
* descriptor from the radix tree and free it. So it is guaranteed
|
||||
* that irq_to_desc(N) is valid as long as the read/writes are
|
||||
* permitted by procfs.
|
||||
*
|
||||
* The read from /proc/interrupts is a different problem because there
|
||||
* is no protection. So the lookup and the access to irqdesc
|
||||
* information must be protected by sparse_irq_lock.
|
||||
*/
|
||||
static struct proc_dir_entry *root_irq_dir;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static int show_irq_affinity(int type, struct seq_file *m, void *v)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc((long)m->private);
|
||||
const struct cpumask *mask = desc->irq_data.affinity;
|
||||
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
if (irqd_is_setaffinity_pending(&desc->irq_data))
|
||||
mask = desc->pending_mask;
|
||||
#endif
|
||||
if (type)
|
||||
seq_cpumask_list(m, mask);
|
||||
else
|
||||
seq_cpumask(m, mask);
|
||||
seq_putc(m, '\n');
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc((long)m->private);
|
||||
unsigned long flags;
|
||||
cpumask_var_t mask;
|
||||
|
||||
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
if (desc->affinity_hint)
|
||||
cpumask_copy(mask, desc->affinity_hint);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
seq_cpumask(m, mask);
|
||||
seq_putc(m, '\n');
|
||||
free_cpumask_var(mask);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifndef is_affinity_mask_valid
|
||||
#define is_affinity_mask_valid(val) 1
|
||||
#endif
|
||||
|
||||
int no_irq_affinity;
|
||||
static int irq_affinity_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
return show_irq_affinity(0, m, v);
|
||||
}
|
||||
|
||||
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
return show_irq_affinity(1, m, v);
|
||||
}
|
||||
|
||||
|
||||
static ssize_t write_irq_affinity(int type, struct file *file,
|
||||
const char __user *buffer, size_t count, loff_t *pos)
|
||||
{
|
||||
unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
|
||||
cpumask_var_t new_value;
|
||||
int err;
|
||||
|
||||
if (!irq_can_set_affinity(irq) || no_irq_affinity)
|
||||
return -EIO;
|
||||
|
||||
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
if (type)
|
||||
err = cpumask_parselist_user(buffer, count, new_value);
|
||||
else
|
||||
err = cpumask_parse_user(buffer, count, new_value);
|
||||
if (err)
|
||||
goto free_cpumask;
|
||||
|
||||
if (!is_affinity_mask_valid(new_value)) {
|
||||
err = -EINVAL;
|
||||
goto free_cpumask;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do not allow disabling IRQs completely - it's too easy a
|
||||
* way to make the system unusable accidentally :-) At least
|
||||
* one online CPU still has to be targeted.
|
||||
*/
|
||||
if (!cpumask_intersects(new_value, cpu_online_mask)) {
|
||||
/* Special case for empty set - allow the architecture
|
||||
code to set default SMP affinity. */
|
||||
err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
|
||||
} else {
|
||||
irq_set_affinity(irq, new_value);
|
||||
err = count;
|
||||
}
|
||||
|
||||
free_cpumask:
|
||||
free_cpumask_var(new_value);
|
||||
return err;
|
||||
}
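/*
 * Illustrative sketch only (userspace side, not kernel code): writing a
 * cpu mask to /proc/irq/<n>/smp_affinity, the file implemented above.
 * Mask "3" (cpu0 + cpu1) and irq 42 are arbitrary example values; a
 * write that leaves no online cpu in the mask is rejected by the code
 * above.
 */
#include <stdio.h>

static int example_set_irq_affinity(void)
{
	FILE *f = fopen("/proc/irq/42/smp_affinity", "w");

	if (!f)
		return -1;
	fprintf(f, "3\n");
	return fclose(f);
}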
|
||||
|
||||
static ssize_t irq_affinity_proc_write(struct file *file,
|
||||
const char __user *buffer, size_t count, loff_t *pos)
|
||||
{
|
||||
return write_irq_affinity(0, file, buffer, count, pos);
|
||||
}
|
||||
|
||||
static ssize_t irq_affinity_list_proc_write(struct file *file,
|
||||
const char __user *buffer, size_t count, loff_t *pos)
|
||||
{
|
||||
return write_irq_affinity(1, file, buffer, count, pos);
|
||||
}
|
||||
|
||||
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static const struct file_operations irq_affinity_proc_fops = {
|
||||
.open = irq_affinity_proc_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.write = irq_affinity_proc_write,
|
||||
};
|
||||
|
||||
static const struct file_operations irq_affinity_hint_proc_fops = {
|
||||
.open = irq_affinity_hint_proc_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static const struct file_operations irq_affinity_list_proc_fops = {
|
||||
.open = irq_affinity_list_proc_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.write = irq_affinity_list_proc_write,
|
||||
};
|
||||
|
||||
static int default_affinity_show(struct seq_file *m, void *v)
|
||||
{
|
||||
seq_cpumask(m, irq_default_affinity);
|
||||
seq_putc(m, '\n');
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t default_affinity_write(struct file *file,
|
||||
const char __user *buffer, size_t count, loff_t *ppos)
|
||||
{
|
||||
cpumask_var_t new_value;
|
||||
int err;
|
||||
|
||||
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
|
||||
err = cpumask_parse_user(buffer, count, new_value);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (!is_affinity_mask_valid(new_value)) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do not allow disabling IRQs completely - it's too easy a
|
||||
* way to make the system unusable accidentally :-) At least
|
||||
* one online CPU still has to be targeted.
|
||||
*/
|
||||
if (!cpumask_intersects(new_value, cpu_online_mask)) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
cpumask_copy(irq_default_affinity, new_value);
|
||||
err = count;
|
||||
|
||||
out:
|
||||
free_cpumask_var(new_value);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int default_affinity_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, default_affinity_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static const struct file_operations default_affinity_proc_fops = {
|
||||
.open = default_affinity_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
.write = default_affinity_write,
|
||||
};
|
||||
|
||||
static int irq_node_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc((long) m->private);
|
||||
|
||||
seq_printf(m, "%d\n", desc->irq_data.node);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int irq_node_proc_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, irq_node_proc_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static const struct file_operations irq_node_proc_fops = {
|
||||
.open = irq_node_proc_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
#endif
|
||||
|
||||
static int irq_spurious_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc((long) m->private);
|
||||
|
||||
seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
|
||||
desc->irq_count, desc->irqs_unhandled,
|
||||
jiffies_to_msecs(desc->last_unhandled));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int irq_spurious_proc_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
|
||||
}
|
||||
|
||||
static const struct file_operations irq_spurious_proc_fops = {
|
||||
.open = irq_spurious_proc_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
#define MAX_NAMELEN 128
|
||||
|
||||
static int name_unique(unsigned int irq, struct irqaction *new_action)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irqaction *action;
|
||||
unsigned long flags;
|
||||
int ret = 1;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
for (action = desc->action ; action; action = action->next) {
|
||||
if ((action != new_action) && action->name &&
|
||||
!strcmp(new_action->name, action->name)) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void register_handler_proc(unsigned int irq, struct irqaction *action)
|
||||
{
|
||||
char name [MAX_NAMELEN];
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (!desc->dir || action->dir || !action->name ||
|
||||
!name_unique(irq, action))
|
||||
return;
|
||||
|
||||
memset(name, 0, MAX_NAMELEN);
|
||||
snprintf(name, MAX_NAMELEN, "%s", action->name);
|
||||
|
||||
/* create /proc/irq/1234/handler/ */
|
||||
action->dir = proc_mkdir(name, desc->dir);
|
||||
}
|
||||
|
||||
#undef MAX_NAMELEN
|
||||
|
||||
#define MAX_NAMELEN 10
|
||||
|
||||
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
char name [MAX_NAMELEN];
|
||||
|
||||
if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
|
||||
return;
|
||||
|
||||
memset(name, 0, MAX_NAMELEN);
|
||||
sprintf(name, "%d", irq);
|
||||
|
||||
/* create /proc/irq/1234 */
|
||||
desc->dir = proc_mkdir(name, root_irq_dir);
|
||||
if (!desc->dir)
|
||||
return;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* create /proc/irq/<irq>/smp_affinity */
|
||||
proc_create_data("smp_affinity", 0644, desc->dir,
|
||||
&irq_affinity_proc_fops, (void *)(long)irq);
|
||||
|
||||
/* create /proc/irq/<irq>/affinity_hint */
|
||||
proc_create_data("affinity_hint", 0444, desc->dir,
|
||||
&irq_affinity_hint_proc_fops, (void *)(long)irq);
|
||||
|
||||
/* create /proc/irq/<irq>/smp_affinity_list */
|
||||
proc_create_data("smp_affinity_list", 0644, desc->dir,
|
||||
&irq_affinity_list_proc_fops, (void *)(long)irq);
|
||||
|
||||
proc_create_data("node", 0444, desc->dir,
|
||||
&irq_node_proc_fops, (void *)(long)irq);
|
||||
#endif
|
||||
|
||||
proc_create_data("spurious", 0444, desc->dir,
|
||||
&irq_spurious_proc_fops, (void *)(long)irq);
|
||||
}
|
||||
|
||||
void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
char name [MAX_NAMELEN];
|
||||
|
||||
if (!root_irq_dir || !desc->dir)
|
||||
return;
|
||||
#ifdef CONFIG_SMP
|
||||
remove_proc_entry("smp_affinity", desc->dir);
|
||||
remove_proc_entry("affinity_hint", desc->dir);
|
||||
remove_proc_entry("smp_affinity_list", desc->dir);
|
||||
remove_proc_entry("node", desc->dir);
|
||||
#endif
|
||||
remove_proc_entry("spurious", desc->dir);
|
||||
|
||||
memset(name, 0, MAX_NAMELEN);
|
||||
sprintf(name, "%u", irq);
|
||||
remove_proc_entry(name, root_irq_dir);
|
||||
}
|
||||
|
||||
#undef MAX_NAMELEN
|
||||
|
||||
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
|
||||
{
|
||||
proc_remove(action->dir);
|
||||
}
|
||||
|
||||
static void register_default_affinity_proc(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
proc_create("irq/default_smp_affinity", 0644, NULL,
|
||||
&default_affinity_proc_fops);
|
||||
#endif
|
||||
}
|
||||
|
||||
void init_irq_proc(void)
|
||||
{
|
||||
unsigned int irq;
|
||||
struct irq_desc *desc;
|
||||
|
||||
/* create /proc/irq */
|
||||
root_irq_dir = proc_mkdir("irq", NULL);
|
||||
if (!root_irq_dir)
|
||||
return;
|
||||
|
||||
register_default_affinity_proc();
|
||||
|
||||
/*
|
||||
* Create entries for all existing IRQs.
|
||||
*/
|
||||
for_each_irq_desc(irq, desc) {
|
||||
if (!desc)
|
||||
continue;
|
||||
|
||||
register_irq_proc(irq, desc);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_GENERIC_IRQ_SHOW
|
||||
|
||||
int __weak arch_show_interrupts(struct seq_file *p, int prec)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifndef ACTUAL_NR_IRQS
|
||||
# define ACTUAL_NR_IRQS nr_irqs
|
||||
#endif
|
||||
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
{
|
||||
static int prec;
|
||||
|
||||
unsigned long flags, any_count = 0;
|
||||
int i = *(loff_t *) v, j;
|
||||
struct irqaction *action;
|
||||
struct irq_desc *desc;
|
||||
|
||||
if (i > ACTUAL_NR_IRQS)
|
||||
return 0;
|
||||
|
||||
if (i == ACTUAL_NR_IRQS)
|
||||
return arch_show_interrupts(p, prec);
|
||||
|
||||
/* print header and calculate the width of the first column */
|
||||
if (i == 0) {
|
||||
for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
|
||||
j *= 10;
|
||||
|
||||
seq_printf(p, "%*s", prec + 8, "");
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "CPU%-8d", j);
|
||||
seq_putc(p, '\n');
|
||||
}
|
||||
|
||||
irq_lock_sparse();
|
||||
desc = irq_to_desc(i);
|
||||
if (!desc)
|
||||
goto outsparse;
|
||||
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
for_each_online_cpu(j)
|
||||
any_count |= kstat_irqs_cpu(i, j);
|
||||
action = desc->action;
|
||||
if (!action && !any_count)
|
||||
goto out;
|
||||
|
||||
seq_printf(p, "%*d: ", prec, i);
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
|
||||
|
||||
if (desc->irq_data.chip) {
|
||||
if (desc->irq_data.chip->irq_print_chip)
|
||||
desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
|
||||
else if (desc->irq_data.chip->name)
|
||||
seq_printf(p, " %8s", desc->irq_data.chip->name);
|
||||
else
|
||||
seq_printf(p, " %8s", "-");
|
||||
} else {
|
||||
seq_printf(p, " %8s", "None");
|
||||
}
|
||||
if (desc->irq_data.domain)
|
||||
seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
|
||||
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
|
||||
seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
|
||||
#endif
|
||||
if (desc->name)
|
||||
seq_printf(p, "-%-8s", desc->name);
|
||||
|
||||
if (action) {
|
||||
seq_printf(p, " %s", action->name);
|
||||
while ((action = action->next) != NULL)
|
||||
seq_printf(p, ", %s", action->name);
|
||||
}
|
||||
|
||||
seq_putc(p, '\n');
|
||||
out:
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
outsparse:
|
||||
irq_unlock_sparse();
|
||||
return 0;
|
||||
}
|
||||
#endif
91
kernel/irq/resend.c
Normal file
@ -0,0 +1,91 @@
/*
|
||||
* linux/kernel/irq/resend.c
|
||||
*
|
||||
* Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
|
||||
* Copyright (C) 2005-2006, Thomas Gleixner
|
||||
*
|
||||
* This file contains the IRQ-resend code
|
||||
*
|
||||
* If the interrupt is waiting to be processed, we try to re-run it.
|
||||
* We can't directly run it from here since the caller might be in an
|
||||
* interrupt-protected region. Not all irq controller chips can
|
||||
* retrigger interrupts at the hardware level, so in those cases
|
||||
* we allow the resending of IRQs via a tasklet.
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
#ifdef CONFIG_HARDIRQS_SW_RESEND
|
||||
|
||||
/* Bitmap to handle software resend of interrupts: */
|
||||
static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
|
||||
|
||||
/*
|
||||
* Run software resends of IRQs
|
||||
*/
|
||||
static void resend_irqs(unsigned long arg)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
int irq;
|
||||
|
||||
while (!bitmap_empty(irqs_resend, nr_irqs)) {
|
||||
irq = find_first_bit(irqs_resend, nr_irqs);
|
||||
clear_bit(irq, irqs_resend);
|
||||
desc = irq_to_desc(irq);
|
||||
local_irq_disable();
|
||||
desc->handle_irq(irq, desc);
|
||||
local_irq_enable();
|
||||
}
|
||||
}
|
||||
|
||||
/* Tasklet to handle resend: */
|
||||
static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* IRQ resend
|
||||
*
|
||||
* Is called with interrupts disabled and desc->lock held.
|
||||
*/
|
||||
void check_irq_resend(struct irq_desc *desc, unsigned int irq)
|
||||
{
|
||||
/*
|
||||
* We do not resend level type interrupts. Level type
|
||||
* interrupts are resent by hardware when they are still
|
||||
* active. Clear the pending bit so suspend/resume does not
|
||||
* get confused.
|
||||
*/
|
||||
if (irq_settings_is_level(desc)) {
|
||||
desc->istate &= ~IRQS_PENDING;
|
||||
return;
|
||||
}
|
||||
if (desc->istate & IRQS_REPLAY)
|
||||
return;
|
||||
if (desc->istate & IRQS_PENDING) {
|
||||
desc->istate &= ~IRQS_PENDING;
|
||||
desc->istate |= IRQS_REPLAY;
|
||||
|
||||
if (!desc->irq_data.chip->irq_retrigger ||
|
||||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
|
||||
#ifdef CONFIG_HARDIRQS_SW_RESEND
|
||||
/*
|
||||
* If the interrupt has a parent irq and runs
|
||||
* in the thread context of the parent irq,
|
||||
* retrigger the parent.
|
||||
*/
|
||||
if (desc->parent_irq &&
|
||||
irq_settings_is_nested_thread(desc))
|
||||
irq = desc->parent_irq;
|
||||
/* Set it pending and activate the softirq: */
|
||||
set_bit(irq, irqs_resend);
|
||||
tasklet_schedule(&resend_tasklet);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
156
kernel/irq/settings.h
Normal file
@ -0,0 +1,156 @@
/*
|
||||
* Internal header to deal with irq_desc->status which will be renamed
|
||||
* to irq_desc->settings.
|
||||
*/
|
||||
enum {
|
||||
_IRQ_DEFAULT_INIT_FLAGS = IRQ_DEFAULT_INIT_FLAGS,
|
||||
_IRQ_PER_CPU = IRQ_PER_CPU,
|
||||
_IRQ_LEVEL = IRQ_LEVEL,
|
||||
_IRQ_NOPROBE = IRQ_NOPROBE,
|
||||
_IRQ_NOREQUEST = IRQ_NOREQUEST,
|
||||
_IRQ_NOTHREAD = IRQ_NOTHREAD,
|
||||
_IRQ_NOAUTOEN = IRQ_NOAUTOEN,
|
||||
_IRQ_MOVE_PCNTXT = IRQ_MOVE_PCNTXT,
|
||||
_IRQ_NO_BALANCING = IRQ_NO_BALANCING,
|
||||
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
|
||||
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
|
||||
_IRQ_IS_POLLED = IRQ_IS_POLLED,
|
||||
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
|
||||
};
|
||||
|
||||
#define IRQ_PER_CPU GOT_YOU_MORON
|
||||
#define IRQ_NO_BALANCING GOT_YOU_MORON
|
||||
#define IRQ_LEVEL GOT_YOU_MORON
|
||||
#define IRQ_NOPROBE GOT_YOU_MORON
|
||||
#define IRQ_NOREQUEST GOT_YOU_MORON
|
||||
#define IRQ_NOTHREAD GOT_YOU_MORON
|
||||
#define IRQ_NOAUTOEN GOT_YOU_MORON
|
||||
#define IRQ_NESTED_THREAD GOT_YOU_MORON
|
||||
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
|
||||
#define IRQ_IS_POLLED GOT_YOU_MORON
|
||||
#undef IRQF_MODIFY_MASK
|
||||
#define IRQF_MODIFY_MASK GOT_YOU_MORON
|
||||
|
||||
static inline void
|
||||
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
|
||||
{
|
||||
desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
|
||||
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
|
||||
}
|
||||
|
||||
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_PER_CPU;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
|
||||
}
|
||||
|
||||
static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors |= _IRQ_PER_CPU;
|
||||
}
|
||||
|
||||
static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors |= _IRQ_NO_BALANCING;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_NO_BALANCING;
|
||||
}
|
||||
|
||||
static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
|
||||
}
|
||||
|
||||
static inline void
|
||||
irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
|
||||
{
|
||||
desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
|
||||
desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_is_level(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_LEVEL;
|
||||
}
|
||||
|
||||
static inline void irq_settings_clr_level(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors &= ~_IRQ_LEVEL;
|
||||
}
|
||||
|
||||
static inline void irq_settings_set_level(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors |= _IRQ_LEVEL;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_can_request(struct irq_desc *desc)
|
||||
{
|
||||
return !(desc->status_use_accessors & _IRQ_NOREQUEST);
|
||||
}
|
||||
|
||||
static inline void irq_settings_clr_norequest(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors &= ~_IRQ_NOREQUEST;
|
||||
}
|
||||
|
||||
static inline void irq_settings_set_norequest(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors |= _IRQ_NOREQUEST;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_can_thread(struct irq_desc *desc)
|
||||
{
|
||||
return !(desc->status_use_accessors & _IRQ_NOTHREAD);
|
||||
}
|
||||
|
||||
static inline void irq_settings_clr_nothread(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors &= ~_IRQ_NOTHREAD;
|
||||
}
|
||||
|
||||
static inline void irq_settings_set_nothread(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors |= _IRQ_NOTHREAD;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_can_probe(struct irq_desc *desc)
|
||||
{
|
||||
return !(desc->status_use_accessors & _IRQ_NOPROBE);
|
||||
}
|
||||
|
||||
static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors &= ~_IRQ_NOPROBE;
|
||||
}
|
||||
|
||||
static inline void irq_settings_set_noprobe(struct irq_desc *desc)
|
||||
{
|
||||
desc->status_use_accessors |= _IRQ_NOPROBE;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
|
||||
{
|
||||
return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
|
||||
}
|
||||
|
||||
static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_NESTED_THREAD;
|
||||
}
|
||||
|
||||
static inline bool irq_settings_is_polled(struct irq_desc *desc)
|
||||
{
|
||||
return desc->status_use_accessors & _IRQ_IS_POLLED;
|
||||
}
467
kernel/irq/spurious.c
Normal file
@ -0,0 +1,467 @@
/*
|
||||
* linux/kernel/irq/spurious.c
|
||||
*
|
||||
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
|
||||
*
|
||||
* This file contains spurious interrupt handling.
|
||||
*/
|
||||
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/timer.h>
|
||||
|
||||
#include "internals.h"
|
||||
|
||||
static int irqfixup __read_mostly;
|
||||
|
||||
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
|
||||
static void poll_spurious_irqs(unsigned long dummy);
|
||||
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
|
||||
static int irq_poll_cpu;
|
||||
static atomic_t irq_poll_active;
|
||||
|
||||
/*
|
||||
* We wait here for a poller to finish.
|
||||
*
|
||||
* If the poll runs on this CPU, then we yell loudly and return
|
||||
* false. That will leave the interrupt line disabled in the worst
|
||||
* case, but it should never happen.
|
||||
*
|
||||
* We wait until the poller is done and then recheck disabled and
|
||||
* action (about to be disabled). Only if it's still active, we return
|
||||
* true and let the handler run.
|
||||
*/
|
||||
bool irq_wait_for_poll(struct irq_desc *desc)
|
||||
{
|
||||
if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
|
||||
"irq poll in progress on cpu %d for irq %d\n",
|
||||
smp_processor_id(), desc->irq_data.irq))
|
||||
return false;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
do {
|
||||
raw_spin_unlock(&desc->lock);
|
||||
while (irqd_irq_inprogress(&desc->irq_data))
|
||||
cpu_relax();
|
||||
raw_spin_lock(&desc->lock);
|
||||
} while (irqd_irq_inprogress(&desc->irq_data));
|
||||
/* Might have been disabled in meantime */
|
||||
return !irqd_irq_disabled(&desc->irq_data) && desc->action;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Recovery handler for misrouted interrupts.
|
||||
*/
|
||||
static int try_one_irq(int irq, struct irq_desc *desc, bool force)
|
||||
{
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
struct irqaction *action;
|
||||
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
/*
|
||||
* PER_CPU, nested thread interrupts and interrupts explicitly
|
||||
* marked polled are excluded from polling.
|
||||
*/
|
||||
if (irq_settings_is_per_cpu(desc) ||
|
||||
irq_settings_is_nested_thread(desc) ||
|
||||
irq_settings_is_polled(desc))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Do not poll disabled interrupts unless the spurious
|
||||
* disabled poller asks explicitly.
|
||||
*/
|
||||
if (irqd_irq_disabled(&desc->irq_data) && !force)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* All handlers must agree on IRQF_SHARED, so we test just the
|
||||
* first.
|
||||
*/
|
||||
action = desc->action;
|
||||
if (!action || !(action->flags & IRQF_SHARED) ||
|
||||
(action->flags & __IRQF_TIMER))
|
||||
goto out;
|
||||
|
||||
/* Already running on another processor */
|
||||
if (irqd_irq_inprogress(&desc->irq_data)) {
|
||||
/*
|
||||
* Already running: If it is shared get the other
|
||||
* CPU to go looking for our mystery interrupt too
|
||||
*/
|
||||
desc->istate |= IRQS_PENDING;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Mark it poll in progress */
|
||||
desc->istate |= IRQS_POLL_INPROGRESS;
|
||||
do {
|
||||
if (handle_irq_event(desc) == IRQ_HANDLED)
|
||||
ret = IRQ_HANDLED;
|
||||
/* Make sure that there is still a valid action */
|
||||
action = desc->action;
|
||||
} while ((desc->istate & IRQS_PENDING) && action);
|
||||
desc->istate &= ~IRQS_POLL_INPROGRESS;
|
||||
out:
|
||||
raw_spin_unlock(&desc->lock);
|
||||
return ret == IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int misrouted_irq(int irq)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
int i, ok = 0;
|
||||
|
||||
if (atomic_inc_return(&irq_poll_active) != 1)
|
||||
goto out;
|
||||
|
||||
irq_poll_cpu = smp_processor_id();
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
if (!i)
|
||||
continue;
|
||||
|
||||
if (i == irq) /* Already tried */
|
||||
continue;
|
||||
|
||||
if (try_one_irq(i, desc, false))
|
||||
ok = 1;
|
||||
}
|
||||
out:
|
||||
atomic_dec(&irq_poll_active);
|
||||
/* So the caller can adjust the irq error counts */
|
||||
return ok;
|
||||
}
|
||||
|
||||
static void poll_spurious_irqs(unsigned long dummy)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
int i;
|
||||
|
||||
if (atomic_inc_return(&irq_poll_active) != 1)
|
||||
goto out;
|
||||
irq_poll_cpu = smp_processor_id();
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
unsigned int state;
|
||||
|
||||
if (!i)
|
||||
continue;
|
||||
|
||||
/* Racy but it doesn't matter */
|
||||
state = desc->istate;
|
||||
barrier();
|
||||
if (!(state & IRQS_SPURIOUS_DISABLED))
|
||||
continue;
|
||||
|
||||
local_irq_disable();
|
||||
try_one_irq(i, desc, true);
|
||||
local_irq_enable();
|
||||
}
|
||||
out:
|
||||
atomic_dec(&irq_poll_active);
|
||||
mod_timer(&poll_spurious_irq_timer,
|
||||
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
|
||||
}
|
||||
|
||||
static inline int bad_action_ret(irqreturn_t action_ret)
|
||||
{
|
||||
if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* If 99,900 of the previous 100,000 interrupts have not been handled
|
||||
* then assume that the IRQ is stuck in some manner. Drop a diagnostic
|
||||
* and try to turn the IRQ off.
|
||||
*
|
||||
* (The other 100-of-100,000 interrupts may have been a correctly
|
||||
* functioning device sharing an IRQ with the failing one)
|
||||
*/
|
||||
static void
|
||||
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
|
||||
irqreturn_t action_ret)
|
||||
{
|
||||
struct irqaction *action;
|
||||
unsigned long flags;
|
||||
|
||||
if (bad_action_ret(action_ret)) {
|
||||
printk(KERN_ERR "irq event %d: bogus return value %x\n",
|
||||
irq, action_ret);
|
||||
} else {
|
||||
printk(KERN_ERR "irq %d: nobody cared (try booting with "
|
||||
"the \"irqpoll\" option)\n", irq);
|
||||
}
|
||||
dump_stack();
|
||||
printk(KERN_ERR "handlers:\n");
|
||||
|
||||
/*
|
||||
* We need to take desc->lock here. note_interrupt() is called
|
||||
* w/o desc->lock held, but IRQ_PROGRESS set. We might race
|
||||
* with something else removing an action. It's ok to take
|
||||
* desc->lock here. See synchronize_irq().
|
||||
*/
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
action = desc->action;
|
||||
while (action) {
|
||||
printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
|
||||
if (action->thread_fn)
|
||||
printk(KERN_CONT " threaded [<%p>] %pf",
|
||||
action->thread_fn, action->thread_fn);
|
||||
printk(KERN_CONT "\n");
|
||||
action = action->next;
|
||||
}
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
static void
|
||||
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
|
||||
{
|
||||
static int count = 100;
|
||||
|
||||
if (count > 0) {
|
||||
count--;
|
||||
__report_bad_irq(irq, desc, action_ret);
|
||||
}
|
||||
}
|
||||
|
||||
static inline int
|
||||
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
|
||||
irqreturn_t action_ret)
|
||||
{
|
||||
struct irqaction *action;
|
||||
|
||||
if (!irqfixup)
|
||||
return 0;
|
||||
|
||||
/* We didn't actually handle the IRQ - see if it was misrouted? */
|
||||
if (action_ret == IRQ_NONE)
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* But for 'irqfixup == 2' we also do it for handled interrupts if
|
||||
* they are marked as IRQF_IRQPOLL (or for irq zero, which is the
|
||||
* traditional PC timer interrupt.. Legacy)
|
||||
*/
|
||||
if (irqfixup < 2)
|
||||
return 0;
|
||||
|
||||
if (!irq)
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* Since we don't get the descriptor lock, "action" can
|
||||
* change under us. We don't really care, but we don't
|
||||
* want to follow a NULL pointer. So tell the compiler to
|
||||
* just load it once by using a barrier.
|
||||
*/
|
||||
action = desc->action;
|
||||
barrier();
|
||||
return action && (action->flags & IRQF_IRQPOLL);
|
||||
}
|
||||
|
||||
#define SPURIOUS_DEFERRED 0x80000000
|
||||
|
||||
void note_interrupt(unsigned int irq, struct irq_desc *desc,
|
||||
irqreturn_t action_ret)
|
||||
{
|
||||
if (desc->istate & IRQS_POLL_INPROGRESS ||
|
||||
irq_settings_is_polled(desc))
|
||||
return;
|
||||
|
||||
if (bad_action_ret(action_ret)) {
|
||||
report_bad_irq(irq, desc, action_ret);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* We cannot call note_interrupt from the threaded handler
|
||||
* because we need to look at the compound of all handlers
|
||||
* (primary and threaded). Aside of that in the threaded
|
||||
* shared case we have no serialization against an incoming
|
||||
* hardware interrupt while we are dealing with a threaded
|
||||
* result.
|
||||
*
|
||||
* So in case a thread is woken, we just note the fact and
|
||||
* defer the analysis to the next hardware interrupt.
|
||||
*
|
||||
* The threaded handlers store whether they successfully
|
||||
* handled an interrupt and we check whether that number
|
||||
* changed versus the last invocation.
|
||||
*
|
||||
* We could handle all interrupts with the delayed by one
|
||||
* mechanism, but for the non forced threaded case we'd just
|
||||
* add pointless overhead to the straight hardirq interrupts
|
||||
* for the sake of a few lines less code.
|
||||
*/
|
||||
if (action_ret & IRQ_WAKE_THREAD) {
|
||||
/*
|
||||
* There is a thread woken. Check whether one of the
|
||||
* shared primary handlers returned IRQ_HANDLED. If
|
||||
* not we defer the spurious detection to the next
|
||||
* interrupt.
|
||||
*/
|
||||
if (action_ret == IRQ_WAKE_THREAD) {
|
||||
int handled;
|
||||
/*
|
||||
* We use bit 31 of thread_handled_last to
|
||||
* denote the deferred spurious detection
|
||||
* active. No locking necessary as
|
||||
* thread_handled_last is only accessed here
|
||||
* and we have the guarantee that hard
|
||||
* interrupts are not reentrant.
|
||||
*/
|
||||
if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
|
||||
desc->threads_handled_last |= SPURIOUS_DEFERRED;
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Check whether one of the threaded handlers
|
||||
* returned IRQ_HANDLED since the last
|
||||
* interrupt happened.
|
||||
*
|
||||
* For simplicity we just set bit 31, as it is
|
||||
* set in threads_handled_last as well. So we
|
||||
* avoid extra masking. And we really do not
|
||||
* care about the high bits of the handled
|
||||
* count. We just care about the count being
|
||||
* different than the one we saw before.
|
||||
*/
|
||||
handled = atomic_read(&desc->threads_handled);
|
||||
handled |= SPURIOUS_DEFERRED;
|
||||
if (handled != desc->threads_handled_last) {
|
||||
action_ret = IRQ_HANDLED;
|
||||
/*
|
||||
* Note: We keep the SPURIOUS_DEFERRED
|
||||
* bit set. We are handling the
|
||||
* previous invocation right now.
|
||||
* Keep it for the current one, so the
|
||||
* next hardware interrupt will
|
||||
* account for it.
|
||||
*/
|
||||
desc->threads_handled_last = handled;
|
||||
} else {
|
||||
/*
|
||||
* None of the threaded handlers felt
|
||||
* responsible for the last interrupt
|
||||
*
|
||||
* We keep the SPURIOUS_DEFERRED bit
|
||||
* set in threads_handled_last as we
|
||||
* need to account for the current
|
||||
* interrupt as well.
|
||||
*/
|
||||
action_ret = IRQ_NONE;
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* One of the primary handlers returned
|
||||
* IRQ_HANDLED. So we don't care about the
|
||||
* threaded handlers on the same line. Clear
|
||||
* the deferred detection bit.
|
||||
*
|
||||
* In theory we could/should check whether the
|
||||
* deferred bit is set and take the result of
|
||||
* the previous run into account here as
|
||||
* well. But it's really not worth the
|
||||
* trouble. If every other interrupt is
|
||||
* handled we never trigger the spurious
|
||||
* detector. And if this is just the one out
|
||||
* of 100k unhandled ones which is handled
|
||||
* then we merely delay the spurious detection
|
||||
* by one hard interrupt. Not a real problem.
|
||||
*/
|
||||
desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(action_ret == IRQ_NONE)) {
|
||||
/*
|
||||
* If we are seeing only the odd spurious IRQ caused by
|
||||
* bus asynchronicity then don't eventually trigger an error,
|
||||
* otherwise the counter becomes a doomsday timer for otherwise
|
||||
* working systems
|
||||
*/
|
||||
if (time_after(jiffies, desc->last_unhandled + HZ/10))
|
||||
desc->irqs_unhandled = 1;
|
||||
else
|
||||
desc->irqs_unhandled++;
|
||||
desc->last_unhandled = jiffies;
|
||||
}
|
||||
|
||||
if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
|
||||
int ok = misrouted_irq(irq);
|
||||
if (action_ret == IRQ_NONE)
|
||||
desc->irqs_unhandled -= ok;
|
||||
}
|
||||
|
||||
desc->irq_count++;
|
||||
if (likely(desc->irq_count < 100000))
|
||||
return;
|
||||
|
||||
desc->irq_count = 0;
|
||||
if (unlikely(desc->irqs_unhandled > 99900)) {
|
||||
/*
|
||||
* The interrupt is stuck
|
||||
*/
|
||||
__report_bad_irq(irq, desc, action_ret);
|
||||
/*
|
||||
* Now kill the IRQ
|
||||
*/
|
||||
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
|
||||
desc->istate |= IRQS_SPURIOUS_DISABLED;
|
||||
desc->depth++;
|
||||
irq_disable(desc);
|
||||
|
||||
mod_timer(&poll_spurious_irq_timer,
|
||||
jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
|
||||
}
|
||||
desc->irqs_unhandled = 0;
|
||||
}
|
||||
|
||||
bool noirqdebug __read_mostly;
|
||||
|
||||
int noirqdebug_setup(char *str)
|
||||
{
|
||||
noirqdebug = 1;
|
||||
printk(KERN_INFO "IRQ lockup detection disabled\n");
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("noirqdebug", noirqdebug_setup);
|
||||
module_param(noirqdebug, bool, 0644);
|
||||
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
|
||||
|
||||
static int __init irqfixup_setup(char *str)
|
||||
{
|
||||
irqfixup = 1;
|
||||
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
|
||||
printk(KERN_WARNING "This may impact system performance.\n");
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("irqfixup", irqfixup_setup);
|
||||
module_param(irqfixup, int, 0644);
|
||||
|
||||
static int __init irqpoll_setup(char *str)
|
||||
{
|
||||
irqfixup = 2;
|
||||
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
|
||||
"enabled\n");
|
||||
printk(KERN_WARNING "This may significantly impact system "
|
||||
"performance\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("irqpoll", irqpoll_setup);