mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
127
drivers/irqchip/Kconfig
Normal file
127
drivers/irqchip/Kconfig
Normal file
|
@ -0,0 +1,127 @@
|
|||
config IRQCHIP
|
||||
def_bool y
|
||||
depends on OF_IRQ
|
||||
|
||||
config ARM_GIC
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
|
||||
config GIC_NON_BANKED
|
||||
bool
|
||||
|
||||
config ARM_GIC_V3
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
|
||||
config ARM_NVIC
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
select GENERIC_IRQ_CHIP
|
||||
|
||||
config ARM_VIC
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
|
||||
config ARM_VIC_NR
|
||||
int
|
||||
default 4 if ARCH_S5PV210
|
||||
default 2
|
||||
depends on ARM_VIC
|
||||
help
|
||||
The maximum number of VICs available in the system, for
|
||||
power management.
|
||||
|
||||
config ATMEL_AIC_IRQ
|
||||
bool
|
||||
select GENERIC_IRQ_CHIP
|
||||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
select SPARSE_IRQ
|
||||
|
||||
config ATMEL_AIC5_IRQ
|
||||
bool
|
||||
select GENERIC_IRQ_CHIP
|
||||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
select SPARSE_IRQ
|
||||
|
||||
config BRCMSTB_L2_IRQ
|
||||
bool
|
||||
depends on ARM
|
||||
select GENERIC_IRQ_CHIP
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config DW_APB_ICTL
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config IMGPDC_IRQ
|
||||
bool
|
||||
select GENERIC_IRQ_CHIP
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config CLPS711X_IRQCHIP
|
||||
bool
|
||||
depends on ARCH_CLPS711X
|
||||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
select SPARSE_IRQ
|
||||
default y
|
||||
|
||||
config OR1K_PIC
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config OMAP_IRQCHIP
|
||||
bool
|
||||
select GENERIC_IRQ_CHIP
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config ORION_IRQCHIP
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
|
||||
config RENESAS_INTC_IRQPIN
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config RENESAS_IRQC
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config TB10X_IRQC
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
select GENERIC_IRQ_CHIP
|
||||
|
||||
config VERSATILE_FPGA_IRQ
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config VERSATILE_FPGA_IRQ_NR
|
||||
int
|
||||
default 4
|
||||
depends on VERSATILE_FPGA_IRQ
|
||||
|
||||
config XTENSA_MX
|
||||
bool
|
||||
select IRQ_DOMAIN
|
||||
|
||||
config IRQ_CROSSBAR
|
||||
bool
|
||||
help
|
||||
Support for a CROSSBAR ip that precedes the main interrupt controller.
|
||||
The primary irqchip invokes the crossbar's callback which inturn allocates
|
||||
a free irq and configures the IP. Thus the peripheral interrupts are
|
||||
routed to one of the free irqchip interrupt lines.
|
||||
|
||||
config KEYSTONE_IRQ
|
||||
tristate "Keystone 2 IRQ controller IP"
|
||||
depends on ARCH_KEYSTONE
|
||||
help
|
||||
Support for Texas Instruments Keystone 2 IRQ controller IP which
|
||||
is part of the Keystone 2 IPC mechanism
|
40
drivers/irqchip/Makefile
Normal file
40
drivers/irqchip/Makefile
Normal file
|
@ -0,0 +1,40 @@
|
|||
obj-$(CONFIG_IRQCHIP) += irqchip.o
|
||||
|
||||
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
|
||||
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
|
||||
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
|
||||
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
|
||||
obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o
|
||||
obj-$(CONFIG_ARCH_MXS) += irq-mxs.o
|
||||
obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
|
||||
obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
|
||||
obj-$(CONFIG_METAG) += irq-metag-ext.o
|
||||
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
|
||||
obj-$(CONFIG_ARCH_MOXART) += irq-moxart.o
|
||||
obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
|
||||
obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
|
||||
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
|
||||
obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
|
||||
obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o
|
||||
obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o
|
||||
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o
|
||||
obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o
|
||||
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
|
||||
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
|
||||
obj-$(CONFIG_ARM_VIC) += irq-vic.o
|
||||
obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o
|
||||
obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
|
||||
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
|
||||
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
|
||||
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
|
||||
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
|
||||
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
|
||||
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
|
||||
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
|
||||
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
|
||||
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
|
||||
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
|
||||
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
|
||||
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o \
|
||||
irq-bcm7120-l2.o
|
||||
obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
|
227
drivers/irqchip/exynos-combiner.c
Normal file
227
drivers/irqchip/exynos-combiner.c
Normal file
|
@ -0,0 +1,227 @@
|
|||
/*
|
||||
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
|
||||
* http://www.samsung.com
|
||||
*
|
||||
* Combiner irqchip for EXYNOS
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define COMBINER_ENABLE_SET 0x0
|
||||
#define COMBINER_ENABLE_CLEAR 0x4
|
||||
#define COMBINER_INT_STATUS 0xC
|
||||
|
||||
#define IRQ_IN_COMBINER 8
|
||||
|
||||
static DEFINE_SPINLOCK(irq_controller_lock);
|
||||
|
||||
struct combiner_chip_data {
|
||||
unsigned int hwirq_offset;
|
||||
unsigned int irq_mask;
|
||||
void __iomem *base;
|
||||
unsigned int parent_irq;
|
||||
};
|
||||
|
||||
static struct irq_domain *combiner_irq_domain;
|
||||
|
||||
static inline void __iomem *combiner_base(struct irq_data *data)
|
||||
{
|
||||
struct combiner_chip_data *combiner_data =
|
||||
irq_data_get_irq_chip_data(data);
|
||||
|
||||
return combiner_data->base;
|
||||
}
|
||||
|
||||
static void combiner_mask_irq(struct irq_data *data)
|
||||
{
|
||||
u32 mask = 1 << (data->hwirq % 32);
|
||||
|
||||
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
|
||||
}
|
||||
|
||||
static void combiner_unmask_irq(struct irq_data *data)
|
||||
{
|
||||
u32 mask = 1 << (data->hwirq % 32);
|
||||
|
||||
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
|
||||
}
|
||||
|
||||
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
|
||||
struct irq_chip *chip = irq_get_chip(irq);
|
||||
unsigned int cascade_irq, combiner_irq;
|
||||
unsigned long status;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
spin_lock(&irq_controller_lock);
|
||||
status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
|
||||
spin_unlock(&irq_controller_lock);
|
||||
status &= chip_data->irq_mask;
|
||||
|
||||
if (status == 0)
|
||||
goto out;
|
||||
|
||||
combiner_irq = chip_data->hwirq_offset + __ffs(status);
|
||||
cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
|
||||
|
||||
if (unlikely(!cascade_irq))
|
||||
handle_bad_irq(irq, desc);
|
||||
else
|
||||
generic_handle_irq(cascade_irq);
|
||||
|
||||
out:
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int combiner_set_affinity(struct irq_data *d,
|
||||
const struct cpumask *mask_val, bool force)
|
||||
{
|
||||
struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
|
||||
struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
|
||||
struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);
|
||||
|
||||
if (chip && chip->irq_set_affinity)
|
||||
return chip->irq_set_affinity(data, mask_val, force);
|
||||
else
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct irq_chip combiner_chip = {
|
||||
.name = "COMBINER",
|
||||
.irq_mask = combiner_mask_irq,
|
||||
.irq_unmask = combiner_unmask_irq,
|
||||
#ifdef CONFIG_SMP
|
||||
.irq_set_affinity = combiner_set_affinity,
|
||||
#endif
|
||||
};
|
||||
|
||||
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
|
||||
unsigned int irq)
|
||||
{
|
||||
if (irq_set_handler_data(irq, combiner_data) != 0)
|
||||
BUG();
|
||||
irq_set_chained_handler(irq, combiner_handle_cascade_irq);
|
||||
}
|
||||
|
||||
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
|
||||
unsigned int combiner_nr,
|
||||
void __iomem *base, unsigned int irq)
|
||||
{
|
||||
combiner_data->base = base;
|
||||
combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
|
||||
combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
|
||||
combiner_data->parent_irq = irq;
|
||||
|
||||
/* Disable all interrupts */
|
||||
__raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
|
||||
}
|
||||
|
||||
static int combiner_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *controller,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq,
|
||||
unsigned int *out_type)
|
||||
{
|
||||
if (d->of_node != controller)
|
||||
return -EINVAL;
|
||||
|
||||
if (intsize < 2)
|
||||
return -EINVAL;
|
||||
|
||||
*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
|
||||
*out_type = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
struct combiner_chip_data *combiner_data = d->host_data;
|
||||
|
||||
irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
|
||||
irq_set_chip_data(irq, &combiner_data[hw >> 3]);
|
||||
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct irq_domain_ops combiner_irq_domain_ops = {
|
||||
.xlate = combiner_irq_domain_xlate,
|
||||
.map = combiner_irq_domain_map,
|
||||
};
|
||||
|
||||
static void __init combiner_init(void __iomem *combiner_base,
|
||||
struct device_node *np,
|
||||
unsigned int max_nr)
|
||||
{
|
||||
int i, irq;
|
||||
unsigned int nr_irq;
|
||||
struct combiner_chip_data *combiner_data;
|
||||
|
||||
nr_irq = max_nr * IRQ_IN_COMBINER;
|
||||
|
||||
combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
|
||||
if (!combiner_data) {
|
||||
pr_warning("%s: could not allocate combiner data\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
|
||||
&combiner_irq_domain_ops, combiner_data);
|
||||
if (WARN_ON(!combiner_irq_domain)) {
|
||||
pr_warning("%s: irq domain init failed\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < max_nr; i++) {
|
||||
irq = irq_of_parse_and_map(np, i);
|
||||
|
||||
combiner_init_one(&combiner_data[i], i,
|
||||
combiner_base + (i >> 2) * 0x10, irq);
|
||||
combiner_cascade_irq(&combiner_data[i], irq);
|
||||
}
|
||||
}
|
||||
|
||||
static int __init combiner_of_init(struct device_node *np,
|
||||
struct device_node *parent)
|
||||
{
|
||||
void __iomem *combiner_base;
|
||||
unsigned int max_nr = 20;
|
||||
|
||||
combiner_base = of_iomap(np, 0);
|
||||
if (!combiner_base) {
|
||||
pr_err("%s: failed to map combiner registers\n", __func__);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
|
||||
pr_info("%s: number of combiners not specified, "
|
||||
"setting default as %d.\n",
|
||||
__func__, max_nr);
|
||||
}
|
||||
|
||||
combiner_init(combiner_base, np, max_nr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
|
||||
combiner_of_init);
|
566
drivers/irqchip/irq-armada-370-xp.c
Normal file
566
drivers/irqchip/irq-armada-370-xp.c
Normal file
|
@ -0,0 +1,566 @@
|
|||
/*
|
||||
* Marvell Armada 370 and Armada XP SoC IRQ handling
|
||||
*
|
||||
* Copyright (C) 2012 Marvell
|
||||
*
|
||||
* Lior Amsalem <alior@marvell.com>
|
||||
* Gregory CLEMENT <gregory.clement@free-electrons.com>
|
||||
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
|
||||
* Ben Dooks <ben.dooks@codethink.co.uk>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_pci.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/msi.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/* Interrupt Controller Registers Map */
|
||||
#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
|
||||
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
|
||||
|
||||
#define ARMADA_370_XP_INT_CONTROL (0x00)
|
||||
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
|
||||
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
|
||||
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
|
||||
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
|
||||
#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
|
||||
|
||||
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
|
||||
#define ARMADA_375_PPI_CAUSE (0x10)
|
||||
|
||||
#define ARMADA_370_XP_SW_TRIG_INT_OFFS (0x4)
|
||||
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS (0xc)
|
||||
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS (0x8)
|
||||
|
||||
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
|
||||
|
||||
#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
|
||||
|
||||
#define IPI_DOORBELL_START (0)
|
||||
#define IPI_DOORBELL_END (8)
|
||||
#define IPI_DOORBELL_MASK 0xFF
|
||||
#define PCI_MSI_DOORBELL_START (16)
|
||||
#define PCI_MSI_DOORBELL_NR (16)
|
||||
#define PCI_MSI_DOORBELL_END (32)
|
||||
#define PCI_MSI_DOORBELL_MASK 0xFFFF0000
|
||||
|
||||
static void __iomem *per_cpu_int_base;
|
||||
static void __iomem *main_int_base;
|
||||
static struct irq_domain *armada_370_xp_mpic_domain;
|
||||
static int parent_irq;
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
static struct irq_domain *armada_370_xp_msi_domain;
|
||||
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
|
||||
static DEFINE_MUTEX(msi_used_lock);
|
||||
static phys_addr_t msi_doorbell_addr;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* In SMP mode:
|
||||
* For shared global interrupts, mask/unmask global enable bit
|
||||
* For CPU interrupts, mask/unmask the calling CPU's bit
|
||||
*/
|
||||
static void armada_370_xp_irq_mask(struct irq_data *d)
|
||||
{
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
|
||||
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
writel(hwirq, main_int_base +
|
||||
ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
|
||||
else
|
||||
writel(hwirq, per_cpu_int_base +
|
||||
ARMADA_370_XP_INT_SET_MASK_OFFS);
|
||||
}
|
||||
|
||||
static void armada_370_xp_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
|
||||
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
writel(hwirq, main_int_base +
|
||||
ARMADA_370_XP_INT_SET_ENABLE_OFFS);
|
||||
else
|
||||
writel(hwirq, per_cpu_int_base +
|
||||
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
|
||||
static int armada_370_xp_alloc_msi(void)
|
||||
{
|
||||
int hwirq;
|
||||
|
||||
mutex_lock(&msi_used_lock);
|
||||
hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
|
||||
if (hwirq >= PCI_MSI_DOORBELL_NR)
|
||||
hwirq = -ENOSPC;
|
||||
else
|
||||
set_bit(hwirq, msi_used);
|
||||
mutex_unlock(&msi_used_lock);
|
||||
|
||||
return hwirq;
|
||||
}
|
||||
|
||||
static void armada_370_xp_free_msi(int hwirq)
|
||||
{
|
||||
mutex_lock(&msi_used_lock);
|
||||
if (!test_bit(hwirq, msi_used))
|
||||
pr_err("trying to free unused MSI#%d\n", hwirq);
|
||||
else
|
||||
clear_bit(hwirq, msi_used);
|
||||
mutex_unlock(&msi_used_lock);
|
||||
}
|
||||
|
||||
static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
|
||||
struct pci_dev *pdev,
|
||||
struct msi_desc *desc)
|
||||
{
|
||||
struct msi_msg msg;
|
||||
int virq, hwirq;
|
||||
|
||||
/* We support MSI, but not MSI-X */
|
||||
if (desc->msi_attrib.is_msix)
|
||||
return -EINVAL;
|
||||
|
||||
hwirq = armada_370_xp_alloc_msi();
|
||||
if (hwirq < 0)
|
||||
return hwirq;
|
||||
|
||||
virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
|
||||
if (!virq) {
|
||||
armada_370_xp_free_msi(hwirq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
irq_set_msi_desc(virq, desc);
|
||||
|
||||
msg.address_lo = msi_doorbell_addr;
|
||||
msg.address_hi = 0;
|
||||
msg.data = 0xf00 | (hwirq + 16);
|
||||
|
||||
write_msi_msg(virq, &msg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
|
||||
unsigned int irq)
|
||||
{
|
||||
struct irq_data *d = irq_get_irq_data(irq);
|
||||
unsigned long hwirq = d->hwirq;
|
||||
|
||||
irq_dispose_mapping(irq);
|
||||
armada_370_xp_free_msi(hwirq);
|
||||
}
|
||||
|
||||
static struct irq_chip armada_370_xp_msi_irq_chip = {
|
||||
.name = "armada_370_xp_msi_irq",
|
||||
.irq_enable = unmask_msi_irq,
|
||||
.irq_disable = mask_msi_irq,
|
||||
.irq_mask = mask_msi_irq,
|
||||
.irq_unmask = unmask_msi_irq,
|
||||
};
|
||||
|
||||
static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
|
||||
handle_simple_irq);
|
||||
set_irq_flags(virq, IRQF_VALID);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
|
||||
.map = armada_370_xp_msi_map,
|
||||
};
|
||||
|
||||
static int armada_370_xp_msi_init(struct device_node *node,
|
||||
phys_addr_t main_int_phys_base)
|
||||
{
|
||||
struct msi_chip *msi_chip;
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
msi_doorbell_addr = main_int_phys_base +
|
||||
ARMADA_370_XP_SW_TRIG_INT_OFFS;
|
||||
|
||||
msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
|
||||
if (!msi_chip)
|
||||
return -ENOMEM;
|
||||
|
||||
msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
|
||||
msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
|
||||
msi_chip->of_node = node;
|
||||
|
||||
armada_370_xp_msi_domain =
|
||||
irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
|
||||
&armada_370_xp_msi_irq_ops,
|
||||
NULL);
|
||||
if (!armada_370_xp_msi_domain) {
|
||||
kfree(msi_chip);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = of_pci_msi_chip_add(msi_chip);
|
||||
if (ret < 0) {
|
||||
irq_domain_remove(armada_370_xp_msi_domain);
|
||||
kfree(msi_chip);
|
||||
return ret;
|
||||
}
|
||||
|
||||
reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
|
||||
| PCI_MSI_DOORBELL_MASK;
|
||||
|
||||
writel(reg, per_cpu_int_base +
|
||||
ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
|
||||
|
||||
/* Unmask IPI interrupt */
|
||||
writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static inline int armada_370_xp_msi_init(struct device_node *node,
|
||||
phys_addr_t main_int_phys_base)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
|
||||
|
||||
static int armada_xp_set_affinity(struct irq_data *d,
|
||||
const struct cpumask *mask_val, bool force)
|
||||
{
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
unsigned long reg, mask;
|
||||
int cpu;
|
||||
|
||||
/* Select a single core from the affinity mask which is online */
|
||||
cpu = cpumask_any_and(mask_val, cpu_online_mask);
|
||||
mask = 1UL << cpu_logical_map(cpu);
|
||||
|
||||
raw_spin_lock(&irq_controller_lock);
|
||||
reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
|
||||
reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
|
||||
writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
|
||||
raw_spin_unlock(&irq_controller_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct irq_chip armada_370_xp_irq_chip = {
|
||||
.name = "armada_370_xp_irq",
|
||||
.irq_mask = armada_370_xp_irq_mask,
|
||||
.irq_mask_ack = armada_370_xp_irq_mask,
|
||||
.irq_unmask = armada_370_xp_irq_unmask,
|
||||
#ifdef CONFIG_SMP
|
||||
.irq_set_affinity = armada_xp_set_affinity,
|
||||
#endif
|
||||
};
|
||||
|
||||
static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
|
||||
unsigned int virq, irq_hw_number_t hw)
|
||||
{
|
||||
armada_370_xp_irq_mask(irq_get_irq_data(virq));
|
||||
if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
writel(hw, per_cpu_int_base +
|
||||
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
||||
else
|
||||
writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
|
||||
irq_set_status_flags(virq, IRQ_LEVEL);
|
||||
|
||||
if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
|
||||
irq_set_percpu_devid(virq);
|
||||
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
|
||||
handle_percpu_devid_irq);
|
||||
|
||||
} else {
|
||||
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
|
||||
handle_level_irq);
|
||||
}
|
||||
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void armada_mpic_send_doorbell(const struct cpumask *mask,
|
||||
unsigned int irq)
|
||||
{
|
||||
int cpu;
|
||||
unsigned long map = 0;
|
||||
|
||||
/* Convert our logical CPU mask into a physical one. */
|
||||
for_each_cpu(cpu, mask)
|
||||
map |= 1 << cpu_logical_map(cpu);
|
||||
|
||||
/*
|
||||
* Ensure that stores to Normal memory are visible to the
|
||||
* other CPUs before issuing the IPI.
|
||||
*/
|
||||
dsb();
|
||||
|
||||
/* submit softirq */
|
||||
writel((map << 8) | irq, main_int_base +
|
||||
ARMADA_370_XP_SW_TRIG_INT_OFFS);
|
||||
}
|
||||
|
||||
static void armada_xp_mpic_smp_cpu_init(void)
|
||||
{
|
||||
u32 control;
|
||||
int nr_irqs, i;
|
||||
|
||||
control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
|
||||
nr_irqs = (control >> 2) & 0x3ff;
|
||||
|
||||
for (i = 0; i < nr_irqs; i++)
|
||||
writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
|
||||
|
||||
/* Clear pending IPIs */
|
||||
writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
|
||||
|
||||
/* Enable first 8 IPIs */
|
||||
writel(IPI_DOORBELL_MASK, per_cpu_int_base +
|
||||
ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
|
||||
|
||||
/* Unmask IPI interrupt */
|
||||
writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
||||
}
|
||||
|
||||
static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
|
||||
armada_xp_mpic_smp_cpu_init();
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
|
||||
.notifier_call = armada_xp_mpic_secondary_init,
|
||||
.priority = 100,
|
||||
};
|
||||
|
||||
static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
|
||||
enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block mpic_cascaded_cpu_notifier = {
|
||||
.notifier_call = mpic_cascaded_secondary_init,
|
||||
.priority = 100,
|
||||
};
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
|
||||
.map = armada_370_xp_mpic_irq_map,
|
||||
.xlate = irq_domain_xlate_onecell,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
|
||||
{
|
||||
u32 msimask, msinr;
|
||||
|
||||
msimask = readl_relaxed(per_cpu_int_base +
|
||||
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
|
||||
& PCI_MSI_DOORBELL_MASK;
|
||||
|
||||
writel(~msimask, per_cpu_int_base +
|
||||
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
|
||||
|
||||
for (msinr = PCI_MSI_DOORBELL_START;
|
||||
msinr < PCI_MSI_DOORBELL_END; msinr++) {
|
||||
int irq;
|
||||
|
||||
if (!(msimask & BIT(msinr)))
|
||||
continue;
|
||||
|
||||
if (is_chained) {
|
||||
irq = irq_find_mapping(armada_370_xp_msi_domain,
|
||||
msinr - 16);
|
||||
generic_handle_irq(irq);
|
||||
} else {
|
||||
irq = msinr - 16;
|
||||
handle_domain_irq(armada_370_xp_msi_domain,
|
||||
irq, regs);
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
|
||||
#endif
|
||||
|
||||
static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
|
||||
struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_get_chip(irq);
|
||||
unsigned long irqmap, irqn, irqsrc, cpuid;
|
||||
unsigned int cascade_irq;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
|
||||
cpuid = cpu_logical_map(smp_processor_id());
|
||||
|
||||
for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
|
||||
irqsrc = readl_relaxed(main_int_base +
|
||||
ARMADA_370_XP_INT_SOURCE_CTL(irqn));
|
||||
|
||||
/* Check if the interrupt is not masked on current CPU.
|
||||
* Test IRQ (0-1) and FIQ (8-9) mask bits.
|
||||
*/
|
||||
if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
|
||||
continue;
|
||||
|
||||
if (irqn == 1) {
|
||||
armada_370_xp_handle_msi_irq(NULL, true);
|
||||
continue;
|
||||
}
|
||||
|
||||
cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
|
||||
generic_handle_irq(cascade_irq);
|
||||
}
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void __exception_irq_entry
|
||||
armada_370_xp_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
u32 irqstat, irqnr;
|
||||
|
||||
do {
|
||||
irqstat = readl_relaxed(per_cpu_int_base +
|
||||
ARMADA_370_XP_CPU_INTACK_OFFS);
|
||||
irqnr = irqstat & 0x3FF;
|
||||
|
||||
if (irqnr > 1022)
|
||||
break;
|
||||
|
||||
if (irqnr > 1) {
|
||||
handle_domain_irq(armada_370_xp_mpic_domain,
|
||||
irqnr, regs);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* MSI handling */
|
||||
if (irqnr == 1)
|
||||
armada_370_xp_handle_msi_irq(regs, false);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* IPI Handling */
|
||||
if (irqnr == 0) {
|
||||
u32 ipimask, ipinr;
|
||||
|
||||
ipimask = readl_relaxed(per_cpu_int_base +
|
||||
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
|
||||
& IPI_DOORBELL_MASK;
|
||||
|
||||
writel(~ipimask, per_cpu_int_base +
|
||||
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
|
||||
|
||||
/* Handle all pending doorbells */
|
||||
for (ipinr = IPI_DOORBELL_START;
|
||||
ipinr < IPI_DOORBELL_END; ipinr++) {
|
||||
if (ipimask & (0x1 << ipinr))
|
||||
handle_IPI(ipinr, regs);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
|
||||
} while (1);
|
||||
}
|
||||
|
||||
static int __init armada_370_xp_mpic_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
struct resource main_int_res, per_cpu_int_res;
|
||||
int nr_irqs, i;
|
||||
u32 control;
|
||||
|
||||
BUG_ON(of_address_to_resource(node, 0, &main_int_res));
|
||||
BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
|
||||
|
||||
BUG_ON(!request_mem_region(main_int_res.start,
|
||||
resource_size(&main_int_res),
|
||||
node->full_name));
|
||||
BUG_ON(!request_mem_region(per_cpu_int_res.start,
|
||||
resource_size(&per_cpu_int_res),
|
||||
node->full_name));
|
||||
|
||||
main_int_base = ioremap(main_int_res.start,
|
||||
resource_size(&main_int_res));
|
||||
BUG_ON(!main_int_base);
|
||||
|
||||
per_cpu_int_base = ioremap(per_cpu_int_res.start,
|
||||
resource_size(&per_cpu_int_res));
|
||||
BUG_ON(!per_cpu_int_base);
|
||||
|
||||
control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
|
||||
nr_irqs = (control >> 2) & 0x3ff;
|
||||
|
||||
for (i = 0; i < nr_irqs; i++)
|
||||
writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
|
||||
|
||||
armada_370_xp_mpic_domain =
|
||||
irq_domain_add_linear(node, nr_irqs,
|
||||
&armada_370_xp_mpic_irq_ops, NULL);
|
||||
|
||||
BUG_ON(!armada_370_xp_mpic_domain);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
armada_xp_mpic_smp_cpu_init();
|
||||
#endif
|
||||
|
||||
armada_370_xp_msi_init(node, main_int_res.start);
|
||||
|
||||
parent_irq = irq_of_parse_and_map(node, 0);
|
||||
if (parent_irq <= 0) {
|
||||
irq_set_default_host(armada_370_xp_mpic_domain);
|
||||
set_handle_irq(armada_370_xp_handle_irq);
|
||||
#ifdef CONFIG_SMP
|
||||
set_smp_cross_call(armada_mpic_send_doorbell);
|
||||
register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
|
||||
#endif
|
||||
} else {
|
||||
#ifdef CONFIG_SMP
|
||||
register_cpu_notifier(&mpic_cascaded_cpu_notifier);
|
||||
#endif
|
||||
irq_set_chained_handler(parent_irq,
|
||||
armada_370_xp_mpic_handle_cascade_irq);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);
|
254
drivers/irqchip/irq-atmel-aic-common.c
Normal file
254
drivers/irqchip/irq-atmel-aic-common.c
Normal file
|
@ -0,0 +1,254 @@
|
|||
/*
|
||||
* Atmel AT91 common AIC (Advanced Interrupt Controller) code shared by
|
||||
* irq-atmel-aic and irq-atmel-aic5 drivers
|
||||
*
|
||||
* Copyright (C) 2004 SAN People
|
||||
* Copyright (C) 2004 ATMEL
|
||||
* Copyright (C) Rick Bronson
|
||||
* Copyright (C) 2014 Free Electrons
|
||||
*
|
||||
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "irq-atmel-aic-common.h"
|
||||
|
||||
#define AT91_AIC_PRIOR GENMASK(2, 0)
|
||||
#define AT91_AIC_IRQ_MIN_PRIORITY 0
|
||||
#define AT91_AIC_IRQ_MAX_PRIORITY 7
|
||||
|
||||
#define AT91_AIC_SRCTYPE GENMASK(6, 5)
|
||||
#define AT91_AIC_SRCTYPE_LOW (0 << 5)
|
||||
#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
|
||||
#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
|
||||
#define AT91_AIC_SRCTYPE_RISING (3 << 5)
|
||||
|
||||
struct aic_chip_data {
|
||||
u32 ext_irqs;
|
||||
};
|
||||
|
||||
static void aic_common_shutdown(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_type *ct = irq_data_get_chip_type(d);
|
||||
|
||||
ct->chip.irq_mask(d);
|
||||
}
|
||||
|
||||
int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
struct aic_chip_data *aic = gc->private;
|
||||
unsigned aic_type;
|
||||
|
||||
switch (type) {
|
||||
case IRQ_TYPE_LEVEL_HIGH:
|
||||
aic_type = AT91_AIC_SRCTYPE_HIGH;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_RISING:
|
||||
aic_type = AT91_AIC_SRCTYPE_RISING;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
if (!(d->mask & aic->ext_irqs))
|
||||
return -EINVAL;
|
||||
|
||||
aic_type = AT91_AIC_SRCTYPE_LOW;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
if (!(d->mask & aic->ext_irqs))
|
||||
return -EINVAL;
|
||||
|
||||
aic_type = AT91_AIC_SRCTYPE_FALLING;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*val &= ~AT91_AIC_SRCTYPE;
|
||||
*val |= aic_type;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int aic_common_set_priority(int priority, unsigned *val)
|
||||
{
|
||||
if (priority < AT91_AIC_IRQ_MIN_PRIORITY ||
|
||||
priority > AT91_AIC_IRQ_MAX_PRIORITY)
|
||||
return -EINVAL;
|
||||
|
||||
*val &= AT91_AIC_PRIOR;
|
||||
*val |= priority;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int aic_common_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *ctrlr,
|
||||
const u32 *intspec,
|
||||
unsigned int intsize,
|
||||
irq_hw_number_t *out_hwirq,
|
||||
unsigned int *out_type)
|
||||
{
|
||||
if (WARN_ON(intsize < 3))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY) ||
|
||||
(intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
|
||||
return -EINVAL;
|
||||
|
||||
*out_hwirq = intspec[0];
|
||||
*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
|
||||
{
|
||||
struct device_node *node = domain->of_node;
|
||||
struct irq_chip_generic *gc;
|
||||
struct aic_chip_data *aic;
|
||||
struct property *prop;
|
||||
const __be32 *p;
|
||||
u32 hwirq;
|
||||
|
||||
gc = irq_get_domain_generic_chip(domain, 0);
|
||||
|
||||
aic = gc->private;
|
||||
aic->ext_irqs |= 1;
|
||||
|
||||
of_property_for_each_u32(node, "atmel,external-irqs", prop, p, hwirq) {
|
||||
gc = irq_get_domain_generic_chip(domain, hwirq);
|
||||
if (!gc) {
|
||||
pr_warn("AIC: external irq %d >= %d skip it\n",
|
||||
hwirq, domain->revmap_size);
|
||||
continue;
|
||||
}
|
||||
|
||||
aic = gc->private;
|
||||
aic->ext_irqs |= (1 << (hwirq % 32));
|
||||
}
|
||||
}
|
||||
|
||||
#define AT91_RTC_IDR 0x24
|
||||
#define AT91_RTC_IMR 0x28
|
||||
#define AT91_RTC_IRQ_MASK 0x1f
|
||||
|
||||
void __init aic_common_rtc_irq_fixup(struct device_node *root)
|
||||
{
|
||||
struct device_node *np;
|
||||
void __iomem *regs;
|
||||
|
||||
np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
|
||||
if (!np)
|
||||
np = of_find_compatible_node(root, NULL,
|
||||
"atmel,at91sam9x5-rtc");
|
||||
|
||||
if (!np)
|
||||
return;
|
||||
|
||||
regs = of_iomap(np, 0);
|
||||
of_node_put(np);
|
||||
|
||||
if (!regs)
|
||||
return;
|
||||
|
||||
writel(AT91_RTC_IRQ_MASK, regs + AT91_RTC_IDR);
|
||||
|
||||
iounmap(regs);
|
||||
}
|
||||
|
||||
void __init aic_common_irq_fixup(const struct of_device_id *matches)
|
||||
{
|
||||
struct device_node *root = of_find_node_by_path("/");
|
||||
const struct of_device_id *match;
|
||||
|
||||
if (!root)
|
||||
return;
|
||||
|
||||
match = of_match_node(matches, root);
|
||||
of_node_put(root);
|
||||
|
||||
if (match) {
|
||||
void (*fixup)(struct device_node *) = match->data;
|
||||
fixup(root);
|
||||
}
|
||||
|
||||
of_node_put(root);
|
||||
}
|
||||
|
||||
struct irq_domain *__init aic_common_of_init(struct device_node *node,
|
||||
const struct irq_domain_ops *ops,
|
||||
const char *name, int nirqs)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_domain *domain;
|
||||
struct aic_chip_data *aic;
|
||||
void __iomem *reg_base;
|
||||
int nchips;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
nchips = DIV_ROUND_UP(nirqs, 32);
|
||||
|
||||
reg_base = of_iomap(node, 0);
|
||||
if (!reg_base)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
aic = kcalloc(nchips, sizeof(*aic), GFP_KERNEL);
|
||||
if (!aic) {
|
||||
ret = -ENOMEM;
|
||||
goto err_iounmap;
|
||||
}
|
||||
|
||||
domain = irq_domain_add_linear(node, nchips * 32, ops, aic);
|
||||
if (!domain) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_aic;
|
||||
}
|
||||
|
||||
ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
|
||||
handle_fasteoi_irq,
|
||||
IRQ_NOREQUEST | IRQ_NOPROBE |
|
||||
IRQ_NOAUTOEN, 0, 0);
|
||||
if (ret)
|
||||
goto err_domain_remove;
|
||||
|
||||
for (i = 0; i < nchips; i++) {
|
||||
gc = irq_get_domain_generic_chip(domain, i * 32);
|
||||
|
||||
gc->reg_base = reg_base;
|
||||
|
||||
gc->unused = 0;
|
||||
gc->wake_enabled = ~0;
|
||||
gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
|
||||
gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
|
||||
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
|
||||
gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
|
||||
gc->private = &aic[i];
|
||||
}
|
||||
|
||||
aic_common_ext_irq_of_init(domain);
|
||||
|
||||
return domain;
|
||||
|
||||
err_domain_remove:
|
||||
irq_domain_remove(domain);
|
||||
|
||||
err_free_aic:
|
||||
kfree(aic);
|
||||
|
||||
err_iounmap:
|
||||
iounmap(reg_base);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
}
|
39
drivers/irqchip/irq-atmel-aic-common.h
Normal file
39
drivers/irqchip/irq-atmel-aic-common.h
Normal file
|
@ -0,0 +1,39 @@
|
|||
/*
|
||||
* Atmel AT91 common AIC (Advanced Interrupt Controller) header file
|
||||
*
|
||||
* Copyright (C) 2004 SAN People
|
||||
* Copyright (C) 2004 ATMEL
|
||||
* Copyright (C) Rick Bronson
|
||||
* Copyright (C) 2014 Free Electrons
|
||||
*
|
||||
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#ifndef __IRQ_ATMEL_AIC_COMMON_H
|
||||
#define __IRQ_ATMEL_AIC_COMMON_H
|
||||
|
||||
|
||||
int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
|
||||
|
||||
int aic_common_set_priority(int priority, unsigned *val);
|
||||
|
||||
int aic_common_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *ctrlr,
|
||||
const u32 *intspec,
|
||||
unsigned int intsize,
|
||||
irq_hw_number_t *out_hwirq,
|
||||
unsigned int *out_type);
|
||||
|
||||
struct irq_domain *__init aic_common_of_init(struct device_node *node,
|
||||
const struct irq_domain_ops *ops,
|
||||
const char *name, int nirqs);
|
||||
|
||||
void __init aic_common_rtc_irq_fixup(struct device_node *root);
|
||||
|
||||
void __init aic_common_irq_fixup(const struct of_device_id *matches);
|
||||
|
||||
#endif /* __IRQ_ATMEL_AIC_COMMON_H */
|
260
drivers/irqchip/irq-atmel-aic.c
Normal file
260
drivers/irqchip/irq-atmel-aic.c
Normal file
|
@ -0,0 +1,260 @@
|
|||
/*
|
||||
* Atmel AT91 AIC (Advanced Interrupt Controller) driver
|
||||
*
|
||||
* Copyright (C) 2004 SAN People
|
||||
* Copyright (C) 2004 ATMEL
|
||||
* Copyright (C) Rick Bronson
|
||||
* Copyright (C) 2014 Free Electrons
|
||||
*
|
||||
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irq-atmel-aic-common.h"
|
||||
#include "irqchip.h"
|
||||
|
||||
/* Number of irq lines managed by AIC */
|
||||
#define NR_AIC_IRQS 32
|
||||
|
||||
#define AT91_AIC_SMR(n) ((n) * 4)
|
||||
|
||||
#define AT91_AIC_SVR(n) (0x80 + ((n) * 4))
|
||||
#define AT91_AIC_IVR 0x100
|
||||
#define AT91_AIC_FVR 0x104
|
||||
#define AT91_AIC_ISR 0x108
|
||||
|
||||
#define AT91_AIC_IPR 0x10c
|
||||
#define AT91_AIC_IMR 0x110
|
||||
#define AT91_AIC_CISR 0x114
|
||||
|
||||
#define AT91_AIC_IECR 0x120
|
||||
#define AT91_AIC_IDCR 0x124
|
||||
#define AT91_AIC_ICCR 0x128
|
||||
#define AT91_AIC_ISCR 0x12c
|
||||
#define AT91_AIC_EOICR 0x130
|
||||
#define AT91_AIC_SPU 0x134
|
||||
#define AT91_AIC_DCR 0x138
|
||||
|
||||
static struct irq_domain *aic_domain;
|
||||
|
||||
static asmlinkage void __exception_irq_entry
|
||||
aic_handle(struct pt_regs *regs)
|
||||
{
|
||||
struct irq_domain_chip_generic *dgc = aic_domain->gc;
|
||||
struct irq_chip_generic *gc = dgc->gc[0];
|
||||
u32 irqnr;
|
||||
u32 irqstat;
|
||||
|
||||
irqnr = irq_reg_readl(gc->reg_base + AT91_AIC_IVR);
|
||||
irqstat = irq_reg_readl(gc->reg_base + AT91_AIC_ISR);
|
||||
|
||||
if (!irqstat)
|
||||
irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR);
|
||||
else
|
||||
handle_domain_irq(aic_domain, irqnr, regs);
|
||||
}
|
||||
|
||||
static int aic_retrigger(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
|
||||
/* Enable interrupt on AIC5 */
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(d->mask, gc->reg_base + AT91_AIC_ISCR);
|
||||
irq_gc_unlock(gc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aic_set_type(struct irq_data *d, unsigned type)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
unsigned int smr;
|
||||
int ret;
|
||||
|
||||
smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(d->hwirq));
|
||||
ret = aic_common_set_type(d, type, &smr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(d->hwirq));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static void aic_suspend(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IDCR);
|
||||
irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IECR);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
static void aic_resume(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IDCR);
|
||||
irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IECR);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
static void aic_pm_shutdown(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR);
|
||||
irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR);
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
#else
|
||||
#define aic_suspend NULL
|
||||
#define aic_resume NULL
|
||||
#define aic_pm_shutdown NULL
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static void __init aic_hw_init(struct irq_domain *domain)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Perform 8 End Of Interrupt Command to make sure AIC
|
||||
* will not Lock out nIRQ
|
||||
*/
|
||||
for (i = 0; i < 8; i++)
|
||||
irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR);
|
||||
|
||||
/*
|
||||
* Spurious Interrupt ID in Spurious Vector Register.
|
||||
* When there is no current interrupt, the IRQ Vector Register
|
||||
* reads the value stored in AIC_SPU
|
||||
*/
|
||||
irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_SPU);
|
||||
|
||||
/* No debugging in AIC: Debug (Protect) Control Register */
|
||||
irq_reg_writel(0, gc->reg_base + AT91_AIC_DCR);
|
||||
|
||||
/* Disable and clear all interrupts initially */
|
||||
irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR);
|
||||
irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR);
|
||||
|
||||
for (i = 0; i < 32; i++)
|
||||
irq_reg_writel(i, gc->reg_base + AT91_AIC_SVR(i));
|
||||
}
|
||||
|
||||
static int aic_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *ctrlr,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
irq_hw_number_t *out_hwirq,
|
||||
unsigned int *out_type)
|
||||
{
|
||||
struct irq_domain_chip_generic *dgc = d->gc;
|
||||
struct irq_chip_generic *gc;
|
||||
unsigned smr;
|
||||
int idx;
|
||||
int ret;
|
||||
|
||||
if (!dgc)
|
||||
return -EINVAL;
|
||||
|
||||
ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
|
||||
out_hwirq, out_type);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
idx = intspec[0] / dgc->irqs_per_chip;
|
||||
if (idx >= dgc->num_chips)
|
||||
return -EINVAL;
|
||||
|
||||
gc = dgc->gc[idx];
|
||||
|
||||
irq_gc_lock(gc);
|
||||
smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(*out_hwirq));
|
||||
ret = aic_common_set_priority(intspec[2], &smr);
|
||||
if (!ret)
|
||||
irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(*out_hwirq));
|
||||
irq_gc_unlock(gc);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops aic_irq_ops = {
|
||||
.map = irq_map_generic_chip,
|
||||
.xlate = aic_irq_domain_xlate,
|
||||
};
|
||||
|
||||
static void __init at91sam9_aic_irq_fixup(struct device_node *root)
|
||||
{
|
||||
aic_common_rtc_irq_fixup(root);
|
||||
}
|
||||
|
||||
static const struct of_device_id __initdata aic_irq_fixups[] = {
|
||||
{ .compatible = "atmel,at91sam9g45", .data = at91sam9_aic_irq_fixup },
|
||||
{ .compatible = "atmel,at91sam9n12", .data = at91sam9_aic_irq_fixup },
|
||||
{ .compatible = "atmel,at91sam9rl", .data = at91sam9_aic_irq_fixup },
|
||||
{ .compatible = "atmel,at91sam9x5", .data = at91sam9_aic_irq_fixup },
|
||||
{ /* sentinel */ },
|
||||
};
|
||||
|
||||
static int __init aic_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_domain *domain;
|
||||
|
||||
if (aic_domain)
|
||||
return -EEXIST;
|
||||
|
||||
domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
|
||||
NR_AIC_IRQS);
|
||||
if (IS_ERR(domain))
|
||||
return PTR_ERR(domain);
|
||||
|
||||
aic_common_irq_fixup(aic_irq_fixups);
|
||||
|
||||
aic_domain = domain;
|
||||
gc = irq_get_domain_generic_chip(domain, 0);
|
||||
|
||||
gc->chip_types[0].regs.eoi = AT91_AIC_EOICR;
|
||||
gc->chip_types[0].regs.enable = AT91_AIC_IECR;
|
||||
gc->chip_types[0].regs.disable = AT91_AIC_IDCR;
|
||||
gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
|
||||
gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
|
||||
gc->chip_types[0].chip.irq_retrigger = aic_retrigger;
|
||||
gc->chip_types[0].chip.irq_set_type = aic_set_type;
|
||||
gc->chip_types[0].chip.irq_suspend = aic_suspend;
|
||||
gc->chip_types[0].chip.irq_resume = aic_resume;
|
||||
gc->chip_types[0].chip.irq_pm_shutdown = aic_pm_shutdown;
|
||||
|
||||
aic_hw_init(domain);
|
||||
set_handle_irq(aic_handle);
|
||||
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(at91rm9200_aic, "atmel,at91rm9200-aic", aic_of_init);
|
361
drivers/irqchip/irq-atmel-aic5.c
Normal file
361
drivers/irqchip/irq-atmel-aic5.c
Normal file
|
@ -0,0 +1,361 @@
|
|||
/*
|
||||
* Atmel AT91 AIC5 (Advanced Interrupt Controller) driver
|
||||
*
|
||||
* Copyright (C) 2004 SAN People
|
||||
* Copyright (C) 2004 ATMEL
|
||||
* Copyright (C) Rick Bronson
|
||||
* Copyright (C) 2014 Free Electrons
|
||||
*
|
||||
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irq-atmel-aic-common.h"
|
||||
#include "irqchip.h"
|
||||
|
||||
/* Number of irq lines managed by AIC */
|
||||
#define NR_AIC5_IRQS 128
|
||||
|
||||
#define AT91_AIC5_SSR 0x0
|
||||
#define AT91_AIC5_INTSEL_MSK (0x7f << 0)
|
||||
|
||||
#define AT91_AIC5_SMR 0x4
|
||||
|
||||
#define AT91_AIC5_SVR 0x8
|
||||
#define AT91_AIC5_IVR 0x10
|
||||
#define AT91_AIC5_FVR 0x14
|
||||
#define AT91_AIC5_ISR 0x18
|
||||
|
||||
#define AT91_AIC5_IPR0 0x20
|
||||
#define AT91_AIC5_IPR1 0x24
|
||||
#define AT91_AIC5_IPR2 0x28
|
||||
#define AT91_AIC5_IPR3 0x2c
|
||||
#define AT91_AIC5_IMR 0x30
|
||||
#define AT91_AIC5_CISR 0x34
|
||||
|
||||
#define AT91_AIC5_IECR 0x40
|
||||
#define AT91_AIC5_IDCR 0x44
|
||||
#define AT91_AIC5_ICCR 0x48
|
||||
#define AT91_AIC5_ISCR 0x4c
|
||||
#define AT91_AIC5_EOICR 0x38
|
||||
#define AT91_AIC5_SPU 0x3c
|
||||
#define AT91_AIC5_DCR 0x6c
|
||||
|
||||
#define AT91_AIC5_FFER 0x50
|
||||
#define AT91_AIC5_FFDR 0x54
|
||||
#define AT91_AIC5_FFSR 0x58
|
||||
|
||||
static struct irq_domain *aic5_domain;
|
||||
|
||||
static asmlinkage void __exception_irq_entry
|
||||
aic5_handle(struct pt_regs *regs)
|
||||
{
|
||||
struct irq_domain_chip_generic *dgc = aic5_domain->gc;
|
||||
struct irq_chip_generic *gc = dgc->gc[0];
|
||||
u32 irqnr;
|
||||
u32 irqstat;
|
||||
|
||||
irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR);
|
||||
irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR);
|
||||
|
||||
if (!irqstat)
|
||||
irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);
|
||||
else
|
||||
handle_domain_irq(aic5_domain, irqnr, regs);
|
||||
}
|
||||
|
||||
static void aic5_mask(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct irq_domain_chip_generic *dgc = domain->gc;
|
||||
struct irq_chip_generic *gc = dgc->gc[0];
|
||||
|
||||
/* Disable interrupt on AIC5 */
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
|
||||
irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
|
||||
gc->mask_cache &= ~d->mask;
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
static void aic5_unmask(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct irq_domain_chip_generic *dgc = domain->gc;
|
||||
struct irq_chip_generic *gc = dgc->gc[0];
|
||||
|
||||
/* Enable interrupt on AIC5 */
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
|
||||
irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR);
|
||||
gc->mask_cache |= d->mask;
|
||||
irq_gc_unlock(gc);
|
||||
}
|
||||
|
||||
static int aic5_retrigger(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct irq_domain_chip_generic *dgc = domain->gc;
|
||||
struct irq_chip_generic *gc = dgc->gc[0];
|
||||
|
||||
/* Enable interrupt on AIC5 */
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
|
||||
irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR);
|
||||
irq_gc_unlock(gc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aic5_set_type(struct irq_data *d, unsigned type)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct irq_domain_chip_generic *dgc = domain->gc;
|
||||
struct irq_chip_generic *gc = dgc->gc[0];
|
||||
unsigned int smr;
|
||||
int ret;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR);
|
||||
smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
|
||||
ret = aic_common_set_type(d, type, &smr);
|
||||
if (!ret)
|
||||
irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR);
|
||||
irq_gc_unlock(gc);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static void aic5_suspend(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct irq_domain_chip_generic *dgc = domain->gc;
|
||||
struct irq_chip_generic *bgc = dgc->gc[0];
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
int i;
|
||||
u32 mask;
|
||||
|
||||
irq_gc_lock(bgc);
|
||||
for (i = 0; i < dgc->irqs_per_chip; i++) {
|
||||
mask = 1 << i;
|
||||
if ((mask & gc->mask_cache) == (mask & gc->wake_active))
|
||||
continue;
|
||||
|
||||
irq_reg_writel(i + gc->irq_base,
|
||||
bgc->reg_base + AT91_AIC5_SSR);
|
||||
if (mask & gc->wake_active)
|
||||
irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
|
||||
else
|
||||
irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
|
||||
}
|
||||
irq_gc_unlock(bgc);
|
||||
}
|
||||
|
||||
static void aic5_resume(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct irq_domain_chip_generic *dgc = domain->gc;
|
||||
struct irq_chip_generic *bgc = dgc->gc[0];
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
int i;
|
||||
u32 mask;
|
||||
|
||||
irq_gc_lock(bgc);
|
||||
for (i = 0; i < dgc->irqs_per_chip; i++) {
|
||||
mask = 1 << i;
|
||||
if ((mask & gc->mask_cache) == (mask & gc->wake_active))
|
||||
continue;
|
||||
|
||||
irq_reg_writel(i + gc->irq_base,
|
||||
bgc->reg_base + AT91_AIC5_SSR);
|
||||
if (mask & gc->mask_cache)
|
||||
irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR);
|
||||
else
|
||||
irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
|
||||
}
|
||||
irq_gc_unlock(bgc);
|
||||
}
|
||||
|
||||
static void aic5_pm_shutdown(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct irq_domain_chip_generic *dgc = domain->gc;
|
||||
struct irq_chip_generic *bgc = dgc->gc[0];
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
int i;
|
||||
|
||||
irq_gc_lock(bgc);
|
||||
for (i = 0; i < dgc->irqs_per_chip; i++) {
|
||||
irq_reg_writel(i + gc->irq_base,
|
||||
bgc->reg_base + AT91_AIC5_SSR);
|
||||
irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR);
|
||||
irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR);
|
||||
}
|
||||
irq_gc_unlock(bgc);
|
||||
}
|
||||
#else
|
||||
#define aic5_suspend NULL
|
||||
#define aic5_resume NULL
|
||||
#define aic5_pm_shutdown NULL
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static void __init aic5_hw_init(struct irq_domain *domain)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Perform 8 End Of Interrupt Command to make sure AIC
|
||||
* will not Lock out nIRQ
|
||||
*/
|
||||
for (i = 0; i < 8; i++)
|
||||
irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR);
|
||||
|
||||
/*
|
||||
* Spurious Interrupt ID in Spurious Vector Register.
|
||||
* When there is no current interrupt, the IRQ Vector Register
|
||||
* reads the value stored in AIC_SPU
|
||||
*/
|
||||
irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU);
|
||||
|
||||
/* No debugging in AIC: Debug (Protect) Control Register */
|
||||
irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR);
|
||||
|
||||
/* Disable and clear all interrupts initially */
|
||||
for (i = 0; i < domain->revmap_size; i++) {
|
||||
irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR);
|
||||
irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR);
|
||||
irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR);
|
||||
irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR);
|
||||
}
|
||||
}
|
||||
|
||||
static int aic5_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *ctrlr,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
irq_hw_number_t *out_hwirq,
|
||||
unsigned int *out_type)
|
||||
{
|
||||
struct irq_domain_chip_generic *dgc = d->gc;
|
||||
struct irq_chip_generic *gc;
|
||||
unsigned smr;
|
||||
int ret;
|
||||
|
||||
if (!dgc)
|
||||
return -EINVAL;
|
||||
|
||||
ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
|
||||
out_hwirq, out_type);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
gc = dgc->gc[0];
|
||||
|
||||
irq_gc_lock(gc);
|
||||
irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR);
|
||||
smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR);
|
||||
ret = aic_common_set_priority(intspec[2], &smr);
|
||||
if (!ret)
|
||||
irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR);
|
||||
irq_gc_unlock(gc);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops aic5_irq_ops = {
|
||||
.map = irq_map_generic_chip,
|
||||
.xlate = aic5_irq_domain_xlate,
|
||||
};
|
||||
|
||||
static void __init sama5d3_aic_irq_fixup(struct device_node *root)
|
||||
{
|
||||
aic_common_rtc_irq_fixup(root);
|
||||
}
|
||||
|
||||
static const struct of_device_id __initdata aic5_irq_fixups[] = {
|
||||
{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
|
||||
{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
|
||||
{ /* sentinel */ },
|
||||
};
|
||||
|
||||
static int __init aic5_of_init(struct device_node *node,
|
||||
struct device_node *parent,
|
||||
int nirqs)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_domain *domain;
|
||||
int nchips;
|
||||
int i;
|
||||
|
||||
if (nirqs > NR_AIC5_IRQS)
|
||||
return -EINVAL;
|
||||
|
||||
if (aic5_domain)
|
||||
return -EEXIST;
|
||||
|
||||
domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
|
||||
nirqs);
|
||||
if (IS_ERR(domain))
|
||||
return PTR_ERR(domain);
|
||||
|
||||
aic_common_irq_fixup(aic5_irq_fixups);
|
||||
|
||||
aic5_domain = domain;
|
||||
nchips = aic5_domain->revmap_size / 32;
|
||||
for (i = 0; i < nchips; i++) {
|
||||
gc = irq_get_domain_generic_chip(domain, i * 32);
|
||||
|
||||
gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
|
||||
gc->chip_types[0].chip.irq_mask = aic5_mask;
|
||||
gc->chip_types[0].chip.irq_unmask = aic5_unmask;
|
||||
gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
|
||||
gc->chip_types[0].chip.irq_set_type = aic5_set_type;
|
||||
gc->chip_types[0].chip.irq_suspend = aic5_suspend;
|
||||
gc->chip_types[0].chip.irq_resume = aic5_resume;
|
||||
gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
|
||||
}
|
||||
|
||||
aic5_hw_init(domain);
|
||||
set_handle_irq(aic5_handle);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define NR_SAMA5D3_IRQS 48
|
||||
|
||||
static int __init sama5d3_aic5_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
|
||||
}
|
||||
IRQCHIP_DECLARE(sama5d3_aic5, "atmel,sama5d3-aic", sama5d3_aic5_of_init);
|
||||
|
||||
#define NR_SAMA5D4_IRQS 68
|
||||
|
||||
static int __init sama5d4_aic5_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
|
||||
}
|
||||
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
|
222
drivers/irqchip/irq-bcm2835.c
Normal file
222
drivers/irqchip/irq-bcm2835.c
Normal file
|
@ -0,0 +1,222 @@
|
|||
/*
|
||||
* Copyright 2010 Broadcom
|
||||
* Copyright 2012 Simon Arlott, Chris Boot, Stephen Warren
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* Quirk 1: Shortcut interrupts don't set the bank 1/2 register pending bits
|
||||
*
|
||||
* If an interrupt fires on bank 1 that isn't in the shortcuts list, bit 8
|
||||
* on bank 0 is set to signify that an interrupt in bank 1 has fired, and
|
||||
* to look in the bank 1 status register for more information.
|
||||
*
|
||||
* If an interrupt fires on bank 1 that _is_ in the shortcuts list, its
|
||||
* shortcut bit in bank 0 is set as well as its interrupt bit in the bank 1
|
||||
* status register, but bank 0 bit 8 is _not_ set.
|
||||
*
|
||||
* Quirk 2: You can't mask the register 1/2 pending interrupts
|
||||
*
|
||||
* In a proper cascaded interrupt controller, the interrupt lines with
|
||||
* cascaded interrupt controllers on them are just normal interrupt lines.
|
||||
* You can mask the interrupts and get on with things. With this controller
|
||||
* you can't do that.
|
||||
*
|
||||
* Quirk 3: The shortcut interrupts can't be (un)masked in bank 0
|
||||
*
|
||||
* Those interrupts that have shortcuts can only be masked/unmasked in
|
||||
* their respective banks' enable/disable registers. Doing so in the bank 0
|
||||
* enable/disable registers has no effect.
|
||||
*
|
||||
* The FIQ control register:
|
||||
* Bits 0-6: IRQ (index in order of interrupts from banks 1, 2, then 0)
|
||||
* Bit 7: Enable FIQ generation
|
||||
* Bits 8+: Unused
|
||||
*
|
||||
* An interrupt must be disabled before configuring it for FIQ generation
|
||||
* otherwise both handlers will fire at the same time!
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/* Put the bank and irq (32 bits) into the hwirq */
|
||||
#define MAKE_HWIRQ(b, n) ((b << 5) | (n))
|
||||
#define HWIRQ_BANK(i) (i >> 5)
|
||||
#define HWIRQ_BIT(i) BIT(i & 0x1f)
|
||||
|
||||
#define NR_IRQS_BANK0 8
|
||||
#define BANK0_HWIRQ_MASK 0xff
|
||||
/* Shortcuts can't be disabled so any unknown new ones need to be masked */
|
||||
#define SHORTCUT1_MASK 0x00007c00
|
||||
#define SHORTCUT2_MASK 0x001f8000
|
||||
#define SHORTCUT_SHIFT 10
|
||||
#define BANK1_HWIRQ BIT(8)
|
||||
#define BANK2_HWIRQ BIT(9)
|
||||
#define BANK0_VALID_MASK (BANK0_HWIRQ_MASK | BANK1_HWIRQ | BANK2_HWIRQ \
|
||||
| SHORTCUT1_MASK | SHORTCUT2_MASK)
|
||||
|
||||
#define REG_FIQ_CONTROL 0x0c
|
||||
|
||||
#define NR_BANKS 3
|
||||
#define IRQS_PER_BANK 32
|
||||
|
||||
static int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
|
||||
static int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
|
||||
static int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
|
||||
static int bank_irqs[] __initconst = { 8, 32, 32 };
|
||||
|
||||
static const int shortcuts[] = {
|
||||
7, 9, 10, 18, 19, /* Bank 1 */
|
||||
21, 22, 23, 24, 25, 30 /* Bank 2 */
|
||||
};
|
||||
|
||||
struct armctrl_ic {
|
||||
void __iomem *base;
|
||||
void __iomem *pending[NR_BANKS];
|
||||
void __iomem *enable[NR_BANKS];
|
||||
void __iomem *disable[NR_BANKS];
|
||||
struct irq_domain *domain;
|
||||
};
|
||||
|
||||
static struct armctrl_ic intc __read_mostly;
|
||||
static void __exception_irq_entry bcm2835_handle_irq(
|
||||
struct pt_regs *regs);
|
||||
|
||||
static void armctrl_mask_irq(struct irq_data *d)
|
||||
{
|
||||
writel_relaxed(HWIRQ_BIT(d->hwirq), intc.disable[HWIRQ_BANK(d->hwirq)]);
|
||||
}
|
||||
|
||||
static void armctrl_unmask_irq(struct irq_data *d)
|
||||
{
|
||||
writel_relaxed(HWIRQ_BIT(d->hwirq), intc.enable[HWIRQ_BANK(d->hwirq)]);
|
||||
}
|
||||
|
||||
static struct irq_chip armctrl_chip = {
|
||||
.name = "ARMCTRL-level",
|
||||
.irq_mask = armctrl_mask_irq,
|
||||
.irq_unmask = armctrl_unmask_irq
|
||||
};
|
||||
|
||||
/*
 * Translate a two-cell DT interrupt specifier (bank, irq-within-bank)
 * into a linear hwirq number. Rejects out-of-range banks/irqs, and
 * bank 0 irqs beyond its 8 real sources. Trigger type is fixed:
 * the hardware only supports level interrupts.
 */
static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
	const u32 *intspec, unsigned int intsize,
	unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize != 2))
		return -EINVAL;

	if (WARN_ON(intspec[0] >= NR_BANKS))
		return -EINVAL;

	if (WARN_ON(intspec[1] >= IRQS_PER_BANK))
		return -EINVAL;

	if (WARN_ON(intspec[0] == 0 && intspec[1] >= NR_IRQS_BANK0))
		return -EINVAL;

	*out_hwirq = MAKE_HWIRQ(intspec[0], intspec[1]);
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
|
||||
/* Domain ops: mappings are pre-created at init, only xlate is needed. */
static struct irq_domain_ops armctrl_ops = {
	.xlate = armctrl_xlate
};
|
||||
/*
 * Probe the interrupt controller: map its registers, create a linear
 * irq domain sized for all banks, pre-map every hwirq of every bank,
 * and install the top-level handler. Failures panic, since the system
 * cannot take interrupts without this controller.
 */
static int __init armctrl_of_init(struct device_node *node,
	struct device_node *parent)
{
	void __iomem *base;
	int irq, b, i;

	base = of_iomap(node, 0);
	if (!base)
		panic("%s: unable to map IC registers\n",
			node->full_name);

	intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
			&armctrl_ops, NULL);
	if (!intc.domain)
		panic("%s: unable to create IRQ domain\n", node->full_name);

	for (b = 0; b < NR_BANKS; b++) {
		/* Cache per-bank register addresses for the hot path. */
		intc.pending[b] = base + reg_pending[b];
		intc.enable[b] = base + reg_enable[b];
		intc.disable[b] = base + reg_disable[b];

		for (i = 0; i < bank_irqs[b]; i++) {
			irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i));
			BUG_ON(irq <= 0);
			irq_set_chip_and_handler(irq, &armctrl_chip,
				handle_level_irq);
			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		}
	}

	set_handle_irq(bcm2835_handle_irq);
	return 0;
}
|
||||
/*
|
||||
* Handle each interrupt across the entire interrupt controller. This reads the
|
||||
* status register before handling each interrupt, which is necessary given that
|
||||
* handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
|
||||
*/
|
||||
|
||||
static void armctrl_handle_bank(int bank, struct pt_regs *regs)
|
||||
{
|
||||
u32 stat, irq;
|
||||
|
||||
while ((stat = readl_relaxed(intc.pending[bank]))) {
|
||||
irq = MAKE_HWIRQ(bank, ffs(stat) - 1);
|
||||
handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Handle a bank 1/2 source that is mirrored into the bank 0 pending
 * register via a shortcut bit; shortcuts[] maps the shortcut bit
 * position back to the hwirq number within that bank.
 */
static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
	u32 stat)
{
	u32 irq = MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
	handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
}
|
||||
/*
 * Top-level interrupt dispatch. Bank 0's pending register also mirrors
 * bank 1/2 state: the shortcut bits identify common bank 1/2 sources
 * directly, while BANK1_HWIRQ/BANK2_HWIRQ mean the respective bank's
 * own pending register must be consulted. Unknown bits are filtered
 * out by BANK0_VALID_MASK; a set bit matching nothing is a hard bug.
 */
static void __exception_irq_entry bcm2835_handle_irq(
	struct pt_regs *regs)
{
	u32 stat, irq;

	while ((stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK)) {
		if (stat & BANK0_HWIRQ_MASK) {
			irq = MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
			handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
		} else if (stat & SHORTCUT1_MASK) {
			armctrl_handle_shortcut(1, regs, stat & SHORTCUT1_MASK);
		} else if (stat & SHORTCUT2_MASK) {
			armctrl_handle_shortcut(2, regs, stat & SHORTCUT2_MASK);
		} else if (stat & BANK1_HWIRQ) {
			armctrl_handle_bank(1, regs);
		} else if (stat & BANK2_HWIRQ) {
			armctrl_handle_bank(2, regs);
		} else {
			BUG();
		}
	}
}
||||
|
||||
IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init);
|
219
drivers/irqchip/irq-bcm7120-l2.c
Normal file
219
drivers/irqchip/irq-bcm7120-l2.c
Normal file
|
@ -0,0 +1,219 @@
|
|||
/*
|
||||
* Broadcom BCM7120 style Level 2 interrupt controller driver
|
||||
*
|
||||
* Copyright (C) 2014 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
/* Register offset in the L2 interrupt controller */
|
||||
#define IRQEN 0x00
|
||||
#define IRQSTAT 0x04
|
||||
|
||||
/*
 * Per-controller state.
 * @irq_fwd_mask: interrupts forwarded in hardware; always kept enabled.
 * @irq_map_mask: union of child interrupts wired to the parent irqs.
 * @saved_mask:   IRQEN contents saved across suspend/resume.
 */
struct bcm7120_l2_intc_data {
	void __iomem *base;
	struct irq_domain *domain;
	bool can_wake;
	u32 irq_fwd_mask;
	u32 irq_map_mask;
	u32 saved_mask;
};
|
||||
/*
 * Chained handler on the parent interrupt: read IRQSTAT once and
 * dispatch every set bit to the corresponding child virq. A spurious
 * invocation (no status bits) is reported via do_bad_IRQ().
 */
static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
{
	struct bcm7120_l2_intc_data *b = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 status;

	chained_irq_enter(chip, desc);

	status = __raw_readl(b->base + IRQSTAT);

	if (status == 0) {
		do_bad_IRQ(irq, desc);
		goto out;
	}

	do {
		irq = ffs(status) - 1;
		status &= ~(1 << irq);
		generic_handle_irq(irq_find_mapping(b->domain, irq));
	} while (status);

out:
	chained_irq_exit(chip, desc);
}
||||
|
||||
/*
 * Save the enable state (IRQEN lives at offset 0, so b->base reads it)
 * plus the hardware forward mask, and if this controller can wake the
 * system, leave the wake sources enabled across suspend.
 */
static void bcm7120_l2_intc_suspend(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct bcm7120_l2_intc_data *b = gc->private;
	u32 reg;

	irq_gc_lock(gc);
	/* Save the current mask and the interrupt forward mask */
	b->saved_mask = __raw_readl(b->base) | b->irq_fwd_mask;
	if (b->can_wake) {
		reg = b->saved_mask | gc->wake_active;
		__raw_writel(reg, b->base);
	}
	irq_gc_unlock(gc);
}
|
||||
/* Restore the enable mask captured by bcm7120_l2_intc_suspend(). */
static void bcm7120_l2_intc_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct bcm7120_l2_intc_data *b = gc->private;

	/* Restore the saved mask */
	irq_gc_lock(gc);
	__raw_writel(b->saved_mask, b->base);
	irq_gc_unlock(gc);
}
|
||||
/*
 * Hook up the irq-th parent interrupt: map it, accumulate its child
 * mask (from the "brcm,int-map-mask" property) into irq_map_mask, and
 * install the chained handler. Returns 0 or -EINVAL if the parent
 * interrupt cannot be mapped.
 */
static int bcm7120_l2_intc_init_one(struct device_node *dn,
					struct bcm7120_l2_intc_data *data,
					int irq, const __be32 *map_mask)
{
	int parent_irq;

	parent_irq = irq_of_parse_and_map(dn, irq);
	if (!parent_irq) {
		pr_err("failed to map interrupt %d\n", irq);
		return -EINVAL;
	}

	data->irq_map_mask |= be32_to_cpup(map_mask + irq);

	irq_set_handler_data(parent_irq, data);
	irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle);

	return 0;
}
||||
|
||||
/*
 * Probe a BCM7120-style L2 interrupt controller from the device tree:
 * map its registers, program the hardware forward mask, wire up every
 * parent interrupt, and register a 32-bit generic irq chip domain with
 * suspend/resume and optional wakeup support.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on failure via the goto-unwind chain.
 */
int __init bcm7120_l2_intc_of_init(struct device_node *dn,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct bcm7120_l2_intc_data *data;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	const __be32 *map_mask;
	int num_parent_irqs;
	int ret = 0, len, irq;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->base = of_iomap(dn, 0);
	if (!data->base) {
		pr_err("failed to remap intc L2 registers\n");
		ret = -ENOMEM;
		goto out_free;
	}

	if (of_property_read_u32(dn, "brcm,int-fwd-mask", &data->irq_fwd_mask))
		data->irq_fwd_mask = 0;

	/* Enable all interrupt specified in the interrupt forward mask and have
	 * the other disabled
	 */
	__raw_writel(data->irq_fwd_mask, data->base + IRQEN);

	num_parent_irqs = of_irq_count(dn);
	if (num_parent_irqs <= 0) {
		pr_err("invalid number of parent interrupts\n");
		ret = -ENOMEM;
		goto out_unmap;
	}

	map_mask = of_get_property(dn, "brcm,int-map-mask", &len);
	if (!map_mask || (len != (sizeof(*map_mask) * num_parent_irqs))) {
		pr_err("invalid brcm,int-map-mask property\n");
		ret = -EINVAL;
		goto out_unmap;
	}

	for (irq = 0; irq < num_parent_irqs; irq++) {
		ret = bcm7120_l2_intc_init_one(dn, data, irq, map_mask);
		if (ret)
			goto out_unmap;
	}

	data->domain = irq_domain_add_linear(dn, 32,
				&irq_generic_chip_ops, NULL);
	if (!data->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
				dn->full_name, handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("failed to allocate generic irq chip\n");
		goto out_free_domain;
	}

	gc = irq_get_domain_generic_chip(data->domain, 0);
	/*
	 * Mark every bit not wired to a parent as unused. This was
	 * previously 0xfffffff (28 bits), which left the top nibble's
	 * interrupts incorrectly treated as in-use.
	 */
	gc->unused = 0xffffffff & ~data->irq_map_mask;
	gc->reg_base = data->base;
	gc->private = data;
	ct = gc->chip_types;

	ct->regs.mask = IRQEN;
	ct->chip.irq_mask = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->chip.irq_ack = irq_gc_noop;
	ct->chip.irq_suspend = bcm7120_l2_intc_suspend;
	ct->chip.irq_resume = bcm7120_l2_intc_resume;

	if (of_property_read_bool(dn, "brcm,irq-can-wake")) {
		data->can_wake = true;
		/* This IRQ chip can wake the system, set all relevant child
		 * interupts in wake_enabled mask
		 */
		gc->wake_enabled = 0xffffffff;
		gc->wake_enabled &= ~gc->unused;
		ct->chip.irq_set_wake = irq_gc_set_wake;
	}

	pr_info("registered BCM7120 L2 intc (mem: 0x%p, parent IRQ(s): %d)\n",
			data->base, num_parent_irqs);

	return 0;

out_free_domain:
	irq_domain_remove(data->domain);
out_unmap:
	iounmap(data->base);
out_free:
	kfree(data);
	return ret;
}
||||
IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,bcm7120-l2-intc",
|
||||
bcm7120_l2_intc_of_init);
|
202
drivers/irqchip/irq-brcmstb-l2.c
Normal file
202
drivers/irqchip/irq-brcmstb-l2.c
Normal file
|
@ -0,0 +1,202 @@
|
|||
/*
|
||||
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
|
||||
*
|
||||
* Copyright (C) 2014 Broadcom Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/* Register offsets in the L2 interrupt controller */
|
||||
#define CPU_STATUS 0x00
|
||||
#define CPU_SET 0x04
|
||||
#define CPU_CLEAR 0x08
|
||||
#define CPU_MASK_STATUS 0x0c
|
||||
#define CPU_MASK_SET 0x10
|
||||
#define CPU_MASK_CLEAR 0x14
|
||||
|
||||
/* L2 intc private data structure */
|
||||
/* L2 intc private data structure */
struct brcmstb_l2_intc_data {
	int parent_irq;
	void __iomem *base;
	struct irq_domain *domain;
	bool can_wake;		/* set from "brcm,irq-can-wake" DT property */
	u32 saved_mask;		/* for suspend/resume */
};
||||
|
||||
/*
 * Chained handler on the parent interrupt: compute the set of pending,
 * unmasked child interrupts, ack each at this level (CPU_CLEAR) and
 * dispatch it. A spurious invocation is reported via do_bad_IRQ().
 */
static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
{
	struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 status;

	chained_irq_enter(chip, desc);

	status = __raw_readl(b->base + CPU_STATUS) &
		~(__raw_readl(b->base + CPU_MASK_STATUS));

	if (status == 0) {
		do_bad_IRQ(irq, desc);
		goto out;
	}

	do {
		irq = ffs(status) - 1;
		/* ack at our level */
		__raw_writel(1 << irq, b->base + CPU_CLEAR);
		status &= ~(1 << irq);
		generic_handle_irq(irq_find_mapping(b->domain, irq));
	} while (status);
out:
	chained_irq_exit(chip, desc);
}
||||
|
||||
/*
 * Save the current mask state and, for wakeup-capable controllers,
 * program the mask so that only wake sources stay enabled in suspend.
 */
static void brcmstb_l2_intc_suspend(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct brcmstb_l2_intc_data *b = gc->private;

	irq_gc_lock(gc);
	/* Save the current mask */
	b->saved_mask = __raw_readl(b->base + CPU_MASK_STATUS);

	if (b->can_wake) {
		/* Program the wakeup mask */
		__raw_writel(~gc->wake_active, b->base + CPU_MASK_SET);
		__raw_writel(gc->wake_active, b->base + CPU_MASK_CLEAR);
	}
	irq_gc_unlock(gc);
}
|
||||
/*
 * Undo brcmstb_l2_intc_suspend(): drop latched non-wakeup interrupts
 * that fired during suspend, then restore the saved mask state.
 */
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct brcmstb_l2_intc_data *b = gc->private;

	irq_gc_lock(gc);
	/* Clear unmasked non-wakeup interrupts */
	__raw_writel(~b->saved_mask & ~gc->wake_active, b->base + CPU_CLEAR);

	/* Restore the saved mask */
	__raw_writel(b->saved_mask, b->base + CPU_MASK_SET);
	__raw_writel(~b->saved_mask, b->base + CPU_MASK_CLEAR);
	irq_gc_unlock(gc);
}
||||
|
||||
/*
 * Probe a Broadcom STB L2 interrupt controller: map registers, mask and
 * clear all interrupts, chain onto the single parent interrupt, and
 * register a 32-bit edge-triggered generic irq chip domain with
 * suspend/resume and optional wakeup support.
 *
 * Returns 0 on success or a negative errno; partially acquired
 * resources are released through the goto-unwind chain.
 */
int __init brcmstb_l2_intc_of_init(struct device_node *np,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct brcmstb_l2_intc_data *data;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->base = of_iomap(np, 0);
	if (!data->base) {
		pr_err("failed to remap intc L2 registers\n");
		ret = -ENOMEM;
		goto out_free;
	}

	/* Disable all interrupts by default */
	__raw_writel(0xffffffff, data->base + CPU_MASK_SET);
	__raw_writel(0xffffffff, data->base + CPU_CLEAR);

	data->parent_irq = irq_of_parse_and_map(np, 0);
	if (!data->parent_irq) {
		pr_err("failed to find parent interrupt\n");
		ret = -EINVAL;
		goto out_unmap;
	}

	data->domain = irq_domain_add_linear(np, 32,
				&irq_generic_chip_ops, NULL);
	if (!data->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	/* Allocate a single Generic IRQ chip for this node */
	ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
				np->full_name, handle_edge_irq, clr, 0, 0);
	if (ret) {
		pr_err("failed to allocate generic irq chip\n");
		goto out_free_domain;
	}

	/* Set the IRQ chaining logic */
	irq_set_handler_data(data->parent_irq, data);
	irq_set_chained_handler(data->parent_irq, brcmstb_l2_intc_irq_handle);

	gc = irq_get_domain_generic_chip(data->domain, 0);
	gc->reg_base = data->base;
	gc->private = data;
	ct = gc->chip_types;

	/* Edge chip: ack by setting the bit in CPU_CLEAR. */
	ct->chip.irq_ack = irq_gc_ack_set_bit;
	ct->regs.ack = CPU_CLEAR;

	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->regs.disable = CPU_MASK_SET;

	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->regs.enable = CPU_MASK_CLEAR;

	ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
	ct->chip.irq_resume = brcmstb_l2_intc_resume;

	if (of_property_read_bool(np, "brcm,irq-can-wake")) {
		data->can_wake = true;
		/* This IRQ chip can wake the system, set all child interrupts
		 * in wake_enabled mask
		 */
		gc->wake_enabled = 0xffffffff;
		ct->chip.irq_set_wake = irq_gc_set_wake;
	}

	pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
			data->base, data->parent_irq);

	return 0;

out_free_domain:
	irq_domain_remove(data->domain);
out_unmap:
	iounmap(data->base);
out_free:
	kfree(data);
	return ret;
}
|
||||
IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init);
|
239
drivers/irqchip/irq-clps711x.c
Normal file
239
drivers/irqchip/irq-clps711x.c
Normal file
|
@ -0,0 +1,239 @@
|
|||
/*
|
||||
* CLPS711X IRQ driver
|
||||
*
|
||||
* Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define CLPS711X_INTSR1 (0x0240)
|
||||
#define CLPS711X_INTMR1 (0x0280)
|
||||
#define CLPS711X_BLEOI (0x0600)
|
||||
#define CLPS711X_MCEOI (0x0640)
|
||||
#define CLPS711X_TEOI (0x0680)
|
||||
#define CLPS711X_TC1EOI (0x06c0)
|
||||
#define CLPS711X_TC2EOI (0x0700)
|
||||
#define CLPS711X_RTCEOI (0x0740)
|
||||
#define CLPS711X_UMSEOI (0x0780)
|
||||
#define CLPS711X_COEOI (0x07c0)
|
||||
#define CLPS711X_INTSR2 (0x1240)
|
||||
#define CLPS711X_INTMR2 (0x1280)
|
||||
#define CLPS711X_SRXEOF (0x1600)
|
||||
#define CLPS711X_KBDEOI (0x1700)
|
||||
#define CLPS711X_INTSR3 (0x2240)
|
||||
#define CLPS711X_INTMR3 (0x2280)
|
||||
|
||||
static const struct {
|
||||
#define CLPS711X_FLAG_EN (1 << 0)
|
||||
#define CLPS711X_FLAG_FIQ (1 << 1)
|
||||
unsigned int flags;
|
||||
phys_addr_t eoi;
|
||||
} clps711x_irqs[] = {
|
||||
[1] = { CLPS711X_FLAG_FIQ, CLPS711X_BLEOI, },
|
||||
[3] = { CLPS711X_FLAG_FIQ, CLPS711X_MCEOI, },
|
||||
[4] = { CLPS711X_FLAG_EN, CLPS711X_COEOI, },
|
||||
[5] = { CLPS711X_FLAG_EN, },
|
||||
[6] = { CLPS711X_FLAG_EN, },
|
||||
[7] = { CLPS711X_FLAG_EN, },
|
||||
[8] = { CLPS711X_FLAG_EN, CLPS711X_TC1EOI, },
|
||||
[9] = { CLPS711X_FLAG_EN, CLPS711X_TC2EOI, },
|
||||
[10] = { CLPS711X_FLAG_EN, CLPS711X_RTCEOI, },
|
||||
[11] = { CLPS711X_FLAG_EN, CLPS711X_TEOI, },
|
||||
[12] = { CLPS711X_FLAG_EN, },
|
||||
[13] = { CLPS711X_FLAG_EN, },
|
||||
[14] = { CLPS711X_FLAG_EN, CLPS711X_UMSEOI, },
|
||||
[15] = { CLPS711X_FLAG_EN, CLPS711X_SRXEOF, },
|
||||
[16] = { CLPS711X_FLAG_EN, CLPS711X_KBDEOI, },
|
||||
[17] = { CLPS711X_FLAG_EN, },
|
||||
[18] = { CLPS711X_FLAG_EN, },
|
||||
[28] = { CLPS711X_FLAG_EN, },
|
||||
[29] = { CLPS711X_FLAG_EN, },
|
||||
[32] = { CLPS711X_FLAG_FIQ, },
|
||||
};
|
||||
|
||||
static struct {
|
||||
void __iomem *base;
|
||||
void __iomem *intmr[3];
|
||||
void __iomem *intsr[3];
|
||||
struct irq_domain *domain;
|
||||
struct irq_domain_ops ops;
|
||||
} *clps711x_intc;
|
||||
|
||||
/*
 * Top-level handler: scan INTSR1/INTSR2 (masked by the corresponding
 * INTMR register) and dispatch the highest-numbered pending interrupt
 * from each, looping until no more are pending. Bank 2 interrupts are
 * not scanned here.
 */
static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
{
	u32 irqstat;

	do {
		irqstat = readw_relaxed(clps711x_intc->intmr[0]) &
			  readw_relaxed(clps711x_intc->intsr[0]);
		if (irqstat)
			handle_domain_irq(clps711x_intc->domain,
					  fls(irqstat) - 1, regs);

		/* Bank 1 hwirqs start at 16. */
		irqstat = readw_relaxed(clps711x_intc->intmr[1]) &
			  readw_relaxed(clps711x_intc->intsr[1]);
		if (irqstat)
			handle_domain_irq(clps711x_intc->domain,
					  fls(irqstat) - 1 + 16, regs);
	} while (irqstat);
}
||||
|
||||
/* Signal end-of-interrupt by writing to the source's EOI register. */
static void clps711x_intc_eoi(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hwirq].eoi);
}
||||
|
||||
static void clps711x_intc_mask(struct irq_data *d)
|
||||
{
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
|
||||
u32 tmp;
|
||||
|
||||
tmp = readl_relaxed(intmr);
|
||||
tmp &= ~(1 << (hwirq % 16));
|
||||
writel_relaxed(tmp, intmr);
|
||||
}
|
||||
|
||||
static void clps711x_intc_unmask(struct irq_data *d)
|
||||
{
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
void __iomem *intmr = clps711x_intc->intmr[hwirq / 16];
|
||||
u32 tmp;
|
||||
|
||||
tmp = readl_relaxed(intmr);
|
||||
tmp |= 1 << (hwirq % 16);
|
||||
writel_relaxed(tmp, intmr);
|
||||
}
|
||||
|
||||
/* irq_chip: mask/unmask via INTMR, EOI for sources with an EOI register. */
static struct irq_chip clps711x_intc_chip = {
	.name		= "clps711x-intc",
	.irq_eoi	= clps711x_intc_eoi,
	.irq_mask	= clps711x_intc_mask,
	.irq_unmask	= clps711x_intc_unmask,
};
|
||||
/*
 * Per-hwirq domain map callback. Sources not present in clps711x_irqs[]
 * are skipped; FIQ sources get handle_bad_irq and stay disabled (they
 * are serviced by the FIQ path); sources with an EOI register use the
 * fasteoi flow, the rest are plain level interrupts. Any stale pending
 * state is cleared via the EOI register before the irq is handed out.
 */
static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
					irq_hw_number_t hw)
{
	irq_flow_handler_t handler = handle_level_irq;
	unsigned int flags = IRQF_VALID | IRQF_PROBE;

	if (!clps711x_irqs[hw].flags)
		return 0;

	if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) {
		handler = handle_bad_irq;
		flags |= IRQF_NOAUTOEN;
	} else if (clps711x_irqs[hw].eoi) {
		handler = handle_fasteoi_irq;
	}

	/* Clear down pending interrupt */
	if (clps711x_irqs[hw].eoi)
		writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi);

	irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler);
	set_irq_flags(virq, flags);

	return 0;
}
||||
|
||||
/*
 * Common init for DT and non-DT boot: map the register window, resolve
 * the INTSR/INTMR banks, mask everything, then build a legacy irq
 * domain over the static clps711x_irqs[] table and install the
 * top-level handler. Returns 0 or a negative errno, unwinding any
 * partial setup on failure.
 */
static int __init _clps711x_intc_init(struct device_node *np,
				      phys_addr_t base, resource_size_t size)
{
	int err;

	clps711x_intc = kzalloc(sizeof(*clps711x_intc), GFP_KERNEL);
	if (!clps711x_intc)
		return -ENOMEM;

	clps711x_intc->base = ioremap(base, size);
	if (!clps711x_intc->base) {
		err = -ENOMEM;
		goto out_kfree;
	}

	clps711x_intc->intsr[0] = clps711x_intc->base + CLPS711X_INTSR1;
	clps711x_intc->intmr[0] = clps711x_intc->base + CLPS711X_INTMR1;
	clps711x_intc->intsr[1] = clps711x_intc->base + CLPS711X_INTSR2;
	clps711x_intc->intmr[1] = clps711x_intc->base + CLPS711X_INTMR2;
	clps711x_intc->intsr[2] = clps711x_intc->base + CLPS711X_INTSR3;
	clps711x_intc->intmr[2] = clps711x_intc->base + CLPS711X_INTMR3;

	/* Mask all interrupts */
	writel_relaxed(0, clps711x_intc->intmr[0]);
	writel_relaxed(0, clps711x_intc->intmr[1]);
	writel_relaxed(0, clps711x_intc->intmr[2]);

	err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
	if (IS_ERR_VALUE(err))
		goto out_iounmap;

	clps711x_intc->ops.map = clps711x_intc_irq_map;
	clps711x_intc->ops.xlate = irq_domain_xlate_onecell;
	clps711x_intc->domain =
		irq_domain_add_legacy(np, ARRAY_SIZE(clps711x_irqs),
				      0, 0, &clps711x_intc->ops, NULL);
	if (!clps711x_intc->domain) {
		err = -ENOMEM;
		goto out_irqfree;
	}

	irq_set_default_host(clps711x_intc->domain);
	set_handle_irq(clps711x_irqh);

#ifdef CONFIG_FIQ
	init_FIQ(0);
#endif

	return 0;

out_irqfree:
	irq_free_descs(0, ARRAY_SIZE(clps711x_irqs));

out_iounmap:
	iounmap(clps711x_intc->base);

out_kfree:
	kfree(clps711x_intc);

	return err;
}
||||
|
||||
/* Non-DT entry point; init failure is fatal this early in boot. */
void __init clps711x_intc_init(phys_addr_t base, resource_size_t size)
{
	BUG_ON(_clps711x_intc_init(NULL, base, size));
}
||||
|
||||
#ifdef CONFIG_IRQCHIP
|
||||
/* DT entry point: resolve the reg property and run the common init. */
static int __init clps711x_intc_init_dt(struct device_node *np,
					struct device_node *parent)
{
	struct resource res;
	int err;

	err = of_address_to_resource(np, 0, &res);
	if (err)
		return err;

	return _clps711x_intc_init(np, res.start, resource_size(&res));
}
IRQCHIP_DECLARE(clps711x, "cirrus,clps711x-intc", clps711x_intc_init_dt);
||||
IRQCHIP_DECLARE(clps711x, "cirrus,clps711x-intc", clps711x_intc_init_dt);
|
||||
#endif
|
326
drivers/irqchip/irq-crossbar.c
Normal file
326
drivers/irqchip/irq-crossbar.c
Normal file
|
@ -0,0 +1,326 @@
|
|||
/*
|
||||
* drivers/irqchip/irq-crossbar.c
|
||||
*
|
||||
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
|
||||
* Author: Sricharan R <r.sricharan@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/irqchip/arm-gic.h>
|
||||
#include <linux/irqchip/irq-crossbar.h>
|
||||
|
||||
#define IRQ_FREE -1
|
||||
#define IRQ_RESERVED -2
|
||||
#define IRQ_SKIP -3
|
||||
#define GIC_IRQ_START 32
|
||||
|
||||
/**
|
||||
* struct crossbar_device - crossbar device description
|
||||
* @int_max: maximum number of supported interrupts
|
||||
* @safe_map: safe default value to initialize the crossbar
|
||||
* @max_crossbar_sources: Maximum number of crossbar sources
|
||||
* @irq_map: array of interrupts to crossbar number mapping
|
||||
* @crossbar_base: crossbar base address
|
||||
* @register_offsets: offsets for each irq number
|
||||
* @write: register write function pointer
|
||||
*/
|
||||
struct crossbar_device {
|
||||
uint int_max;
|
||||
uint safe_map;
|
||||
uint max_crossbar_sources;
|
||||
uint *irq_map;
|
||||
void __iomem *crossbar_base;
|
||||
int *register_offsets;
|
||||
void (*write)(int, int);
|
||||
};
|
||||
|
||||
static struct crossbar_device *cb;
|
||||
|
||||
/*
 * Register-width-specific write helpers; one of these is selected as
 * cb->write based on the "ti,reg-size" property. Each routes crossbar
 * input cb_no to the irq_no-th output line.
 */
static inline void crossbar_writel(int irq_no, int cb_no)
{
	writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static inline void crossbar_writew(int irq_no, int cb_no)
{
	writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static inline void crossbar_writeb(int irq_no, int cb_no)
{
	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}
||||
|
||||
static inline int get_prev_map_irq(int cb_no)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = cb->int_max - 1; i >= 0; i--)
|
||||
if (cb->irq_map[i] == cb_no)
|
||||
return i;
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline int allocate_free_irq(int cb_no)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = cb->int_max - 1; i >= 0; i--) {
|
||||
if (cb->irq_map[i] == IRQ_FREE) {
|
||||
cb->irq_map[i] = cb_no;
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/*
 * Decide whether mapping/unmapping hwirq hw requires programming the
 * crossbar: only GIC SPIs whose map entry is neither reserved nor
 * hardwired to bypass the crossbar.
 *
 * NOTE(review): the comparison is strict (hw > GIC_IRQ_START), so
 * hw == GIC_IRQ_START (irq_map[0]) never triggers a crossbar write —
 * presumably intentional, but worth confirming against the hardware
 * mapping.
 */
static inline bool needs_crossbar_write(irq_hw_number_t hw)
{
	int cb_no;

	if (hw > GIC_IRQ_START) {
		cb_no = cb->irq_map[hw - GIC_IRQ_START];
		if (cb_no != IRQ_RESERVED && cb_no != IRQ_SKIP)
			return true;
	}

	return false;
}
||||
|
||||
/*
 * Domain map callback: program the crossbar so the input recorded in
 * irq_map[] is routed to this GIC SPI. Reserved/skipped/SGI-PPI lines
 * are left untouched.
 */
static int crossbar_domain_map(struct irq_domain *d, unsigned int irq,
			       irq_hw_number_t hw)
{
	if (needs_crossbar_write(hw))
		cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]);

	return 0;
}
||||
|
||||
/**
 * crossbar_domain_unmap - unmap a crossbar<->irq connection
 * @d: domain of irq to unmap
 * @irq: virq number
 *
 * We do not maintain a use count of total number of map/unmap
 * calls for a particular irq to find out if a irq can be really
 * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
 * after which irq is anyways unusable. So an explicit map has to be called
 * after that.
 */
static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq;

	if (needs_crossbar_write(hw)) {
		/* Release the slot and route the safe default input. */
		cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE;
		cb->write(hw - GIC_IRQ_START, cb->safe_map);
	}
}
||||
|
||||
/*
 * Translate a DT specifier into a GIC hwirq. Request numbers beyond
 * max_crossbar_sources address direct-mapped (reserved/skip) lines by
 * offset; otherwise reuse an existing crossbar routing for this source
 * or allocate a free slot. The returned hwirq is offset by
 * GIC_IRQ_START to land in SPI space.
 */
static int crossbar_domain_xlate(struct irq_domain *d,
				 struct device_node *controller,
				 const u32 *intspec, unsigned int intsize,
				 unsigned long *out_hwirq,
				 unsigned int *out_type)
{
	int ret;
	int req_num = intspec[1];
	int direct_map_num;

	if (req_num >= cb->max_crossbar_sources) {
		/* Possibly a direct-mapped line rather than an error. */
		direct_map_num = req_num - cb->max_crossbar_sources;
		if (direct_map_num < cb->int_max) {
			ret = cb->irq_map[direct_map_num];
			if (ret == IRQ_RESERVED || ret == IRQ_SKIP) {
				/* We use the interrupt num as h/w irq num */
				ret = direct_map_num;
				goto found;
			}
		}

		pr_err("%s: requested crossbar number %d > max %d\n",
		       __func__, req_num, cb->max_crossbar_sources);
		return -EINVAL;
	}

	ret = get_prev_map_irq(req_num);
	if (ret >= 0)
		goto found;

	ret = allocate_free_irq(req_num);

	if (ret < 0)
		return ret;

found:
	*out_hwirq = ret + GIC_IRQ_START;
	return 0;
}
||||
|
||||
/* Domain ops for crossbar-routable interrupts. */
static const struct irq_domain_ops routable_irq_domain_ops = {
	.map = crossbar_domain_map,
	.unmap = crossbar_domain_unmap,
	.xlate = crossbar_domain_xlate
};
||||
|
||||
/*
 * crossbar_of_init - parse the crossbar DT node and initialise hardware.
 * @node: the "ti,irq-crossbar" device tree node
 *
 * Maps the crossbar registers, builds the irq_map (FREE/RESERVED/SKIP per
 * slot), precomputes per-slot register offsets, routes every writable slot
 * to the safe map, and finally registers the routable domain ops with the
 * GIC.  On any failure all acquired resources are released in reverse
 * order via the goto ladder and a negative errno is returned.
 */
static int __init crossbar_of_init(struct device_node *node)
{
        int i, size, max = 0, reserved = 0, entry;
        const __be32 *irqsr;
        int ret = -ENOMEM;

        cb = kzalloc(sizeof(*cb), GFP_KERNEL);

        if (!cb)
                return ret;

        cb->crossbar_base = of_iomap(node, 0);
        if (!cb->crossbar_base)
                goto err_cb;

        of_property_read_u32(node, "ti,max-crossbar-sources",
                             &cb->max_crossbar_sources);
        if (!cb->max_crossbar_sources) {
                pr_err("missing 'ti,max-crossbar-sources' property\n");
                ret = -EINVAL;
                goto err_base;
        }

        of_property_read_u32(node, "ti,max-irqs", &max);
        if (!max) {
                pr_err("missing 'ti,max-irqs' property\n");
                ret = -EINVAL;
                goto err_base;
        }
        cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
        if (!cb->irq_map)
                goto err_base;

        cb->int_max = max;

        /* Every slot starts out free until the DT says otherwise */
        for (i = 0; i < max; i++)
                cb->irq_map[i] = IRQ_FREE;

        /* Get and mark reserved irqs */
        irqsr = of_get_property(node, "ti,irqs-reserved", &size);
        if (irqsr) {
                size /= sizeof(__be32);

                for (i = 0; i < size; i++) {
                        of_property_read_u32_index(node,
                                                   "ti,irqs-reserved",
                                                   i, &entry);
                        if (entry >= max) {
                                pr_err("Invalid reserved entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
                        }
                        cb->irq_map[entry] = IRQ_RESERVED;
                }
        }

        /* Skip irqs hardwired to bypass the crossbar */
        irqsr = of_get_property(node, "ti,irqs-skip", &size);
        if (irqsr) {
                size /= sizeof(__be32);

                for (i = 0; i < size; i++) {
                        of_property_read_u32_index(node,
                                                   "ti,irqs-skip",
                                                   i, &entry);
                        if (entry >= max) {
                                pr_err("Invalid skip entry\n");
                                ret = -EINVAL;
                                goto err_irq_map;
                        }
                        cb->irq_map[entry] = IRQ_SKIP;
                }
        }


        cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
        if (!cb->register_offsets)
                goto err_irq_map;

        of_property_read_u32(node, "ti,reg-size", &size);

        /* Pick the MMIO accessor matching the register width */
        switch (size) {
        case 1:
                cb->write = crossbar_writeb;
                break;
        case 2:
                cb->write = crossbar_writew;
                break;
        case 4:
                cb->write = crossbar_writel;
                break;
        default:
                pr_err("Invalid reg-size property\n");
                ret = -EINVAL;
                goto err_reg_offset;
                break;
        }

        /*
         * Register offsets are not linear because of the
         * reserved irqs. so find and store the offsets once.
         */
        for (i = 0; i < max; i++) {
                if (cb->irq_map[i] == IRQ_RESERVED)
                        continue;

                cb->register_offsets[i] = reserved;
                reserved += size;
        }

        of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
        /* Initialize the crossbar with safe map to start with */
        for (i = 0; i < max; i++) {
                if (cb->irq_map[i] == IRQ_RESERVED ||
                    cb->irq_map[i] == IRQ_SKIP)
                        continue;

                cb->write(i, cb->safe_map);
        }

        register_routable_domain_ops(&routable_irq_domain_ops);
        return 0;

err_reg_offset:
        kfree(cb->register_offsets);
err_irq_map:
        kfree(cb->irq_map);
err_base:
        iounmap(cb->crossbar_base);
err_cb:
        kfree(cb);

        cb = NULL;
        return ret;
}
|
||||
|
||||
/* Device tree compatible string this driver binds to. */
static const struct of_device_id crossbar_match[] __initconst = {
        { .compatible = "ti,irq-crossbar" },
        {}
};
|
||||
|
||||
/*
 * irqcrossbar_init - locate the crossbar DT node and initialise it.
 *
 * Returns 0 on success, -ENODEV when no "ti,irq-crossbar" node exists,
 * or the error reported by crossbar_of_init().
 *
 * Fix: the original discarded crossbar_of_init()'s return value and
 * always reported success even when the hardware setup failed; propagate
 * the error instead so callers can react.
 */
int __init irqcrossbar_init(void)
{
        struct device_node *np;

        np = of_find_matching_node(NULL, crossbar_match);
        if (!np)
                return -ENODEV;

        return crossbar_of_init(np);
}
|
150
drivers/irqchip/irq-dw-apb-ictl.c
Normal file
150
drivers/irqchip/irq-dw-apb-ictl.c
Normal file
|
@ -0,0 +1,150 @@
|
|||
/*
|
||||
* Synopsys DW APB ICTL irqchip driver.
|
||||
*
|
||||
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
|
||||
*
|
||||
* based on GPL'ed 2.6 kernel sources
|
||||
* (c) Marvell International Ltd.
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define APB_INT_ENABLE_L 0x00
|
||||
#define APB_INT_ENABLE_H 0x04
|
||||
#define APB_INT_MASK_L 0x08
|
||||
#define APB_INT_MASK_H 0x0c
|
||||
#define APB_INT_FINALSTATUS_L 0x30
|
||||
#define APB_INT_FINALSTATUS_H 0x34
|
||||
|
||||
/*
 * Chained handler for the parent interrupt: scan the FINALSTATUS
 * register bank(s) and dispatch every pending child interrupt.
 */
static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_get_chip(irq);
        struct irq_chip_generic *gc = irq_get_handler_data(irq);
        struct irq_domain *d = gc->private;
        u32 stat;
        int n;

        chained_irq_enter(chip, desc);

        /* One 32-bit status word per chip type (low then high bank) */
        for (n = 0; n < gc->num_ct; n++) {
                stat = readl_relaxed(gc->reg_base +
                                     APB_INT_FINALSTATUS_L + 4 * n);
                /* Service pending bits lowest-first until the word is clear */
                while (stat) {
                        u32 hwirq = ffs(stat) - 1;
                        generic_handle_irq(irq_find_mapping(d,
                                           gc->irq_base + hwirq + 32 * n));
                        stat &= ~(1 << hwirq);
                }
        }

        chained_irq_exit(chip, desc);
}
|
||||
|
||||
/*
 * dw_apb_ictl_init - probe a Synopsys DW APB interrupt controller node.
 * @np: the controller's DT node
 * @parent: parent interrupt controller node (unused)
 *
 * Maps the registers, probes how many irq lines were synthesised, builds
 * a linear irq domain with one or two generic chips (32 irqs each) and
 * installs the chained handler on the parent interrupt.
 *
 * Returns 0 on success or a negative errno; resources are unwound via
 * the goto ladder on failure.
 */
static int __init dw_apb_ictl_init(struct device_node *np,
                                   struct device_node *parent)
{
        unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
        struct resource r;
        struct irq_domain *domain;
        struct irq_chip_generic *gc;
        void __iomem *iobase;
        int ret, nrirqs, irq;
        u32 reg;

        /* Map the parent interrupt for the chained handler */
        irq = irq_of_parse_and_map(np, 0);
        if (irq <= 0) {
                pr_err("%s: unable to parse irq\n", np->full_name);
                return -EINVAL;
        }

        ret = of_address_to_resource(np, 0, &r);
        if (ret) {
                pr_err("%s: unable to get resource\n", np->full_name);
                return ret;
        }

        if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
                pr_err("%s: unable to request mem region\n", np->full_name);
                return -ENOMEM;
        }

        iobase = ioremap(r.start, resource_size(&r));
        if (!iobase) {
                pr_err("%s: unable to map resource\n", np->full_name);
                ret = -ENOMEM;
                goto err_release;
        }

        /*
         * DW IP can be configured to allow 2-64 irqs. We can determine
         * the number of irqs supported by writing into enable register
         * and look for bits not set, as corresponding flip-flops will
         * have been removed by synthesis tool.
         */

        /* mask and enable all interrupts */
        writel(~0, iobase + APB_INT_MASK_L);
        writel(~0, iobase + APB_INT_MASK_H);
        writel(~0, iobase + APB_INT_ENABLE_L);
        writel(~0, iobase + APB_INT_ENABLE_H);

        reg = readl(iobase + APB_INT_ENABLE_H);
        if (reg)
                nrirqs = 32 + fls(reg);
        else
                nrirqs = fls(readl(iobase + APB_INT_ENABLE_L));

        domain = irq_domain_add_linear(np, nrirqs,
                                       &irq_generic_chip_ops, NULL);
        if (!domain) {
                pr_err("%s: unable to add irq domain\n", np->full_name);
                ret = -ENOMEM;
                goto err_unmap;
        }

        /* Two chip types are needed when more than 32 lines exist */
        ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
                                             np->name, handle_level_irq, clr, 0,
                                             IRQ_GC_INIT_MASK_CACHE);
        if (ret) {
                pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
                goto err_unmap;
        }

        gc = irq_get_domain_generic_chip(domain, 0);
        gc->private = domain;
        gc->reg_base = iobase;

        gc->chip_types[0].regs.mask = APB_INT_MASK_L;
        gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
        gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;

        if (nrirqs > 32) {
                gc->chip_types[1].regs.mask = APB_INT_MASK_H;
                gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
                gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
        }

        irq_set_handler_data(irq, gc);
        irq_set_chained_handler(irq, dw_apb_ictl_handler);

        return 0;

err_unmap:
        iounmap(iobase);
err_release:
        release_mem_region(r.start, resource_size(&r));
        return ret;
}
|
||||
/* Register the init routine for the "snps,dw-apb-ictl" compatible. */
IRQCHIP_DECLARE(dw_apb_ictl,
                "snps,dw-apb-ictl", dw_apb_ictl_init);
|
118
drivers/irqchip/irq-gic-common.c
Normal file
118
drivers/irqchip/irq-gic-common.c
Normal file
|
@ -0,0 +1,118 @@
|
|||
/*
|
||||
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqchip/arm-gic.h>
|
||||
|
||||
#include "irq-gic-common.h"
|
||||
|
||||
/*
 * gic_configure_irq - program the trigger type for one GIC interrupt.
 * @irq: hardware irq number
 * @type: IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING (others leave the
 *        config word untouched)
 * @base: distributor (or redistributor SGI) register base
 * @sync_access: optional callback to flush/synchronise register writes
 */
void gic_configure_irq(unsigned int irq, unsigned int type,
                       void __iomem *base, void (*sync_access)(void))
{
        u32 enablemask = 1 << (irq % 32);
        u32 enableoff = (irq / 32) * 4;
        u32 confmask = 0x2 << ((irq % 16) * 2);
        u32 confoff = (irq / 16) * 4;
        bool enabled = false;
        u32 val;

        /*
         * Read current configuration register, and insert the config
         * for "irq", depending on "type".
         */
        val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
        if (type == IRQ_TYPE_LEVEL_HIGH)
                val &= ~confmask;
        else if (type == IRQ_TYPE_EDGE_RISING)
                val |= confmask;

        /*
         * As recommended by the spec, disable the interrupt before changing
         * the configuration
         */
        if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
                writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
                if (sync_access)
                        sync_access();
                enabled = true;
        }

        /*
         * Write back the new configuration, and possibly re-enable
         * the interrupt.
         */
        writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

        if (enabled)
                writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

        if (sync_access)
                sync_access();
}
|
||||
|
||||
/*
 * gic_dist_config - one-time boot configuration of the GIC distributor.
 * @base: distributor register base
 * @gic_irqs: total number of hw irqs (SPIs configured from 32 upward)
 * @sync_access: optional callback to flush/synchronise register writes
 */
void __init gic_dist_config(void __iomem *base, int gic_irqs,
                            void (*sync_access)(void))
{
        unsigned int i;

        /*
         * Set all global interrupts to be level triggered, active low.
         */
        for (i = 32; i < gic_irqs; i += 16)
                writel_relaxed(GICD_INT_ACTLOW_LVLTRIG,
                               base + GIC_DIST_CONFIG + i / 4);

        /*
         * Set priority on all global interrupts.
         */
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i);

        /*
         * Disable all interrupts. Leave the PPI and SGIs alone
         * as they are enabled by redistributor registers.
         */
        for (i = 32; i < gic_irqs; i += 32)
                writel_relaxed(GICD_INT_EN_CLR_X32,
                               base + GIC_DIST_ENABLE_CLEAR + i / 8);

        if (sync_access)
                sync_access();
}
|
||||
|
||||
/*
 * gic_cpu_config - per-CPU configuration of the banked SGI/PPI registers.
 * @base: register base for the banked (per-CPU) interrupts
 * @sync_access: optional callback to flush/synchronise register writes
 */
void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
{
        int i;

        /*
         * Deal with the banked PPI and SGI interrupts - disable all
         * PPI interrupts, ensure all SGI interrupts are enabled.
         */
        writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR);
        writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET);

        /*
         * Set priority on PPI and SGI interrupts
         */
        for (i = 0; i < 32; i += 4)
                writel_relaxed(GICD_INT_DEF_PRI_X4,
                               base + GIC_DIST_PRI + i * 4 / 4);

        if (sync_access)
                sync_access();
}
|
29
drivers/irqchip/irq-gic-common.h
Normal file
29
drivers/irqchip/irq-gic-common.h
Normal file
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _IRQ_GIC_COMMON_H
#define _IRQ_GIC_COMMON_H

#include <linux/of.h>
#include <linux/irqdomain.h>

/* Program the trigger type for @irq; @sync_access (may be NULL) flushes. */
void gic_configure_irq(unsigned int irq, unsigned int type,
                       void __iomem *base, void (*sync_access)(void));
/* One-time distributor setup: trigger, priority, disable for all SPIs. */
void gic_dist_config(void __iomem *base, int gic_irqs,
                       void (*sync_access)(void));
/* Per-CPU banked SGI/PPI setup. */
void gic_cpu_config(void __iomem *base, void (*sync_access)(void));

#endif /* _IRQ_GIC_COMMON_H */
|
736
drivers/irqchip/irq-gic-v3.c
Normal file
736
drivers/irqchip/irq-gic-v3.c
Normal file
|
@ -0,0 +1,736 @@
|
|||
/*
|
||||
* Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpu_pm.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <linux/irqchip/arm-gic-v3.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
||||
#include "irq-gic-common.h"
|
||||
#include "irqchip.h"
|
||||
|
||||
/* Driver-global state for the (single) GICv3 instance. */
struct gic_chip_data {
        void __iomem *dist_base;        /* distributor MMIO base */
        void __iomem **redist_base;     /* one mapping per redistributor region */
        void __iomem * __percpu *rdist; /* per-CPU pointer to its RD_base */
        struct irq_domain *domain;
        u64 redist_stride;              /* 0 = derive stride from GICR_TYPER */
        u32 redist_regions;
        unsigned int irq_nr;            /* total hwirq count handled */
};
|
||||
|
||||
static struct gic_chip_data gic_data __read_mostly;
|
||||
|
||||
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist))
|
||||
#define gic_data_rdist_rd_base() (*gic_data_rdist())
|
||||
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
|
||||
|
||||
/* Our default, arbitrary priority value. Linux only uses one anyway. */
|
||||
#define DEFAULT_PMR_VALUE 0xf0
|
||||
|
||||
/* Hardware irq number backing @d. */
static inline unsigned int gic_irq(struct irq_data *d)
{
        return d->hwirq;
}
|
||||
|
||||
/* SGIs and PPIs (hwirq < 32) live in the per-CPU redistributor. */
static inline int gic_irq_in_rdist(struct irq_data *d)
{
        return gic_irq(d) < 32;
}
|
||||
|
||||
/*
 * Register base for @d: SGI_base of this CPU's redistributor for
 * SGIs/PPIs, the distributor for SPIs.  LPIs (hwirq >= 8192) are not
 * supported here and trigger a BUG.
 */
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        if (d->hwirq <= 1023)           /* SPI -> dist_base */
                return gic_data.dist_base;

        if (d->hwirq >= 8192)
                BUG();          /* LPI Detected!!! */

        return NULL;
}
|
||||
|
||||
/*
 * Poll the Register Write Pending bit until the (re)distributor has
 * digested the last write, giving up noisily after roughly one second.
 */
static void gic_do_wait_for_rwp(void __iomem *base)
{
        u32 remaining = 1000000;        /* ~1s at 1us per poll */

        while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
                if (!--remaining) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
                        return;
                }
                cpu_relax();
                udelay(1);
        }
}
|
||||
|
||||
/* Wait for completion of a distributor change */
|
||||
/* Wait for the distributor to complete a pending register write. */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base);
}
|
||||
|
||||
/* Wait for completion of a redistributor change */
|
||||
/* Wait for this CPU's redistributor to complete a pending write. */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}
|
||||
|
||||
/* Low level accessors */
|
||||
/* Acknowledge the highest-priority pending interrupt (reads ICC_IAR1_EL1). */
static u64 __maybe_unused gic_read_iar(void)
{
        u64 irqstat;

        asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
        return irqstat;
}
|
||||
|
||||
/* Set the CPU interface priority mask (ICC_PMR_EL1). */
static void __maybe_unused gic_write_pmr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}
|
||||
|
||||
/* Write ICC_CTLR_EL1; isb ensures the new control takes effect. */
static void __maybe_unused gic_write_ctlr(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
        isb();
}
|
||||
|
||||
/* Enable/disable Group 1 interrupts for this CPU (ICC_IGRPEN1_EL1). */
static void __maybe_unused gic_write_grpen1(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
        isb();
}
|
||||
|
||||
/* Generate a Group 1 SGI by writing the routing word to ICC_SGI1R_EL1. */
static void __maybe_unused gic_write_sgi1r(u64 val)
{
        asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}
|
||||
|
||||
/*
 * Enable System Register Enable (SRE) access to the CPU interface,
 * then read it back to confirm EL2 actually allows it.
 */
static void gic_enable_sre(void)
{
        u64 val;

        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        val |= ICC_SRE_EL1_SRE;
        asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
        isb();

        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
        if (!(val & ICC_SRE_EL1_SRE))
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
|
||||
|
||||
/*
 * gic_enable_redist - wake up or put to sleep this CPU's redistributor.
 * @enable: true to wake (clear ProcessorSleep), false to sleep
 *
 * Polls GICR_WAKER.ChildrenAsleep (up to ~1s) until the redistributor
 * reaches the requested state.  A redistributor without PM support keeps
 * ProcessorSleep clear; that case is detected and returns early.
 */
static void gic_enable_redist(bool enable)
{
        void __iomem *rbase;
        u32 count = 1000000;    /* 1s! */
        u32 val;

        rbase = gic_data_rdist_rd_base();

        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable)
                /* Wake up this CPU redistributor */
                val &= ~GICR_WAKER_ProcessorSleep;
        else
                val |= GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        if (!enable) {          /* Check that GICR_WAKER is writeable */
                val = readl_relaxed(rbase + GICR_WAKER);
                if (!(val & GICR_WAKER_ProcessorSleep))
                        return; /* No PM support in this redistributor */
        }

        while (count--) {
                val = readl_relaxed(rbase + GICR_WAKER);
                if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
                        break;
                cpu_relax();
                udelay(1);
        };
        if (!count)
                pr_err_ratelimited("redistributor failed to %s...\n",
                                   enable ? "wakeup" : "sleep");
}
|
||||
|
||||
/*
|
||||
* Routines to disable, enable, EOI and route interrupts
|
||||
*/
|
||||
/*
 * Set the bit for @d in the write-one-to-act register at @offset
 * (ISENABLER/ICENABLER etc.), choosing redistributor vs distributor
 * base, then wait for the write to complete.
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        u32 mask = 1 << (gic_irq(d) % 32);
        void (*rwp_wait)(void);
        void __iomem *base;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
        rwp_wait();
}
|
||||
|
||||
/* Mask @d by setting its ICENABLER bit. */
static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
}
|
||||
|
||||
/* Unmask @d by setting its ISENABLER bit. */
static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}
|
||||
|
||||
/* Signal end-of-interrupt for @d via ICC_EOIR1_EL1. */
static void gic_eoi_irq(struct irq_data *d)
{
        gic_write_eoir(gic_irq(d));
}
|
||||
|
||||
/*
 * gic_set_type - set the trigger type for an interrupt.
 * @d: irq data
 * @type: IRQ_TYPE_LEVEL_HIGH or IRQ_TYPE_EDGE_RISING only
 *
 * Returns 0 on success, -EINVAL for SGIs (fixed config) or an
 * unsupported trigger type.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = gic_irq(d);
        void (*rwp_wait)(void);
        void __iomem *base;

        /* Interrupt configuration for SGIs can't be changed */
        if (irq < 16)
                return -EINVAL;

        if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d)) {
                base = gic_data_rdist_sgi_base();
                rwp_wait = gic_redist_wait_for_rwp;
        } else {
                base = gic_data.dist_base;
                rwp_wait = gic_dist_wait_for_rwp;
        }

        gic_configure_irq(irq, type, base, rwp_wait);

        return 0;
}
|
||||
|
||||
/*
 * Pack the four MPIDR affinity levels into the GICD_IROUTER layout
 * (Aff3 in bits [39:32], Aff2/Aff1/Aff0 in the low 24 bits).
 */
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
        u64 aff;

        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}
|
||||
|
||||
/*
 * Top-level interrupt entry: acknowledge and dispatch interrupts until
 * the CPU interface reports spurious (1023).  SPIs/PPIs (16..1019) go
 * through the irq domain; SGIs (<16) are routed to the IPI handler.
 */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        u64 irqnr;

        do {
                irqnr = gic_read_iar();

                if (likely(irqnr > 15 && irqnr < 1020)) {
                        int err;
                        err = handle_domain_irq(gic_data.domain, irqnr, regs);
                        if (err) {
                                WARN_ONCE(true, "Unexpected SPI received!\n");
                                /* EOI it ourselves since no handler ran */
                                gic_write_eoir(irqnr);
                        }
                        continue;
                }
                if (irqnr < 16) {
                        gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
#else
                        WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
                        continue;
                }
        } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
|
||||
|
||||
/*
 * Boot-time distributor bring-up: disable, apply common config, enable
 * with affinity routing and Group 1, then route all SPIs to the boot CPU.
 */
static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

        /* Enable distributor with ARE, Group1 */
        writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
                       base + GICD_CTLR);

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
        for (i = 32; i < gic_data.irq_nr; i++)
                writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
}
|
||||
|
||||
/*
 * gic_populate_rdist - find this CPU's redistributor frame.
 *
 * Walks every redistributor region, matching GICR_TYPER[63:32] against
 * the CPU's MPIDR affinity; on a hit, caches the RD_base pointer in the
 * per-CPU rdist slot.  Regions are traversed with redist_stride when the
 * DT provides one, otherwise with the architectural frame sizes.
 *
 * Returns 0 on success, -ENODEV when no redistributor matches this CPU.
 */
static int gic_populate_rdist(void)
{
        u64 mpidr = cpu_logical_map(smp_processor_id());
        u64 typer;
        u32 aff;
        int i;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        for (i = 0; i < gic_data.redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_base[i];
                u32 reg;

                /* Sanity-check the region really holds a GICv3/v4 frame */
                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = readq_relaxed(ptr + GICR_TYPER);
                        if ((typer >> 32) == aff) {
                                gic_data_rdist_rd_base() = ptr;
                                pr_info("CPU%d: found redistributor %llx @%p\n",
                                        smp_processor_id(),
                                        (unsigned long long)mpidr, ptr);
                                return 0;
                        }

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
             smp_processor_id(), (unsigned long long)mpidr);
        return -ENODEV;
}
|
||||
|
||||
/* Initialise this CPU's interface system registers (SRE, PMR, CTLR, G1). */
static void gic_cpu_sys_reg_init(void)
{
        /* Enable system registers */
        gic_enable_sre();

        /* Set priority mask register */
        gic_write_pmr(DEFAULT_PMR_VALUE);

        /* EOI deactivates interrupt too (mode 0) */
        gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);

        /* ... and let's hit the road... */
        gic_write_grpen1(1);
}
|
||||
|
||||
/*
 * Per-CPU bring-up: locate and wake the redistributor, configure the
 * banked SGI/PPI registers, then initialise the CPU interface sysregs.
 */
static void gic_cpu_init(void)
{
        void __iomem *rbase;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist(true);

        rbase = gic_data_rdist_sgi_base();

        gic_cpu_config(rbase, gic_redist_wait_for_rwp);

        /* initialise system registers */
        gic_cpu_sys_reg_init();
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
 * Read back the state bit for @d from the register bank at @offset
 * (e.g. ISENABLER) and return it as 0/1.
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        void __iomem *base;
        u32 bit = 1 << (gic_irq(d) % 32);

        base = gic_irq_in_rdist(d) ? gic_data_rdist_sgi_base()
                                   : gic_data.dist_base;

        return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & bit);
}
|
||||
|
||||
/* Hotplug notifier: initialise the GIC on a CPU that is coming online. */
static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
{
        if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init();
        return NOTIFY_OK;
}
|
||||
|
||||
/*
|
||||
* Notifier for enabling the GIC CPU interface. Set an arbitrarily high
|
||||
* priority because the GIC needs to be up before the ARM generic timers.
|
||||
*/
|
||||
/* High priority so the GIC comes up before the ARM generic timers. */
static struct notifier_block gic_cpu_notifier = {
        .notifier_call = gic_secondary_init,
        .priority = 100,
};
|
||||
|
||||
/*
 * gic_compute_target_list - build the SGI target list for one cluster.
 * @base_cpu: in: first CPU to consider; out: last CPU consumed
 * @mask: cpumask of all intended SGI targets
 * @cluster_id: MPIDR of the cluster (affinity bits above Aff0)
 *
 * Accumulates a 16-bit target bitmap (one bit per Aff0) for consecutive
 * CPUs in @mask that share @cluster_id.  Stops at the first CPU of a
 * different cluster (rewinding @base_cpu by one so the caller resumes
 * there) or at the end of the mask.
 */
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   u64 cluster_id)
{
        int cpu = *base_cpu;
        u64 mpidr = cpu_logical_map(cpu);
        u16 tlist = 0;

        while (cpu < nr_cpu_ids) {
                /*
                 * If we ever get a cluster of more than 16 CPUs, just
                 * scream and skip that CPU.
                 */
                if (WARN_ON((mpidr & 0xff) >= 16))
                        goto out;

                tlist |= 1 << (mpidr & 0xf);

                cpu = cpumask_next(cpu, mask);
                if (cpu == nr_cpu_ids)
                        goto out;

                mpidr = cpu_logical_map(cpu);

                if (cluster_id != (mpidr & ~0xffUL)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}
|
||||
|
||||
/*
 * Fire SGI @irq at the CPUs in @tlist within @cluster_id by composing
 * and writing the ICC_SGI1R_EL1 routing word.
 */
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 |
               irq << 24                                 |
               MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 |
               tlist);

        pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}
|
||||
|
||||
/*
 * gic_raise_softirq - send IPI @irq (an SGI, < 16) to all CPUs in @mask.
 *
 * SGIs are addressed per cluster, so the mask is walked one cluster at a
 * time via gic_compute_target_list().
 */
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
        int cpu;

        if (WARN_ON(irq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        smp_wmb();

        for_each_cpu_mask(cpu, *mask) {
                u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, irq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}
|
||||
|
||||
/* Hook up the IPI backend and the secondary-CPU bring-up notifier. */
static void gic_smp_init(void)
{
        set_smp_cross_call(gic_raise_softirq);
        register_cpu_notifier(&gic_cpu_notifier);
}
|
||||
|
||||
/*
 * gic_set_affinity - route SPI @d to one online CPU from @mask_val.
 *
 * SGIs/PPIs are per-CPU and cannot be re-routed (-EINVAL).  The irq is
 * temporarily masked around the GICD_IROUTER update, per the usual
 * disable-modify-enable sequence.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
        void __iomem *reg;
        int enabled;
        u64 val;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
        val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

        writeq_relaxed(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);
        else
                gic_dist_wait_for_rwp();

        return IRQ_SET_MASK_OK;
}
|
||||
#else
|
||||
#define gic_set_affinity NULL
|
||||
#define gic_smp_init() do { } while(0)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CPU_PM
|
||||
/*
 * CPU PM notifier: disable Group 1 and sleep the redistributor on
 * suspend entry; wake it and re-init the sysregs on exit.
 */
static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
{
        if (cmd == CPU_PM_EXIT) {
                gic_enable_redist(true);
                gic_cpu_sys_reg_init();
        } else if (cmd == CPU_PM_ENTER) {
                gic_write_grpen1(0);
                gic_enable_redist(false);
        }
        return NOTIFY_OK;
}
|
||||
|
||||
/* Registered with the CPU PM framework in gic_cpu_pm_init(). */
static struct notifier_block gic_cpu_pm_notifier_block = {
        .notifier_call = gic_cpu_pm_notifier,
};
|
||||
|
||||
/* Register the CPU PM notifier (no-op stub when !CONFIG_CPU_PM). */
static void gic_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}
|
||||
|
||||
#else
|
||||
static inline void gic_cpu_pm_init(void) { }
|
||||
#endif /* CONFIG_CPU_PM */
|
||||
|
||||
/* irq_chip callbacks shared by all GICv3-handled interrupts. */
static struct irq_chip gic_chip = {
        .name                   = "GICv3",
        .irq_mask               = gic_mask_irq,
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
};
|
||||
|
||||
/*
 * gic_irq_domain_map - set up a linux irq for hw irq @hw.
 *
 * SGIs (<16) are reserved for the kernel's IPI machinery (-EPERM);
 * PPIs (16..31) get the per-CPU devid handler, SPIs the fasteoi handler.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
{
        /* SGIs are private to the core kernel */
        if (hw < 16)
                return -EPERM;
        /* PPIs */
        if (hw < 32) {
                irq_set_percpu_devid(irq);
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_percpu_devid_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        }
        /* SPIs */
        if (hw >= 32 && hw < gic_data.irq_nr) {
                irq_set_chip_and_handler(irq, &gic_chip,
                                         handle_fasteoi_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
        irq_set_chip_data(irq, d->host_data);
        return 0;
}
|
||||
|
||||
static int gic_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *controller,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq, unsigned int *out_type)
|
||||
{
|
||||
if (d->of_node != controller)
|
||||
return -EINVAL;
|
||||
if (intsize < 3)
|
||||
return -EINVAL;
|
||||
|
||||
switch(intspec[0]) {
|
||||
case 0: /* SPI */
|
||||
*out_hwirq = intspec[1] + 32;
|
||||
break;
|
||||
case 1: /* PPI */
|
||||
*out_hwirq = intspec[1] + 16;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* DT-driven radix-tree domain: map new virqs, translate 3-cell specifiers. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
|
||||
|
||||
/*
 * Probe a GICv3 from DT: map the distributor and all redistributor
 * regions, size the interrupt space, create the irq domain and bring
 * up the boot CPU's interface. On failure every mapping/allocation
 * made so far is unwound via the goto ladder.
 */
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	void __iomem **redist_base;
	u64 redist_stride;
	u32 redist_regions;
	u32 reg;
	int gic_irqs;
	int err;
	int i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	/* Sanity-check that the IP really is a GICv3/GICv4 distributor */
	reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		err = -ENODEV;
		goto out_unmap_dist;
	}

	/* A single redistributor region unless DT says otherwise */
	if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
		redist_regions = 1;

	redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
	if (!redist_base) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	/* Redistributor regions follow the distributor in "reg" (index 1+) */
	for (i = 0; i < redist_regions; i++) {
		redist_base[i] = of_iomap(node, 1 + i);
		if (!redist_base[i]) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
	}

	/* Stride 0 means "use the architectural redistributor stride" */
	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_data.dist_base = dist_base;
	gic_data.redist_base = redist_base;
	gic_data.redist_regions = redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
					      &gic_data);
	gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	/* Ordering matters: IPIs, distributor, then this CPU's interface */
	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdist);
out_unmap_rdist:
	for (i = 0; i < redist_regions; i++)
		if (redist_base[i])
			iounmap(redist_base[i]);
	kfree(redist_base);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
|
1072
drivers/irqchip/irq-gic.c
Normal file
1072
drivers/irqchip/irq-gic.c
Normal file
File diff suppressed because it is too large
Load diff
424
drivers/irqchip/irq-hip04.c
Normal file
424
drivers/irqchip/irq-hip04.c
Normal file
|
@ -0,0 +1,424 @@
|
|||
/*
|
||||
* Hisilicon HiP04 INTC
|
||||
*
|
||||
* Copyright (C) 2002-2014 ARM Limited.
|
||||
* Copyright (c) 2013-2014 Hisilicon Ltd.
|
||||
* Copyright (c) 2013-2014 Linaro Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* Interrupt architecture for the HIP04 INTC:
|
||||
*
|
||||
* o There is one Interrupt Distributor, which receives interrupts
|
||||
* from system devices and sends them to the Interrupt Controllers.
|
||||
*
|
||||
* o There is one CPU Interface per CPU, which sends interrupts sent
|
||||
* by the Distributor, and interrupts generated locally, to the
|
||||
* associated CPU. The base address of the CPU interface is usually
|
||||
* aliased so that the same address points to different chips depending
|
||||
* on the CPU it is accessed from.
|
||||
*
|
||||
* Note that IRQs 0-31 are special - they are local to each CPU.
|
||||
* As such, the enable set/clear, pending set/clear and active bit
|
||||
* registers are banked per-cpu for these sources.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpu_pm.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/irqchip/arm-gic.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
||||
#include "irq-gic-common.h"
|
||||
#include "irqchip.h"
|
||||
|
||||
/* The HiP04 distributor supports at most 510 interrupt sources. */
#define HIP04_MAX_IRQS		510

/* Per-controller state; a single instance (hip04_data below) exists. */
struct hip04_irq_data {
	void __iomem *dist_base;	/* distributor registers */
	void __iomem *cpu_base;		/* banked CPU interface registers */
	struct irq_domain *domain;
	unsigned int nr_irqs;		/* number of supported hwirqs */
};
|
||||
|
||||
/* Serialises distributor register read-modify-write sequences. */
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_HIP04_CPU_IF 16
static u16 hip04_cpu_map[NR_HIP04_CPU_IF] __read_mostly;

static struct hip04_irq_data hip04_data __read_mostly;
|
||||
|
||||
/*
 * Fetch the distributor base from the chip data attached to @d.
 * The local was renamed from "hip04_data", which shadowed the
 * file-scope variable of the same name (-Wshadow hazard).
 */
static inline void __iomem *hip04_dist_base(struct irq_data *d)
{
	struct hip04_irq_data *intc = irq_data_get_irq_chip_data(d);

	return intc->dist_base;
}
|
||||
|
||||
/*
 * Fetch the CPU interface base from the chip data attached to @d.
 * The local was renamed from "hip04_data", which shadowed the
 * file-scope variable of the same name (-Wshadow hazard).
 */
static inline void __iomem *hip04_cpu_base(struct irq_data *d)
{
	struct hip04_irq_data *intc = irq_data_get_irq_chip_data(d);

	return intc->cpu_base;
}
|
||||
|
||||
static inline unsigned int hip04_irq(struct irq_data *d)
|
||||
{
|
||||
return d->hwirq;
|
||||
}
|
||||
|
||||
/*
 * Routines to acknowledge, disable and enable interrupts
 */
/* Disable delivery of @d by writing its bit to ENABLE_CLEAR. */
static void hip04_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (hip04_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_CLEAR +
		       (hip04_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
|
||||
|
||||
/* Enable delivery of @d by writing its bit to ENABLE_SET. */
static void hip04_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (hip04_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, hip04_dist_base(d) + GIC_DIST_ENABLE_SET +
		       (hip04_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
|
||||
|
||||
/* Signal end-of-interrupt for @d at the CPU interface. */
static void hip04_eoi_irq(struct irq_data *d)
{
	writel_relaxed(hip04_irq(d), hip04_cpu_base(d) + GIC_CPU_EOI);
}
|
||||
|
||||
/*
 * Configure the trigger mode of @d. Only level-high and rising-edge
 * are supported; SGI (hwirq < 16) configuration is fixed in hardware.
 */
static int hip04_irq_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = hip04_dist_base(d);
	unsigned int irq = hip04_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	gic_configure_irq(irq, type, base, NULL);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int hip04_irq_set_affinity(struct irq_data *d,
|
||||
const struct cpumask *mask_val,
|
||||
bool force)
|
||||
{
|
||||
void __iomem *reg;
|
||||
unsigned int cpu, shift = (hip04_irq(d) % 2) * 16;
|
||||
u32 val, mask, bit;
|
||||
|
||||
if (!force)
|
||||
cpu = cpumask_any_and(mask_val, cpu_online_mask);
|
||||
else
|
||||
cpu = cpumask_first(mask_val);
|
||||
|
||||
if (cpu >= NR_HIP04_CPU_IF || cpu >= nr_cpu_ids)
|
||||
return -EINVAL;
|
||||
|
||||
raw_spin_lock(&irq_controller_lock);
|
||||
reg = hip04_dist_base(d) + GIC_DIST_TARGET + ((hip04_irq(d) * 2) & ~3);
|
||||
mask = 0xffff << shift;
|
||||
bit = hip04_cpu_map[cpu] << shift;
|
||||
val = readl_relaxed(reg) & ~mask;
|
||||
writel_relaxed(val | bit, reg);
|
||||
raw_spin_unlock(&irq_controller_lock);
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
 * Top-level interrupt entry: acknowledge interrupts from the CPU
 * interface until a spurious ID is read. IDs 0-15 are IPIs (EOI'd
 * immediately, then dispatched), 16..HIP04_MAX_IRQS are normal irqs.
 */
static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	void __iomem *cpu_base = hip04_data.cpu_base;

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr <= HIP04_MAX_IRQS)) {
			irqnr = irq_find_mapping(hip04_data.domain, irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			/* The full INTACK value (incl. source CPU) is EOI'd */
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		/* Spurious ID: nothing left pending */
		break;
	} while (1);
}
|
||||
|
||||
/* irq_chip callbacks for all HiP04 INTC interrupts. */
static struct irq_chip hip04_irq_chip = {
	.name			= "HIP04 INTC",
	.irq_mask		= hip04_mask_irq,
	.irq_unmask		= hip04_unmask_irq,
	.irq_eoi		= hip04_eoi_irq,
	.irq_set_type		= hip04_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= hip04_irq_set_affinity,
#endif
};
|
||||
|
||||
/*
 * Discover this CPU's interface mask by scanning the banked per-CPU
 * TARGET registers of hwirqs 0-31 until a non-zero field is found.
 * Returns 0 (and screams) if the hardware reports nothing.
 */
static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
{
	void __iomem *base = intc->dist_base;
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 2) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i * 2);
		/* fold the high 16-bit target field into the low one */
		mask |= mask >> 16;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
|
||||
|
||||
/*
 * One-time distributor setup: disable it, target every SPI at the
 * boot CPU, apply the common GIC configuration, then re-enable.
 */
static void __init hip04_irq_dist_init(struct hip04_irq_data *intc)
{
	unsigned int i;
	u32 cpumask;
	unsigned int nr_irqs = intc->nr_irqs;
	void __iomem *base = intc->dist_base;

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = hip04_get_cpumask(intc);
	/* duplicate the mask into both 16-bit target fields of each word */
	cpumask |= cpumask << 16;
	for (i = 32; i < nr_irqs; i += 2)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + ((i * 2) & ~3));

	gic_dist_config(base, nr_irqs, NULL);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
|
||||
|
||||
/*
 * Per-CPU bring-up: record this CPU's interface mask, scrub it from
 * the other map slots, configure the banked registers, and open the
 * CPU interface (priority mask 0xf0, interface enabled).
 */
static void hip04_irq_cpu_init(struct hip04_irq_data *intc)
{
	void __iomem *dist_base = intc->dist_base;
	void __iomem *base = intc->cpu_base;
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_HIP04_CPU_IF);
	cpu_mask = hip04_get_cpumask(intc);
	hip04_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		if (i != cpu)
			hip04_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void hip04_raise_softirq(const struct cpumask *mask, unsigned int irq)
|
||||
{
|
||||
int cpu;
|
||||
unsigned long flags, map = 0;
|
||||
|
||||
raw_spin_lock_irqsave(&irq_controller_lock, flags);
|
||||
|
||||
/* Convert our logical CPU mask into a physical one. */
|
||||
for_each_cpu(cpu, mask)
|
||||
map |= hip04_cpu_map[cpu];
|
||||
|
||||
/*
|
||||
* Ensure that stores to Normal memory are visible to the
|
||||
* other CPUs before they observe us issuing the IPI.
|
||||
*/
|
||||
dmb(ishst);
|
||||
|
||||
/* this always happens on GIC0 */
|
||||
writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT);
|
||||
|
||||
raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
 * Bind a mapped Linux irq: banked per-CPU sources (hwirq < 32) get
 * the per-cpu devid handler, everything else the fasteoi handler.
 */
static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_percpu_devid_irq);
		/* per-CPU irqs are enabled explicitly on each core */
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
|
||||
|
||||
static int hip04_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *controller,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq,
|
||||
unsigned int *out_type)
|
||||
{
|
||||
unsigned long ret = 0;
|
||||
|
||||
if (d->of_node != controller)
|
||||
return -EINVAL;
|
||||
if (intsize < 3)
|
||||
return -EINVAL;
|
||||
|
||||
/* Get the interrupt number and add 16 to skip over SGIs */
|
||||
*out_hwirq = intspec[1] + 16;
|
||||
|
||||
/* For SPIs, we need to add 16 more to get the irq ID number */
|
||||
if (!intspec[0])
|
||||
*out_hwirq += 16;
|
||||
|
||||
*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static int hip04_irq_secondary_init(struct notifier_block *nfb,
|
||||
unsigned long action,
|
||||
void *hcpu)
|
||||
{
|
||||
if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
|
||||
hip04_irq_cpu_init(&hip04_data);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
* Notifier for enabling the INTC CPU interface. Set an arbitrarily high
|
||||
* priority because the GIC needs to be up before the ARM generic timers.
|
||||
*/
|
||||
static struct notifier_block hip04_irq_cpu_notifier = {
|
||||
.notifier_call = hip04_irq_secondary_init,
|
||||
.priority = 100,
|
||||
};
|
||||
#endif
|
||||
|
||||
/* Legacy-domain ops: map new virqs, translate 3-cell DT specifiers. */
static const struct irq_domain_ops hip04_irq_domain_ops = {
	.map	= hip04_irq_domain_map,
	.xlate	= hip04_irq_domain_xlate,
};
|
||||
|
||||
/*
 * Probe the HiP04 INTC from DT: map the distributor and CPU
 * interface, size the interrupt space (capped at 510 sources),
 * allocate a legacy irq domain starting at hwirq 16, and initialise
 * the hardware for the boot CPU.
 */
static int __init
hip04_of_init(struct device_node *node, struct device_node *parent)
{
	irq_hw_number_t hwirq_base = 16;
	int nr_irqs, irq_base, i;

	if (WARN_ON(!node))
		return -ENODEV;

	hip04_data.dist_base = of_iomap(node, 0);
	WARN(!hip04_data.dist_base, "fail to map hip04 intc dist registers\n");

	hip04_data.cpu_base = of_iomap(node, 1);
	WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	/*
	 * NOTE(review): hip04_cpu_map entries are u16 and the target
	 * fields are 16 bits wide, yet only the low 8 bits are set
	 * here - confirm whether 0xffff was intended.
	 */
	for (i = 0; i < NR_HIP04_CPU_IF; i++)
		hip04_cpu_map[i] = 0xff;

	/*
	 * Find out how many interrupts are supported.
	 * The HIP04 INTC only supports up to 510 interrupt sources.
	 */
	nr_irqs = readl_relaxed(hip04_data.dist_base + GIC_DIST_CTR) & 0x1f;
	nr_irqs = (nr_irqs + 1) * 32;
	if (nr_irqs > HIP04_MAX_IRQS)
		nr_irqs = HIP04_MAX_IRQS;
	hip04_data.nr_irqs = nr_irqs;

	nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */

	irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
	if (IS_ERR_VALUE(irq_base)) {
		pr_err("failed to allocate IRQ numbers\n");
		return -EINVAL;
	}

	hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base,
						  hwirq_base,
						  &hip04_irq_domain_ops,
						  &hip04_data);

	if (WARN_ON(!hip04_data.domain))
		return -EINVAL;

#ifdef CONFIG_SMP
	set_smp_cross_call(hip04_raise_softirq);
	register_cpu_notifier(&hip04_irq_cpu_notifier);
#endif
	set_handle_irq(hip04_handle_irq);

	hip04_irq_dist_init(&hip04_data);
	hip04_irq_cpu_init(&hip04_data);

	return 0;
}
IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
|
499
drivers/irqchip/irq-imgpdc.c
Normal file
499
drivers/irqchip/irq-imgpdc.c
Normal file
|
@ -0,0 +1,499 @@
|
|||
/*
|
||||
* IMG PowerDown Controller (PDC)
|
||||
*
|
||||
* Copyright 2010-2013 Imagination Technologies Ltd.
|
||||
*
|
||||
* Exposes the syswake and PDC peripheral wake interrupts to the system.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
/* PDC interrupt register numbers */
|
||||
|
||||
#define PDC_IRQ_STATUS 0x310
|
||||
#define PDC_IRQ_ENABLE 0x314
|
||||
#define PDC_IRQ_CLEAR 0x318
|
||||
#define PDC_IRQ_ROUTE 0x31c
|
||||
#define PDC_SYS_WAKE_BASE 0x330
|
||||
#define PDC_SYS_WAKE_STRIDE 0x8
|
||||
#define PDC_SYS_WAKE_CONFIG_BASE 0x334
|
||||
#define PDC_SYS_WAKE_CONFIG_STRIDE 0x8
|
||||
|
||||
/* PDC interrupt register field masks */
|
||||
|
||||
#define PDC_IRQ_SYS3 0x08
|
||||
#define PDC_IRQ_SYS2 0x04
|
||||
#define PDC_IRQ_SYS1 0x02
|
||||
#define PDC_IRQ_SYS0 0x01
|
||||
#define PDC_IRQ_ROUTE_WU_EN_SYS3 0x08000000
|
||||
#define PDC_IRQ_ROUTE_WU_EN_SYS2 0x04000000
|
||||
#define PDC_IRQ_ROUTE_WU_EN_SYS1 0x02000000
|
||||
#define PDC_IRQ_ROUTE_WU_EN_SYS0 0x01000000
|
||||
#define PDC_IRQ_ROUTE_WU_EN_WD 0x00040000
|
||||
#define PDC_IRQ_ROUTE_WU_EN_IR 0x00020000
|
||||
#define PDC_IRQ_ROUTE_WU_EN_RTC 0x00010000
|
||||
#define PDC_IRQ_ROUTE_EXT_EN_SYS3 0x00000800
|
||||
#define PDC_IRQ_ROUTE_EXT_EN_SYS2 0x00000400
|
||||
#define PDC_IRQ_ROUTE_EXT_EN_SYS1 0x00000200
|
||||
#define PDC_IRQ_ROUTE_EXT_EN_SYS0 0x00000100
|
||||
#define PDC_IRQ_ROUTE_EXT_EN_WD 0x00000004
|
||||
#define PDC_IRQ_ROUTE_EXT_EN_IR 0x00000002
|
||||
#define PDC_IRQ_ROUTE_EXT_EN_RTC 0x00000001
|
||||
#define PDC_SYS_WAKE_RESET 0x00000010
|
||||
#define PDC_SYS_WAKE_INT_MODE 0x0000000e
|
||||
#define PDC_SYS_WAKE_INT_MODE_SHIFT 1
|
||||
#define PDC_SYS_WAKE_PIN_VAL 0x00000001
|
||||
|
||||
/* PDC interrupt constants */
|
||||
|
||||
#define PDC_SYS_WAKE_INT_LOW 0x0
|
||||
#define PDC_SYS_WAKE_INT_HIGH 0x1
|
||||
#define PDC_SYS_WAKE_INT_DOWN 0x2
|
||||
#define PDC_SYS_WAKE_INT_UP 0x3
|
||||
#define PDC_SYS_WAKE_INT_CHANGE 0x6
|
||||
#define PDC_SYS_WAKE_INT_NONE 0x4
|
||||
|
||||
/**
 * struct pdc_intc_priv - private pdc interrupt data.
 * @nr_perips:		Number of peripheral interrupt signals.
 * @nr_syswakes:	Number of syswake signals.
 * @perip_irqs:		List of peripheral IRQ numbers handled.
 * @syswake_irq:	Shared PDC syswake IRQ number.
 * @domain:		IRQ domain for PDC peripheral and syswake IRQs.
 * @pdc_base:		Base of PDC registers.
 * @irq_route:		Cached version of PDC_IRQ_ROUTE register.
 * @lock:		Lock to protect the PDC syswake registers and the cached
 *			values of those registers in this struct.
 */
struct pdc_intc_priv {
	unsigned int		nr_perips;
	unsigned int		nr_syswakes;
	unsigned int		*perip_irqs;
	unsigned int		syswake_irq;
	struct irq_domain	*domain;
	void __iomem		*pdc_base;

	u32			irq_route;	/* mirrors PDC_IRQ_ROUTE (route + wake bits) */
	raw_spinlock_t		lock;
};
|
||||
|
||||
/* Write @data to the PDC register at byte offset @reg_offs. */
static void pdc_write(struct pdc_intc_priv *priv, unsigned int reg_offs,
		      unsigned int data)
{
	iowrite32(data, priv->pdc_base + reg_offs);
}
|
||||
|
||||
/* Read the PDC register at byte offset @reg_offs. */
static unsigned int pdc_read(struct pdc_intc_priv *priv,
			     unsigned int reg_offs)
{
	return ioread32(priv->pdc_base + reg_offs);
}
|
||||
|
||||
/* Generic IRQ callbacks */
|
||||
|
||||
#define SYS0_HWIRQ 8
|
||||
|
||||
static unsigned int hwirq_is_syswake(irq_hw_number_t hw)
|
||||
{
|
||||
return hw >= SYS0_HWIRQ;
|
||||
}
|
||||
|
||||
/* Convert a syswake hwirq to its zero-based syswake index. */
static unsigned int hwirq_to_syswake(irq_hw_number_t hw)
{
	unsigned int syswake = hw - SYS0_HWIRQ;

	return syswake;
}
|
||||
|
||||
static irq_hw_number_t syswake_to_hwirq(unsigned int syswake)
|
||||
{
|
||||
return SYS0_HWIRQ + syswake;
|
||||
}
|
||||
|
||||
static struct pdc_intc_priv *irqd_to_priv(struct irq_data *data)
|
||||
{
|
||||
return (struct pdc_intc_priv *)data->domain->host_data;
|
||||
}
|
||||
|
||||
/*
 * perip_irq_mask() and perip_irq_unmask() use IRQ_ROUTE which also contains
 * wake bits, therefore we cannot use the generic irqchip mask callbacks as they
 * cache the mask.
 */

/* Clear this peripheral's route bit (cached copy + hardware). */
static void perip_irq_mask(struct irq_data *data)
{
	struct pdc_intc_priv *priv = irqd_to_priv(data);

	raw_spin_lock(&priv->lock);
	priv->irq_route &= ~data->mask;
	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
	raw_spin_unlock(&priv->lock);
}
|
||||
|
||||
/* Set this peripheral's route bit (cached copy + hardware). */
static void perip_irq_unmask(struct irq_data *data)
{
	struct pdc_intc_priv *priv = irqd_to_priv(data);

	raw_spin_lock(&priv->lock);
	priv->irq_route |= data->mask;
	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
	raw_spin_unlock(&priv->lock);
}
|
||||
|
||||
/*
 * Program a syswake pin's trigger mode in its SYS_WAKE_CONFIG field
 * and switch the irq to the matching (edge vs level) generic chip
 * type. Returns -EINVAL for unsupported trigger combinations.
 */
static int syswake_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct pdc_intc_priv *priv = irqd_to_priv(data);
	unsigned int syswake = hwirq_to_syswake(data->hwirq);
	unsigned int irq_mode;
	unsigned int soc_sys_wake_regoff, soc_sys_wake;

	/* translate to syswake IRQ mode */
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		irq_mode = PDC_SYS_WAKE_INT_CHANGE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		irq_mode = PDC_SYS_WAKE_INT_UP;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		irq_mode = PDC_SYS_WAKE_INT_DOWN;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_mode = PDC_SYS_WAKE_INT_HIGH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		irq_mode = PDC_SYS_WAKE_INT_LOW;
		break;
	default:
		return -EINVAL;
	}

	raw_spin_lock(&priv->lock);

	/* set the IRQ mode */
	soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + syswake*PDC_SYS_WAKE_STRIDE;
	soc_sys_wake = pdc_read(priv, soc_sys_wake_regoff);
	soc_sys_wake &= ~PDC_SYS_WAKE_INT_MODE;
	soc_sys_wake |= irq_mode << PDC_SYS_WAKE_INT_MODE_SHIFT;
	pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);

	/* and update the handler */
	irq_setup_alt_chip(data, flow_type);

	raw_spin_unlock(&priv->lock);

	return 0;
}
|
||||
|
||||
/* applies to both peripheral and syswake interrupts */
/*
 * Toggle the wake-enable bit for @data (route bits sit in the low
 * half of IRQ_ROUTE, wake bits 16 positions above them), then
 * propagate the wake setting to the upstream (destination) IRQ so
 * standby is handled too.
 */
static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct pdc_intc_priv *priv = irqd_to_priv(data);
	irq_hw_number_t hw = data->hwirq;
	unsigned int mask = (1 << 16) << hw;
	unsigned int dst_irq;

	raw_spin_lock(&priv->lock);
	if (on)
		priv->irq_route |= mask;
	else
		priv->irq_route &= ~mask;
	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);
	raw_spin_unlock(&priv->lock);

	/* control the destination IRQ wakeup too for standby mode */
	if (hwirq_is_syswake(hw))
		dst_irq = priv->syswake_irq;
	else
		dst_irq = priv->perip_irqs[hw];
	irq_set_irq_wake(dst_irq, on);

	return 0;
}
|
||||
|
||||
/*
 * Chained handler for a peripheral wake interrupt: work out which
 * peripheral fired from the parent irq number and forward it to the
 * mapped virq in our domain.
 */
static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
{
	struct pdc_intc_priv *priv;
	unsigned int i, irq_no;

	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);

	/* find the peripheral number */
	for (i = 0; i < priv->nr_perips; ++i)
		if (irq == priv->perip_irqs[i])
			goto found;

	/* should never get here */
	return;
found:

	/* pass on the interrupt */
	irq_no = irq_linear_revmap(priv->domain, i);
	generic_handle_irq(irq_no);
}
|
||||
|
||||
/*
 * Chained handler for the shared syswake interrupt: dispatch every
 * pending-and-enabled syswake bit to its mapped virq.
 */
static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc)
{
	struct pdc_intc_priv *priv;
	unsigned int syswake, irq_no;
	unsigned int status;

	priv = (struct pdc_intc_priv *)irq_desc_get_handler_data(desc);

	/* only bits that are both pending and enabled matter */
	status = pdc_read(priv, PDC_IRQ_STATUS) &
		 pdc_read(priv, PDC_IRQ_ENABLE);
	status &= (1 << priv->nr_syswakes) - 1;

	for (syswake = 0; status; status >>= 1, ++syswake) {
		/* Has this sys_wake triggered? */
		if (!(status & 1))
			continue;

		irq_no = irq_linear_revmap(priv->domain,
					   syswake_to_hwirq(syswake));
		generic_handle_irq(irq_no);
	}
}
|
||||
|
||||
/*
 * Put the PDC hardware into a known state: all syswake interrupts
 * masked, routing enabled for every syswake, all wake sources off,
 * and every syswake pin's trigger mode set to "none".
 */
static void pdc_intc_setup(struct pdc_intc_priv *priv)
{
	int i;
	unsigned int soc_sys_wake_regoff;
	unsigned int soc_sys_wake;

	/*
	 * Mask all syswake interrupts before routing, or we could receive an
	 * interrupt before we're ready to handle it.
	 */
	pdc_write(priv, PDC_IRQ_ENABLE, 0);

	/*
	 * Enable routing of all syswakes
	 * Disable all wake sources
	 */
	priv->irq_route = ((PDC_IRQ_ROUTE_EXT_EN_SYS0 << priv->nr_syswakes) -
			   PDC_IRQ_ROUTE_EXT_EN_SYS0);
	pdc_write(priv, PDC_IRQ_ROUTE, priv->irq_route);

	/* Initialise syswake IRQ */
	for (i = 0; i < priv->nr_syswakes; ++i) {
		/* set the IRQ mode to none */
		soc_sys_wake_regoff = PDC_SYS_WAKE_BASE + i*PDC_SYS_WAKE_STRIDE;
		soc_sys_wake = PDC_SYS_WAKE_INT_NONE
			       << PDC_SYS_WAKE_INT_MODE_SHIFT;
		pdc_write(priv, soc_sys_wake_regoff, soc_sys_wake);
	}
}
|
||||
|
||||
static int pdc_intc_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct pdc_intc_priv *priv;
|
||||
struct device_node *node = pdev->dev.of_node;
|
||||
struct resource *res_regs;
|
||||
struct irq_chip_generic *gc;
|
||||
unsigned int i;
|
||||
int irq, ret;
|
||||
u32 val;
|
||||
|
||||
if (!node)
|
||||
return -ENOENT;
|
||||
|
||||
/* Get registers */
|
||||
res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (res_regs == NULL) {
|
||||
dev_err(&pdev->dev, "cannot find registers resource\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/* Allocate driver data */
|
||||
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv) {
|
||||
dev_err(&pdev->dev, "cannot allocate device data\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
raw_spin_lock_init(&priv->lock);
|
||||
platform_set_drvdata(pdev, priv);
|
||||
|
||||
/* Ioremap the registers */
|
||||
priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
|
||||
res_regs->end - res_regs->start);
|
||||
if (!priv->pdc_base)
|
||||
return -EIO;
|
||||
|
||||
/* Get number of peripherals */
|
||||
ret = of_property_read_u32(node, "num-perips", &val);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "No num-perips node property found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val > SYS0_HWIRQ) {
|
||||
dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
|
||||
return -EINVAL;
|
||||
}
|
||||
priv->nr_perips = val;
|
||||
|
||||
/* Get number of syswakes */
|
||||
ret = of_property_read_u32(node, "num-syswakes", &val);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "No num-syswakes node property found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (val > SYS0_HWIRQ) {
|
||||
dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
|
||||
return -EINVAL;
|
||||
}
|
||||
priv->nr_syswakes = val;
|
||||
|
||||
/* Get peripheral IRQ numbers */
|
||||
priv->perip_irqs = devm_kzalloc(&pdev->dev, 4 * priv->nr_perips,
|
||||
GFP_KERNEL);
|
||||
if (!priv->perip_irqs) {
|
||||
dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
for (i = 0; i < priv->nr_perips; ++i) {
|
||||
irq = platform_get_irq(pdev, 1 + i);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
|
||||
return irq;
|
||||
}
|
||||
priv->perip_irqs[i] = irq;
|
||||
}
|
||||
/* check if too many were provided */
|
||||
if (platform_get_irq(pdev, 1 + i) >= 0) {
|
||||
dev_err(&pdev->dev, "surplus perip IRQs detected\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Get syswake IRQ number */
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "cannot find syswake IRQ\n");
|
||||
return irq;
|
||||
}
|
||||
priv->syswake_irq = irq;
|
||||
|
||||
/* Set up an IRQ domain */
|
||||
priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
|
||||
priv);
|
||||
if (unlikely(!priv->domain)) {
|
||||
dev_err(&pdev->dev, "cannot add IRQ domain\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up 2 generic irq chips with 2 chip types.
|
||||
* The first one for peripheral irqs (only 1 chip type used)
|
||||
* The second one for syswake irqs (edge and level chip types)
|
||||
*/
|
||||
ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc",
|
||||
handle_level_irq, 0, 0,
|
||||
IRQ_GC_INIT_NESTED_LOCK);
|
||||
if (ret)
|
||||
goto err_generic;
|
||||
|
||||
/* peripheral interrupt chip */
|
||||
|
||||
gc = irq_get_domain_generic_chip(priv->domain, 0);
|
||||
gc->unused = ~(BIT(priv->nr_perips) - 1);
|
||||
gc->reg_base = priv->pdc_base;
|
||||
/*
|
||||
* IRQ_ROUTE contains wake bits, so we can't use the generic versions as
|
||||
* they cache the mask
|
||||
*/
|
||||
gc->chip_types[0].regs.mask = PDC_IRQ_ROUTE;
|
||||
gc->chip_types[0].chip.irq_mask = perip_irq_mask;
|
||||
gc->chip_types[0].chip.irq_unmask = perip_irq_unmask;
|
||||
gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake;
|
||||
|
||||
/* syswake interrupt chip */
|
||||
|
||||
gc = irq_get_domain_generic_chip(priv->domain, 8);
|
||||
gc->unused = ~(BIT(priv->nr_syswakes) - 1);
|
||||
gc->reg_base = priv->pdc_base;
|
||||
|
||||
/* edge interrupts */
|
||||
gc->chip_types[0].type = IRQ_TYPE_EDGE_BOTH;
|
||||
gc->chip_types[0].handler = handle_edge_irq;
|
||||
gc->chip_types[0].regs.ack = PDC_IRQ_CLEAR;
|
||||
gc->chip_types[0].regs.mask = PDC_IRQ_ENABLE;
|
||||
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
|
||||
gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
|
||||
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
|
||||
gc->chip_types[0].chip.irq_set_type = syswake_irq_set_type;
|
||||
gc->chip_types[0].chip.irq_set_wake = pdc_irq_set_wake;
|
||||
/* for standby we pass on to the shared syswake IRQ */
|
||||
gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
|
||||
|
||||
/* level interrupts */
|
||||
gc->chip_types[1].type = IRQ_TYPE_LEVEL_MASK;
|
||||
gc->chip_types[1].handler = handle_level_irq;
|
||||
gc->chip_types[1].regs.ack = PDC_IRQ_CLEAR;
|
||||
gc->chip_types[1].regs.mask = PDC_IRQ_ENABLE;
|
||||
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
|
||||
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
|
||||
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
|
||||
gc->chip_types[1].chip.irq_set_type = syswake_irq_set_type;
|
||||
gc->chip_types[1].chip.irq_set_wake = pdc_irq_set_wake;
|
||||
/* for standby we pass on to the shared syswake IRQ */
|
||||
gc->chip_types[1].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
|
||||
|
||||
/* Set up the hardware to enable interrupt routing */
|
||||
pdc_intc_setup(priv);
|
||||
|
||||
/* Setup chained handlers for the peripheral IRQs */
|
||||
for (i = 0; i < priv->nr_perips; ++i) {
|
||||
irq = priv->perip_irqs[i];
|
||||
irq_set_handler_data(irq, priv);
|
||||
irq_set_chained_handler(irq, pdc_intc_perip_isr);
|
||||
}
|
||||
|
||||
/* Setup chained handler for the syswake IRQ */
|
||||
irq_set_handler_data(priv->syswake_irq, priv);
|
||||
irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);
|
||||
|
||||
dev_info(&pdev->dev,
|
||||
"PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
|
||||
priv->nr_perips,
|
||||
priv->nr_syswakes);
|
||||
|
||||
return 0;
|
||||
err_generic:
|
||||
irq_domain_remove(priv->domain);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int pdc_intc_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct pdc_intc_priv *priv = platform_get_drvdata(pdev);
|
||||
|
||||
irq_domain_remove(priv->domain);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Device-tree match table: this driver binds to "img,pdc-intc" nodes. */
static const struct of_device_id pdc_intc_match[] = {
	{ .compatible = "img,pdc-intc" },
	{}
};
|
||||
|
||||
static struct platform_driver pdc_intc_driver = {
|
||||
.driver = {
|
||||
.name = "pdc-intc",
|
||||
.of_match_table = pdc_intc_match,
|
||||
},
|
||||
.probe = pdc_intc_probe,
|
||||
.remove = pdc_intc_remove,
|
||||
};
|
||||
|
||||
/*
 * Register at core_initcall time so the interrupt controller is available
 * before the devices that depend on it probe.
 */
static int __init pdc_intc_init(void)
{
	return platform_driver_register(&pdc_intc_driver);
}
core_initcall(pdc_intc_init);
|
232
drivers/irqchip/irq-keystone.c
Normal file
232
drivers/irqchip/irq-keystone.c
Normal file
|
@ -0,0 +1,232 @@
|
|||
/*
|
||||
* Texas Instruments Keystone IRQ controller IP driver
|
||||
*
|
||||
* Copyright (C) 2014 Texas Instruments, Inc.
|
||||
* Author: Sajesh Kumar Saran <sajesh@ti.com>
|
||||
* Grygorii Strashko <grygorii.strashko@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/regmap.h>
|
||||
#include "irqchip.h"
|
||||
|
||||
|
||||
/* The source ID bits start from 4 to 31 (total 28 bits)*/
|
||||
#define BIT_OFS 4
|
||||
#define KEYSTONE_N_IRQ (32 - BIT_OFS)
|
||||
|
||||
struct keystone_irq_device {
|
||||
struct device *dev;
|
||||
struct irq_chip chip;
|
||||
u32 mask;
|
||||
int irq;
|
||||
struct irq_domain *irqd;
|
||||
struct regmap *devctrl_regs;
|
||||
u32 devctrl_offset;
|
||||
};
|
||||
|
||||
static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
|
||||
{
|
||||
int ret;
|
||||
u32 val = 0;
|
||||
|
||||
ret = regmap_read(kirq->devctrl_regs, kirq->devctrl_offset, &val);
|
||||
if (ret < 0)
|
||||
dev_dbg(kirq->dev, "irq read failed ret(%d)\n", ret);
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void
|
||||
keystone_irq_writel(struct keystone_irq_device *kirq, u32 value)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = regmap_write(kirq->devctrl_regs, kirq->devctrl_offset, value);
|
||||
if (ret < 0)
|
||||
dev_dbg(kirq->dev, "irq write failed ret(%d)\n", ret);
|
||||
}
|
||||
|
||||
static void keystone_irq_setmask(struct irq_data *d)
|
||||
{
|
||||
struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
|
||||
|
||||
kirq->mask |= BIT(d->hwirq);
|
||||
dev_dbg(kirq->dev, "mask %lu [%x]\n", d->hwirq, kirq->mask);
|
||||
}
|
||||
|
||||
static void keystone_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct keystone_irq_device *kirq = irq_data_get_irq_chip_data(d);
|
||||
|
||||
kirq->mask &= ~BIT(d->hwirq);
|
||||
dev_dbg(kirq->dev, "unmask %lu [%x]\n", d->hwirq, kirq->mask);
|
||||
}
|
||||
|
||||
/*
 * irq_chip .irq_ack callback: intentionally empty — sources are acked in
 * bulk by the chained handler writing the pending word back.
 */
static void keystone_irq_ack(struct irq_data *d)
{
	/* nothing to do here */
}
|
||||
|
||||
static void keystone_irq_handler(unsigned irq, struct irq_desc *desc)
|
||||
{
|
||||
struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
|
||||
unsigned long pending;
|
||||
int src, virq;
|
||||
|
||||
dev_dbg(kirq->dev, "start irq %d\n", irq);
|
||||
|
||||
chained_irq_enter(irq_desc_get_chip(desc), desc);
|
||||
|
||||
pending = keystone_irq_readl(kirq);
|
||||
keystone_irq_writel(kirq, pending);
|
||||
|
||||
dev_dbg(kirq->dev, "pending 0x%lx, mask 0x%x\n", pending, kirq->mask);
|
||||
|
||||
pending = (pending >> BIT_OFS) & ~kirq->mask;
|
||||
|
||||
dev_dbg(kirq->dev, "pending after mask 0x%lx\n", pending);
|
||||
|
||||
for (src = 0; src < KEYSTONE_N_IRQ; src++) {
|
||||
if (BIT(src) & pending) {
|
||||
virq = irq_find_mapping(kirq->irqd, src);
|
||||
dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n",
|
||||
src, virq);
|
||||
if (!virq)
|
||||
dev_warn(kirq->dev, "sporious irq detected hwirq %d, virq %d\n",
|
||||
src, virq);
|
||||
generic_handle_irq(virq);
|
||||
}
|
||||
}
|
||||
|
||||
chained_irq_exit(irq_desc_get_chip(desc), desc);
|
||||
|
||||
dev_dbg(kirq->dev, "end irq %d\n", irq);
|
||||
}
|
||||
|
||||
static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
struct keystone_irq_device *kirq = h->host_data;
|
||||
|
||||
irq_set_chip_data(virq, kirq);
|
||||
irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq);
|
||||
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct irq_domain_ops keystone_irq_ops = {
|
||||
.map = keystone_irq_map,
|
||||
.xlate = irq_domain_xlate_onecell,
|
||||
};
|
||||
|
||||
static int keystone_irq_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
struct keystone_irq_device *kirq;
|
||||
int ret;
|
||||
|
||||
if (np == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
kirq = devm_kzalloc(dev, sizeof(*kirq), GFP_KERNEL);
|
||||
if (!kirq)
|
||||
return -ENOMEM;
|
||||
|
||||
kirq->devctrl_regs =
|
||||
syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
|
||||
if (IS_ERR(kirq->devctrl_regs))
|
||||
return PTR_ERR(kirq->devctrl_regs);
|
||||
|
||||
ret = of_property_read_u32_index(np, "ti,syscon-dev", 1,
|
||||
&kirq->devctrl_offset);
|
||||
if (ret) {
|
||||
dev_err(dev, "couldn't read the devctrl_offset offset!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
kirq->irq = platform_get_irq(pdev, 0);
|
||||
if (kirq->irq < 0) {
|
||||
dev_err(dev, "no irq resource %d\n", kirq->irq);
|
||||
return kirq->irq;
|
||||
}
|
||||
|
||||
kirq->dev = dev;
|
||||
kirq->mask = ~0x0;
|
||||
kirq->chip.name = "keystone-irq";
|
||||
kirq->chip.irq_ack = keystone_irq_ack;
|
||||
kirq->chip.irq_mask = keystone_irq_setmask;
|
||||
kirq->chip.irq_unmask = keystone_irq_unmask;
|
||||
|
||||
kirq->irqd = irq_domain_add_linear(np, KEYSTONE_N_IRQ,
|
||||
&keystone_irq_ops, kirq);
|
||||
if (!kirq->irqd) {
|
||||
dev_err(dev, "IRQ domain registration failed\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
platform_set_drvdata(pdev, kirq);
|
||||
|
||||
irq_set_chained_handler(kirq->irq, keystone_irq_handler);
|
||||
irq_set_handler_data(kirq->irq, kirq);
|
||||
|
||||
/* clear all source bits */
|
||||
keystone_irq_writel(kirq, ~0x0);
|
||||
|
||||
dev_info(dev, "irqchip registered, nr_irqs %u\n", KEYSTONE_N_IRQ);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int keystone_irq_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
|
||||
int hwirq;
|
||||
|
||||
for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
|
||||
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
|
||||
|
||||
irq_domain_remove(kirq->irqd);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Device-tree match table, exported for module autoloading. */
static const struct of_device_id keystone_irq_dt_ids[] = {
	{ .compatible = "ti,keystone-irq", },
	{},
};
MODULE_DEVICE_TABLE(of, keystone_irq_dt_ids);
|
||||
|
||||
static struct platform_driver keystone_irq_device_driver = {
|
||||
.probe = keystone_irq_probe,
|
||||
.remove = keystone_irq_remove,
|
||||
.driver = {
|
||||
.name = "keystone_irq",
|
||||
.owner = THIS_MODULE,
|
||||
.of_match_table = of_match_ptr(keystone_irq_dt_ids),
|
||||
}
|
||||
};
|
||||
|
||||
module_platform_driver(keystone_irq_device_driver);
|
||||
|
||||
MODULE_AUTHOR("Texas Instruments");
|
||||
MODULE_AUTHOR("Sajesh Kumar Saran");
|
||||
MODULE_AUTHOR("Grygorii Strashko");
|
||||
MODULE_DESCRIPTION("Keystone IRQ chip");
|
||||
MODULE_LICENSE("GPL v2");
|
868
drivers/irqchip/irq-metag-ext.c
Normal file
868
drivers/irqchip/irq-metag-ext.c
Normal file
|
@ -0,0 +1,868 @@
|
|||
/*
|
||||
* Meta External interrupt code.
|
||||
*
|
||||
* Copyright (C) 2005-2012 Imagination Technologies Ltd.
|
||||
*
|
||||
* External interrupts on Meta are configured at two-levels, in the CPU core and
|
||||
* in the external trigger block. Interrupts from SoC peripherals are
|
||||
* multiplexed onto a single Meta CPU "trigger" - traditionally it has always
|
||||
* been trigger 2 (TR2). For info on how de-multiplexing happens check out
|
||||
* meta_intc_irq_demux().
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqchip/metag-ext.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <asm/hwthread.h>
|
||||
|
||||
#define HWSTAT_STRIDE 8
|
||||
#define HWVEC_BLK_STRIDE 0x1000
|
||||
|
||||
/**
|
||||
* struct meta_intc_priv - private meta external interrupt data
|
||||
* @nr_banks: Number of interrupt banks
|
||||
* @domain: IRQ domain for all banks of external IRQs
|
||||
* @unmasked: Record of unmasked IRQs
|
||||
* @levels_altered: Record of altered level bits
|
||||
*/
|
||||
/*
 * struct meta_intc_priv - private meta external interrupt data
 * @nr_banks:		number of 32-bit interrupt banks
 * @domain:		IRQ domain covering every bank of external IRQs
 * @unmasked:		per-bank record of which IRQs are unmasked
 * @levels_altered:	per-bank record of level bits changed by Linux
 *			(only tracked when suspend-to-memory is enabled)
 */
struct meta_intc_priv {
	unsigned int		nr_banks;
	struct irq_domain	*domain;

	unsigned long		unmasked[4];

#ifdef CONFIG_METAG_SUSPEND_MEM
	unsigned long		levels_altered[4];
#endif
};

/* State for the single external interrupt controller in the system */
static struct meta_intc_priv meta_intc_priv;
|
||||
|
||||
/**
|
||||
* meta_intc_offset() - Get the offset into the bank of a hardware IRQ number
|
||||
* @hw: Hardware IRQ number (within external trigger block)
|
||||
*
|
||||
* Returns: Bit offset into the IRQ's bank registers
|
||||
*/
|
||||
/* Bit offset of hardware IRQ @hw within its bank's registers (0..31). */
static unsigned int meta_intc_offset(irq_hw_number_t hw)
{
	return hw % 32;
}
|
||||
|
||||
/**
|
||||
* meta_intc_bank() - Get the bank number of a hardware IRQ number
|
||||
* @hw: Hardware IRQ number (within external trigger block)
|
||||
*
|
||||
* Returns: Bank number indicating which register the IRQ's bits are
|
||||
*/
|
||||
/* Bank number holding hardware IRQ @hw (32 IRQs per bank). */
static unsigned int meta_intc_bank(irq_hw_number_t hw)
{
	return hw / 32;
}
|
||||
|
||||
/**
|
||||
* meta_intc_stat_addr() - Get the address of a HWSTATEXT register
|
||||
* @hw: Hardware IRQ number (within external trigger block)
|
||||
*
|
||||
* Returns: Address of a HWSTATEXT register containing the status bit for
|
||||
* the specified hardware IRQ number
|
||||
*/
|
||||
/* Address of the HWSTATEXT register holding @hw's status bit. */
static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw)
{
	unsigned long addr = HWSTATEXT + HWSTAT_STRIDE * meta_intc_bank(hw);

	return (void __iomem *)addr;
}
|
||||
|
||||
/**
|
||||
* meta_intc_level_addr() - Get the address of a HWLEVELEXT register
|
||||
* @hw: Hardware IRQ number (within external trigger block)
|
||||
*
|
||||
* Returns: Address of a HWLEVELEXT register containing the sense bit for
|
||||
* the specified hardware IRQ number
|
||||
*/
|
||||
/* Address of the HWLEVELEXT register holding @hw's sense (level/edge) bit. */
static void __iomem *meta_intc_level_addr(irq_hw_number_t hw)
{
	unsigned long addr = HWLEVELEXT + HWSTAT_STRIDE * meta_intc_bank(hw);

	return (void __iomem *)addr;
}
|
||||
|
||||
/**
|
||||
* meta_intc_mask_addr() - Get the address of a HWMASKEXT register
|
||||
* @hw: Hardware IRQ number (within external trigger block)
|
||||
*
|
||||
* Returns: Address of a HWMASKEXT register containing the mask bit for the
|
||||
* specified hardware IRQ number
|
||||
*/
|
||||
/* Address of the HWMASKEXT register holding @hw's mask bit. */
static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw)
{
	unsigned long addr = HWMASKEXT + HWSTAT_STRIDE * meta_intc_bank(hw);

	return (void __iomem *)addr;
}
|
||||
|
||||
/**
|
||||
* meta_intc_vec_addr() - Get the vector address of a hardware interrupt
|
||||
* @hw: Hardware IRQ number (within external trigger block)
|
||||
*
|
||||
* Returns: Address of a HWVECEXT register controlling the core trigger to
|
||||
* vector the IRQ onto
|
||||
*/
|
||||
/* Address of the HWVECEXT register selecting the core trigger for @hw. */
static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw)
{
	unsigned long addr = HWVEC0EXT +
			     HWVEC_BLK_STRIDE * meta_intc_bank(hw) +
			     HWVECnEXT_STRIDE * meta_intc_offset(hw);

	return (void __iomem *)addr;
}
|
||||
|
||||
/**
|
||||
* meta_intc_startup_irq() - set up an external irq
|
||||
* @data: data for the external irq to start up
|
||||
*
|
||||
* Multiplex interrupts for irq onto TR2. Clear any pending interrupts and
|
||||
* unmask irq, both using the appropriate callbacks.
|
||||
*/
|
||||
static unsigned int meta_intc_startup_irq(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
void __iomem *vec_addr = meta_intc_vec_addr(hw);
|
||||
int thread = hard_processor_id();
|
||||
|
||||
/* Perform any necessary acking. */
|
||||
if (data->chip->irq_ack)
|
||||
data->chip->irq_ack(data);
|
||||
|
||||
/* Wire up this interrupt to the core with HWVECxEXT. */
|
||||
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
|
||||
|
||||
/* Perform any necessary unmasking. */
|
||||
data->chip->irq_unmask(data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_shutdown_irq() - turn off an external irq
|
||||
* @data: data for the external irq to turn off
|
||||
*
|
||||
* Mask irq using the appropriate callback and stop muxing it onto TR2.
|
||||
*/
|
||||
static void meta_intc_shutdown_irq(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
void __iomem *vec_addr = meta_intc_vec_addr(hw);
|
||||
|
||||
/* Mask the IRQ */
|
||||
data->chip->irq_mask(data);
|
||||
|
||||
/*
|
||||
* Disable the IRQ at the core by removing the interrupt from
|
||||
* the HW vector mapping.
|
||||
*/
|
||||
metag_out32(0, vec_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_ack_irq() - acknowledge an external irq
|
||||
* @data: data for the external irq to ack
|
||||
*
|
||||
* Clear down an edge interrupt in the status register.
|
||||
*/
|
||||
static void meta_intc_ack_irq(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << meta_intc_offset(hw);
|
||||
void __iomem *stat_addr = meta_intc_stat_addr(hw);
|
||||
|
||||
/* Ack the int, if it is still 'on'.
|
||||
* NOTE - this only works for edge triggered interrupts.
|
||||
*/
|
||||
if (metag_in32(stat_addr) & bit)
|
||||
metag_out32(bit, stat_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* record_irq_is_masked() - record the IRQ masked so it doesn't get handled
|
||||
* @data: data for the external irq to record
|
||||
*
|
||||
* This should get called whenever an external IRQ is masked (by whichever
|
||||
* callback is used). It records the IRQ masked so that it doesn't get handled
|
||||
* if it still shows up in the status register.
|
||||
*/
|
||||
static void record_irq_is_masked(struct irq_data *data)
|
||||
{
|
||||
struct meta_intc_priv *priv = &meta_intc_priv;
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
|
||||
clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
|
||||
}
|
||||
|
||||
/**
|
||||
* record_irq_is_unmasked() - record the IRQ unmasked so it can be handled
|
||||
* @data: data for the external irq to record
|
||||
*
|
||||
* This should get called whenever an external IRQ is unmasked (by whichever
|
||||
* callback is used). It records the IRQ unmasked so that it gets handled if it
|
||||
* shows up in the status register.
|
||||
*/
|
||||
static void record_irq_is_unmasked(struct irq_data *data)
|
||||
{
|
||||
struct meta_intc_priv *priv = &meta_intc_priv;
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
|
||||
set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
|
||||
}
|
||||
|
||||
/*
|
||||
* For use by wrapper IRQ drivers
|
||||
*/
|
||||
|
||||
/**
|
||||
* meta_intc_mask_irq_simple() - minimal mask used by wrapper IRQ drivers
|
||||
* @data: data for the external irq being masked
|
||||
*
|
||||
* This should be called by any wrapper IRQ driver mask functions. it doesn't do
|
||||
* any masking but records the IRQ as masked so that the core code knows the
|
||||
* mask has taken place. It is the callers responsibility to ensure that the IRQ
|
||||
* won't trigger an interrupt to the core.
|
||||
*/
|
||||
/*
 * Minimal mask for wrapper IRQ drivers: performs no hardware masking,
 * only records the mask so the core demux code honours it. The caller
 * must itself ensure the IRQ cannot reach the core.
 */
void meta_intc_mask_irq_simple(struct irq_data *data)
{
	record_irq_is_masked(data);
}
|
||||
|
||||
/**
|
||||
* meta_intc_unmask_irq_simple() - minimal unmask used by wrapper IRQ drivers
|
||||
* @data: data for the external irq being unmasked
|
||||
*
|
||||
* This should be called by any wrapper IRQ driver unmask functions. it doesn't
|
||||
* do any unmasking but records the IRQ as unmasked so that the core code knows
|
||||
* the unmask has taken place. It is the callers responsibility to ensure that
|
||||
* the IRQ can now trigger an interrupt to the core.
|
||||
*/
|
||||
/*
 * Minimal unmask for wrapper IRQ drivers: performs no hardware
 * unmasking, only records the unmask for the core demux code. The
 * caller must itself re-enable delivery of the IRQ to the core.
 */
void meta_intc_unmask_irq_simple(struct irq_data *data)
{
	record_irq_is_unmasked(data);
}
|
||||
|
||||
|
||||
/**
|
||||
* meta_intc_mask_irq() - mask an external irq using HWMASKEXT
|
||||
* @data: data for the external irq to mask
|
||||
*
|
||||
* This is a default implementation of a mask function which makes use of the
|
||||
* HWMASKEXT registers available in newer versions.
|
||||
*
|
||||
* Earlier versions without these registers should use SoC level IRQ masking
|
||||
* which call the meta_intc_*_simple() functions above, or if that isn't
|
||||
* available should use the fallback meta_intc_*_nomask() functions below.
|
||||
*/
|
||||
static void meta_intc_mask_irq(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << meta_intc_offset(hw);
|
||||
void __iomem *mask_addr = meta_intc_mask_addr(hw);
|
||||
unsigned long flags;
|
||||
|
||||
record_irq_is_masked(data);
|
||||
|
||||
/* update the interrupt mask */
|
||||
__global_lock2(flags);
|
||||
metag_out32(metag_in32(mask_addr) & ~bit, mask_addr);
|
||||
__global_unlock2(flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_unmask_irq() - unmask an external irq using HWMASKEXT
|
||||
* @data: data for the external irq to unmask
|
||||
*
|
||||
* This is a default implementation of an unmask function which makes use of the
|
||||
* HWMASKEXT registers available on new versions. It should be paired with
|
||||
* meta_intc_mask_irq() above.
|
||||
*/
|
||||
static void meta_intc_unmask_irq(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << meta_intc_offset(hw);
|
||||
void __iomem *mask_addr = meta_intc_mask_addr(hw);
|
||||
unsigned long flags;
|
||||
|
||||
record_irq_is_unmasked(data);
|
||||
|
||||
/* update the interrupt mask */
|
||||
__global_lock2(flags);
|
||||
metag_out32(metag_in32(mask_addr) | bit, mask_addr);
|
||||
__global_unlock2(flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_mask_irq_nomask() - mask an external irq by unvectoring
|
||||
* @data: data for the external irq to mask
|
||||
*
|
||||
* This is the version of the mask function for older versions which don't have
|
||||
* HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the IRQ is
|
||||
* unvectored from the core and retriggered if necessary later.
|
||||
*/
|
||||
static void meta_intc_mask_irq_nomask(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
void __iomem *vec_addr = meta_intc_vec_addr(hw);
|
||||
|
||||
record_irq_is_masked(data);
|
||||
|
||||
/* there is no interrupt mask, so unvector the interrupt */
|
||||
metag_out32(0, vec_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring
|
||||
* @data: data for the external irq to unmask
|
||||
*
|
||||
* This is the version of the unmask function for older versions which don't
|
||||
* have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
|
||||
* IRQ is revectored back to the core and retriggered if necessary.
|
||||
*
|
||||
* The retriggering done by this function is specific to edge interrupts.
|
||||
*/
|
||||
static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << meta_intc_offset(hw);
|
||||
void __iomem *stat_addr = meta_intc_stat_addr(hw);
|
||||
void __iomem *vec_addr = meta_intc_vec_addr(hw);
|
||||
unsigned int thread = hard_processor_id();
|
||||
|
||||
record_irq_is_unmasked(data);
|
||||
|
||||
/* there is no interrupt mask, so revector the interrupt */
|
||||
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
|
||||
|
||||
/*
|
||||
* Re-trigger interrupt
|
||||
*
|
||||
* Writing a 1 toggles, and a 0->1 transition triggers. We only
|
||||
* retrigger if the status bit is already set, which means we
|
||||
* need to clear it first. Retriggering is fundamentally racy
|
||||
* because if the interrupt fires again after we clear it we
|
||||
* could end up clearing it again and the interrupt handler
|
||||
* thinking it hasn't fired. Therefore we need to keep trying to
|
||||
* retrigger until the bit is set.
|
||||
*/
|
||||
if (metag_in32(stat_addr) & bit) {
|
||||
metag_out32(bit, stat_addr);
|
||||
while (!(metag_in32(stat_addr) & bit))
|
||||
metag_out32(bit, stat_addr);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_unmask_level_irq_nomask() - unmask a level irq by revectoring
|
||||
* @data: data for the external irq to unmask
|
||||
*
|
||||
* This is the version of the unmask function for older versions which don't
|
||||
* have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
|
||||
* IRQ is revectored back to the core and retriggered if necessary.
|
||||
*
|
||||
* The retriggering done by this function is specific to level interrupts.
|
||||
*/
|
||||
static void meta_intc_unmask_level_irq_nomask(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << meta_intc_offset(hw);
|
||||
void __iomem *stat_addr = meta_intc_stat_addr(hw);
|
||||
void __iomem *vec_addr = meta_intc_vec_addr(hw);
|
||||
unsigned int thread = hard_processor_id();
|
||||
|
||||
record_irq_is_unmasked(data);
|
||||
|
||||
/* there is no interrupt mask, so revector the interrupt */
|
||||
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
|
||||
|
||||
/* Re-trigger interrupt */
|
||||
/* Writing a 1 triggers interrupt */
|
||||
if (metag_in32(stat_addr) & bit)
|
||||
metag_out32(bit, stat_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_irq_set_type() - set the type of an external irq
|
||||
* @data: data for the external irq to set the type of
|
||||
* @flow_type: new irq flow type
|
||||
*
|
||||
* Set the flow type of an external interrupt. This updates the irq chip and irq
|
||||
* handler depending on whether the irq is edge or level sensitive (the polarity
|
||||
* is ignored), and also sets up the bit in HWLEVELEXT so the hardware knows
|
||||
* when to trigger.
|
||||
*/
|
||||
static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
|
||||
{
|
||||
#ifdef CONFIG_METAG_SUSPEND_MEM
|
||||
struct meta_intc_priv *priv = &meta_intc_priv;
|
||||
#endif
|
||||
unsigned int irq = data->irq;
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << meta_intc_offset(hw);
|
||||
void __iomem *level_addr = meta_intc_level_addr(hw);
|
||||
unsigned long flags;
|
||||
unsigned int level;
|
||||
|
||||
/* update the chip/handler */
|
||||
if (flow_type & IRQ_TYPE_LEVEL_MASK)
|
||||
__irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip,
|
||||
handle_level_irq, NULL);
|
||||
else
|
||||
__irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip,
|
||||
handle_edge_irq, NULL);
|
||||
|
||||
/* and clear/set the bit in HWLEVELEXT */
|
||||
__global_lock2(flags);
|
||||
level = metag_in32(level_addr);
|
||||
if (flow_type & IRQ_TYPE_LEVEL_MASK)
|
||||
level |= bit;
|
||||
else
|
||||
level &= ~bit;
|
||||
metag_out32(level, level_addr);
|
||||
#ifdef CONFIG_METAG_SUSPEND_MEM
|
||||
priv->levels_altered[meta_intc_bank(hw)] |= bit;
|
||||
#endif
|
||||
__global_unlock2(flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_irq_demux() - external irq de-multiplexer
|
||||
* @irq: the virtual interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is
|
||||
* this function's job to demux this irq and figure out exactly which external
|
||||
* irq needs servicing.
|
||||
*
|
||||
* Whilst using TR2 to detect external interrupts is a software convention it is
|
||||
* (hopefully) unlikely to change.
|
||||
*/
|
||||
static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct meta_intc_priv *priv = &meta_intc_priv;
|
||||
irq_hw_number_t hw;
|
||||
unsigned int bank, irq_no, status;
|
||||
void __iomem *stat_addr = meta_intc_stat_addr(0);
|
||||
|
||||
/*
|
||||
* Locate which interrupt has caused our handler to run.
|
||||
*/
|
||||
for (bank = 0; bank < priv->nr_banks; ++bank) {
|
||||
/* Which interrupts are currently pending in this bank? */
|
||||
recalculate:
|
||||
status = metag_in32(stat_addr) & priv->unmasked[bank];
|
||||
|
||||
for (hw = bank*32; status; status >>= 1, ++hw) {
|
||||
if (status & 0x1) {
|
||||
/*
|
||||
* Map the hardware IRQ number to a virtual
|
||||
* Linux IRQ number.
|
||||
*/
|
||||
irq_no = irq_linear_revmap(priv->domain, hw);
|
||||
|
||||
/*
|
||||
* Only fire off external interrupts that are
|
||||
* registered to be handled by the kernel.
|
||||
* Other external interrupts are probably being
|
||||
* handled by other Meta hardware threads.
|
||||
*/
|
||||
generic_handle_irq(irq_no);
|
||||
|
||||
/*
|
||||
* The handler may have re-enabled interrupts
|
||||
* which could have caused a nested invocation
|
||||
* of this code and make the copy of the
|
||||
* status register we are using invalid.
|
||||
*/
|
||||
goto recalculate;
|
||||
}
|
||||
}
|
||||
stat_addr += HWSTAT_STRIDE;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/**
|
||||
* meta_intc_set_affinity() - set the affinity for an interrupt
|
||||
* @data: data for the external irq to set the affinity of
|
||||
* @cpumask: cpu mask representing cpus which can handle the interrupt
|
||||
* @force: whether to force (ignored)
|
||||
*
|
||||
* Revector the specified external irq onto a specific cpu's TR2 trigger, so
|
||||
* that that cpu tends to be the one who handles it.
|
||||
*/
|
||||
/*
 * irq_chip .irq_set_affinity: revector the source onto one cpu's TR2
 * trigger. The hardware cannot route one source to several cpus, so the
 * first online cpu in @cpumask is chosen.
 */
static int meta_intc_set_affinity(struct irq_data *data,
				  const struct cpumask *cpumask, bool force)
{
	void __iomem *vec_addr = meta_intc_vec_addr(data->hwirq);
	unsigned int cpu = cpumask_any_and(cpumask, cpu_online_mask);
	unsigned int thread = cpu_2_hwthread_id[cpu];

	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	return 0;
}
|
||||
#else
|
||||
#define meta_intc_set_affinity NULL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#define META_INTC_CHIP_FLAGS (IRQCHIP_MASK_ON_SUSPEND \
|
||||
| IRQCHIP_SKIP_SET_WAKE)
|
||||
#else
|
||||
#define META_INTC_CHIP_FLAGS 0
|
||||
#endif
|
||||
|
||||
/* public edge/level irq chips which SoCs can override */
|
||||
|
||||
struct irq_chip meta_intc_edge_chip = {
|
||||
.irq_startup = meta_intc_startup_irq,
|
||||
.irq_shutdown = meta_intc_shutdown_irq,
|
||||
.irq_ack = meta_intc_ack_irq,
|
||||
.irq_mask = meta_intc_mask_irq,
|
||||
.irq_unmask = meta_intc_unmask_irq,
|
||||
.irq_set_type = meta_intc_irq_set_type,
|
||||
.irq_set_affinity = meta_intc_set_affinity,
|
||||
.flags = META_INTC_CHIP_FLAGS,
|
||||
};
|
||||
|
||||
struct irq_chip meta_intc_level_chip = {
|
||||
.irq_startup = meta_intc_startup_irq,
|
||||
.irq_shutdown = meta_intc_shutdown_irq,
|
||||
.irq_set_type = meta_intc_irq_set_type,
|
||||
.irq_mask = meta_intc_mask_irq,
|
||||
.irq_unmask = meta_intc_unmask_irq,
|
||||
.irq_set_affinity = meta_intc_set_affinity,
|
||||
.flags = META_INTC_CHIP_FLAGS,
|
||||
};
|
||||
|
||||
/**
|
||||
* meta_intc_map() - map an external irq
|
||||
* @d: irq domain of external trigger block
|
||||
* @irq: virtual irq number
|
||||
* @hw: hardware irq number within external trigger block
|
||||
*
|
||||
* This sets up a virtual irq for a specified hardware interrupt. The irq chip
|
||||
* and handler is configured, using the HWLEVELEXT registers to determine
|
||||
* edge/level flow type. These registers will have been set when the irq type is
|
||||
* set (or set to a default at init time).
|
||||
*/
|
||||
static int meta_intc_map(struct irq_domain *d, unsigned int irq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
unsigned int bit = 1 << meta_intc_offset(hw);
|
||||
void __iomem *level_addr = meta_intc_level_addr(hw);
|
||||
|
||||
/* Go by the current sense in the HWLEVELEXT register */
|
||||
if (metag_in32(level_addr) & bit)
|
||||
irq_set_chip_and_handler(irq, &meta_intc_level_chip,
|
||||
handle_level_irq);
|
||||
else
|
||||
irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
|
||||
handle_edge_irq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops meta_intc_domain_ops = {
|
||||
.map = meta_intc_map,
|
||||
.xlate = irq_domain_xlate_twocell,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_METAG_SUSPEND_MEM
|
||||
|
||||
/**
|
||||
* struct meta_intc_context - suspend context
|
||||
* @levels: State of HWLEVELEXT registers
|
||||
* @masks: State of HWMASKEXT registers
|
||||
* @vectors: State of HWVECEXT registers
|
||||
* @txvecint: State of TxVECINT registers
|
||||
*
|
||||
* This structure stores the IRQ state across suspend.
|
||||
*/
|
||||
struct meta_intc_context {
|
||||
u32 levels[4];
|
||||
u32 masks[4];
|
||||
u8 vectors[4*32];
|
||||
|
||||
u8 txvecint[4][4];
|
||||
};
|
||||
|
||||
/* suspend context */
|
||||
static struct meta_intc_context *meta_intc_context;
|
||||
|
||||
/**
|
||||
* meta_intc_suspend() - store irq state
|
||||
*
|
||||
* To avoid interfering with other threads we only save the IRQ state of IRQs in
|
||||
* use by Linux.
|
||||
*/
|
||||
static int meta_intc_suspend(void)
|
||||
{
|
||||
struct meta_intc_priv *priv = &meta_intc_priv;
|
||||
int i, j;
|
||||
irq_hw_number_t hw;
|
||||
unsigned int bank;
|
||||
unsigned long flags;
|
||||
struct meta_intc_context *context;
|
||||
void __iomem *level_addr, *mask_addr, *vec_addr;
|
||||
u32 mask, bit;
|
||||
|
||||
context = kzalloc(sizeof(*context), GFP_ATOMIC);
|
||||
if (!context)
|
||||
return -ENOMEM;
|
||||
|
||||
hw = 0;
|
||||
level_addr = meta_intc_level_addr(0);
|
||||
mask_addr = meta_intc_mask_addr(0);
|
||||
for (bank = 0; bank < priv->nr_banks; ++bank) {
|
||||
vec_addr = meta_intc_vec_addr(hw);
|
||||
|
||||
/* create mask of interrupts in use */
|
||||
mask = 0;
|
||||
for (bit = 1; bit; bit <<= 1) {
|
||||
i = irq_linear_revmap(priv->domain, hw);
|
||||
/* save mapped irqs which are enabled or have actions */
|
||||
if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
|
||||
irq_has_action(i))) {
|
||||
mask |= bit;
|
||||
|
||||
/* save trigger vector */
|
||||
context->vectors[hw] = metag_in32(vec_addr);
|
||||
}
|
||||
|
||||
++hw;
|
||||
vec_addr += HWVECnEXT_STRIDE;
|
||||
}
|
||||
|
||||
/* save level state if any IRQ levels altered */
|
||||
if (priv->levels_altered[bank])
|
||||
context->levels[bank] = metag_in32(level_addr);
|
||||
/* save mask state if any IRQs in use */
|
||||
if (mask)
|
||||
context->masks[bank] = metag_in32(mask_addr);
|
||||
|
||||
level_addr += HWSTAT_STRIDE;
|
||||
mask_addr += HWSTAT_STRIDE;
|
||||
}
|
||||
|
||||
/* save trigger matrixing */
|
||||
__global_lock2(flags);
|
||||
for (i = 0; i < 4; ++i)
|
||||
for (j = 0; j < 4; ++j)
|
||||
context->txvecint[i][j] = metag_in32(T0VECINT_BHALT +
|
||||
TnVECINT_STRIDE*i +
|
||||
8*j);
|
||||
__global_unlock2(flags);
|
||||
|
||||
meta_intc_context = context;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_resume() - restore saved irq state
|
||||
*
|
||||
* Restore the saved IRQ state and drop it.
|
||||
*/
|
||||
static void meta_intc_resume(void)
|
||||
{
|
||||
struct meta_intc_priv *priv = &meta_intc_priv;
|
||||
int i, j;
|
||||
irq_hw_number_t hw;
|
||||
unsigned int bank;
|
||||
unsigned long flags;
|
||||
struct meta_intc_context *context = meta_intc_context;
|
||||
void __iomem *level_addr, *mask_addr, *vec_addr;
|
||||
u32 mask, bit, tmp;
|
||||
|
||||
meta_intc_context = NULL;
|
||||
|
||||
hw = 0;
|
||||
level_addr = meta_intc_level_addr(0);
|
||||
mask_addr = meta_intc_mask_addr(0);
|
||||
for (bank = 0; bank < priv->nr_banks; ++bank) {
|
||||
vec_addr = meta_intc_vec_addr(hw);
|
||||
|
||||
/* create mask of interrupts in use */
|
||||
mask = 0;
|
||||
for (bit = 1; bit; bit <<= 1) {
|
||||
i = irq_linear_revmap(priv->domain, hw);
|
||||
/* restore mapped irqs, enabled or with actions */
|
||||
if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
|
||||
irq_has_action(i))) {
|
||||
mask |= bit;
|
||||
|
||||
/* restore trigger vector */
|
||||
metag_out32(context->vectors[hw], vec_addr);
|
||||
}
|
||||
|
||||
++hw;
|
||||
vec_addr += HWVECnEXT_STRIDE;
|
||||
}
|
||||
|
||||
if (mask) {
|
||||
/* restore mask state */
|
||||
__global_lock2(flags);
|
||||
tmp = metag_in32(mask_addr);
|
||||
tmp = (tmp & ~mask) | (context->masks[bank] & mask);
|
||||
metag_out32(tmp, mask_addr);
|
||||
__global_unlock2(flags);
|
||||
}
|
||||
|
||||
mask = priv->levels_altered[bank];
|
||||
if (mask) {
|
||||
/* restore level state */
|
||||
__global_lock2(flags);
|
||||
tmp = metag_in32(level_addr);
|
||||
tmp = (tmp & ~mask) | (context->levels[bank] & mask);
|
||||
metag_out32(tmp, level_addr);
|
||||
__global_unlock2(flags);
|
||||
}
|
||||
|
||||
level_addr += HWSTAT_STRIDE;
|
||||
mask_addr += HWSTAT_STRIDE;
|
||||
}
|
||||
|
||||
/* restore trigger matrixing */
|
||||
__global_lock2(flags);
|
||||
for (i = 0; i < 4; ++i) {
|
||||
for (j = 0; j < 4; ++j) {
|
||||
metag_out32(context->txvecint[i][j],
|
||||
T0VECINT_BHALT +
|
||||
TnVECINT_STRIDE*i +
|
||||
8*j);
|
||||
}
|
||||
}
|
||||
__global_unlock2(flags);
|
||||
|
||||
kfree(context);
|
||||
}
|
||||
|
||||
static struct syscore_ops meta_intc_syscore_ops = {
|
||||
.suspend = meta_intc_suspend,
|
||||
.resume = meta_intc_resume,
|
||||
};
|
||||
|
||||
/* Register the suspend/resume callbacks with the syscore framework. */
static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv)
{
	register_syscore_ops(&meta_intc_syscore_ops);
}
#else
/* No PM support configured: compile out the syscore registration. */
#define meta_intc_init_syscore_ops(priv) do {} while (0)
#endif
|
||||
|
||||
/**
|
||||
* meta_intc_init_cpu() - register with a Meta cpu
|
||||
* @priv: private interrupt controller data
|
||||
* @cpu: the CPU to register on
|
||||
*
|
||||
* Configure @cpu's TR2 irq so that we can demux external irqs.
|
||||
*/
|
||||
static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu)
|
||||
{
|
||||
unsigned int thread = cpu_2_hwthread_id[cpu];
|
||||
unsigned int signum = TBID_SIGNUM_TR2(thread);
|
||||
int irq = tbisig_map(signum);
|
||||
|
||||
/* Register the multiplexed IRQ handler */
|
||||
irq_set_chained_handler(irq, meta_intc_irq_demux);
|
||||
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_no_mask() - indicate lack of HWMASKEXT registers
|
||||
*
|
||||
* Called from SoC code (or init code below) to dynamically indicate the lack of
|
||||
* HWMASKEXT registers (for example depending on some SoC revision register).
|
||||
* This alters the irq mask and unmask callbacks to use the fallback
|
||||
* unvectoring/retriggering technique instead of using HWMASKEXT registers.
|
||||
*/
|
||||
void __init meta_intc_no_mask(void)
|
||||
{
|
||||
meta_intc_edge_chip.irq_mask = meta_intc_mask_irq_nomask;
|
||||
meta_intc_edge_chip.irq_unmask = meta_intc_unmask_edge_irq_nomask;
|
||||
meta_intc_level_chip.irq_mask = meta_intc_mask_irq_nomask;
|
||||
meta_intc_level_chip.irq_unmask = meta_intc_unmask_level_irq_nomask;
|
||||
}
|
||||
|
||||
/**
|
||||
* init_external_IRQ() - initialise the external irq controller
|
||||
*
|
||||
* Set up the external irq controller using device tree properties. This is
|
||||
* called from init_IRQ().
|
||||
*/
|
||||
int __init init_external_IRQ(void)
|
||||
{
|
||||
struct meta_intc_priv *priv = &meta_intc_priv;
|
||||
struct device_node *node;
|
||||
int ret, cpu;
|
||||
u32 val;
|
||||
bool no_masks = false;
|
||||
|
||||
node = of_find_compatible_node(NULL, NULL, "img,meta-intc");
|
||||
if (!node)
|
||||
return -ENOENT;
|
||||
|
||||
/* Get number of banks */
|
||||
ret = of_property_read_u32(node, "num-banks", &val);
|
||||
if (ret) {
|
||||
pr_err("meta-intc: No num-banks property found\n");
|
||||
return ret;
|
||||
}
|
||||
if (val < 1 || val > 4) {
|
||||
pr_err("meta-intc: num-banks (%u) out of range\n", val);
|
||||
return -EINVAL;
|
||||
}
|
||||
priv->nr_banks = val;
|
||||
|
||||
/* Are any mask registers present? */
|
||||
if (of_get_property(node, "no-mask", NULL))
|
||||
no_masks = true;
|
||||
|
||||
/* No HWMASKEXT registers present? */
|
||||
if (no_masks)
|
||||
meta_intc_no_mask();
|
||||
|
||||
/* Set up an IRQ domain */
|
||||
/*
|
||||
* This is a legacy IRQ domain for now until all the platform setup code
|
||||
* has been converted to devicetree.
|
||||
*/
|
||||
priv->domain = irq_domain_add_linear(node, priv->nr_banks*32,
|
||||
&meta_intc_domain_ops, priv);
|
||||
if (unlikely(!priv->domain)) {
|
||||
pr_err("meta-intc: cannot add IRQ domain\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Setup TR2 for all cpus. */
|
||||
for_each_possible_cpu(cpu)
|
||||
meta_intc_init_cpu(priv, cpu);
|
||||
|
||||
/* Set up system suspend/resume callbacks */
|
||||
meta_intc_init_syscore_ops(priv);
|
||||
|
||||
pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n",
|
||||
priv->nr_banks*32);
|
||||
|
||||
return 0;
|
||||
}
|
343
drivers/irqchip/irq-metag.c
Normal file
343
drivers/irqchip/irq-metag.c
Normal file
|
@ -0,0 +1,343 @@
|
|||
/*
|
||||
* Meta internal (HWSTATMETA) interrupt code.
|
||||
*
|
||||
* Copyright (C) 2011-2012 Imagination Technologies Ltd.
|
||||
*
|
||||
* This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c
|
||||
* The code base could be generalised/merged as a lot of the functionality is
|
||||
* similar. Until this is done, we try to keep the code simple here.
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqdomain.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <asm/hwthread.h>
|
||||
|
||||
#define PERF0VECINT 0x04820580
|
||||
#define PERF1VECINT 0x04820588
|
||||
#define PERF0TRIG_OFFSET 16
|
||||
#define PERF1TRIG_OFFSET 17
|
||||
|
||||
/**
 * struct metag_internal_irq_priv - private meta internal interrupt data
 * @domain:	IRQ domain for all internal Meta IRQs (HWSTATMETA)
 * @unmasked:	bitmap of currently unmasked hw irqs
 */
struct metag_internal_irq_priv {
	struct irq_domain	*domain;
	unsigned long		unmasked;
};

/* Private data for the one and only internal interrupt controller */
static struct metag_internal_irq_priv metag_internal_irq_priv;
|
||||
|
||||
static unsigned int metag_internal_irq_startup(struct irq_data *data);
|
||||
static void metag_internal_irq_shutdown(struct irq_data *data);
|
||||
static void metag_internal_irq_ack(struct irq_data *data);
|
||||
static void metag_internal_irq_mask(struct irq_data *data);
|
||||
static void metag_internal_irq_unmask(struct irq_data *data);
|
||||
#ifdef CONFIG_SMP
|
||||
static int metag_internal_irq_set_affinity(struct irq_data *data,
|
||||
const struct cpumask *cpumask, bool force);
|
||||
#endif
|
||||
|
||||
static struct irq_chip internal_irq_edge_chip = {
|
||||
.name = "HWSTATMETA-IRQ",
|
||||
.irq_startup = metag_internal_irq_startup,
|
||||
.irq_shutdown = metag_internal_irq_shutdown,
|
||||
.irq_ack = metag_internal_irq_ack,
|
||||
.irq_mask = metag_internal_irq_mask,
|
||||
.irq_unmask = metag_internal_irq_unmask,
|
||||
#ifdef CONFIG_SMP
|
||||
.irq_set_affinity = metag_internal_irq_set_affinity,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* metag_hwvec_addr - get the address of *VECINT regs of irq
|
||||
*
|
||||
* This function is a table of supported triggers on HWSTATMETA
|
||||
* Could do with a structure, but better keep it simple. Changes
|
||||
* in this code should be rare.
|
||||
*/
|
||||
static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw)
|
||||
{
|
||||
void __iomem *addr;
|
||||
|
||||
switch (hw) {
|
||||
case PERF0TRIG_OFFSET:
|
||||
addr = (void __iomem *)PERF0VECINT;
|
||||
break;
|
||||
case PERF1TRIG_OFFSET:
|
||||
addr = (void __iomem *)PERF1VECINT;
|
||||
break;
|
||||
default:
|
||||
addr = NULL;
|
||||
break;
|
||||
}
|
||||
return addr;
|
||||
}
|
||||
|
||||
/*
 * metag_internal_irq_startup - setup an internal irq
 * @data: irq data of the irq to start up
 *
 * Multiplex interrupts for the irq onto TR1. Clear any pending
 * interrupts first.
 */
static unsigned int metag_internal_irq_startup(struct irq_data *data)
{
	/* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
	metag_internal_irq_ack(data);

	/* Enable the interrupt by unmasking it */
	metag_internal_irq_unmask(data);

	return 0;
}
|
||||
|
||||
/*
 * metag_internal_irq_shutdown - turn off the irq
 * @data: irq data of the irq to shut down
 *
 * Mask the irq and clear any pending interrupts.
 * Stop muxing the irq onto TR1.
 */
static void metag_internal_irq_shutdown(struct irq_data *data)
{
	/* Disable the IRQ at the core by masking it. */
	metag_internal_irq_mask(data);

	/* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
	metag_internal_irq_ack(data);
}
|
||||
|
||||
/*
|
||||
* metag_internal_irq_ack - acknowledge irq
|
||||
* @irq: the irq to ack
|
||||
*/
|
||||
static void metag_internal_irq_ack(struct irq_data *data)
|
||||
{
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << hw;
|
||||
|
||||
if (metag_in32(HWSTATMETA) & bit)
|
||||
metag_out32(bit, HWSTATMETA);
|
||||
}
|
||||
|
||||
/**
|
||||
* metag_internal_irq_mask() - mask an internal irq by unvectoring
|
||||
* @data: data for the internal irq to mask
|
||||
*
|
||||
* HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core
|
||||
* and retriggered if necessary later.
|
||||
*/
|
||||
static void metag_internal_irq_mask(struct irq_data *data)
|
||||
{
|
||||
struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
void __iomem *vec_addr = metag_hwvec_addr(hw);
|
||||
|
||||
clear_bit(hw, &priv->unmasked);
|
||||
|
||||
/* there is no interrupt mask, so unvector the interrupt */
|
||||
metag_out32(0, vec_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring
|
||||
* @data: data for the internal irq to unmask
|
||||
*
|
||||
* HWSTATMETA has no mask register. Instead the IRQ is revectored back to the
|
||||
* core and retriggered if necessary.
|
||||
*/
|
||||
static void metag_internal_irq_unmask(struct irq_data *data)
|
||||
{
|
||||
struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
|
||||
irq_hw_number_t hw = data->hwirq;
|
||||
unsigned int bit = 1 << hw;
|
||||
void __iomem *vec_addr = metag_hwvec_addr(hw);
|
||||
unsigned int thread = hard_processor_id();
|
||||
|
||||
set_bit(hw, &priv->unmasked);
|
||||
|
||||
/* there is no interrupt mask, so revector the interrupt */
|
||||
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr);
|
||||
|
||||
/*
|
||||
* Re-trigger interrupt
|
||||
*
|
||||
* Writing a 1 toggles, and a 0->1 transition triggers. We only
|
||||
* retrigger if the status bit is already set, which means we
|
||||
* need to clear it first. Retriggering is fundamentally racy
|
||||
* because if the interrupt fires again after we clear it we
|
||||
* could end up clearing it again and the interrupt handler
|
||||
* thinking it hasn't fired. Therefore we need to keep trying to
|
||||
* retrigger until the bit is set.
|
||||
*/
|
||||
if (metag_in32(HWSTATMETA) & bit) {
|
||||
metag_out32(bit, HWSTATMETA);
|
||||
while (!(metag_in32(HWSTATMETA) & bit))
|
||||
metag_out32(bit, HWSTATMETA);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
/*
 * metag_internal_irq_set_affinity - set the affinity for an interrupt
 *
 * Wire up this interrupt from *VECINT to the Meta core.
 *
 * Note that we can't wire up *VECINT to interrupt more than
 * one cpu (the interrupt code doesn't support it), so we just
 * pick the first cpu we find in 'cpumask'.
 */
static int metag_internal_irq_set_affinity(struct irq_data *data,
					   const struct cpumask *cpumask,
					   bool force)
{
	unsigned int cpu, thread;
	irq_hw_number_t hw = data->hwirq;

	cpu = cpumask_any_and(cpumask, cpu_online_mask);
	thread = cpu_2_hwthread_id[cpu];

	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
		    metag_hwvec_addr(hw));

	return 0;
}
#endif
|
||||
|
||||
/*
|
||||
* metag_internal_irq_demux - irq de-multiplexer
|
||||
* @irq: the interrupt number
|
||||
* @desc: the interrupt description structure for this irq
|
||||
*
|
||||
* The cpu receives an interrupt on TR1 when an interrupt has
|
||||
* occurred. It is this function's job to demux this irq and
|
||||
* figure out exactly which trigger needs servicing.
|
||||
*/
|
||||
static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
|
||||
irq_hw_number_t hw;
|
||||
unsigned int irq_no;
|
||||
u32 status;
|
||||
|
||||
recalculate:
|
||||
status = metag_in32(HWSTATMETA) & priv->unmasked;
|
||||
|
||||
for (hw = 0; status != 0; status >>= 1, ++hw) {
|
||||
if (status & 0x1) {
|
||||
/*
|
||||
* Map the hardware IRQ number to a virtual Linux IRQ
|
||||
* number.
|
||||
*/
|
||||
irq_no = irq_linear_revmap(priv->domain, hw);
|
||||
|
||||
/*
|
||||
* Only fire off interrupts that are
|
||||
* registered to be handled by the kernel.
|
||||
* Other interrupts are probably being
|
||||
* handled by other Meta hardware threads.
|
||||
*/
|
||||
generic_handle_irq(irq_no);
|
||||
|
||||
/*
|
||||
* The handler may have re-enabled interrupts
|
||||
* which could have caused a nested invocation
|
||||
* of this code and make the copy of the
|
||||
* status register we are using invalid.
|
||||
*/
|
||||
goto recalculate;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* internal_irq_map() - Map an internal meta IRQ to a virtual IRQ number.
|
||||
* @hw: Number of the internal IRQ. Must be in range.
|
||||
*
|
||||
* Returns: The virtual IRQ number of the Meta internal IRQ specified by
|
||||
* @hw.
|
||||
*/
|
||||
int internal_irq_map(unsigned int hw)
|
||||
{
|
||||
struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
|
||||
if (!priv->domain)
|
||||
return -ENODEV;
|
||||
return irq_create_mapping(priv->domain, hw);
|
||||
}
|
||||
|
||||
/**
|
||||
* metag_internal_irq_init_cpu - regsister with the Meta cpu
|
||||
* @cpu: the CPU to register on
|
||||
*
|
||||
* Configure @cpu's TR1 irq so that we can demux irqs.
|
||||
*/
|
||||
static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv,
|
||||
int cpu)
|
||||
{
|
||||
unsigned int thread = cpu_2_hwthread_id[cpu];
|
||||
unsigned int signum = TBID_SIGNUM_TR1(thread);
|
||||
int irq = tbisig_map(signum);
|
||||
|
||||
/* Register the multiplexed IRQ handler */
|
||||
irq_set_handler_data(irq, priv);
|
||||
irq_set_chained_handler(irq, metag_internal_irq_demux);
|
||||
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
|
||||
}
|
||||
|
||||
/**
|
||||
* metag_internal_intc_map() - map an internal irq
|
||||
* @d: irq domain of internal trigger block
|
||||
* @irq: virtual irq number
|
||||
* @hw: hardware irq number within internal trigger block
|
||||
*
|
||||
* This sets up a virtual irq for a specified hardware interrupt. The irq chip
|
||||
* and handler is configured.
|
||||
*/
|
||||
static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
/* only register interrupt if it is mapped */
|
||||
if (!metag_hwvec_addr(hw))
|
||||
return -EINVAL;
|
||||
|
||||
irq_set_chip_and_handler(irq, &internal_irq_edge_chip,
|
||||
handle_edge_irq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops metag_internal_intc_domain_ops = {
|
||||
.map = metag_internal_intc_map,
|
||||
};
|
||||
|
||||
/**
|
||||
* metag_internal_irq_register - register internal IRQs
|
||||
*
|
||||
* Register the irq chip and handler function for all internal IRQs
|
||||
*/
|
||||
int __init init_internal_IRQ(void)
|
||||
{
|
||||
struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
|
||||
unsigned int cpu;
|
||||
|
||||
/* Set up an IRQ domain */
|
||||
priv->domain = irq_domain_add_linear(NULL, 32,
|
||||
&metag_internal_intc_domain_ops,
|
||||
priv);
|
||||
if (unlikely(!priv->domain)) {
|
||||
pr_err("meta-internal-intc: cannot add IRQ domain\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Setup TR1 for all cpus. */
|
||||
for_each_possible_cpu(cpu)
|
||||
metag_internal_irq_init_cpu(priv, cpu);
|
||||
|
||||
return 0;
|
||||
};
|
491
drivers/irqchip/irq-mmp.c
Normal file
491
drivers/irqchip/irq-mmp.c
Normal file
|
@ -0,0 +1,491 @@
|
|||
/*
|
||||
* linux/arch/arm/mach-mmp/irq.c
|
||||
*
|
||||
* Generic IRQ handling, GPIO IRQ demultiplexing, etc.
|
||||
* Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
|
||||
*
|
||||
* Author: Bin Yang <bin.yang@marvell.com>
|
||||
* Haojian Zhuang <haojian.zhuang@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/hardirq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define MAX_ICU_NR 16
|
||||
|
||||
#define PJ1_INT_SEL 0x10c
|
||||
#define PJ4_INT_SEL 0x104
|
||||
|
||||
/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
|
||||
#define SEL_INT_PENDING (1 << 6)
|
||||
#define SEL_INT_NUM_MASK 0x3f
|
||||
|
||||
struct icu_chip_data {
|
||||
int nr_irqs;
|
||||
unsigned int virq_base;
|
||||
unsigned int cascade_irq;
|
||||
void __iomem *reg_status;
|
||||
void __iomem *reg_mask;
|
||||
unsigned int conf_enable;
|
||||
unsigned int conf_disable;
|
||||
unsigned int conf_mask;
|
||||
unsigned int clr_mfp_irq_base;
|
||||
unsigned int clr_mfp_hwirq;
|
||||
struct irq_domain *domain;
|
||||
};
|
||||
|
||||
struct mmp_intc_conf {
|
||||
unsigned int conf_enable;
|
||||
unsigned int conf_disable;
|
||||
unsigned int conf_mask;
|
||||
};
|
||||
|
||||
static void __iomem *mmp_icu_base;
|
||||
static struct icu_chip_data icu_data[MAX_ICU_NR];
|
||||
static int max_icu_nr;
|
||||
|
||||
extern void mmp2_clear_pmic_int(void);
|
||||
|
||||
static void icu_mask_ack_irq(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
|
||||
int hwirq;
|
||||
u32 r;
|
||||
|
||||
hwirq = d->irq - data->virq_base;
|
||||
if (data == &icu_data[0]) {
|
||||
r = readl_relaxed(mmp_icu_base + (hwirq << 2));
|
||||
r &= ~data->conf_mask;
|
||||
r |= data->conf_disable;
|
||||
writel_relaxed(r, mmp_icu_base + (hwirq << 2));
|
||||
} else {
|
||||
#ifdef CONFIG_CPU_MMP2
|
||||
if ((data->virq_base == data->clr_mfp_irq_base)
|
||||
&& (hwirq == data->clr_mfp_hwirq))
|
||||
mmp2_clear_pmic_int();
|
||||
#endif
|
||||
r = readl_relaxed(data->reg_mask) | (1 << hwirq);
|
||||
writel_relaxed(r, data->reg_mask);
|
||||
}
|
||||
}
|
||||
|
||||
static void icu_mask_irq(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
|
||||
int hwirq;
|
||||
u32 r;
|
||||
|
||||
hwirq = d->irq - data->virq_base;
|
||||
if (data == &icu_data[0]) {
|
||||
r = readl_relaxed(mmp_icu_base + (hwirq << 2));
|
||||
r &= ~data->conf_mask;
|
||||
r |= data->conf_disable;
|
||||
writel_relaxed(r, mmp_icu_base + (hwirq << 2));
|
||||
} else {
|
||||
r = readl_relaxed(data->reg_mask) | (1 << hwirq);
|
||||
writel_relaxed(r, data->reg_mask);
|
||||
}
|
||||
}
|
||||
|
||||
static void icu_unmask_irq(struct irq_data *d)
|
||||
{
|
||||
struct irq_domain *domain = d->domain;
|
||||
struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
|
||||
int hwirq;
|
||||
u32 r;
|
||||
|
||||
hwirq = d->irq - data->virq_base;
|
||||
if (data == &icu_data[0]) {
|
||||
r = readl_relaxed(mmp_icu_base + (hwirq << 2));
|
||||
r &= ~data->conf_mask;
|
||||
r |= data->conf_enable;
|
||||
writel_relaxed(r, mmp_icu_base + (hwirq << 2));
|
||||
} else {
|
||||
r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
|
||||
writel_relaxed(r, data->reg_mask);
|
||||
}
|
||||
}
|
||||
|
||||
struct irq_chip icu_irq_chip = {
|
||||
.name = "icu_irq",
|
||||
.irq_mask = icu_mask_irq,
|
||||
.irq_mask_ack = icu_mask_ack_irq,
|
||||
.irq_unmask = icu_unmask_irq,
|
||||
};
|
||||
|
||||
static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct irq_domain *domain;
|
||||
struct icu_chip_data *data;
|
||||
int i;
|
||||
unsigned long mask, status, n;
|
||||
|
||||
for (i = 1; i < max_icu_nr; i++) {
|
||||
if (irq == icu_data[i].cascade_irq) {
|
||||
domain = icu_data[i].domain;
|
||||
data = (struct icu_chip_data *)domain->host_data;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i >= max_icu_nr) {
|
||||
pr_err("Spurious irq %d in MMP INTC\n", irq);
|
||||
return;
|
||||
}
|
||||
|
||||
mask = readl_relaxed(data->reg_mask);
|
||||
while (1) {
|
||||
status = readl_relaxed(data->reg_status) & ~mask;
|
||||
if (status == 0)
|
||||
break;
|
||||
for_each_set_bit(n, &status, BITS_PER_LONG) {
|
||||
generic_handle_irq(icu_data[i].virq_base + n);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
|
||||
set_irq_flags(irq, IRQF_VALID);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq,
|
||||
unsigned int *out_type)
|
||||
{
|
||||
*out_hwirq = intspec[0];
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct irq_domain_ops mmp_irq_domain_ops = {
|
||||
.map = mmp_irq_domain_map,
|
||||
.xlate = mmp_irq_domain_xlate,
|
||||
};
|
||||
|
||||
static struct mmp_intc_conf mmp_conf = {
|
||||
.conf_enable = 0x51,
|
||||
.conf_disable = 0x0,
|
||||
.conf_mask = 0x7f,
|
||||
};
|
||||
|
||||
static struct mmp_intc_conf mmp2_conf = {
|
||||
.conf_enable = 0x20,
|
||||
.conf_disable = 0x0,
|
||||
.conf_mask = 0x7f,
|
||||
};
|
||||
|
||||
/* Top-level exception entry for MMP: read the pending irq from
 * PJ1_INT_SEL and dispatch it through the main ICU domain. */
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
{
	int hwirq;

	hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
	if (!(hwirq & SEL_INT_PENDING))
		return;
	hwirq &= SEL_INT_NUM_MASK;
	handle_domain_irq(icu_data[0].domain, hwirq, regs);
}
||||
|
||||
/* Top-level exception entry for MMP2: same as mmp_handle_irq() but the
 * select register lives at PJ4_INT_SEL. */
static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
{
	int hwirq;

	hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
	if (!(hwirq & SEL_INT_PENDING))
		return;
	hwirq &= SEL_INT_NUM_MASK;
	handle_domain_irq(icu_data[0].domain, hwirq, regs);
}
|
||||
|
||||
/* MMP (ARMv5) */
|
||||
void __init icu_init_irq(void)
|
||||
{
|
||||
int irq;
|
||||
|
||||
max_icu_nr = 1;
|
||||
mmp_icu_base = ioremap(0xd4282000, 0x1000);
|
||||
icu_data[0].conf_enable = mmp_conf.conf_enable;
|
||||
icu_data[0].conf_disable = mmp_conf.conf_disable;
|
||||
icu_data[0].conf_mask = mmp_conf.conf_mask;
|
||||
icu_data[0].nr_irqs = 64;
|
||||
icu_data[0].virq_base = 0;
|
||||
icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[0]);
|
||||
for (irq = 0; irq < 64; irq++) {
|
||||
icu_mask_irq(irq_get_irq_data(irq));
|
||||
irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
|
||||
set_irq_flags(irq, IRQF_VALID);
|
||||
}
|
||||
irq_set_default_host(icu_data[0].domain);
|
||||
set_handle_irq(mmp_handle_irq);
|
||||
}
|
||||
|
||||
/* MMP2 (ARMv7) */
|
||||
void __init mmp2_init_icu(void)
|
||||
{
|
||||
int irq, end;
|
||||
|
||||
max_icu_nr = 8;
|
||||
mmp_icu_base = ioremap(0xd4282000, 0x1000);
|
||||
icu_data[0].conf_enable = mmp2_conf.conf_enable;
|
||||
icu_data[0].conf_disable = mmp2_conf.conf_disable;
|
||||
icu_data[0].conf_mask = mmp2_conf.conf_mask;
|
||||
icu_data[0].nr_irqs = 64;
|
||||
icu_data[0].virq_base = 0;
|
||||
icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[0]);
|
||||
icu_data[1].reg_status = mmp_icu_base + 0x150;
|
||||
icu_data[1].reg_mask = mmp_icu_base + 0x168;
|
||||
icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
|
||||
icu_data[0].nr_irqs;
|
||||
icu_data[1].clr_mfp_hwirq = 1; /* offset to IRQ_MMP2_PMIC_BASE */
|
||||
icu_data[1].nr_irqs = 2;
|
||||
icu_data[1].cascade_irq = 4;
|
||||
icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
|
||||
icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
|
||||
icu_data[1].virq_base, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[1]);
|
||||
icu_data[2].reg_status = mmp_icu_base + 0x154;
|
||||
icu_data[2].reg_mask = mmp_icu_base + 0x16c;
|
||||
icu_data[2].nr_irqs = 2;
|
||||
icu_data[2].cascade_irq = 5;
|
||||
icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
|
||||
icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
|
||||
icu_data[2].virq_base, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[2]);
|
||||
icu_data[3].reg_status = mmp_icu_base + 0x180;
|
||||
icu_data[3].reg_mask = mmp_icu_base + 0x17c;
|
||||
icu_data[3].nr_irqs = 3;
|
||||
icu_data[3].cascade_irq = 9;
|
||||
icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
|
||||
icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
|
||||
icu_data[3].virq_base, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[3]);
|
||||
icu_data[4].reg_status = mmp_icu_base + 0x158;
|
||||
icu_data[4].reg_mask = mmp_icu_base + 0x170;
|
||||
icu_data[4].nr_irqs = 5;
|
||||
icu_data[4].cascade_irq = 17;
|
||||
icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
|
||||
icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
|
||||
icu_data[4].virq_base, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[4]);
|
||||
icu_data[5].reg_status = mmp_icu_base + 0x15c;
|
||||
icu_data[5].reg_mask = mmp_icu_base + 0x174;
|
||||
icu_data[5].nr_irqs = 15;
|
||||
icu_data[5].cascade_irq = 35;
|
||||
icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
|
||||
icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
|
||||
icu_data[5].virq_base, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[5]);
|
||||
icu_data[6].reg_status = mmp_icu_base + 0x160;
|
||||
icu_data[6].reg_mask = mmp_icu_base + 0x178;
|
||||
icu_data[6].nr_irqs = 2;
|
||||
icu_data[6].cascade_irq = 51;
|
||||
icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
|
||||
icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
|
||||
icu_data[6].virq_base, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[6]);
|
||||
icu_data[7].reg_status = mmp_icu_base + 0x188;
|
||||
icu_data[7].reg_mask = mmp_icu_base + 0x184;
|
||||
icu_data[7].nr_irqs = 2;
|
||||
icu_data[7].cascade_irq = 55;
|
||||
icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
|
||||
icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
|
||||
icu_data[7].virq_base, 0,
|
||||
&irq_domain_simple_ops,
|
||||
&icu_data[7]);
|
||||
end = icu_data[7].virq_base + icu_data[7].nr_irqs;
|
||||
for (irq = 0; irq < end; irq++) {
|
||||
icu_mask_irq(irq_get_irq_data(irq));
|
||||
if (irq == icu_data[1].cascade_irq ||
|
||||
irq == icu_data[2].cascade_irq ||
|
||||
irq == icu_data[3].cascade_irq ||
|
||||
irq == icu_data[4].cascade_irq ||
|
||||
irq == icu_data[5].cascade_irq ||
|
||||
irq == icu_data[6].cascade_irq ||
|
||||
irq == icu_data[7].cascade_irq) {
|
||||
irq_set_chip(irq, &icu_irq_chip);
|
||||
irq_set_chained_handler(irq, icu_mux_irq_demux);
|
||||
} else {
|
||||
irq_set_chip_and_handler(irq, &icu_irq_chip,
|
||||
handle_level_irq);
|
||||
}
|
||||
set_irq_flags(irq, IRQF_VALID);
|
||||
}
|
||||
irq_set_default_host(icu_data[0].domain);
|
||||
set_handle_irq(mmp2_handle_irq);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
/* Common DT init: map registers, create the main ICU irq domain and
 * pre-create mappings for all hw irqs.
 *
 * Fixes: nr_irqs is now a u32 matching of_property_read_u32()'s
 * parameter type (was int *, an incompatible pointer); the result of
 * irq_domain_add_linear() is checked before it is dereferenced by
 * irq_create_mapping().
 */
static int __init mmp_init_bases(struct device_node *node)
{
	int ret, irq, i = 0;
	u32 nr_irqs;

	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return ret;
	}

	mmp_icu_base = of_iomap(node, 0);
	if (!mmp_icu_base) {
		pr_err("Failed to get interrupt controller register\n");
		return -ENOMEM;
	}

	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
						   &mmp_irq_domain_ops,
						   &icu_data[0]);
	if (!icu_data[0].domain) {
		pr_err("Failed to add IRQ domain\n");
		iounmap(mmp_icu_base);
		return -ENOMEM;
	}
	for (irq = 0; irq < (int)nr_irqs; irq++) {
		ret = irq_create_mapping(icu_data[0].domain, irq);
		if (!ret) {
			pr_err("Failed to mapping hwirq\n");
			goto err;
		}
		/* remember the virq of hw irq 0 as the range base */
		if (!irq)
			icu_data[0].virq_base = ret;
	}
	icu_data[0].nr_irqs = nr_irqs;
	return 0;
err:
	if (icu_data[0].virq_base) {
		for (i = 0; i < irq; i++)
			irq_dispose_mapping(icu_data[0].virq_base + i);
	}
	irq_domain_remove(icu_data[0].domain);
	iounmap(mmp_icu_base);
	return -EINVAL;
}
|
||||
|
||||
static int __init mmp_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = mmp_init_bases(node);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
icu_data[0].conf_enable = mmp_conf.conf_enable;
|
||||
icu_data[0].conf_disable = mmp_conf.conf_disable;
|
||||
icu_data[0].conf_mask = mmp_conf.conf_mask;
|
||||
irq_set_default_host(icu_data[0].domain);
|
||||
set_handle_irq(mmp_handle_irq);
|
||||
max_icu_nr = 1;
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
|
||||
|
||||
static int __init mmp2_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = mmp_init_bases(node);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
icu_data[0].conf_enable = mmp2_conf.conf_enable;
|
||||
icu_data[0].conf_disable = mmp2_conf.conf_disable;
|
||||
icu_data[0].conf_mask = mmp2_conf.conf_mask;
|
||||
irq_set_default_host(icu_data[0].domain);
|
||||
set_handle_irq(mmp2_handle_irq);
|
||||
max_icu_nr = 1;
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
|
||||
|
||||
/*
 * DT probe for a cascaded MMP2 mux interrupt controller.
 *
 * The mux hangs off one parent ICU line (the "cascade" irq).  Its "reg"
 * entries are offsets into the already-mapped root ICU window
 * (mmp_icu_base): entry 0 is the status register, entry 1 the mask
 * register.  All hwirqs are pre-mapped so virq_base stays contiguous.
 */
static int __init mmp2_mux_of_init(struct device_node *node,
				   struct device_node *parent)
{
	struct resource res;
	int i, ret, irq, j = 0;
	u32 nr_irqs, mfp_irq;

	/* A mux is only meaningful underneath a parent controller. */
	if (!parent)
		return -ENODEV;

	i = max_icu_nr;		/* next free icu_data[] slot */
	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
				   &nr_irqs);
	if (ret) {
		pr_err("Not found mrvl,intc-nr-irqs property\n");
		return -EINVAL;
	}
	ret = of_address_to_resource(node, 0, &res);
	if (ret < 0) {
		pr_err("Not found reg property\n");
		return -EINVAL;
	}
	/* reg entries are offsets relative to the root ICU mapping. */
	icu_data[i].reg_status = mmp_icu_base + res.start;
	ret = of_address_to_resource(node, 1, &res);
	if (ret < 0) {
		pr_err("Not found reg property\n");
		return -EINVAL;
	}
	icu_data[i].reg_mask = mmp_icu_base + res.start;
	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
	if (!icu_data[i].cascade_irq)
		return -EINVAL;

	icu_data[i].virq_base = 0;
	icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
						   &mmp_irq_domain_ops,
						   &icu_data[i]);
	/* Pre-map every hwirq; remember the virq of hwirq 0 as the base. */
	for (irq = 0; irq < nr_irqs; irq++) {
		ret = irq_create_mapping(icu_data[i].domain, irq);
		if (!ret) {
			pr_err("Failed to mapping hwirq\n");
			goto err;
		}
		if (!irq)
			icu_data[i].virq_base = ret;
	}
	icu_data[i].nr_irqs = nr_irqs;
	/* Optional: one hwirq needs an MFP edge-detect clear on ack. */
	if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
				  &mfp_irq)) {
		icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
		icu_data[i].clr_mfp_hwirq = mfp_irq;
	}
	irq_set_chained_handler(icu_data[i].cascade_irq,
				icu_mux_irq_demux);
	max_icu_nr++;
	return 0;
err:
	/* Undo the mappings created so far, then drop the domain. */
	if (icu_data[i].virq_base) {
		for (j = 0; j < irq; j++)
			irq_dispose_mapping(icu_data[i].virq_base + j);
	}
	irq_domain_remove(icu_data[i].domain);
	return -EINVAL;
}
IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
|
||||
#endif
|
117
drivers/irqchip/irq-moxart.c
Normal file
117
drivers/irqchip/irq-moxart.c
Normal file
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* MOXA ART SoCs IRQ chip driver.
|
||||
*
|
||||
* Copyright (C) 2013 Jonas Jensen
|
||||
*
|
||||
* Jonas Jensen <jonas.jensen@gmail.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define IRQ_SOURCE_REG 0
|
||||
#define IRQ_MASK_REG 0x04
|
||||
#define IRQ_CLEAR_REG 0x08
|
||||
#define IRQ_MODE_REG 0x0c
|
||||
#define IRQ_LEVEL_REG 0x10
|
||||
#define IRQ_STATUS_REG 0x14
|
||||
|
||||
#define FIQ_SOURCE_REG 0x20
|
||||
#define FIQ_MASK_REG 0x24
|
||||
#define FIQ_CLEAR_REG 0x28
|
||||
#define FIQ_MODE_REG 0x2c
|
||||
#define FIQ_LEVEL_REG 0x30
|
||||
#define FIQ_STATUS_REG 0x34
|
||||
|
||||
|
||||
/* State for the (single) MOXA ART interrupt controller instance. */
struct moxart_irq_data {
	void __iomem *base;		/* mapped controller registers */
	struct irq_domain *domain;	/* linear domain covering 32 hwirqs */
	unsigned int interrupt_mask;	/* "interrupt-mask" DT property; written
					 * to the MODE and LEVEL registers */
};

static struct moxart_irq_data intc;
|
||||
|
||||
/*
 * Top-level IRQ dispatcher: snapshot the pending-status register once
 * and service every set bit, lowest hwirq first.
 */
static void __exception_irq_entry handle_irq(struct pt_regs *regs)
{
	u32 pending = readl(intc.base + IRQ_STATUS_REG);

	/* pending &= pending - 1 clears the lowest set bit each pass. */
	for (; pending; pending &= pending - 1) {
		int hwirq = ffs(pending) - 1;

		handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
	}
}
|
||||
|
||||
/*
 * DT probe for the MOXA ART interrupt controller: map registers, build a
 * 32-irq linear domain backed by one generic irq chip (edge flow), apply
 * the board's mode/level configuration and install the IRQ entry handler.
 */
static int __init moxart_of_intc_init(struct device_node *node,
				      struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;
	struct irq_chip_generic *gc;

	intc.base = of_iomap(node, 0);
	if (!intc.base) {
		pr_err("%s: unable to map IC registers\n",
		       node->full_name);
		return -EINVAL;
	}

	intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
					    intc.base);
	if (!intc.domain) {
		pr_err("%s: unable to create IRQ domain\n", node->full_name);
		return -EINVAL;
	}

	ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1,
					     "MOXARTINTC", handle_edge_irq,
					     clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: could not allocate generic chip\n",
		       node->full_name);
		irq_domain_remove(intc.domain);
		return -EINVAL;
	}

	/* interrupt_mask stays 0 (static storage) if the property is absent. */
	ret = of_property_read_u32(node, "interrupt-mask",
				   &intc.interrupt_mask);
	if (ret)
		pr_err("%s: could not read interrupt-mask DT property\n",
		       node->full_name);

	gc = irq_get_domain_generic_chip(intc.domain, 0);

	/* Single chip covers all 32 irqs: mask/ack via the shared registers. */
	gc->reg_base = intc.base;
	gc->chip_types[0].regs.mask = IRQ_MASK_REG;
	gc->chip_types[0].regs.ack = IRQ_CLEAR_REG;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* Start clean: everything masked, all latched interrupts cleared. */
	writel(0, intc.base + IRQ_MASK_REG);
	writel(0xffffffff, intc.base + IRQ_CLEAR_REG);

	/* Program per-line mode/level from the DT-provided bitmap. */
	writel(intc.interrupt_mask, intc.base + IRQ_MODE_REG);
	writel(intc.interrupt_mask, intc.base + IRQ_LEVEL_REG);

	set_handle_irq(handle_irq);

	return 0;
}
IRQCHIP_DECLARE(moxa_moxart_ic, "moxa,moxart-ic", moxart_of_intc_init);
|
114
drivers/irqchip/irq-mxs.c
Normal file
114
drivers/irqchip/irq-mxs.c
Normal file
|
@ -0,0 +1,114 @@
|
|||
/*
|
||||
* Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/stmp_device.h>
|
||||
#include <asm/exception.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define HW_ICOLL_VECTOR 0x0000
|
||||
#define HW_ICOLL_LEVELACK 0x0010
|
||||
#define HW_ICOLL_CTRL 0x0020
|
||||
#define HW_ICOLL_STAT_OFFSET 0x0070
|
||||
#define HW_ICOLL_INTERRUPTn_SET(n) (0x0124 + (n) * 0x10)
|
||||
#define HW_ICOLL_INTERRUPTn_CLR(n) (0x0128 + (n) * 0x10)
|
||||
#define BM_ICOLL_INTERRUPTn_ENABLE 0x00000004
|
||||
#define BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 0x1
|
||||
|
||||
#define ICOLL_NUM_IRQS 128
|
||||
|
||||
static void __iomem *icoll_base;	/* mapped ICOLL register block */
static struct irq_domain *icoll_domain;	/* linear domain, ICOLL_NUM_IRQS wide */

/* Acknowledge the in-service interrupt at priority level 0. */
static void icoll_ack_irq(struct irq_data *d)
{
	/*
	 * The Interrupt Collector is able to prioritize irqs.
	 * Currently only level 0 is used. So acking can use
	 * BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0 unconditionally.
	 */
	__raw_writel(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0,
			icoll_base + HW_ICOLL_LEVELACK);
}

/* Mask d->hwirq by clearing its per-interrupt ENABLE bit. */
static void icoll_mask_irq(struct irq_data *d)
{
	__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			icoll_base + HW_ICOLL_INTERRUPTn_CLR(d->hwirq));
}

/* Unmask d->hwirq by setting its per-interrupt ENABLE bit. */
static void icoll_unmask_irq(struct irq_data *d)
{
	__raw_writel(BM_ICOLL_INTERRUPTn_ENABLE,
			icoll_base + HW_ICOLL_INTERRUPTn_SET(d->hwirq));
}

static struct irq_chip mxs_icoll_chip = {
	.irq_ack = icoll_ack_irq,
	.irq_mask = icoll_mask_irq,
	.irq_unmask = icoll_unmask_irq,
};
|
||||
|
||||
/*
 * Top-level IRQ entry: read the highest-priority pending irq number,
 * latch it into the VECTOR register (starts the in-service window that
 * icoll_ack_irq later closes), then dispatch through the domain.
 */
asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
	__raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
	handle_domain_irq(icoll_domain, irqnr, regs);
}
|
||||
|
||||
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq);
|
||||
set_irq_flags(virq, IRQF_VALID);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct irq_domain_ops icoll_irq_domain_ops = {
|
||||
.map = icoll_irq_domain_map,
|
||||
.xlate = irq_domain_xlate_onecell,
|
||||
};
|
||||
|
||||
/*
 * DT probe for the Freescale MXS interrupt collector: map registers,
 * reset the block (all priorities back to level 0) and create the
 * linear IRQ domain.
 */
static int __init icoll_of_init(struct device_node *np,
			  struct device_node *interrupt_parent)
{
	icoll_base = of_iomap(np, 0);
	WARN_ON(!icoll_base);

	/*
	 * Interrupt Collector reset, which initializes the priority
	 * for each irq to level 0.
	 */
	stmp_reset_block(icoll_base + HW_ICOLL_CTRL);

	icoll_domain = irq_domain_add_linear(np, ICOLL_NUM_IRQS,
					     &icoll_irq_domain_ops, NULL);
	return icoll_domain ? 0 : -ENODEV;
}
IRQCHIP_DECLARE(mxs, "fsl,icoll", icoll_of_init);
|
112
drivers/irqchip/irq-nvic.c
Normal file
112
drivers/irqchip/irq-nvic.c
Normal file
|
@ -0,0 +1,112 @@
|
|||
/*
|
||||
* drivers/irq/irq-nvic.c
|
||||
*
|
||||
* Copyright (C) 2008 ARM Limited, All Rights Reserved.
|
||||
* Copyright (C) 2013 Pengutronix
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* Support for the Nested Vectored Interrupt Controller found on the
|
||||
* ARMv7-M CPUs (Cortex-M3/M4)
|
||||
*/
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
|
||||
#include <asm/v7m.h>
|
||||
#include <asm/exception.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define NVIC_ISER 0x000
|
||||
#define NVIC_ICER 0x080
|
||||
#define NVIC_IPR 0x300
|
||||
|
||||
#define NVIC_MAX_BANKS 16
|
||||
/*
|
||||
* Each bank handles 32 irqs. Only the 16th (= last) bank handles only
|
||||
* 16 irqs.
|
||||
*/
|
||||
#define NVIC_MAX_IRQ ((NVIC_MAX_BANKS - 1) * 32 + 16)
|
||||
|
||||
static struct irq_domain *nvic_irq_domain;
|
||||
|
||||
/*
 * Entry point called from the low-level exception code with the hwirq
 * number already extracted from the active exception; translate it to
 * a virq and dispatch.
 */
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
	unsigned int irq = irq_linear_revmap(nvic_irq_domain, hwirq);

	handle_IRQ(irq, regs);
}
|
||||
|
||||
static int __init nvic_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
|
||||
unsigned int irqs, i, ret, numbanks;
|
||||
void __iomem *nvic_base;
|
||||
|
||||
numbanks = (readl_relaxed(V7M_SCS_ICTR) &
|
||||
V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
|
||||
|
||||
nvic_base = of_iomap(node, 0);
|
||||
if (!nvic_base) {
|
||||
pr_warn("unable to map nvic registers\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
irqs = numbanks * 32;
|
||||
if (irqs > NVIC_MAX_IRQ)
|
||||
irqs = NVIC_MAX_IRQ;
|
||||
|
||||
nvic_irq_domain =
|
||||
irq_domain_add_linear(node, irqs, &irq_generic_chip_ops, NULL);
|
||||
if (!nvic_irq_domain) {
|
||||
pr_warn("Failed to allocate irq domain\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
|
||||
"nvic_irq", handle_fasteoi_irq,
|
||||
clr, 0, IRQ_GC_INIT_MASK_CACHE);
|
||||
if (ret) {
|
||||
pr_warn("Failed to allocate irq chips\n");
|
||||
irq_domain_remove(nvic_irq_domain);
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < numbanks; ++i) {
|
||||
struct irq_chip_generic *gc;
|
||||
|
||||
gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
|
||||
gc->reg_base = nvic_base + 4 * i;
|
||||
gc->chip_types[0].regs.enable = NVIC_ISER;
|
||||
gc->chip_types[0].regs.disable = NVIC_ICER;
|
||||
gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
|
||||
gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
|
||||
/* This is a no-op as end of interrupt is signaled by the
|
||||
* exception return sequence.
|
||||
*/
|
||||
gc->chip_types[0].chip.irq_eoi = irq_gc_noop;
|
||||
|
||||
/* disable interrupts */
|
||||
writel_relaxed(~0, gc->reg_base + NVIC_ICER);
|
||||
}
|
||||
|
||||
/* Set priority on all interrupts */
|
||||
for (i = 0; i < irqs; i += 4)
|
||||
writel_relaxed(0, nvic_base + NVIC_IPR + i);
|
||||
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);
|
418
drivers/irqchip/irq-omap-intc.c
Normal file
418
drivers/irqchip/irq-omap-intc.c
Normal file
|
@ -0,0 +1,418 @@
|
|||
/*
|
||||
* linux/arch/arm/mach-omap2/irq.c
|
||||
*
|
||||
* Interrupt handler for OMAP2 boards.
|
||||
*
|
||||
* Copyright (C) 2005 Nokia Corporation
|
||||
* Author: Paul Mundt <paul.mundt@nokia.com>
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/* Define these here for now until we drop all board-files */
|
||||
#define OMAP24XX_IC_BASE 0x480fe000
|
||||
#define OMAP34XX_IC_BASE 0x48200000
|
||||
|
||||
/* selected INTC register offsets */
|
||||
|
||||
#define INTC_REVISION 0x0000
|
||||
#define INTC_SYSCONFIG 0x0010
|
||||
#define INTC_SYSSTATUS 0x0014
|
||||
#define INTC_SIR 0x0040
|
||||
#define INTC_CONTROL 0x0048
|
||||
#define INTC_PROTECTION 0x004C
|
||||
#define INTC_IDLE 0x0050
|
||||
#define INTC_THRESHOLD 0x0068
|
||||
#define INTC_MIR0 0x0084
|
||||
#define INTC_MIR_CLEAR0 0x0088
|
||||
#define INTC_MIR_SET0 0x008c
|
||||
#define INTC_PENDING_IRQ0 0x0098
|
||||
#define INTC_PENDING_IRQ1 0x00b8
|
||||
#define INTC_PENDING_IRQ2 0x00d8
|
||||
#define INTC_PENDING_IRQ3 0x00f8
|
||||
#define INTC_ILR0 0x0100
|
||||
|
||||
#define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
|
||||
#define INTCPS_NR_ILR_REGS 128
|
||||
#define INTCPS_NR_MIR_REGS 4
|
||||
|
||||
#define INTC_IDLE_FUNCIDLE (1 << 0)
|
||||
#define INTC_IDLE_TURBO (1 << 1)
|
||||
|
||||
#define INTC_PROTECTION_ENABLE (1 << 0)
|
||||
|
||||
struct omap_intc_regs {
|
||||
u32 sysconfig;
|
||||
u32 protection;
|
||||
u32 idle;
|
||||
u32 threshold;
|
||||
u32 ilr[INTCPS_NR_ILR_REGS];
|
||||
u32 mir[INTCPS_NR_MIR_REGS];
|
||||
};
|
||||
static struct omap_intc_regs intc_context;
|
||||
|
||||
static struct irq_domain *domain;
|
||||
static void __iomem *omap_irq_base;
|
||||
static int omap_nr_pending = 3;
|
||||
static int omap_nr_irqs = 96;
|
||||
|
||||
/* Write @val to INTC register at byte offset @reg. */
static void intc_writel(u32 reg, u32 val)
{
	writel_relaxed(val, omap_irq_base + reg);
}

/* Read the INTC register at byte offset @reg. */
static u32 intc_readl(u32 reg)
{
	return readl_relaxed(omap_irq_base + reg);
}
|
||||
|
||||
/*
 * Snapshot INTC state (sysconfig, protection, idle, threshold, per-irq
 * priority ILRs and the MIR mask registers) into intc_context so it can
 * be restored after a power-domain off.
 */
void omap_intc_save_context(void)
{
	int i;

	intc_context.sysconfig =
		intc_readl(INTC_SYSCONFIG);
	intc_context.protection =
		intc_readl(INTC_PROTECTION);
	intc_context.idle =
		intc_readl(INTC_IDLE);
	intc_context.threshold =
		intc_readl(INTC_THRESHOLD);

	/* One ILR register per interrupt, 4 bytes apart. */
	for (i = 0; i < omap_nr_irqs; i++)
		intc_context.ilr[i] =
			intc_readl((INTC_ILR0 + 0x4 * i));
	/* MIR banks are 0x20 apart. */
	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_context.mir[i] =
			intc_readl(INTC_MIR0 + (0x20 * i));
}

/* Counterpart of omap_intc_save_context(): write the snapshot back. */
void omap_intc_restore_context(void)
{
	int i;

	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
	intc_writel(INTC_PROTECTION, intc_context.protection);
	intc_writel(INTC_IDLE, intc_context.idle);
	intc_writel(INTC_THRESHOLD, intc_context.threshold);

	for (i = 0; i < omap_nr_irqs; i++)
		intc_writel(INTC_ILR0 + 0x4 * i,
				intc_context.ilr[i]);

	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
		intc_writel(INTC_MIR0 + 0x20 * i,
			intc_context.mir[i]);
	/* MIRs are saved and restore with other PRCM registers */
}
|
||||
|
||||
/* Called before entering idle: work around erratum i540 (see below). */
void omap3_intc_prepare_idle(void)
{
	/*
	 * Disable autoidle as it can stall interrupt controller,
	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
	 */
	intc_writel(INTC_SYSCONFIG, 0);
	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
}

/* Called on idle exit: undo omap3_intc_prepare_idle(). */
void omap3_intc_resume_idle(void)
{
	/* Re-enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1);
	intc_writel(INTC_IDLE, 0);
}
|
||||
|
||||
/* XXX: FIQ and additional INTC support (only MPU at the moment) */
|
||||
/* XXX: FIQ and additional INTC support (only MPU at the moment) */
/* Acknowledge the current IRQ; @d is unused (global NEWIRQAGR write). */
static void omap_ack_irq(struct irq_data *d)
{
	intc_writel(INTC_CONTROL, 0x1);
}

/* Generic-chip mask followed by the global ack above. */
static void omap_mask_ack_irq(struct irq_data *d)
{
	irq_gc_mask_disable_reg(d);
	omap_ack_irq(d);
}
|
||||
|
||||
/*
 * Report the INTC revision, soft-reset the block, busy-wait for reset
 * completion and re-enable autoidle.
 */
static void __init omap_irq_soft_reset(void)
{
	unsigned long tmp;

	tmp = intc_readl(INTC_REVISION) & 0xff;

	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);

	tmp = intc_readl(INTC_SYSCONFIG);
	tmp |= 1 << 1;	/* soft reset */
	intc_writel(INTC_SYSCONFIG, tmp);

	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
		/* Wait for reset to complete */;

	/* Enable autoidle */
	intc_writel(INTC_SYSCONFIG, 1 << 0);
}
|
||||
|
||||
/* Return 1 if any interrupt is pending in any PENDING_IRQn bank, else 0. */
int omap_irq_pending(void)
{
	int i;

	for (i = 0; i < omap_nr_pending; i++)
		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
			return 1;
	return 0;
}

/* Ack any stale in-service interrupt before suspending. */
void omap3_intc_suspend(void)
{
	/* A pending interrupt would prevent OMAP from entering suspend */
	omap_ack_irq(NULL);
}
|
||||
|
||||
/*
 * DT path: allocate one generic irq chip per 32-irq bank of domain @d
 * and point each bank's enable/disable ops at its MIR set/clear
 * registers in the window at @base.  Returns 0 or a negative errno.
 */
static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
{
	int ret;
	int i;

	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
			IRQ_LEVEL, 0);
	if (ret) {
		pr_warn("Failed to allocate irq chips\n");
		return ret;
	}

	for (i = 0; i < omap_nr_pending; i++) {
		struct irq_chip_generic *gc;
		struct irq_chip_type *ct;

		gc = irq_get_domain_generic_chip(d, 32 * i);
		gc->reg_base = base;
		ct = gc->chip_types;

		ct->type = IRQ_TYPE_LEVEL_MASK;
		ct->handler = handle_level_irq;

		ct->chip.irq_ack = omap_mask_ack_irq;
		ct->chip.irq_mask = irq_gc_mask_disable_reg;
		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;

		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

		/* MIR_CLEAR unmasks, MIR_SET masks; banks 32 bytes apart. */
		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
	}

	return 0;
}
|
||||
|
||||
/*
 * Legacy (non-DT) path: allocate and set up a single 32-irq generic
 * chip for the bank at @base, starting at Linux irq @irq_start.
 */
static void __init omap_alloc_gc_legacy(void __iomem *base,
				 unsigned int irq_start, unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
			handle_level_irq);
	/* irq_alloc_generic_chip() returns NULL on allocation failure. */
	if (!gc) {
		pr_warn("Failed to allocate INTC generic irq chip\n");
		return;
	}
	ct = gc->chip_types;
	ct->chip.irq_ack = omap_mask_ack_irq;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;

	/* MIR_CLEAR unmasks, MIR_SET masks. */
	ct->regs.enable = INTC_MIR_CLEAR0;
	ct->regs.disable = INTC_MIR_SET0;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
|
||||
|
||||
/*
 * DT path: map the INTC from @node, create a linear domain, soft-reset
 * the controller and attach the generic chips.  Returns 0 or -errno.
 */
static int __init omap_init_irq_of(struct device_node *node)
{
	int ret;

	omap_irq_base = of_iomap(node, 0);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	domain = irq_domain_add_linear(node, omap_nr_irqs,
			&irq_generic_chip_ops, NULL);
	/* A NULL domain would be dereferenced by omap_alloc_gc_of(). */
	if (WARN_ON(!domain)) {
		iounmap(omap_irq_base);
		omap_irq_base = NULL;
		return -ENOMEM;
	}

	omap_irq_soft_reset();

	ret = omap_alloc_gc_of(domain, omap_irq_base);
	if (ret < 0)
		irq_domain_remove(domain);

	return ret;
}
|
||||
|
||||
/*
 * Legacy path: ioremap the INTC at physical @base, allocate a
 * contiguous range of irq descriptors, register a legacy domain and
 * set up one generic chip per 32-irq bank.
 */
static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
	int j, irq_base;

	omap_irq_base = ioremap(base, SZ_4K);
	if (WARN_ON(!omap_irq_base))
		return -ENOMEM;

	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
	if (irq_base < 0) {
		/* Fall back to irq 0: legacy boards expect fixed numbering. */
		pr_warn("Couldn't allocate IRQ numbers\n");
		irq_base = 0;
	}

	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
			&irq_domain_simple_ops, NULL);

	omap_irq_soft_reset();

	for (j = 0; j < omap_nr_irqs; j += 32)
		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);

	return 0;
}
|
||||
|
||||
/* Restrict INTC register access to privileged mode (PROTECTION bit). */
static void __init omap_irq_enable_protection(void)
{
	u32 reg;

	reg = intc_readl(INTC_PROTECTION);
	reg |= INTC_PROTECTION_ENABLE;
	intc_writel(INTC_PROTECTION, reg);
}
|
||||
|
||||
/*
 * Common init dispatcher: picks legacy vs DT setup depending on the
 * compatible string / presence of a node, then enables register
 * protection on success.
 */
static int __init omap_init_irq(u32 base, struct device_node *node)
{
	int ret;

	/*
	 * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
	 * depends is still not ready for linear IRQ domains; because of that
	 * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
	 * linear IRQ Domain until that driver is finally fixed.
	 */
	if (of_device_is_compatible(node, "ti,omap2-intc") ||
			of_device_is_compatible(node, "ti,omap3-intc")) {
		struct resource res;

		if (of_address_to_resource(node, 0, &res))
			return -ENOMEM;

		base = res.start;
		ret = omap_init_irq_legacy(base, node);
	} else if (node) {
		ret = omap_init_irq_of(node);
	} else {
		ret = omap_init_irq_legacy(base, NULL);
	}

	if (ret == 0)
		omap_irq_enable_protection();

	return ret;
}
|
||||
|
||||
/*
 * Top-level IRQ dispatcher.  Scans the PENDING banks; while anything is
 * pending, reads the active irq number from SIR and dispatches it.
 * The goto/out pair breaks out of the bank scan with irqnr still set.
 */
static asmlinkage void __exception_irq_entry
omap_intc_handle_irq(struct pt_regs *regs)
{
	u32 irqnr = 0;
	int handled_irq = 0;
	int i;

	do {
		for (i = 0; i < omap_nr_pending; i++) {
			irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i));
			if (irqnr)
				goto out;
		}

out:
		if (!irqnr)
			break;

		irqnr = intc_readl(INTC_SIR);
		irqnr &= ACTIVEIRQ_MASK;

		if (irqnr) {
			handle_domain_irq(domain, irqnr, regs);
			handled_irq = 1;
		}
	} while (irqnr);

	/*
	 * If an irq is masked or deasserted while active, we will
	 * keep ending up here with no irq handled. So remove it from
	 * the INTC with an ack.
	 */
	if (!handled_irq)
		omap_ack_irq(NULL);
}
|
||||
|
||||
/* Legacy board-file entry: OMAP2 (96 irqs, 3 pending banks). */
void __init omap2_init_irq(void)
{
	omap_nr_irqs = 96;
	omap_nr_pending = 3;
	omap_init_irq(OMAP24XX_IC_BASE, NULL);
	set_handle_irq(omap_intc_handle_irq);
}

/* Legacy board-file entry: OMAP3 (96 irqs, 3 pending banks). */
void __init omap3_init_irq(void)
{
	omap_nr_irqs = 96;
	omap_nr_pending = 3;
	omap_init_irq(OMAP34XX_IC_BASE, NULL);
	set_handle_irq(omap_intc_handle_irq);
}

/* Legacy board-file entry: TI81xx (96 irqs, 4 pending banks). */
void __init ti81xx_init_irq(void)
{
	omap_nr_irqs = 96;
	omap_nr_pending = 4;
	omap_init_irq(OMAP34XX_IC_BASE, NULL);
	set_handle_irq(omap_intc_handle_irq);
}
|
||||
|
||||
/*
 * DT probe entry shared by the omap2/omap3/am33xx compatibles; am33xx
 * gets the larger 128-irq / 4-bank configuration.
 */
static int __init intc_of_init(struct device_node *node,
			     struct device_node *parent)
{
	int ret;

	omap_nr_pending = 3;
	omap_nr_irqs = 96;

	if (WARN_ON(!node))
		return -ENODEV;

	if (of_device_is_compatible(node, "ti,am33xx-intc")) {
		omap_nr_irqs = 128;
		omap_nr_pending = 4;
	}

	ret = omap_init_irq(-1, of_node_get(node));
	if (ret < 0)
		return ret;

	set_handle_irq(omap_intc_handle_irq);

	return 0;
}

IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);
|
182
drivers/irqchip/irq-or1k-pic.c
Normal file
182
drivers/irqchip/irq-or1k-pic.c
Normal file
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
|
||||
* Copyright (C) 2014 Stefan Kristansson <stefan.kristiansson@saunalahti.fi>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/* OR1K PIC implementation */
|
||||
|
||||
/* One PIC "flavour": irq_chip callbacks plus the flow handler and
 * status flags applied to every irq mapped through it. */
struct or1k_pic_dev {
	struct irq_chip chip;		/* mask/unmask/ack callbacks */
	irq_flow_handler_t handle;	/* level or edge flow handler */
	unsigned long flags;		/* irq status flags set at map time */
};
|
||||
|
||||
/*
|
||||
* We're a couple of cycles faster than the generic implementations with
|
||||
* these 'fast' versions.
|
||||
*/
|
||||
|
||||
/* Mask: clear the irq's bit in the PIC mask register. */
static void or1k_pic_mask(struct irq_data *data)
{
	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
}

/* Unmask: set the irq's bit in the PIC mask register. */
static void or1k_pic_unmask(struct irq_data *data)
{
	mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->hwirq));
}

/* Ack (spec-compliant PIC): write 1 to clear the status latch. */
static void or1k_pic_ack(struct irq_data *data)
{
	mtspr(SPR_PICSR, (1UL << data->hwirq));
}

/* Combined mask + ack for the edge-triggered flow. */
static void or1k_pic_mask_ack(struct irq_data *data)
{
	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
	mtspr(SPR_PICSR, (1UL << data->hwirq));
}

/*
 * There are two oddities with the OR1200 PIC implementation:
 * i) LEVEL-triggered interrupts are latched and need to be cleared
 * ii) the interrupt latch is cleared by writing a 0 to the bit,
 *     as opposed to a 1 as mandated by the spec
 */
static void or1k_pic_or1200_ack(struct irq_data *data)
{
	/* OR1200 quirk: clear the latch by writing 0 to the bit. */
	mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
}

static void or1k_pic_or1200_mask_ack(struct irq_data *data)
{
	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->hwirq));
	mtspr(SPR_PICSR, mfspr(SPR_PICSR) & ~(1UL << data->hwirq));
}
|
||||
|
||||
/* Level-triggered PIC flavour: no ack needed, level flow handling. */
static struct or1k_pic_dev or1k_pic_level = {
	.chip = {
		.name = "or1k-PIC-level",
		.irq_unmask = or1k_pic_unmask,
		.irq_mask = or1k_pic_mask,
		.irq_mask_ack = or1k_pic_mask,
	},
	.handle = handle_level_irq,
	.flags = IRQ_LEVEL | IRQ_NOPROBE,
};

/* Edge-triggered PIC flavour: latch cleared via the spec'd write-1 ack. */
static struct or1k_pic_dev or1k_pic_edge = {
	.chip = {
		.name = "or1k-PIC-edge",
		.irq_unmask = or1k_pic_unmask,
		.irq_mask = or1k_pic_mask,
		.irq_ack = or1k_pic_ack,
		.irq_mask_ack = or1k_pic_mask_ack,
	},
	.handle = handle_edge_irq,
	.flags = IRQ_LEVEL | IRQ_NOPROBE,
};

/* OR1200 flavour: quirky write-0-to-clear latch (see ack helpers). */
static struct or1k_pic_dev or1k_pic_or1200 = {
	.chip = {
		.name = "or1200-PIC",
		.irq_unmask = or1k_pic_unmask,
		.irq_mask = or1k_pic_mask,
		.irq_ack = or1k_pic_or1200_ack,
		.irq_mask_ack = or1k_pic_or1200_mask_ack,
	},
	.handle = handle_level_irq,
	.flags = IRQ_LEVEL | IRQ_NOPROBE,
};

/* Root IRQ domain: PIC interrupts trap directly into the CPU. */
static struct irq_domain *root_domain;
|
||||
|
||||
/*
 * Return the lowest pending hwirq >= @first from the PIC status
 * register, or NO_IRQ when none is pending.
 */
static inline int pic_get_irq(int first)
{
	int hwirq;

	hwirq = ffs(mfspr(SPR_PICSR) >> first);
	if (!hwirq)
		return NO_IRQ;
	else
		hwirq = hwirq + first - 1;	/* undo the shift */

	return hwirq;
}

/* Dispatch every pending PIC interrupt, in ascending hwirq order. */
static void or1k_pic_handle_irq(struct pt_regs *regs)
{
	int irq = -1;

	while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
		handle_domain_irq(root_domain, irq, regs);
}
|
||||
|
||||
/*
 * Domain map callback: wire a virq to whichever PIC flavour was passed
 * as host_data, applying that flavour's flow handler and flags.
 */
static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
	struct or1k_pic_dev *pic = d->host_data;

	irq_set_chip_and_handler(irq, &pic->chip, pic->handle);
	irq_set_status_flags(irq, pic->flags);

	return 0;
}

static const struct irq_domain_ops or1k_irq_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = or1k_map,
};
|
||||
|
||||
/*
|
||||
* This sets up the IRQ domain for the PIC built in to the OpenRISC
|
||||
* 1000 CPU. This is the "root" domain as these are the interrupts
|
||||
* that directly trigger an exception in the CPU.
|
||||
*/
|
||||
/*
 * This sets up the IRQ domain for the PIC built in to the OpenRISC
 * 1000 CPU.  This is the "root" domain as these are the interrupts
 * that directly trigger an exception in the CPU.
 */
static int __init or1k_pic_init(struct device_node *node,
				struct or1k_pic_dev *pic)
{
	/* Disable all interrupts until explicitly requested */
	mtspr(SPR_PICMR, (0UL));

	root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
					    pic);

	set_handle_irq(or1k_pic_handle_irq);

	return 0;
}
|
||||
|
||||
/* Probe wrappers: pick the PIC flavour matching the compatible string. */
static int __init or1k_pic_or1200_init(struct device_node *node,
				       struct device_node *parent)
{
	return or1k_pic_init(node, &or1k_pic_or1200);
}
IRQCHIP_DECLARE(or1k_pic_or1200, "opencores,or1200-pic", or1k_pic_or1200_init);
/* Generic "or1k-pic" defaults to the OR1200 behaviour. */
IRQCHIP_DECLARE(or1k_pic, "opencores,or1k-pic", or1k_pic_or1200_init);

static int __init or1k_pic_level_init(struct device_node *node,
				      struct device_node *parent)
{
	return or1k_pic_init(node, &or1k_pic_level);
}
IRQCHIP_DECLARE(or1k_pic_level, "opencores,or1k-pic-level",
		or1k_pic_level_init);

static int __init or1k_pic_edge_init(struct device_node *node,
				     struct device_node *parent)
{
	return or1k_pic_init(node, &or1k_pic_edge);
}
IRQCHIP_DECLARE(or1k_pic_edge, "opencores,or1k-pic-edge", or1k_pic_edge_init);
|
207
drivers/irqchip/irq-orion.c
Normal file
207
drivers/irqchip/irq-orion.c
Normal file
|
@ -0,0 +1,207 @@
|
|||
/*
|
||||
* Marvell Orion SoCs IRQ chip driver.
|
||||
*
|
||||
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/*
 * Orion SoC main interrupt controller
 */

/* Each register bank of the main controller handles 32 sources */
#define ORION_IRQS_PER_CHIP		32

/* Per-bank register offsets */
#define ORION_IRQ_CAUSE			0x00
#define ORION_IRQ_MASK			0x04
#define ORION_IRQ_FIQ_MASK		0x08
#define ORION_IRQ_ENDP_MASK		0x0c

/* Root domain for the main controller; set up once in orion_irq_init() */
static struct irq_domain *orion_irq_domain;
|
||||
|
||||
/*
 * Top-level IRQ entry point: walk every generic chip registered in the
 * domain, read its pending (cause) bits filtered by the cached mask,
 * and dispatch each pending source, highest bit first.
 */
static void
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
{
	struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
	int n, base = 0;

	for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);
		/* only consider sources that are currently unmasked */
		u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
			gc->mask_cache;
		while (stat) {
			u32 hwirq = __fls(stat);
			handle_domain_irq(orion_irq_domain,
					  gc->irq_base + hwirq, regs);
			stat &= ~(1 << hwirq);
		}
	}
}
|
||||
|
||||
/*
 * Probe the main interrupt controller from the device tree: one generic
 * chip per "reg" entry, 32 interrupts each. Failures panic, as the
 * system cannot run without its root interrupt controller.
 */
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%s: unable to add irq domain\n", np->name);

	/* one chip type per chip; mask cache initialized from hardware */
	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%s: unable to alloc irq domain gc\n", np->name);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%s: unable to request mem region %d",
			      np->name, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%s: unable to map resource %d", np->name, n);

		/* mask/unmask by setting/clearing bits in the MASK register */
		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	set_handle_irq(orion_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
|
||||
|
||||
/*
 * Orion SoC bridge interrupt controller
 */
#define ORION_BRIDGE_IRQ_CAUSE	0x00
#define ORION_BRIDGE_IRQ_MASK	0x04
|
||||
|
||||
/*
 * Chained handler for the bridge controller: the bridge hangs off one
 * parent interrupt (its domain is stashed as handler data). Read the
 * unmasked cause bits and demux them, highest bit first.
 */
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *d = irq_get_handler_data(irq);

	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
		   gc->mask_cache;

	while (stat) {
		u32 hwirq = __fls(stat);

		generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
		stat &= ~(1 << hwirq);
	}
}
|
||||
|
||||
/*
 * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
 * To avoid interrupt events on stale irqs, we clear them before unmask.
 */
static unsigned int orion_bridge_irq_startup(struct irq_data *d)
{
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	/* ack (clear pending) first, then unmask */
	ct->chip.irq_ack(d);
	ct->chip.irq_unmask(d);
	return 0;
}
|
||||
|
||||
/*
 * Probe the bridge interrupt controller: a single generic chip of up to
 * 32 edge-triggered interrupts, chained behind one parent interrupt.
 * Unlike the main controller, failures here return an error instead of
 * panicking.
 */
static int __init orion_bridge_irq_init(struct device_node *np,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret, irq, nrirqs = 32;

	/* get optional number of interrupts provided */
	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->name);
		return -ENOMEM;
	}

	/* bridge interrupts are edge triggered (cause bits latch) */
	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->name);
		return ret;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
		pr_err("%s: unable to request mem region\n", np->name);
		return -ENOMEM;
	}

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->name);
		return -EINVAL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = ioremap(r.start, resource_size(&r));
	if (!gc->reg_base) {
		pr_err("%s: unable to map resource\n", np->name);
		return -ENOMEM;
	}

	/* ack clears CAUSE bits; mask/unmask toggle MASK bits */
	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask and clear all interrupts */
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

	irq_set_handler_data(irq, domain);
	irq_set_chained_handler(irq, orion_bridge_irq_handler);

	return 0;
}
IRQCHIP_DECLARE(orion_bridge_intc,
		"marvell,orion-bridge-intc", orion_bridge_irq_init);
|
583
drivers/irqchip/irq-renesas-intc-irqpin.c
Normal file
583
drivers/irqchip/irq-renesas-intc-irqpin.c
Normal file
|
@ -0,0 +1,583 @@
|
|||
/*
|
||||
* Renesas INTC External IRQ Pin Driver
|
||||
*
|
||||
* Copyright (C) 2013 Magnus Damm
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_data/irq-renesas-intc-irqpin.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#define INTC_IRQPIN_MAX 8 /* maximum 8 interrupts per driver instance */

/* Indices into the per-instance iomem[] array, one per register */
#define INTC_IRQPIN_REG_SENSE 0 /* ICRn */
#define INTC_IRQPIN_REG_PRIO 1 /* INTPRInn */
#define INTC_IRQPIN_REG_SOURCE 2 /* INTREQnn */
#define INTC_IRQPIN_REG_MASK 3 /* INTMSKnn */
#define INTC_IRQPIN_REG_CLEAR 4 /* INTMSKCLRnn */
#define INTC_IRQPIN_REG_NR 5

/* INTC external IRQ PIN hardware register access:
 *
 * SENSE is read-write 32-bit with 2-bits or 4-bits per IRQ (*)
 * PRIO is read-write 32-bit with 4-bits per IRQ (**)
 * SOURCE is read-only 32-bit or 8-bit with 1-bit per IRQ (***)
 * MASK is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
 * CLEAR is write-only 32-bit or 8-bit with 1-bit per IRQ (***)
 *
 * (*) May be accessed by more than one driver instance - lock needed
 * (**) Read-modify-write access by one driver instance - lock needed
 * (***) Accessed by one driver instance only - no locking needed
 */
|
||||
|
||||
/* One mapped register window plus width-specific accessors; the
 * callbacks and width are chosen at probe time from the resource size. */
struct intc_irqpin_iomem {
	void __iomem *iomem;
	unsigned long (*read)(void __iomem *iomem);
	void (*write)(void __iomem *iomem, unsigned long data);
	int width;	/* register width in bits (8 or 32) */
};

/* Bookkeeping for one external IRQ pin */
struct intc_irqpin_irq {
	int hw_irq;		/* hardware irq number within this instance */
	int requested_irq;	/* parent (Linux) irq this pin arrives on */
	int domain_irq;		/* virq handed out by our irq domain */
	struct intc_irqpin_priv *p;	/* back-pointer for handlers */
};

/* Per-instance driver state */
struct intc_irqpin_priv {
	struct intc_irqpin_iomem iomem[INTC_IRQPIN_REG_NR];
	struct intc_irqpin_irq irq[INTC_IRQPIN_MAX];
	struct renesas_intc_irqpin_config config;
	unsigned int number_of_irqs;
	struct platform_device *pdev;
	struct irq_chip irq_chip;
	struct irq_domain *irq_domain;
	struct clk *clk;	/* may be NULL when no clock is available */
	bool shared_irqs;	/* all pins share one parent interrupt */
	u8 shared_irq_mask;	/* pins currently disabled in shared mode */
};
|
||||
|
||||
/* Width-specific MMIO accessors, installed into intc_irqpin_iomem at
 * probe time depending on whether a register window is 1 or 4 bytes. */
static unsigned long intc_irqpin_read32(void __iomem *iomem)
{
	return ioread32(iomem);
}

static unsigned long intc_irqpin_read8(void __iomem *iomem)
{
	return ioread8(iomem);
}

static void intc_irqpin_write32(void __iomem *iomem, unsigned long data)
{
	iowrite32(data, iomem);
}

static void intc_irqpin_write8(void __iomem *iomem, unsigned long data)
{
	iowrite8(data, iomem);
}
|
||||
|
||||
/* Read register 'reg' through its width-specific accessor. */
static inline unsigned long intc_irqpin_read(struct intc_irqpin_priv *p,
					     int reg)
{
	struct intc_irqpin_iomem *i = &p->iomem[reg];

	return i->read(i->iomem);
}

/* Write register 'reg' through its width-specific accessor. */
static inline void intc_irqpin_write(struct intc_irqpin_priv *p,
				     int reg, unsigned long data)
{
	struct intc_irqpin_iomem *i = &p->iomem[reg];

	i->write(i->iomem, data);
}

/* Bit mask for hw_irq in register 'reg': bits are numbered from the
 * register's MSB down, so hw_irq 0 is the top bit. */
static inline unsigned long intc_irqpin_hwirq_mask(struct intc_irqpin_priv *p,
						   int reg, int hw_irq)
{
	return BIT((p->iomem[reg].width - 1) - hw_irq);
}

/* Write only hw_irq's bit to register 'reg' (used on write-only
 * MASK/CLEAR registers). */
static inline void intc_irqpin_irq_write_hwirq(struct intc_irqpin_priv *p,
					       int reg, int hw_irq)
{
	intc_irqpin_write(p, reg, intc_irqpin_hwirq_mask(p, reg, hw_irq));
}
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(intc_irqpin_lock); /* only used by slow path */

/*
 * Atomically replace a 'width'-bit field at 'shift' in register 'reg'
 * with 'value'. The global lock serializes SENSE/PRIO updates, which
 * may be shared between driver instances (see register notes above).
 */
static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
					  int reg, int shift,
					  int width, int value)
{
	unsigned long flags;
	unsigned long tmp;

	raw_spin_lock_irqsave(&intc_irqpin_lock, flags);

	tmp = intc_irqpin_read(p, reg);
	tmp &= ~(((1 << width) - 1) << shift);
	tmp |= value << shift;
	intc_irqpin_write(p, reg, tmp);

	raw_spin_unlock_irqrestore(&intc_irqpin_lock, flags);
}
|
||||
|
||||
static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
|
||||
int irq, int do_mask)
|
||||
{
|
||||
/* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
|
||||
int bitfield_width = 4;
|
||||
int shift = 32 - (irq + 1) * bitfield_width;
|
||||
|
||||
intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
|
||||
shift, bitfield_width,
|
||||
do_mask ? 0 : (1 << bitfield_width) - 1);
|
||||
}
|
||||
|
||||
/*
 * Program the trigger mode field for 'irq' in the SENSE register.
 * Field width comes from the instance configuration; returns -EINVAL
 * when 'value' does not fit in the field.
 */
static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
{
	/* The SENSE register is assumed to be 32-bit. */
	int bitfield_width = p->config.sense_bitfield_width;
	int shift = 32 - (irq + 1) * bitfield_width;

	dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);

	if (value >= (1 << bitfield_width))
		return -EINVAL;

	intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_SENSE, shift,
				      bitfield_width, value);
	return 0;
}
|
||||
|
||||
/* Debug trace helper: logs a tag plus the pin's irq number triple. */
static void intc_irqpin_dbg(struct intc_irqpin_irq *i, char *str)
{
	dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
		str, i->requested_irq, i->hw_irq, i->domain_irq);
}
|
||||
|
||||
/* Enable a pin by writing its bit to the CLEAR (unmask) register. */
static void intc_irqpin_irq_enable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "enable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);
}

/* Disable a pin by writing its bit to the MASK register. */
static void intc_irqpin_irq_disable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "disable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);
}

/* Shared-parent variant of enable: also clears the pin's bit in the
 * software mask consulted by the shared interrupt handler. */
static void intc_irqpin_shared_irq_enable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "shared enable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_CLEAR, hw_irq);

	p->shared_irq_mask &= ~BIT(hw_irq);
}

/* Shared-parent variant of disable: also records the pin in the
 * software mask so the shared handler skips it. */
static void intc_irqpin_shared_irq_disable(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	intc_irqpin_dbg(&p->irq[hw_irq], "shared disable");
	intc_irqpin_irq_write_hwirq(p, INTC_IRQPIN_REG_MASK, hw_irq);

	p->shared_irq_mask |= BIT(hw_irq);
}
|
||||
|
||||
/* Enable a pin and additionally unmask it at the parent controller. */
static void intc_irqpin_irq_enable_force(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;

	intc_irqpin_irq_enable(d);

	/* enable interrupt through parent interrupt controller,
	 * assumes non-shared interrupt with 1:1 mapping
	 * needed for busted IRQs on some SoCs like sh73a0
	 */
	irq_get_chip(irq)->irq_unmask(irq_get_irq_data(irq));
}

/* Mask a pin at the parent controller first, then disable it here. */
static void intc_irqpin_irq_disable_force(struct irq_data *d)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
	int irq = p->irq[irqd_to_hwirq(d)].requested_irq;

	/* disable interrupt through parent interrupt controller,
	 * assumes non-shared interrupt with 1:1 mapping
	 * needed for busted IRQs on some SoCs like sh73a0
	 */
	irq_get_chip(irq)->irq_mask(irq_get_irq_data(irq));
	intc_irqpin_irq_disable(d);
}
|
||||
|
||||
/* VALID flag marks table entries that correspond to a supported trigger
 * type; the low bits are the hardware sense value to program. */
#define INTC_IRQ_SENSE_VALID 0x10
#define INTC_IRQ_SENSE(x) (x + INTC_IRQ_SENSE_VALID)

/* Linux trigger type -> hardware SENSE field value */
static unsigned char intc_irqpin_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = INTC_IRQ_SENSE(0x00),
	[IRQ_TYPE_EDGE_RISING] = INTC_IRQ_SENSE(0x01),
	[IRQ_TYPE_LEVEL_LOW] = INTC_IRQ_SENSE(0x02),
	[IRQ_TYPE_LEVEL_HIGH] = INTC_IRQ_SENSE(0x03),
	[IRQ_TYPE_EDGE_BOTH] = INTC_IRQ_SENSE(0x04),
};
|
||||
|
||||
/*
 * irq_chip .irq_set_type: translate the requested trigger type through
 * the sense table and program it; unsupported types return -EINVAL
 * (their table entries lack the VALID flag).
 */
static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
{
	unsigned char value = intc_irqpin_sense[type & IRQ_TYPE_SENSE_MASK];
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);

	if (!(value & INTC_IRQ_SENSE_VALID))
		return -EINVAL;

	/* XOR strips the VALID flag, leaving the raw hardware value */
	return intc_irqpin_set_sense(p, irqd_to_hwirq(d),
				     value ^ INTC_IRQ_SENSE_VALID);
}
|
||||
|
||||
/*
 * irq_chip .irq_set_wake: keep the module clock running while any pin
 * is a wakeup source. A missing clock (p->clk == NULL) is tolerated.
 */
static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);

	if (!p->clk)
		return 0;

	if (on)
		clk_enable(p->clk);
	else
		clk_disable(p->clk);

	return 0;
}
|
||||
|
||||
/*
 * Parent-interrupt handler for one pin: if the pin's SOURCE bit is set,
 * clear it (write ~bit) and forward to the mapped domain interrupt.
 */
static irqreturn_t intc_irqpin_irq_handler(int irq, void *dev_id)
{
	struct intc_irqpin_irq *i = dev_id;
	struct intc_irqpin_priv *p = i->p;
	unsigned long bit;

	intc_irqpin_dbg(i, "demux1");
	bit = intc_irqpin_hwirq_mask(p, INTC_IRQPIN_REG_SOURCE, i->hw_irq);

	if (intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE) & bit) {
		/* write 0 to our bit only; other bits stay 1 (untouched) */
		intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, ~bit);
		intc_irqpin_dbg(i, "demux2");
		generic_handle_irq(i->domain_irq);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
|
||||
|
||||
static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct intc_irqpin_priv *p = dev_id;
|
||||
unsigned int reg_source = intc_irqpin_read(p, INTC_IRQPIN_REG_SOURCE);
|
||||
irqreturn_t status = IRQ_NONE;
|
||||
int k;
|
||||
|
||||
for (k = 0; k < 8; k++) {
|
||||
if (reg_source & BIT(7 - k)) {
|
||||
if (BIT(k) & p->shared_irq_mask)
|
||||
continue;
|
||||
|
||||
status |= intc_irqpin_irq_handler(irq, &p->irq[k]);
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/*
 * irq_domain .map: record the virq<->hwirq pairing in our bookkeeping
 * and wire up the per-instance irq_chip with level-type flow handling.
 */
static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
				      irq_hw_number_t hw)
{
	struct intc_irqpin_priv *p = h->host_data;

	p->irq[hw].domain_irq = virq;
	p->irq[hw].hw_irq = hw;

	intc_irqpin_dbg(&p->irq[hw], "map");
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
	set_irq_flags(virq, IRQF_VALID); /* kill me now */
	return 0;
}

static struct irq_domain_ops intc_irqpin_irq_domain_ops = {
	.map	= intc_irqpin_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};
|
||||
|
||||
/*
 * Platform probe: gather configuration (platform data or DT), map the
 * five register windows, detect whether all pins share one parent
 * interrupt, pick matching enable/disable callbacks, register the irq
 * domain and request the parent interrupt(s).
 */
static int intc_irqpin_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct renesas_intc_irqpin_config *pdata = dev->platform_data;
	struct intc_irqpin_priv *p;
	struct intc_irqpin_iomem *i;
	struct resource *io[INTC_IRQPIN_REG_NR];
	struct resource *irq;
	struct irq_chip *irq_chip;
	void (*enable_fn)(struct irq_data *d);
	void (*disable_fn)(struct irq_data *d);
	const char *name = dev_name(dev);
	int ref_irq;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	/* deal with driver instance configuration */
	if (pdata) {
		memcpy(&p->config, pdata, sizeof(*pdata));
	} else {
		of_property_read_u32(dev->of_node, "sense-bitfield-width",
				     &p->config.sense_bitfield_width);
		p->config.control_parent = of_property_read_bool(dev->of_node,
								 "control-parent");
	}
	if (!p->config.sense_bitfield_width)
		p->config.sense_bitfield_width = 4; /* default to 4 bits */

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	/* clock is optional; set_wake copes with p->clk == NULL */
	p->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(p->clk)) {
		dev_warn(dev, "unable to get clock\n");
		p->clk = NULL;
	}

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* get hold of mandatory IOMEM */
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		io[k] = platform_get_resource(pdev, IORESOURCE_MEM, k);
		if (!io[k]) {
			dev_err(dev, "not enough IOMEM resources\n");
			ret = -EINVAL;
			goto err0;
		}
	}

	/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
	for (k = 0; k < INTC_IRQPIN_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err0;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	for (k = 0; k < INTC_IRQPIN_REG_NR; k++) {
		i = &p->iomem[k];

		/* resource size determines register width (8 or 32 bit) */
		switch (resource_size(io[k])) {
		case 1:
			i->width = 8;
			i->read = intc_irqpin_read8;
			i->write = intc_irqpin_write8;
			break;
		case 4:
			i->width = 32;
			i->read = intc_irqpin_read32;
			i->write = intc_irqpin_write32;
			break;
		default:
			dev_err(dev, "IOMEM size mismatch\n");
			ret = -EINVAL;
			goto err0;
		}

		i->iomem = devm_ioremap_nocache(dev, io[k]->start,
						resource_size(io[k]));
		if (!i->iomem) {
			dev_err(dev, "failed to remap IOMEM\n");
			ret = -ENXIO;
			goto err0;
		}
	}

	/* mask all interrupts using priority */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 1);

	/* clear all pending interrupts */
	intc_irqpin_write(p, INTC_IRQPIN_REG_SOURCE, 0x0);

	/* scan for shared interrupt lines */
	ref_irq = p->irq[0].requested_irq;
	p->shared_irqs = true;
	for (k = 1; k < p->number_of_irqs; k++) {
		if (ref_irq != p->irq[k].requested_irq) {
			p->shared_irqs = false;
			break;
		}
	}

	/* use more severe masking method if requested */
	if (p->config.control_parent) {
		enable_fn = intc_irqpin_irq_enable_force;
		disable_fn = intc_irqpin_irq_disable_force;
	} else if (!p->shared_irqs) {
		enable_fn = intc_irqpin_irq_enable;
		disable_fn = intc_irqpin_irq_disable;
	} else {
		enable_fn = intc_irqpin_shared_irq_enable;
		disable_fn = intc_irqpin_shared_irq_disable;
	}

	irq_chip = &p->irq_chip;
	irq_chip->name = name;
	irq_chip->irq_mask = disable_fn;
	irq_chip->irq_unmask = enable_fn;
	irq_chip->irq_set_type = intc_irqpin_irq_set_type;
	irq_chip->irq_set_wake = intc_irqpin_irq_set_wake;
	irq_chip->flags	= IRQCHIP_MASK_ON_SUSPEND;

	p->irq_domain = irq_domain_add_simple(dev->of_node,
					      p->number_of_irqs,
					      p->config.irq_base,
					      &intc_irqpin_irq_domain_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err0;
	}

	if (p->shared_irqs) {
		/* request one shared interrupt */
		if (devm_request_irq(dev, p->irq[0].requested_irq,
				intc_irqpin_shared_irq_handler,
				IRQF_SHARED, name, p)) {
			dev_err(dev, "failed to request low IRQ\n");
			ret = -ENOENT;
			goto err1;
		}
	} else {
		/* request interrupts one by one */
		for (k = 0; k < p->number_of_irqs; k++) {
			if (devm_request_irq(dev, p->irq[k].requested_irq,
					     intc_irqpin_irq_handler, 0, name,
					     &p->irq[k])) {
				dev_err(dev, "failed to request low IRQ\n");
				ret = -ENOENT;
				goto err1;
			}
		}
	}

	/* unmask all interrupts on prio level */
	for (k = 0; k < p->number_of_irqs; k++)
		intc_irqpin_mask_unmask_prio(p, k, 0);

	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);

	/* warn in case of mismatch if irq base is specified */
	if (p->config.irq_base) {
		if (p->config.irq_base != p->irq[0].domain_irq)
			dev_warn(dev, "irq base mismatch (%d/%d)\n",
				 p->config.irq_base, p->irq[0].domain_irq);
	}

	return 0;

err1:
	irq_domain_remove(p->irq_domain);
err0:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}
|
||||
|
||||
/* Platform remove: tear down the irq domain and drop the runtime PM
 * reference taken in probe (devm handles irqs and iomem). */
static int intc_irqpin_remove(struct platform_device *pdev)
{
	struct intc_irqpin_priv *p = platform_get_drvdata(pdev);

	irq_domain_remove(p->irq_domain);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
|
||||
|
||||
/* Device-tree match table */
static const struct of_device_id intc_irqpin_dt_ids[] = {
	{ .compatible = "renesas,intc-irqpin", },
	{},
};
MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
|
||||
|
||||
/* Platform driver glue */
static struct platform_driver intc_irqpin_device_driver = {
	.probe		= intc_irqpin_probe,
	.remove		= intc_irqpin_remove,
	.driver		= {
		.name	= "renesas_intc_irqpin",
		.of_match_table = intc_irqpin_dt_ids,
		.owner	= THIS_MODULE,
	}
};
|
||||
|
||||
/* Register early (postcore) so dependent drivers find their irqs. */
static int __init intc_irqpin_init(void)
{
	return platform_driver_register(&intc_irqpin_device_driver);
}
postcore_initcall(intc_irqpin_init);

static void __exit intc_irqpin_exit(void)
{
	platform_driver_unregister(&intc_irqpin_device_driver);
}
module_exit(intc_irqpin_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas INTC External IRQ Pin Driver");
MODULE_LICENSE("GPL v2");
|
302
drivers/irqchip/irq-renesas-irqc.c
Normal file
302
drivers/irqchip/irq-renesas-irqc.c
Normal file
|
@ -0,0 +1,302 @@
|
|||
/*
|
||||
* Renesas IRQC Driver
|
||||
*
|
||||
* Copyright (C) 2013 Magnus Damm
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_data/irq-renesas-irqc.h>
|
||||
|
||||
#define IRQC_IRQ_MAX 32 /* maximum 32 interrupts per driver instance */

/* Register offsets relative to the per-CPU interrupt block */
#define IRQC_REQ_STS 0x00
#define IRQC_EN_STS 0x04	/* write a bit here to disable that irq */
#define IRQC_EN_SET 0x08	/* write a bit here to enable that irq */
#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10))
/* Offsets relative to the instance base */
#define DETECT_STATUS 0x100
#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04))

/* Bookkeeping for one IRQC interrupt line */
struct irqc_irq {
	int hw_irq;		/* hardware irq number within this instance */
	int requested_irq;	/* parent (Linux) irq this line arrives on */
	int domain_irq;		/* virq handed out by our irq domain */
	struct irqc_priv *p;	/* back-pointer for handlers */
};

/* Per-instance driver state */
struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;	/* iomem + IRQC_INT_CPU_BASE(0) */
	struct irqc_irq irq[IRQC_IRQ_MAX];
	struct renesas_irqc_config config;
	unsigned int number_of_irqs;
	struct platform_device *pdev;
	struct irq_chip irq_chip;
	struct irq_domain *irq_domain;
};
|
||||
|
||||
/* Debug trace helper: logs a tag plus the line's irq number triple. */
static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
		str, i->requested_irq, i->hw_irq, i->domain_irq);
}
|
||||
|
||||
/* Enable a line by writing its bit to the EN_SET register. */
static void irqc_irq_enable(struct irq_data *d)
{
	struct irqc_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	irqc_dbg(&p->irq[hw_irq], "enable");
	iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_SET);
}

/* Disable a line by writing its bit to the EN_STS register. */
static void irqc_irq_disable(struct irq_data *d)
{
	struct irqc_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);

	irqc_dbg(&p->irq[hw_irq], "disable");
	iowrite32(BIT(hw_irq), p->cpu_int_base + IRQC_EN_STS);
}
|
||||
|
||||
/* Linux trigger type -> IRQC_CONFIG sense value; zero entries are
 * unsupported types (rejected by irqc_irq_set_type). */
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW] = 0x01,
	[IRQ_TYPE_LEVEL_HIGH] = 0x02,
	[IRQ_TYPE_EDGE_FALLING] = 0x04, /* Synchronous */
	[IRQ_TYPE_EDGE_RISING] = 0x08, /* Synchronous */
	[IRQ_TYPE_EDGE_BOTH] = 0x0c,  /* Synchronous */
};
|
||||
|
||||
/*
 * irq_chip .irq_set_type: replace the low 6 bits of the line's CONFIG
 * register with the sense value for the requested trigger type;
 * unsupported types (table value 0) return -EINVAL.
 */
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_get_irq_chip_data(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	unsigned long tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}
|
||||
|
||||
/*
 * Parent-interrupt handler for one line: if its DETECT_STATUS bit is
 * set, acknowledge it (write the bit back) and forward to the mapped
 * domain interrupt.
 */
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	unsigned long bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_irq(i->domain_irq);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
|
||||
|
||||
/*
 * irq_domain .map: record the virq<->hwirq pairing in our bookkeeping
 * and wire up the per-instance irq_chip with level-type flow handling.
 */
static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	struct irqc_priv *p = h->host_data;

	p->irq[hw].domain_irq = virq;
	p->irq[hw].hw_irq = hw;

	irqc_dbg(&p->irq[hw], "map");
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
	set_irq_flags(virq, IRQF_VALID); /* kill me now */
	return 0;
}

static struct irq_domain_ops irqc_irq_domain_ops = {
	.map	= irqc_irq_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};
|
||||
|
||||
static int irqc_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct renesas_irqc_config *pdata = pdev->dev.platform_data;
|
||||
struct irqc_priv *p;
|
||||
struct resource *io;
|
||||
struct resource *irq;
|
||||
struct irq_chip *irq_chip;
|
||||
const char *name = dev_name(&pdev->dev);
|
||||
int ret;
|
||||
int k;
|
||||
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
if (!p) {
|
||||
dev_err(&pdev->dev, "failed to allocate driver data\n");
|
||||
ret = -ENOMEM;
|
||||
goto err0;
|
||||
}
|
||||
|
||||
/* deal with driver instance configuration */
|
||||
if (pdata)
|
||||
memcpy(&p->config, pdata, sizeof(*pdata));
|
||||
|
||||
p->pdev = pdev;
|
||||
platform_set_drvdata(pdev, p);
|
||||
|
||||
/* get hold of manadatory IOMEM */
|
||||
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
if (!io) {
|
||||
dev_err(&pdev->dev, "not enough IOMEM resources\n");
|
||||
ret = -EINVAL;
|
||||
goto err1;
|
||||
}
|
||||
|
||||
/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
|
||||
for (k = 0; k < IRQC_IRQ_MAX; k++) {
|
||||
irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
|
||||
if (!irq)
|
||||
break;
|
||||
|
||||
p->irq[k].p = p;
|
||||
p->irq[k].requested_irq = irq->start;
|
||||
}
|
||||
|
||||
p->number_of_irqs = k;
|
||||
if (p->number_of_irqs < 1) {
|
||||
dev_err(&pdev->dev, "not enough IRQ resources\n");
|
||||
ret = -EINVAL;
|
||||
goto err1;
|
||||
}
|
||||
|
||||
/* ioremap IOMEM and setup read/write callbacks */
|
||||
p->iomem = ioremap_nocache(io->start, resource_size(io));
|
||||
if (!p->iomem) {
|
||||
dev_err(&pdev->dev, "failed to remap IOMEM\n");
|
||||
ret = -ENXIO;
|
||||
goto err2;
|
||||
}
|
||||
|
||||
p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */
|
||||
|
||||
irq_chip = &p->irq_chip;
|
||||
irq_chip->name = name;
|
||||
irq_chip->irq_mask = irqc_irq_disable;
|
||||
irq_chip->irq_unmask = irqc_irq_enable;
|
||||
irq_chip->irq_set_type = irqc_irq_set_type;
|
||||
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
|
||||
|
||||
p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
|
||||
p->number_of_irqs,
|
||||
p->config.irq_base,
|
||||
&irqc_irq_domain_ops, p);
|
||||
if (!p->irq_domain) {
|
||||
ret = -ENXIO;
|
||||
dev_err(&pdev->dev, "cannot initialize irq domain\n");
|
||||
goto err2;
|
||||
}
|
||||
|
||||
/* request interrupts one by one */
|
||||
for (k = 0; k < p->number_of_irqs; k++) {
|
||||
if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
|
||||
0, name, &p->irq[k])) {
|
||||
dev_err(&pdev->dev, "failed to request IRQ\n");
|
||||
ret = -ENOENT;
|
||||
goto err3;
|
||||
}
|
||||
}
|
||||
|
||||
dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);
|
||||
|
||||
/* warn in case of mismatch if irq base is specified */
|
||||
if (p->config.irq_base) {
|
||||
if (p->config.irq_base != p->irq[0].domain_irq)
|
||||
dev_warn(&pdev->dev, "irq base mismatch (%d/%d)\n",
|
||||
p->config.irq_base, p->irq[0].domain_irq);
|
||||
}
|
||||
|
||||
return 0;
|
||||
err3:
|
||||
while (--k >= 0)
|
||||
free_irq(p->irq[k].requested_irq, &p->irq[k]);
|
||||
|
||||
irq_domain_remove(p->irq_domain);
|
||||
err2:
|
||||
iounmap(p->iomem);
|
||||
err1:
|
||||
kfree(p);
|
||||
err0:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int irqc_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct irqc_priv *p = platform_get_drvdata(pdev);
|
||||
int k;
|
||||
|
||||
for (k = 0; k < p->number_of_irqs; k++)
|
||||
free_irq(p->irq[k].requested_irq, &p->irq[k]);
|
||||
|
||||
irq_domain_remove(p->irq_domain);
|
||||
iounmap(p->iomem);
|
||||
kfree(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id irqc_dt_ids[] = {
|
||||
{ .compatible = "renesas,irqc", },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, irqc_dt_ids);
|
||||
|
||||
static struct platform_driver irqc_device_driver = {
|
||||
.probe = irqc_probe,
|
||||
.remove = irqc_remove,
|
||||
.driver = {
|
||||
.name = "renesas_irqc",
|
||||
.of_match_table = irqc_dt_ids,
|
||||
.owner = THIS_MODULE,
|
||||
}
|
||||
};
|
||||
|
||||
static int __init irqc_init(void)
|
||||
{
|
||||
return platform_driver_register(&irqc_device_driver);
|
||||
}
|
||||
postcore_initcall(irqc_init);
|
||||
|
||||
static void __exit irqc_exit(void)
|
||||
{
|
||||
platform_driver_unregister(&irqc_device_driver);
|
||||
}
|
||||
module_exit(irqc_exit);
|
||||
|
||||
MODULE_AUTHOR("Magnus Damm");
|
||||
MODULE_DESCRIPTION("Renesas IRQC Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
1352
drivers/irqchip/irq-s3c24xx.c
Normal file
1352
drivers/irqchip/irq-s3c24xx.c
Normal file
File diff suppressed because it is too large
Load diff
128
drivers/irqchip/irq-sirfsoc.c
Normal file
128
drivers/irqchip/irq-sirfsoc.c
Normal file
|
@ -0,0 +1,128 @@
|
|||
/*
|
||||
* interrupt controller support for CSR SiRFprimaII
|
||||
*
|
||||
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
|
||||
*
|
||||
* Licensed under GPLv2 or later.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
#include <asm/mach/irq.h>
|
||||
#include <asm/exception.h>
|
||||
#include "irqchip.h"
|
||||
|
||||
#define SIRFSOC_INT_RISC_MASK0 0x0018
|
||||
#define SIRFSOC_INT_RISC_MASK1 0x001C
|
||||
#define SIRFSOC_INT_RISC_LEVEL0 0x0020
|
||||
#define SIRFSOC_INT_RISC_LEVEL1 0x0024
|
||||
#define SIRFSOC_INIT_IRQ_ID 0x0038
|
||||
|
||||
#define SIRFSOC_NUM_IRQS 64
|
||||
|
||||
static struct irq_domain *sirfsoc_irqdomain;
|
||||
|
||||
static __init void
|
||||
sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
|
||||
{
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_chip_type *ct;
|
||||
int ret;
|
||||
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
|
||||
unsigned int set = IRQ_LEVEL;
|
||||
|
||||
ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
|
||||
handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE);
|
||||
|
||||
gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
|
||||
gc->reg_base = base;
|
||||
ct = gc->chip_types;
|
||||
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
||||
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
||||
ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
|
||||
}
|
||||
|
||||
/*
 * sirfsoc_handle_irq() - top-level IRQ entry point
 *
 * The hardware latches the winning interrupt number in INIT_IRQ_ID;
 * its low byte is the hwirq to dispatch through the domain.
 */
static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
{
	void __iomem *base = sirfsoc_irqdomain->host_data;
	u32 irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID);

	handle_domain_irq(sirfsoc_irqdomain, irqstat & 0xff, regs);
}
|
||||
|
||||
static int __init sirfsoc_irq_init(struct device_node *np,
|
||||
struct device_node *parent)
|
||||
{
|
||||
void __iomem *base = of_iomap(np, 0);
|
||||
if (!base)
|
||||
panic("unable to map intc cpu registers\n");
|
||||
|
||||
sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
|
||||
&irq_generic_chip_ops, base);
|
||||
|
||||
sirfsoc_alloc_gc(base, 0, 32);
|
||||
sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
|
||||
|
||||
writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0);
|
||||
writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
|
||||
|
||||
writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK0);
|
||||
writel_relaxed(0, base + SIRFSOC_INT_RISC_MASK1);
|
||||
|
||||
set_handle_irq(sirfsoc_handle_irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(sirfsoc_intc, "sirf,prima2-intc", sirfsoc_irq_init);
|
||||
|
||||
struct sirfsoc_irq_status {
|
||||
u32 mask0;
|
||||
u32 mask1;
|
||||
u32 level0;
|
||||
u32 level1;
|
||||
};
|
||||
|
||||
static struct sirfsoc_irq_status sirfsoc_irq_st;
|
||||
|
||||
static int sirfsoc_irq_suspend(void)
|
||||
{
|
||||
void __iomem *base = sirfsoc_irqdomain->host_data;
|
||||
|
||||
sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0);
|
||||
sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1);
|
||||
sirfsoc_irq_st.level0 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL0);
|
||||
sirfsoc_irq_st.level1 = readl_relaxed(base + SIRFSOC_INT_RISC_LEVEL1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sirfsoc_irq_resume(void)
|
||||
{
|
||||
void __iomem *base = sirfsoc_irqdomain->host_data;
|
||||
|
||||
writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0);
|
||||
writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);
|
||||
writel_relaxed(sirfsoc_irq_st.level0, base + SIRFSOC_INT_RISC_LEVEL0);
|
||||
writel_relaxed(sirfsoc_irq_st.level1, base + SIRFSOC_INT_RISC_LEVEL1);
|
||||
}
|
||||
|
||||
static struct syscore_ops sirfsoc_irq_syscore_ops = {
|
||||
.suspend = sirfsoc_irq_suspend,
|
||||
.resume = sirfsoc_irq_resume,
|
||||
};
|
||||
|
||||
static int __init sirfsoc_irq_pm_init(void)
|
||||
{
|
||||
if (!sirfsoc_irqdomain)
|
||||
return 0;
|
||||
|
||||
register_syscore_ops(&sirfsoc_irq_syscore_ops);
|
||||
return 0;
|
||||
}
|
||||
device_initcall(sirfsoc_irq_pm_init);
|
160
drivers/irqchip/irq-sun4i.c
Normal file
160
drivers/irqchip/irq-sun4i.c
Normal file
|
@ -0,0 +1,160 @@
|
|||
/*
|
||||
* Allwinner A1X SoCs IRQ chip driver.
|
||||
*
|
||||
* Copyright (C) 2012 Maxime Ripard
|
||||
*
|
||||
* Maxime Ripard <maxime.ripard@free-electrons.com>
|
||||
*
|
||||
* Based on code from
|
||||
* Allwinner Technology Co., Ltd. <www.allwinnertech.com>
|
||||
* Benn Huang <benn@allwinnertech.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define SUN4I_IRQ_VECTOR_REG 0x00
|
||||
#define SUN4I_IRQ_PROTECTION_REG 0x08
|
||||
#define SUN4I_IRQ_NMI_CTRL_REG 0x0c
|
||||
#define SUN4I_IRQ_PENDING_REG(x) (0x10 + 0x4 * x)
|
||||
#define SUN4I_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x)
|
||||
#define SUN4I_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x)
|
||||
#define SUN4I_IRQ_MASK_REG(x) (0x50 + 0x4 * x)
|
||||
|
||||
static void __iomem *sun4i_irq_base;
|
||||
static struct irq_domain *sun4i_irq_domain;
|
||||
|
||||
static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
|
||||
|
||||
static void sun4i_irq_ack(struct irq_data *irqd)
|
||||
{
|
||||
unsigned int irq = irqd_to_hwirq(irqd);
|
||||
|
||||
if (irq != 0)
|
||||
return; /* Only IRQ 0 / the ENMI needs to be acked */
|
||||
|
||||
writel(BIT(0), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
|
||||
}
|
||||
|
||||
static void sun4i_irq_mask(struct irq_data *irqd)
|
||||
{
|
||||
unsigned int irq = irqd_to_hwirq(irqd);
|
||||
unsigned int irq_off = irq % 32;
|
||||
int reg = irq / 32;
|
||||
u32 val;
|
||||
|
||||
val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
|
||||
writel(val & ~(1 << irq_off),
|
||||
sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
|
||||
}
|
||||
|
||||
static void sun4i_irq_unmask(struct irq_data *irqd)
|
||||
{
|
||||
unsigned int irq = irqd_to_hwirq(irqd);
|
||||
unsigned int irq_off = irq % 32;
|
||||
int reg = irq / 32;
|
||||
u32 val;
|
||||
|
||||
val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
|
||||
writel(val | (1 << irq_off),
|
||||
sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
|
||||
}
|
||||
|
||||
static struct irq_chip sun4i_irq_chip = {
|
||||
.name = "sun4i_irq",
|
||||
.irq_eoi = sun4i_irq_ack,
|
||||
.irq_mask = sun4i_irq_mask,
|
||||
.irq_unmask = sun4i_irq_unmask,
|
||||
.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
|
||||
};
|
||||
|
||||
static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
|
||||
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct irq_domain_ops sun4i_irq_ops = {
|
||||
.map = sun4i_irq_map,
|
||||
.xlate = irq_domain_xlate_onecell,
|
||||
};
|
||||
|
||||
static int __init sun4i_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
sun4i_irq_base = of_iomap(node, 0);
|
||||
if (!sun4i_irq_base)
|
||||
panic("%s: unable to map IC registers\n",
|
||||
node->full_name);
|
||||
|
||||
/* Disable all interrupts */
|
||||
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
|
||||
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
|
||||
writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));
|
||||
|
||||
/* Unmask all the interrupts, ENABLE_REG(x) is used for masking */
|
||||
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
|
||||
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
|
||||
writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));
|
||||
|
||||
/* Clear all the pending interrupts */
|
||||
writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
|
||||
writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(1));
|
||||
writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(2));
|
||||
|
||||
/* Enable protection mode */
|
||||
writel(0x01, sun4i_irq_base + SUN4I_IRQ_PROTECTION_REG);
|
||||
|
||||
/* Configure the external interrupt source type */
|
||||
writel(0x00, sun4i_irq_base + SUN4I_IRQ_NMI_CTRL_REG);
|
||||
|
||||
sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
|
||||
&sun4i_irq_ops, NULL);
|
||||
if (!sun4i_irq_domain)
|
||||
panic("%s: unable to create IRQ domain\n", node->full_name);
|
||||
|
||||
set_handle_irq(sun4i_handle_irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_of_init);
|
||||
|
||||
/*
 * sun4i_handle_irq() - top-level IRQ entry point
 *
 * The vector register reports the highest-priority pending hwirq
 * (shifted left by 2).  A do/while loop is required, not a while loop:
 * once the spurious check has passed, hwirq 0 is a real interrupt and
 * must be dispatched exactly like any other.
 */
static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
{
	u32 hwirq;

	/*
	 * hwirq == 0 can mean one of 3 things:
	 * 1) no more irqs pending
	 * 2) irq 0 pending
	 * 3) spurious irq
	 * So if we immediately get a reading of 0, check the irq-pending reg
	 * to differentiate between 2 and 3. We only do this once to avoid
	 * the extra check in the common case of 1 hapening after having
	 * read the vector-reg once.
	 */
	hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
	if (hwirq == 0 &&
		  !(readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)) & BIT(0)))
		return;

	do {
		handle_domain_irq(sun4i_irq_domain, hwirq, regs);
		hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
	} while (hwirq != 0);
}
|
208
drivers/irqchip/irq-sunxi-nmi.c
Normal file
208
drivers/irqchip/irq-sunxi-nmi.c
Normal file
|
@ -0,0 +1,208 @@
|
|||
/*
|
||||
* Allwinner A20/A31 SoCs NMI IRQ chip driver.
|
||||
*
|
||||
* Carlo Caione <carlo.caione@gmail.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include "irqchip.h"
|
||||
|
||||
#define SUNXI_NMI_SRC_TYPE_MASK 0x00000003
|
||||
|
||||
enum {
|
||||
SUNXI_SRC_TYPE_LEVEL_LOW = 0,
|
||||
SUNXI_SRC_TYPE_EDGE_FALLING,
|
||||
SUNXI_SRC_TYPE_LEVEL_HIGH,
|
||||
SUNXI_SRC_TYPE_EDGE_RISING,
|
||||
};
|
||||
|
||||
struct sunxi_sc_nmi_reg_offs {
|
||||
u32 ctrl;
|
||||
u32 pend;
|
||||
u32 enable;
|
||||
};
|
||||
|
||||
static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
|
||||
.ctrl = 0x00,
|
||||
.pend = 0x04,
|
||||
.enable = 0x08,
|
||||
};
|
||||
|
||||
static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
|
||||
.ctrl = 0x00,
|
||||
.pend = 0x04,
|
||||
.enable = 0x34,
|
||||
};
|
||||
|
||||
/* Thin MMIO accessors relative to the generic chip's register base. */
static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(val, gc->reg_base + off);
}

static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc->reg_base + off);
}

/*
 * Chained handler on the parent line: the NMI controller exposes a
 * single interrupt, so hwirq 0 is forwarded unconditionally.
 */
static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int virq = irq_find_mapping(domain, 0);

	chained_irq_enter(chip, desc);
	generic_handle_irq(virq);
	chained_irq_exit(chip, desc);
}
|
||||
|
||||
static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
|
||||
struct irq_chip_type *ct = gc->chip_types;
|
||||
u32 src_type_reg;
|
||||
u32 ctrl_off = ct->regs.type;
|
||||
unsigned int src_type;
|
||||
unsigned int i;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
|
||||
switch (flow_type & IRQF_TRIGGER_MASK) {
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_RISING:
|
||||
src_type = SUNXI_SRC_TYPE_EDGE_RISING;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_HIGH:
|
||||
src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
|
||||
break;
|
||||
case IRQ_TYPE_NONE:
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
|
||||
break;
|
||||
default:
|
||||
irq_gc_unlock(gc);
|
||||
pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
|
||||
__func__, data->irq);
|
||||
return -EBADR;
|
||||
}
|
||||
|
||||
irqd_set_trigger_type(data, flow_type);
|
||||
irq_setup_alt_chip(data, flow_type);
|
||||
|
||||
for (i = 0; i <= gc->num_ct; i++, ct++)
|
||||
if (ct->type & flow_type)
|
||||
ctrl_off = ct->regs.type;
|
||||
|
||||
src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
|
||||
src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
|
||||
src_type_reg |= src_type;
|
||||
sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
|
||||
|
||||
irq_gc_unlock(gc);
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
}
|
||||
|
||||
/*
 * sunxi_sc_nmi_irq_init() - common probe for the sun6i/sun7i NMI block
 * @node:     controller's device-tree node
 * @reg_offs: per-SoC register layout (ctrl/pend/enable offsets)
 *
 * Builds a single-interrupt linear domain backed by one generic chip
 * with two chip types (level via fasteoi, edge via handle_edge_irq),
 * quiesces the hardware and chains the parent interrupt.
 */
static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
					struct sunxi_sc_nmi_reg_offs *reg_offs)
{
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	unsigned int irq;
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int ret;

	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: Could not register interrupt domain.\n", node->name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name,
					     handle_fasteoi_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: Could not allocate generic interrupt chip.\n",
			 node->name);
		goto fail_irqd_remove;
	}

	irq = irq_of_parse_and_map(node, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", node->name);
		ret = -EINVAL;
		goto fail_irqd_remove;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = of_iomap(node, 0);
	if (!gc->reg_base) {
		pr_err("%s: unable to map resource\n", node->name);
		ret = -ENOMEM;
		goto fail_irqd_remove;
	}

	/* chip type 0: level triggers, acked on EOI */
	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
	gc->chip_types[0].regs.ack		= reg_offs->pend;
	gc->chip_types[0].regs.mask		= reg_offs->enable;
	gc->chip_types[0].regs.type		= reg_offs->ctrl;

	/* chip type 1: edge triggers, acked up front */
	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[1].chip.name		= gc->chip_types[0].chip.name;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
	gc->chip_types[1].regs.ack		= reg_offs->pend;
	gc->chip_types[1].regs.mask		= reg_offs->enable;
	gc->chip_types[1].regs.type		= reg_offs->ctrl;
	gc->chip_types[1].handler		= handle_edge_irq;

	/* disable and clear any pending NMI before chaining */
	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
	sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);

	irq_set_handler_data(irq, domain);
	irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);

	return 0;

fail_irqd_remove:
	irq_domain_remove(domain);

	return ret;
}
|
||||
|
||||
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
|
||||
}
|
||||
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
|
||||
|
||||
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
|
||||
}
|
||||
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
|
195
drivers/irqchip/irq-tb10x.c
Normal file
195
drivers/irqchip/irq-tb10x.c
Normal file
|
@ -0,0 +1,195 @@
|
|||
/*
|
||||
* Abilis Systems interrupt controller driver
|
||||
*
|
||||
* Copyright (C) Abilis Systems 2012
|
||||
*
|
||||
* Author: Christian Ruppert <christian.ruppert@abilis.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/bitops.h>
|
||||
#include "irqchip.h"
|
||||
|
||||
#define AB_IRQCTL_INT_ENABLE 0x00
|
||||
#define AB_IRQCTL_INT_STATUS 0x04
|
||||
#define AB_IRQCTL_SRC_MODE 0x08
|
||||
#define AB_IRQCTL_SRC_POLARITY 0x0C
|
||||
#define AB_IRQCTL_INT_MODE 0x10
|
||||
#define AB_IRQCTL_INT_POLARITY 0x14
|
||||
#define AB_IRQCTL_INT_FORCE 0x18
|
||||
|
||||
#define AB_IRQCTL_MAXIRQ 32
|
||||
|
||||
/* MMIO accessors relative to the generic chip's register base. */
static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg,
	u32 val)
{
	irq_reg_writel(val, gc->reg_base + reg);
}

static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg)
{
	return irq_reg_readl(gc->reg_base + reg);
}
|
||||
|
||||
static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
|
||||
uint32_t im, mod, pol;
|
||||
|
||||
im = data->mask;
|
||||
|
||||
irq_gc_lock(gc);
|
||||
|
||||
mod = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_MODE) | im;
|
||||
pol = ab_irqctl_readreg(gc, AB_IRQCTL_SRC_POLARITY) | im;
|
||||
|
||||
switch (flow_type & IRQF_TRIGGER_MASK) {
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
pol ^= im;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_HIGH:
|
||||
mod ^= im;
|
||||
break;
|
||||
case IRQ_TYPE_NONE:
|
||||
flow_type = IRQ_TYPE_LEVEL_LOW;
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
mod ^= im;
|
||||
pol ^= im;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_RISING:
|
||||
break;
|
||||
default:
|
||||
irq_gc_unlock(gc);
|
||||
pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
|
||||
__func__, data->irq);
|
||||
return -EBADR;
|
||||
}
|
||||
|
||||
irqd_set_trigger_type(data, flow_type);
|
||||
irq_setup_alt_chip(data, flow_type);
|
||||
|
||||
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_MODE, mod);
|
||||
ab_irqctl_writereg(gc, AB_IRQCTL_SRC_POLARITY, pol);
|
||||
ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, im);
|
||||
|
||||
irq_gc_unlock(gc);
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
}
|
||||
|
||||
/* Chained handler: forward the parent line 1:1 into the tb10x domain. */
static void tb10x_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);

	generic_handle_irq(irq_find_mapping(domain, irq));
}
|
||||
|
||||
static int __init of_tb10x_init_irq(struct device_node *ictl,
|
||||
struct device_node *parent)
|
||||
{
|
||||
int i, ret, nrirqs = of_irq_count(ictl);
|
||||
struct resource mem;
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_domain *domain;
|
||||
void __iomem *reg_base;
|
||||
|
||||
if (of_address_to_resource(ictl, 0, &mem)) {
|
||||
pr_err("%s: No registers declared in DeviceTree.\n",
|
||||
ictl->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!request_mem_region(mem.start, resource_size(&mem),
|
||||
ictl->name)) {
|
||||
pr_err("%s: Request mem region failed.\n", ictl->name);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
reg_base = ioremap(mem.start, resource_size(&mem));
|
||||
if (!reg_base) {
|
||||
ret = -EBUSY;
|
||||
pr_err("%s: ioremap failed.\n", ictl->name);
|
||||
goto ioremap_fail;
|
||||
}
|
||||
|
||||
domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ,
|
||||
&irq_generic_chip_ops, NULL);
|
||||
if (!domain) {
|
||||
ret = -ENOMEM;
|
||||
pr_err("%s: Could not register interrupt domain.\n",
|
||||
ictl->name);
|
||||
goto irq_domain_add_fail;
|
||||
}
|
||||
|
||||
ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ,
|
||||
2, ictl->name, handle_level_irq,
|
||||
IRQ_NOREQUEST, IRQ_NOPROBE,
|
||||
IRQ_GC_INIT_MASK_CACHE);
|
||||
if (ret) {
|
||||
pr_err("%s: Could not allocate generic interrupt chip.\n",
|
||||
ictl->name);
|
||||
goto gc_alloc_fail;
|
||||
}
|
||||
|
||||
gc = domain->gc->gc[0];
|
||||
gc->reg_base = reg_base;
|
||||
|
||||
gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
|
||||
gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
|
||||
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
|
||||
gc->chip_types[0].chip.irq_set_type = tb10x_irq_set_type;
|
||||
gc->chip_types[0].regs.mask = AB_IRQCTL_INT_ENABLE;
|
||||
|
||||
gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
|
||||
gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
|
||||
gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
|
||||
gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
|
||||
gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
|
||||
gc->chip_types[1].chip.irq_set_type = tb10x_irq_set_type;
|
||||
gc->chip_types[1].regs.ack = AB_IRQCTL_INT_STATUS;
|
||||
gc->chip_types[1].regs.mask = AB_IRQCTL_INT_ENABLE;
|
||||
gc->chip_types[1].handler = handle_edge_irq;
|
||||
|
||||
for (i = 0; i < nrirqs; i++) {
|
||||
unsigned int irq = irq_of_parse_and_map(ictl, i);
|
||||
|
||||
irq_set_handler_data(irq, domain);
|
||||
irq_set_chained_handler(irq, tb10x_irq_cascade);
|
||||
}
|
||||
|
||||
ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
|
||||
ab_irqctl_writereg(gc, AB_IRQCTL_INT_MODE, 0);
|
||||
ab_irqctl_writereg(gc, AB_IRQCTL_INT_POLARITY, 0);
|
||||
ab_irqctl_writereg(gc, AB_IRQCTL_INT_STATUS, ~0UL);
|
||||
|
||||
return 0;
|
||||
|
||||
gc_alloc_fail:
|
||||
irq_domain_remove(domain);
|
||||
irq_domain_add_fail:
|
||||
iounmap(reg_base);
|
||||
ioremap_fail:
|
||||
release_mem_region(mem.start, resource_size(&mem));
|
||||
return ret;
|
||||
}
|
||||
IRQCHIP_DECLARE(tb10x_intc, "abilis,tb10x-ictl", of_tb10x_init_irq);
|
230
drivers/irqchip/irq-versatile-fpga.c
Normal file
230
drivers/irqchip/irq-versatile-fpga.c
Normal file
|
@ -0,0 +1,230 @@
|
|||
/*
|
||||
* Support for Versatile FPGA-based IRQ controllers
|
||||
*/
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqchip/versatile-fpga.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define IRQ_STATUS 0x00
|
||||
#define IRQ_RAW_STATUS 0x04
|
||||
#define IRQ_ENABLE_SET 0x08
|
||||
#define IRQ_ENABLE_CLEAR 0x0c
|
||||
#define INT_SOFT_SET 0x10
|
||||
#define INT_SOFT_CLEAR 0x14
|
||||
#define FIQ_STATUS 0x20
|
||||
#define FIQ_RAW_STATUS 0x24
|
||||
#define FIQ_ENABLE 0x28
|
||||
#define FIQ_ENABLE_SET 0x28
|
||||
#define FIQ_ENABLE_CLEAR 0x2C
|
||||
|
||||
#define PIC_ENABLES 0x20 /* set interrupt pass through bits */
|
||||
|
||||
/**
|
||||
* struct fpga_irq_data - irq data container for the FPGA IRQ controller
|
||||
* @base: memory offset in virtual memory
|
||||
* @chip: chip container for this instance
|
||||
* @domain: IRQ domain for this instance
|
||||
* @valid: mask for valid IRQs on this controller
|
||||
* @used_irqs: number of active IRQs on this controller
|
||||
*/
|
||||
struct fpga_irq_data {
|
||||
void __iomem *base;
|
||||
struct irq_chip chip;
|
||||
u32 valid;
|
||||
struct irq_domain *domain;
|
||||
u8 used_irqs;
|
||||
};
|
||||
|
||||
/* we cannot allocate memory when the controllers are initially registered */
|
||||
static struct fpga_irq_data fpga_irq_devices[CONFIG_VERSATILE_FPGA_IRQ_NR];
|
||||
static int fpga_irq_id;
|
||||
|
||||
static void fpga_irq_mask(struct irq_data *d)
|
||||
{
|
||||
struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
|
||||
u32 mask = 1 << d->hwirq;
|
||||
|
||||
writel(mask, f->base + IRQ_ENABLE_CLEAR);
|
||||
}
|
||||
|
||||
static void fpga_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
|
||||
u32 mask = 1 << d->hwirq;
|
||||
|
||||
writel(mask, f->base + IRQ_ENABLE_SET);
|
||||
}
|
||||
|
||||
static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
|
||||
u32 status = readl(f->base + IRQ_STATUS);
|
||||
|
||||
if (status == 0) {
|
||||
do_bad_IRQ(irq, desc);
|
||||
return;
|
||||
}
|
||||
|
||||
do {
|
||||
irq = ffs(status) - 1;
|
||||
status &= ~(1 << irq);
|
||||
generic_handle_irq(irq_find_mapping(f->domain, irq));
|
||||
} while (status);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle each interrupt in a single FPGA IRQ controller. Returns non-zero
|
||||
* if we've handled at least one interrupt. This does a single read of the
|
||||
* status register and handles all interrupts in order from LSB first.
|
||||
*/
|
||||
static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
|
||||
{
|
||||
int handled = 0;
|
||||
int irq;
|
||||
u32 status;
|
||||
|
||||
while ((status = readl(f->base + IRQ_STATUS))) {
|
||||
irq = ffs(status) - 1;
|
||||
handle_domain_irq(f->domain, irq, regs);
|
||||
handled = 1;
|
||||
}
|
||||
|
||||
return handled;
|
||||
}
|
||||
|
||||
/*
|
||||
* Keep iterating over all registered FPGA IRQ controllers until there are
|
||||
* no pending interrupts.
|
||||
*/
|
||||
asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
int i, handled;
|
||||
|
||||
do {
|
||||
for (i = 0, handled = 0; i < fpga_irq_id; ++i)
|
||||
handled |= handle_one_fpga(&fpga_irq_devices[i], regs);
|
||||
} while (handled);
|
||||
}
|
||||
|
||||
/*
 * irq_domain .map callback: wire a newly created Linux IRQ to this
 * controller's chip and mark it usable.
 */
static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hwirq)
{
	struct fpga_irq_data *f = d->host_data;

	/* Skip invalid IRQs, only register handlers for the real ones */
	if (!(f->valid & BIT(hwirq)))
		return -EPERM;
	irq_set_chip_data(irq, f);
	irq_set_chip_and_handler(irq, &f->chip,
			handle_level_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	return 0;
}
|
||||
|
||||
/* Domain ops: one- or two-cell DT bindings translate straight to hwirq */
static struct irq_domain_ops fpga_irqdomain_ops = {
	.map = fpga_irqdomain_map,
	.xlate = irq_domain_xlate_onetwocell,
};
|
||||
|
||||
/**
 * fpga_irq_init - register one FPGA IRQ controller
 * @base: register base of the controller
 * @name: name for the irq_chip
 * @irq_start: first Linux IRQ number, or 0 to let the domain allocate
 * @parent_irq: parent IRQ if cascaded, -1 for a root controller
 * @valid: bitmask of usable interrupt sources
 * @node: associated device tree node, may be NULL on non-DT boards
 */
void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
		int parent_irq, u32 valid, struct device_node *node)
{
	struct fpga_irq_data *f;
	int i;

	if (fpga_irq_id >= ARRAY_SIZE(fpga_irq_devices)) {
		pr_err("%s: too few FPGA IRQ controllers, increase CONFIG_VERSATILE_FPGA_IRQ_NR\n", __func__);
		return;
	}
	f = &fpga_irq_devices[fpga_irq_id];
	f->base = base;
	f->chip.name = name;
	/* ack == mask: the controller has no separate acknowledge register */
	f->chip.irq_ack = fpga_irq_mask;
	f->chip.irq_mask = fpga_irq_mask;
	f->chip.irq_unmask = fpga_irq_unmask;
	f->valid = valid;

	/* Cascaded controller: hang our chained handler off the parent IRQ */
	if (parent_irq != -1) {
		irq_set_handler_data(parent_irq, f);
		irq_set_chained_handler(parent_irq, fpga_irq_handle);
	}

	/* This will also allocate irq descriptors */
	f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
			&fpga_irqdomain_ops, f);

	/* This will allocate all valid descriptors in the linear case */
	for (i = 0; i < fls(valid); i++)
		if (valid & BIT(i)) {
			/* irq_start == 0 means a linear domain: map lazily here */
			if (!irq_start)
				irq_create_mapping(f->domain, i);
			f->used_irqs++;
		}

	pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
		fpga_irq_id, name, base, f->used_irqs);
	if (parent_irq != -1)
		pr_cont(", parent IRQ: %d\n", parent_irq);
	else
		pr_cont("\n");

	fpga_irq_id++;
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
/*
 * Device-tree probe for a Versatile FPGA interrupt controller.
 *
 * Maps the registers, registers the controller (cascaded if the node has
 * a parent interrupt, root handler otherwise), clears/masks the sources
 * named in "clear-mask" and applies board quirks.
 *
 * Fix: the original only WARNed when of_iomap() failed and then carried
 * on to dereference the NULL mapping (writel and fpga_irq_init both use
 * it); bail out with -EIO instead.
 */
int __init fpga_irq_of_init(struct device_node *node,
			    struct device_node *parent)
{
	void __iomem *base;
	u32 clear_mask;
	u32 valid_mask;
	int parent_irq;

	if (WARN_ON(!node))
		return -ENODEV;

	base = of_iomap(node, 0);
	if (WARN(!base, "unable to map fpga irq registers\n"))
		return -EIO;

	if (of_property_read_u32(node, "clear-mask", &clear_mask))
		clear_mask = 0;

	if (of_property_read_u32(node, "valid-mask", &valid_mask))
		valid_mask = 0;

	/* Some chips are cascaded from a parent IRQ */
	parent_irq = irq_of_parse_and_map(node, 0);
	if (!parent_irq) {
		/* Root controller: install the global IRQ entry point */
		set_handle_irq(fpga_handle_irq);
		parent_irq = -1;
	}

	fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);

	/* Start from a known state: disable the requested IRQ and FIQ sources */
	writel(clear_mask, base + IRQ_ENABLE_CLEAR);
	writel(clear_mask, base + FIQ_ENABLE_CLEAR);

	/*
	 * On Versatile AB/PB, some secondary interrupts have a direct
	 * pass-thru to the primary controller for IRQs 20 and 22-31 which need
	 * to be enabled. See section 3.10 of the Versatile AB user guide.
	 */
	if (of_device_is_compatible(node, "arm,versatile-sic"))
		writel(0xffd00000, base + PIC_ENABLES);

	return 0;
}
|
||||
IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init);
|
||||
IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init);
|
||||
#endif
|
547
drivers/irqchip/irq-vic.c
Normal file
547
drivers/irqchip/irq-vic.c
Normal file
|
@ -0,0 +1,547 @@
|
|||
/*
|
||||
* linux/arch/arm/common/vic.c
|
||||
*
|
||||
* Copyright (C) 1999 - 2003 ARM Limited
|
||||
* Copyright (C) 2000 Deep Blue Solutions Ltd
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/syscore_ops.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/amba/bus.h>
|
||||
#include <linux/irqchip/arm-vic.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/*
 * PL190/PL192 VIC register map.  Note: VIC_INT_ENABLE (0x10) and
 * VIC_INT_ENABLE_CLEAR (0x14) are used throughout this file
 * (vic_disable, vic_mask_irq, resume_one_vic, ...) and were missing
 * from this define block — restored here.
 */
#define VIC_IRQ_STATUS			0x00
#define VIC_FIQ_STATUS			0x04
#define VIC_INT_SELECT			0x0c	/* 1 = FIQ, 0 = IRQ */
#define VIC_INT_ENABLE			0x10	/* 1 = enable, 0 = disable */
#define VIC_INT_ENABLE_CLEAR		0x14
#define VIC_INT_SOFT			0x18
#define VIC_INT_SOFT_CLEAR		0x1c
#define VIC_PROTECT			0x20
#define VIC_PL190_VECT_ADDR		0x30	/* PL190 only */
#define VIC_PL190_DEF_VECT_ADDR	0x34	/* PL190 only */

#define VIC_VECT_ADDR0			0x100	/* 0 to 15 (0..31 PL192) */
#define VIC_VECT_CNTL0			0x200	/* 0 to 15 (0..31 PL192) */
#define VIC_ITCR			0x300	/* VIC test control register */

#define VIC_VECT_CNTL_ENABLE		(1 << 5)

#define VIC_PL192_VECT_ADDR		0xF00
|
||||
|
||||
/**
 * struct vic_device - VIC PM device
 * @base: The register base for the VIC.
 * @irq: The IRQ number for the base of the VIC.
 * @valid_sources: A bitmask of valid interrupts
 * @resume_sources: A bitmask of interrupts for resume.
 * @resume_irqs: The IRQs enabled for resume.
 * @int_select: Save for VIC_INT_SELECT.
 * @int_enable: Save for VIC_INT_ENABLE.
 * @soft_int: Save for VIC_INT_SOFT.
 * @protect: Save for VIC_PROTECT.
 * @domain: The IRQ domain for the VIC.
 */
struct vic_device {
	void __iomem *base;
	int irq;
	u32 valid_sources;
	u32 resume_sources;
	u32 resume_irqs;
	u32 int_select;
	u32 int_enable;
	u32 soft_int;
	u32 protect;
	struct irq_domain *domain;
};
|
||||
|
||||
/* we cannot allocate memory when VICs are initially registered */
static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];

/* Number of VICs registered so far (index of the next free slot above) */
static int vic_id;

/* Root IRQ entry point, forward-declared for vic_register() */
static void vic_handle_irq(struct pt_regs *regs);
|
||||
|
||||
/**
 * vic_init2 - common initialisation code
 * @base: Base of the VIC.
 *
 * Common initialisation code for registration
 * and resume: program the 16 vectored-interrupt control slots and the
 * default vector address (32 = "no vectored interrupt").
 */
static void vic_init2(void __iomem *base)
{
	int i;

	for (i = 0; i < 16; i++) {
		void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
		writel(VIC_VECT_CNTL_ENABLE | i, reg);
	}

	writel(32, base + VIC_PL190_DEF_VECT_ADDR);
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/* Restore one VIC's register state saved by suspend_one_vic() */
static void resume_one_vic(struct vic_device *vic)
{
	void __iomem *base = vic->base;

	printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);

	/* re-initialise static settings */
	vic_init2(base);

	writel(vic->int_select, base + VIC_INT_SELECT);
	writel(vic->protect, base + VIC_PROTECT);

	/* set the enabled ints and then clear the non-enabled */
	writel(vic->int_enable, base + VIC_INT_ENABLE);
	writel(~vic->int_enable, base + VIC_INT_ENABLE_CLEAR);

	/* and the same for the soft-int register */

	writel(vic->soft_int, base + VIC_INT_SOFT);
	writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
}
|
||||
|
||||
static void vic_resume(void)
|
||||
{
|
||||
int id;
|
||||
|
||||
for (id = vic_id - 1; id >= 0; id--)
|
||||
resume_one_vic(vic_devices + id);
|
||||
}
|
||||
|
||||
/* Save one VIC's register state and arm only its wakeup sources */
static void suspend_one_vic(struct vic_device *vic)
{
	void __iomem *base = vic->base;

	printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);

	vic->int_select = readl(base + VIC_INT_SELECT);
	vic->int_enable = readl(base + VIC_INT_ENABLE);
	vic->soft_int = readl(base + VIC_INT_SOFT);
	vic->protect = readl(base + VIC_PROTECT);

	/* set the interrupts (if any) that are used for
	 * resuming the system */

	writel(vic->resume_irqs, base + VIC_INT_ENABLE);
	writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
}
|
||||
|
||||
static int vic_suspend(void)
|
||||
{
|
||||
int id;
|
||||
|
||||
for (id = 0; id < vic_id; id++)
|
||||
suspend_one_vic(vic_devices + id);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* System-core PM hooks; registered from vic_pm_init() when VICs exist */
struct syscore_ops vic_syscore_ops = {
	.suspend = vic_suspend,
	.resume = vic_resume,
};
|
||||
|
||||
/**
 * vic_pm_init - initcall to register VIC pm
 *
 * This is called via late_initcall() to register
 * the resources for the VICs due to the early
 * nature of the VIC's registration.
 */
static int __init vic_pm_init(void)
{
	/* Only hook into suspend/resume if at least one VIC was registered */
	if (vic_id > 0)
		register_syscore_ops(&vic_syscore_ops);

	return 0;
}
late_initcall(vic_pm_init);
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
static struct irq_chip vic_chip;
|
||||
|
||||
/*
 * irq_domain .map callback: bind a new Linux IRQ to the VIC chip.  The
 * chip data is the register base, not the vic_device.
 */
static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hwirq)
{
	struct vic_device *v = d->host_data;

	/* Skip invalid IRQs, only register handlers for the real ones */
	if (!(v->valid_sources & (1 << hwirq)))
		return -EPERM;
	irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
	irq_set_chip_data(irq, v->base);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	return 0;
}
|
||||
|
||||
/*
 * Handle each interrupt in a single VIC. Returns non-zero if we've
 * handled at least one interrupt. This reads the status register
 * before handling each interrupt, which is necessary given that
 * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
 */
static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
{
	u32 stat, irq;
	int handled = 0;

	while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
		/* ffs() is 1-based; dispatch lowest pending source first */
		irq = ffs(stat) - 1;
		handle_domain_irq(vic->domain, irq, regs);
		handled = 1;
	}

	return handled;
}
|
||||
|
||||
/*
 * Chained handler for a VIC cascaded off another interrupt controller:
 * drain all pending sources inside a chained_irq_enter/exit pair so the
 * parent controller is acked/masked correctly.
 */
static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc)
{
	u32 stat, hwirq;
	struct irq_chip *host_chip = irq_desc_get_chip(desc);
	struct vic_device *vic = irq_desc_get_handler_data(desc);

	chained_irq_enter(host_chip, desc);

	while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
		hwirq = ffs(stat) - 1;
		generic_handle_irq(irq_find_mapping(vic->domain, hwirq));
	}

	chained_irq_exit(host_chip, desc);
}
|
||||
|
||||
/*
|
||||
* Keep iterating over all registered VIC's until there are no pending
|
||||
* interrupts.
|
||||
*/
|
||||
static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
int i, handled;
|
||||
|
||||
do {
|
||||
for (i = 0, handled = 0; i < vic_id; ++i)
|
||||
handled |= handle_one_vic(&vic_devices[i], regs);
|
||||
} while (handled);
|
||||
}
|
||||
|
||||
/* Domain ops: one- or two-cell DT bindings translate straight to hwirq */
static struct irq_domain_ops vic_irqdomain_ops = {
	.map = vic_irqdomain_map,
	.xlate = irq_domain_xlate_onetwocell,
};
|
||||
|
||||
/**
 * vic_register() - Register a VIC.
 * @base: The base address of the VIC.
 * @parent_irq: The parent IRQ if cascaded, else 0.
 * @irq: The base IRQ for the VIC.
 * @valid_sources: bitmask of valid interrupts
 * @resume_sources: bitmask of interrupts allowed for resume sources.
 * @node: The device tree node associated with the VIC.
 *
 * Register the VIC with the system device tree so that it can be notified
 * of suspend and resume requests and ensure that the correct actions are
 * taken to re-instate the settings on resume.
 *
 * This also configures the IRQ domain for the VIC.
 */
static void __init vic_register(void __iomem *base, unsigned int parent_irq,
		unsigned int irq,
		u32 valid_sources, u32 resume_sources,
		struct device_node *node)
{
	struct vic_device *v;
	int i;

	if (vic_id >= ARRAY_SIZE(vic_devices)) {
		printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
		return;
	}

	v = &vic_devices[vic_id];
	v->base = base;
	v->valid_sources = valid_sources;
	v->resume_sources = resume_sources;
	/* NOTE(review): installed even when cascaded — presumably harmless
	 * since a cascaded-only system never takes the root path; confirm. */
	set_handle_irq(vic_handle_irq);
	vic_id++;

	/* Cascaded VIC: hook the chained handler onto the parent IRQ */
	if (parent_irq) {
		irq_set_handler_data(parent_irq, v);
		irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded);
	}

	v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
			&vic_irqdomain_ops, v);
	/* create an IRQ mapping for each valid IRQ */
	for (i = 0; i < fls(valid_sources); i++)
		if (valid_sources & (1 << i))
			irq_create_mapping(v->domain, i);
	/* If no base IRQ was passed, figure out our allocated base */
	if (irq)
		v->irq = irq;
	else
		v->irq = irq_find_mapping(v->domain, 0);
}
|
||||
|
||||
static void vic_ack_irq(struct irq_data *d)
|
||||
{
|
||||
void __iomem *base = irq_data_get_irq_chip_data(d);
|
||||
unsigned int irq = d->hwirq;
|
||||
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
|
||||
/* moreover, clear the soft-triggered, in case it was the reason */
|
||||
writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
|
||||
}
|
||||
|
||||
static void vic_mask_irq(struct irq_data *d)
|
||||
{
|
||||
void __iomem *base = irq_data_get_irq_chip_data(d);
|
||||
unsigned int irq = d->hwirq;
|
||||
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
|
||||
}
|
||||
|
||||
static void vic_unmask_irq(struct irq_data *d)
|
||||
{
|
||||
void __iomem *base = irq_data_get_irq_chip_data(d);
|
||||
unsigned int irq = d->hwirq;
|
||||
writel(1 << irq, base + VIC_INT_ENABLE);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_PM)
|
||||
/*
 * Find the vic_device owning a Linux IRQ number, or NULL.
 * Assumes each VIC's IRQ base is 32-aligned (irq & ~31 recovers it) —
 * holds for the legacy fixed-base layout this is used with.
 */
static struct vic_device *vic_from_irq(unsigned int irq)
{
	struct vic_device *v = vic_devices;
	unsigned int base_irq = irq & ~31;
	int id;

	for (id = 0; id < vic_id; id++, v++) {
		if (v->irq == base_irq)
			return v;
	}

	return NULL;
}
|
||||
|
||||
/*
 * irq_set_wake callback: record whether this source should stay enabled
 * as a wakeup during suspend (consumed by suspend_one_vic()).  Only
 * sources listed in resume_sources may be armed.
 */
static int vic_set_wake(struct irq_data *d, unsigned int on)
{
	struct vic_device *v = vic_from_irq(d->irq);
	unsigned int off = d->hwirq;
	u32 bit = 1 << off;

	if (!v)
		return -EINVAL;

	if (!(bit & v->resume_sources))
		return -EINVAL;

	if (on)
		v->resume_irqs |= bit;
	else
		v->resume_irqs &= ~bit;

	return 0;
}
|
||||
#else
|
||||
#define vic_set_wake NULL
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
/* irq_chip shared by every VIC instance; chip data is the register base */
static struct irq_chip vic_chip = {
	.name = "VIC",
	.irq_ack = vic_ack_irq,
	.irq_mask = vic_mask_irq,
	.irq_unmask = vic_unmask_irq,
	.irq_set_wake = vic_set_wake,
};
|
||||
|
||||
/* Put the VIC into a quiescent state: all IRQ, nothing enabled or pending */
static void __init vic_disable(void __iomem *base)
{
	writel(0, base + VIC_INT_SELECT);
	writel(0, base + VIC_INT_ENABLE);
	writel(~0, base + VIC_INT_ENABLE_CLEAR);
	writel(0, base + VIC_ITCR);
	writel(~0, base + VIC_INT_SOFT_CLEAR);
}
|
||||
|
||||
/*
 * Drain any latched vectored interrupts by repeatedly reading and
 * writing back the PL190 vector address register (19 iterations covers
 * the controller's internal pipeline depth — hardware quirk).
 */
static void __init vic_clear_interrupts(void __iomem *base)
{
	unsigned int i;

	writel(0, base + VIC_PL190_VECT_ADDR);
	for (i = 0; i < 19; i++) {
		unsigned int value;

		value = readl(base + VIC_PL190_VECT_ADDR);
		writel(value, base + VIC_PL190_VECT_ADDR);
	}
}
|
||||
|
||||
/*
 * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
 * The original cell has 32 interrupts, while the modified one has 64,
 * replicating two blocks 0x00..0x1f in 0x20..0x3f. In that case
 * the probe function is called twice, with base set to offset 000
 * and 020 within the page. We call this "second block".
 */
static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
		u32 vic_sources, struct device_node *node)
{
	unsigned int i;
	int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;

	/* Disable all interrupts initially. */
	vic_disable(base);

	/*
	 * Make sure we clear all existing interrupts. The vector registers
	 * in this cell are after the second block of general registers,
	 * so we can address them using standard offsets, but only from
	 * the second base address, which is 0x20 in the page
	 */
	if (vic_2nd_block) {
		vic_clear_interrupts(base);

		/* ST has 16 vectors as well, but we don't enable them by now */
		for (i = 0; i < 16; i++) {
			void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
			writel(0, reg);
		}

		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
	}

	/* ST variant is never cascaded and has no wakeup sources */
	vic_register(base, 0, irq_start, vic_sources, 0, node);
}
|
||||
|
||||
/*
 * Common VIC probe: identify the cell from its AMBA ID registers,
 * dispatch ST variants to vic_init_st(), otherwise quiesce the
 * controller and register it.
 */
void __init __vic_init(void __iomem *base, int parent_irq, int irq_start,
		u32 vic_sources, u32 resume_sources,
		struct device_node *node)
{
	unsigned int i;
	u32 cellid = 0;
	enum amba_vendor vendor;

	/* Identify which VIC cell this one is, by reading the ID */
	for (i = 0; i < 4; i++) {
		void __iomem *addr;
		/* ID bytes live at 0xfe0..0xfec of the page containing base */
		addr = (void __iomem *)((u32)base & PAGE_MASK) + 0xfe0 + (i * 4);
		cellid |= (readl(addr) & 0xff) << (8 * i);
	}
	vendor = (cellid >> 12) & 0xff;
	printk(KERN_INFO "VIC @%p: id 0x%08x, vendor 0x%02x\n",
	       base, cellid, vendor);

	switch(vendor) {
	case AMBA_VENDOR_ST:
		vic_init_st(base, irq_start, vic_sources, node);
		return;
	default:
		printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
		/* fall through */
	case AMBA_VENDOR_ARM:
		break;
	}

	/* Disable all interrupts initially. */
	vic_disable(base);

	/* Make sure we clear all existing interrupts */
	vic_clear_interrupts(base);

	vic_init2(base);

	vic_register(base, parent_irq, irq_start, vic_sources, resume_sources, node);
}
|
||||
|
||||
/**
 * vic_init() - initialise a vectored interrupt controller
 * @base: iomem base address
 * @irq_start: starting interrupt number, must be multiple of 32
 * @vic_sources: bitmask of interrupt sources to allow
 * @resume_sources: bitmask of interrupt sources to allow for resume
 */
void __init vic_init(void __iomem *base, unsigned int irq_start,
		u32 vic_sources, u32 resume_sources)
{
	__vic_init(base, 0, irq_start, vic_sources, resume_sources, NULL);
}
|
||||
|
||||
/**
 * vic_init_cascaded() - initialise a cascaded vectored interrupt controller
 * @base: iomem base address
 * @parent_irq: the parent IRQ we're cascaded off
 * @vic_sources: bitmask of interrupt sources to allow
 * @resume_sources: bitmask of interrupt sources to allow for resume
 *
 * This returns the base for the new interrupts or negative on error.
 */
int __init vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
		u32 vic_sources, u32 resume_sources)
{
	struct vic_device *v;

	/* Grab the slot __vic_init()/vic_register() will fill next */
	v = &vic_devices[vic_id];
	__vic_init(base, parent_irq, 0, vic_sources, resume_sources, NULL);
	/* Return our acquired base */
	return v->irq;
}
EXPORT_SYMBOL_GPL(vic_init_cascaded);
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
/* Device-tree probe entry for PL190/PL192/Versatile VICs (root only) */
int __init vic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *regs;
	u32 interrupt_mask = ~0;
	u32 wakeup_mask = ~0;

	if (WARN(parent, "non-root VICs are not supported"))
		return -EINVAL;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	/* Both masks default to "all sources" when the property is absent */
	of_property_read_u32(node, "valid-mask", &interrupt_mask);
	of_property_read_u32(node, "valid-wakeup-mask", &wakeup_mask);

	/*
	 * Passing 0 as first IRQ makes the simple domain allocate descriptors
	 */
	__vic_init(regs, 0, 0, interrupt_mask, wakeup_mask, node);

	return 0;
}
|
||||
IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init);
|
||||
IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init);
|
||||
IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init);
|
||||
#endif /* CONFIG OF */
|
259
drivers/irqchip/irq-vt8500.c
Normal file
259
drivers/irqchip/irq-vt8500.c
Normal file
|
@ -0,0 +1,259 @@
|
|||
/*
|
||||
* arch/arm/mach-vt8500/irq.c
|
||||
*
|
||||
* Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
|
||||
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is copied and modified from the original irq.c provided by
|
||||
* Alexey Charkov. Minor changes have been made for Device Tree Support.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/bitops.h>
|
||||
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define VT8500_ICPC_IRQ 0x20
|
||||
#define VT8500_ICPC_FIQ 0x24
|
||||
#define VT8500_ICDC 0x40 /* Destination Control 64*u32 */
|
||||
#define VT8500_ICIS 0x80 /* Interrupt status, 16*u32 */
|
||||
|
||||
/* ICPC */
|
||||
#define ICPC_MASK 0x3F
|
||||
#define ICPC_ROTATE BIT(6)
|
||||
|
||||
/* IC_DCTR */
|
||||
#define ICDC_IRQ 0x00
|
||||
#define ICDC_FIQ 0x01
|
||||
#define ICDC_DSS0 0x02
|
||||
#define ICDC_DSS1 0x03
|
||||
#define ICDC_DSS2 0x04
|
||||
#define ICDC_DSS3 0x05
|
||||
#define ICDC_DSS4 0x06
|
||||
#define ICDC_DSS5 0x07
|
||||
|
||||
#define VT8500_INT_DISABLE 0
|
||||
#define VT8500_INT_ENABLE BIT(3)
|
||||
|
||||
#define VT8500_TRIGGER_HIGH 0
|
||||
#define VT8500_TRIGGER_RISING BIT(5)
|
||||
#define VT8500_TRIGGER_FALLING BIT(6)
|
||||
#define VT8500_EDGE ( VT8500_TRIGGER_RISING \
|
||||
| VT8500_TRIGGER_FALLING)
|
||||
|
||||
/* vt8500 has 1 intc, wm8505 and wm8650 have 2 */
|
||||
#define VT8500_INTC_MAX 2
|
||||
|
||||
/* Per-controller state for a vt8500 interrupt controller */
struct vt8500_irq_data {
	void __iomem 		*base;		/* IO Memory base address */
	struct irq_domain	*domain;	/* Domain for this controller */
};

/* Global variable for accessing io-mem addresses */
static struct vt8500_irq_data intc[VT8500_INTC_MAX];
/* Number of controllers probed so far (index of next free intc[] slot) */
static u32 active_cnt = 0;
|
||||
|
||||
/*
 * Mask/ack one interrupt.  Edge-triggered sources are acknowledged by
 * writing their bit back to the status register (write-one-to-clear);
 * level sources are masked by clearing the enable bit in their per-IRQ
 * destination control byte.
 */
static void vt8500_irq_mask(struct irq_data *d)
{
	struct vt8500_irq_data *priv = d->domain->host_data;
	void __iomem *base = priv->base;
	/* status registers are 32 sources per word */
	void __iomem *stat_reg = base + VT8500_ICIS + (d->hwirq < 32 ? 0 : 4);
	u8 edge, dctr;
	u32 status;

	edge = readb(base + VT8500_ICDC + d->hwirq) & VT8500_EDGE;
	if (edge) {
		status = readl(stat_reg);

		status |= (1 << (d->hwirq & 0x1f));
		writel(status, stat_reg);
	} else {
		dctr = readb(base + VT8500_ICDC + d->hwirq);
		dctr &= ~VT8500_INT_ENABLE;
		writeb(dctr, base + VT8500_ICDC + d->hwirq);
	}
}
|
||||
|
||||
static void vt8500_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct vt8500_irq_data *priv = d->domain->host_data;
|
||||
void __iomem *base = priv->base;
|
||||
u8 dctr;
|
||||
|
||||
dctr = readb(base + VT8500_ICDC + d->hwirq);
|
||||
dctr |= VT8500_INT_ENABLE;
|
||||
writeb(dctr, base + VT8500_ICDC + d->hwirq);
|
||||
}
|
||||
|
||||
/*
 * Program the trigger mode for one interrupt and switch the flow handler
 * to match (level vs. edge).  Low-level triggering is unsupported by the
 * hardware.
 */
static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct vt8500_irq_data *priv = d->domain->host_data;
	void __iomem *base = priv->base;
	u8 dctr;

	dctr = readb(base + VT8500_ICDC + d->hwirq);
	dctr &= ~VT8500_EDGE;

	switch (flow_type) {
	case IRQF_TRIGGER_LOW:
		return -EINVAL;
	case IRQF_TRIGGER_HIGH:
		dctr |= VT8500_TRIGGER_HIGH;
		__irq_set_handler_locked(d->irq, handle_level_irq);
		break;
	case IRQF_TRIGGER_FALLING:
		dctr |= VT8500_TRIGGER_FALLING;
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		break;
	case IRQF_TRIGGER_RISING:
		dctr |= VT8500_TRIGGER_RISING;
		__irq_set_handler_locked(d->irq, handle_edge_irq);
		break;
	}
	writeb(dctr, base + VT8500_ICDC + d->hwirq);

	return 0;
}
|
||||
|
||||
/* irq_chip shared by both vt8500 controllers; ack == mask (see above) */
static struct irq_chip vt8500_irq_chip = {
	.name = "vt8500",
	.irq_ack = vt8500_irq_mask,
	.irq_mask = vt8500_irq_mask,
	.irq_unmask = vt8500_irq_unmask,
	.irq_set_type = vt8500_irq_set_type,
};
|
||||
|
||||
/* Bring the controller to a known state: all 64 sources masked, IRQ routed */
static void __init vt8500_init_irq_hw(void __iomem *base)
{
	u32 i;

	/* Enable rotating priority for IRQ */
	writel(ICPC_ROTATE, base + VT8500_ICPC_IRQ);
	writel(0x00, base + VT8500_ICPC_FIQ);

	/* Disable all interrupts and route them to IRQ */
	for (i = 0; i < 64; i++)
		writeb(VT8500_INT_DISABLE | ICDC_IRQ, base + VT8500_ICDC + i);
}
|
||||
|
||||
/* irq_domain .map callback: default every source to level handling */
static int vt8500_irq_map(struct irq_domain *h, unsigned int virq,
		irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}
|
||||
|
||||
/* Domain ops: single-cell DT binding, cell value is the hwirq number */
static struct irq_domain_ops vt8500_irq_domain_ops = {
	.map = vt8500_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
|
||||
|
||||
/*
 * Root IRQ entry point: read the highest-priority pending source from
 * each active controller and dispatch it.
 */
static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
{
	u32 stat, i;
	int irqnr;
	void __iomem *base;

	/* Loop through each active controller */
	for (i=0; i<active_cnt; i++) {
		base = intc[i].base;
		irqnr = readl_relaxed(base) & 0x3F;
		/*
		  Highest Priority register default = 63, so check that this
		  is a real interrupt by checking the status register
		*/
		if (irqnr == 63) {
			stat = readl_relaxed(base + VT8500_ICIS + 4);
			if (!(stat & BIT(31)))
				continue;
		}

		handle_domain_irq(intc[i].domain, irqnr, regs);
	}
}
|
||||
|
||||
/*
 * Probe one vt8500 interrupt controller from the device tree.
 *
 * Maps the controller registers, creates a 64-interrupt linear domain and
 * programs the hardware to a known (all-masked) state.  A node with its
 * own interrupts is a slaved controller: its eight parent interrupts are
 * enabled so it can cascade.
 *
 * Fixes over the original: failures used to fall through "goto out;
 * return 0" and be reported as success, and the irq domain was created
 * before the register mapping was verified, leaking the domain when
 * of_iomap() failed.  Now the mapping is checked first and all error
 * paths return a negative errno, unwinding what was set up.
 */
static int __init vt8500_irq_init(struct device_node *node,
				  struct device_node *parent)
{
	int irq, i;
	struct device_node *np = node;

	if (active_cnt == VT8500_INTC_MAX) {
		pr_err("%s: Interrupt controllers > VT8500_INTC_MAX\n",
			__func__);
		return -ENOMEM;
	}

	/* Map the registers before creating the domain so we can't leak it */
	intc[active_cnt].base = of_iomap(np, 0);
	if (!intc[active_cnt].base) {
		pr_err("%s: Unable to map IO memory\n", __func__);
		return -ENOMEM;
	}

	intc[active_cnt].domain = irq_domain_add_linear(node, 64,
			&vt8500_irq_domain_ops, &intc[active_cnt]);
	if (!intc[active_cnt].domain) {
		pr_err("%s: Unable to add irq domain!\n", __func__);
		iounmap(intc[active_cnt].base);
		intc[active_cnt].base = NULL;
		return -ENOMEM;
	}

	set_handle_irq(vt8500_handle_irq);

	vt8500_init_irq_hw(intc[active_cnt].base);

	pr_info("vt8500-irq: Added interrupt controller\n");

	active_cnt++;

	/* check if this is a slaved controller */
	if (of_irq_count(np) != 0) {
		/* check that we have the correct number of interrupts */
		if (of_irq_count(np) != 8) {
			pr_err("%s: Incorrect IRQ map for slaved controller\n",
				__func__);
			return -EINVAL;
		}

		/* enable the parent interrupts we cascade from */
		for (i = 0; i < 8; i++) {
			irq = irq_of_parse_and_map(np, i);
			enable_irq(irq);
		}

		pr_info("vt8500-irq: Enabled slave->parent interrupts\n");
	}

	return 0;
}
|
||||
|
||||
IRQCHIP_DECLARE(vt8500_irq, "via,vt8500-intc", vt8500_irq_init);
|
164
drivers/irqchip/irq-xtensa-mx.c
Normal file
164
drivers/irqchip/irq-xtensa-mx.c
Normal file
|
@ -0,0 +1,164 @@
|
|||
/*
|
||||
* Xtensa MX interrupt distributor
|
||||
*
|
||||
* Copyright (C) 2002 - 2013 Tensilica, Inc.
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include <asm/mxregs.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define HW_IRQ_IPI_COUNT 2
|
||||
#define HW_IRQ_MX_BASE 2
|
||||
#define HW_IRQ_EXTERN_BASE 3
|
||||
|
||||
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);
|
||||
|
||||
/*
 * irq_domain .map callback: the first HW_IRQ_IPI_COUNT hwirqs are IPIs
 * and get per-CPU handling; everything else defers to the common
 * xtensa_irq_map().
 */
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hw)
{
	if (hw < HW_IRQ_IPI_COUNT) {
		struct irq_chip *irq_chip = d->host_data;
		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_percpu_irq, "ipi");
		irq_set_status_flags(irq, IRQ_LEVEL);
		return 0;
	}
	return xtensa_irq_map(d, irq, hw);
}
|
||||
|
||||
/*
 * Device Tree IRQ specifier translation function which works with one or
 * two cell bindings. First cell value maps directly to the hwirq number.
 * Second cell if present specifies whether hwirq number is external (1) or
 * internal (0).
 */
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
		struct device_node *ctrlr,
		const u32 *intspec, unsigned int intsize,
		unsigned long *out_hwirq, unsigned int *out_type)
{
	return xtensa_irq_domain_xlate(intspec, intsize,
			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
			out_hwirq, out_type);
}
|
||||
|
||||
static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
|
||||
.xlate = xtensa_mx_irq_domain_xlate,
|
||||
.map = xtensa_mx_irq_map,
|
||||
};
|
||||
|
||||
void secondary_init_irq(void)
|
||||
{
|
||||
__this_cpu_write(cached_irq_mask,
|
||||
XCHAL_INTTYPE_MASK_EXTERN_EDGE |
|
||||
XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
|
||||
set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
|
||||
XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
|
||||
}
|
||||
|
||||
static void xtensa_mx_irq_mask(struct irq_data *d)
|
||||
{
|
||||
unsigned int mask = 1u << d->hwirq;
|
||||
|
||||
if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
|
||||
XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
|
||||
set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
|
||||
HW_IRQ_MX_BASE), MIENG);
|
||||
} else {
|
||||
mask = __this_cpu_read(cached_irq_mask) & ~mask;
|
||||
__this_cpu_write(cached_irq_mask, mask);
|
||||
set_sr(mask, intenable);
|
||||
}
|
||||
}
|
||||
|
||||
static void xtensa_mx_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
unsigned int mask = 1u << d->hwirq;
|
||||
|
||||
if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
|
||||
XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
|
||||
set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
|
||||
HW_IRQ_MX_BASE), MIENGSET);
|
||||
} else {
|
||||
mask |= __this_cpu_read(cached_irq_mask);
|
||||
__this_cpu_write(cached_irq_mask, mask);
|
||||
set_sr(mask, intenable);
|
||||
}
|
||||
}
|
||||
|
||||
static void xtensa_mx_irq_enable(struct irq_data *d)
|
||||
{
|
||||
variant_irq_enable(d->hwirq);
|
||||
xtensa_mx_irq_unmask(d);
|
||||
}
|
||||
|
||||
static void xtensa_mx_irq_disable(struct irq_data *d)
|
||||
{
|
||||
xtensa_mx_irq_mask(d);
|
||||
variant_irq_disable(d->hwirq);
|
||||
}
|
||||
|
||||
static void xtensa_mx_irq_ack(struct irq_data *d)
|
||||
{
|
||||
set_sr(1 << d->hwirq, intclear);
|
||||
}
|
||||
|
||||
static int xtensa_mx_irq_retrigger(struct irq_data *d)
|
||||
{
|
||||
set_sr(1 << d->hwirq, intset);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
|
||||
const struct cpumask *dest, bool force)
|
||||
{
|
||||
unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
|
||||
|
||||
set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
static struct irq_chip xtensa_mx_irq_chip = {
|
||||
.name = "xtensa-mx",
|
||||
.irq_enable = xtensa_mx_irq_enable,
|
||||
.irq_disable = xtensa_mx_irq_disable,
|
||||
.irq_mask = xtensa_mx_irq_mask,
|
||||
.irq_unmask = xtensa_mx_irq_unmask,
|
||||
.irq_ack = xtensa_mx_irq_ack,
|
||||
.irq_retrigger = xtensa_mx_irq_retrigger,
|
||||
.irq_set_affinity = xtensa_mx_irq_set_affinity,
|
||||
};
|
||||
|
||||
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
|
||||
{
|
||||
struct irq_domain *root_domain =
|
||||
irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
|
||||
&xtensa_mx_irq_domain_ops,
|
||||
&xtensa_mx_irq_chip);
|
||||
irq_set_default_host(root_domain);
|
||||
secondary_init_irq();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init xtensa_mx_init(struct device_node *np,
|
||||
struct device_node *interrupt_parent)
|
||||
{
|
||||
struct irq_domain *root_domain =
|
||||
irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
|
||||
&xtensa_mx_irq_chip);
|
||||
irq_set_default_host(root_domain);
|
||||
secondary_init_irq();
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);
|
108
drivers/irqchip/irq-xtensa-pic.c
Normal file
108
drivers/irqchip/irq-xtensa-pic.c
Normal file
|
@ -0,0 +1,108 @@
|
|||
/*
|
||||
* Xtensa built-in interrupt controller
|
||||
*
|
||||
* Copyright (C) 2002 - 2013 Tensilica, Inc.
|
||||
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
* Kevin Chea
|
||||
*/
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
unsigned int cached_irq_mask;
|
||||
|
||||
/*
|
||||
* Device Tree IRQ specifier translation function which works with one or
|
||||
* two cell bindings. First cell value maps directly to the hwirq number.
|
||||
* Second cell if present specifies whether hwirq number is external (1) or
|
||||
* internal (0).
|
||||
*/
|
||||
static int xtensa_pic_irq_domain_xlate(struct irq_domain *d,
|
||||
struct device_node *ctrlr,
|
||||
const u32 *intspec, unsigned int intsize,
|
||||
unsigned long *out_hwirq, unsigned int *out_type)
|
||||
{
|
||||
return xtensa_irq_domain_xlate(intspec, intsize,
|
||||
intspec[0], intspec[0],
|
||||
out_hwirq, out_type);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops xtensa_irq_domain_ops = {
|
||||
.xlate = xtensa_pic_irq_domain_xlate,
|
||||
.map = xtensa_irq_map,
|
||||
};
|
||||
|
||||
static void xtensa_irq_mask(struct irq_data *d)
|
||||
{
|
||||
cached_irq_mask &= ~(1 << d->hwirq);
|
||||
set_sr(cached_irq_mask, intenable);
|
||||
}
|
||||
|
||||
static void xtensa_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
cached_irq_mask |= 1 << d->hwirq;
|
||||
set_sr(cached_irq_mask, intenable);
|
||||
}
|
||||
|
||||
static void xtensa_irq_enable(struct irq_data *d)
|
||||
{
|
||||
variant_irq_enable(d->hwirq);
|
||||
xtensa_irq_unmask(d);
|
||||
}
|
||||
|
||||
static void xtensa_irq_disable(struct irq_data *d)
|
||||
{
|
||||
xtensa_irq_mask(d);
|
||||
variant_irq_disable(d->hwirq);
|
||||
}
|
||||
|
||||
static void xtensa_irq_ack(struct irq_data *d)
|
||||
{
|
||||
set_sr(1 << d->hwirq, intclear);
|
||||
}
|
||||
|
||||
static int xtensa_irq_retrigger(struct irq_data *d)
|
||||
{
|
||||
set_sr(1 << d->hwirq, intset);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static struct irq_chip xtensa_irq_chip = {
|
||||
.name = "xtensa",
|
||||
.irq_enable = xtensa_irq_enable,
|
||||
.irq_disable = xtensa_irq_disable,
|
||||
.irq_mask = xtensa_irq_mask,
|
||||
.irq_unmask = xtensa_irq_unmask,
|
||||
.irq_ack = xtensa_irq_ack,
|
||||
.irq_retrigger = xtensa_irq_retrigger,
|
||||
};
|
||||
|
||||
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
|
||||
{
|
||||
struct irq_domain *root_domain =
|
||||
irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
|
||||
&xtensa_irq_domain_ops, &xtensa_irq_chip);
|
||||
irq_set_default_host(root_domain);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init xtensa_pic_init(struct device_node *np,
|
||||
struct device_node *interrupt_parent)
|
||||
{
|
||||
struct irq_domain *root_domain =
|
||||
irq_domain_add_linear(np, NR_IRQS, &xtensa_irq_domain_ops,
|
||||
&xtensa_irq_chip);
|
||||
irq_set_default_host(root_domain);
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(xtensa_irq_chip, "cdns,xtensa-pic", xtensa_pic_init);
|
126
drivers/irqchip/irq-zevio.c
Normal file
126
drivers/irqchip/irq-zevio.c
Normal file
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
* linux/drivers/irqchip/irq-zevio.c
|
||||
*
|
||||
* Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
#include <asm/mach/irq.h>
|
||||
#include <asm/exception.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
#define IO_STATUS 0x000
|
||||
#define IO_RAW_STATUS 0x004
|
||||
#define IO_ENABLE 0x008
|
||||
#define IO_DISABLE 0x00C
|
||||
#define IO_CURRENT 0x020
|
||||
#define IO_RESET 0x028
|
||||
#define IO_MAX_PRIOTY 0x02C
|
||||
|
||||
#define IO_IRQ_BASE 0x000
|
||||
#define IO_FIQ_BASE 0x100
|
||||
|
||||
#define IO_INVERT_SEL 0x200
|
||||
#define IO_STICKY_SEL 0x204
|
||||
#define IO_PRIORITY_SEL 0x300
|
||||
|
||||
#define MAX_INTRS 32
|
||||
#define FIQ_START MAX_INTRS
|
||||
|
||||
static struct irq_domain *zevio_irq_domain;
|
||||
static void __iomem *zevio_irq_io;
|
||||
|
||||
static void zevio_irq_ack(struct irq_data *irqd)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
|
||||
struct irq_chip_regs *regs =
|
||||
&container_of(irqd->chip, struct irq_chip_type, chip)->regs;
|
||||
|
||||
readl(gc->reg_base + regs->ack);
|
||||
}
|
||||
|
||||
static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
int irqnr;
|
||||
|
||||
while (readl(zevio_irq_io + IO_STATUS)) {
|
||||
irqnr = readl(zevio_irq_io + IO_CURRENT);
|
||||
handle_domain_irq(zevio_irq_domain, irqnr, regs);
|
||||
};
|
||||
}
|
||||
|
||||
static void __init zevio_init_irq_base(void __iomem *base)
|
||||
{
|
||||
/* Disable all interrupts */
|
||||
writel(~0, base + IO_DISABLE);
|
||||
|
||||
/* Accept interrupts of all priorities */
|
||||
writel(0xF, base + IO_MAX_PRIOTY);
|
||||
|
||||
/* Reset existing interrupts */
|
||||
readl(base + IO_RESET);
|
||||
}
|
||||
|
||||
static int __init zevio_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
|
||||
struct irq_chip_generic *gc;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(zevio_irq_io || zevio_irq_domain))
|
||||
return -EBUSY;
|
||||
|
||||
zevio_irq_io = of_iomap(node, 0);
|
||||
BUG_ON(!zevio_irq_io);
|
||||
|
||||
/* Do not invert interrupt status bits */
|
||||
writel(~0, zevio_irq_io + IO_INVERT_SEL);
|
||||
|
||||
/* Disable sticky interrupts */
|
||||
writel(0, zevio_irq_io + IO_STICKY_SEL);
|
||||
|
||||
/* We don't use IRQ priorities. Set each IRQ to highest priority. */
|
||||
memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
|
||||
|
||||
/* Init IRQ and FIQ */
|
||||
zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
|
||||
zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
|
||||
|
||||
zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
|
||||
&irq_generic_chip_ops, NULL);
|
||||
BUG_ON(!zevio_irq_domain);
|
||||
|
||||
ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
|
||||
"zevio_intc", handle_level_irq,
|
||||
clr, 0, IRQ_GC_INIT_MASK_CACHE);
|
||||
BUG_ON(ret);
|
||||
|
||||
gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
|
||||
gc->reg_base = zevio_irq_io;
|
||||
gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
|
||||
gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
|
||||
gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
|
||||
gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
|
||||
gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
|
||||
gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
|
||||
gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
|
||||
|
||||
set_handle_irq(zevio_handle_irq);
|
||||
|
||||
pr_info("TI-NSPIRE classic IRQ controller\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
|
29
drivers/irqchip/irqchip.c
Normal file
29
drivers/irqchip/irqchip.c
Normal file
|
@ -0,0 +1,29 @@
|
|||
/*
|
||||
* Copyright (C) 2012 Thomas Petazzoni
|
||||
*
|
||||
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/irqchip.h>
|
||||
|
||||
/*
|
||||
* This special of_device_id is the sentinel at the end of the
|
||||
* of_device_id[] array of all irqchips. It is automatically placed at
|
||||
* the end of the array by the linker, thanks to being part of a
|
||||
* special section.
|
||||
*/
|
||||
static const struct of_device_id
|
||||
irqchip_of_match_end __used __section(__irqchip_of_table_end);
|
||||
|
||||
extern struct of_device_id __irqchip_of_table[];
|
||||
|
||||
void __init irqchip_init(void)
|
||||
{
|
||||
of_irq_init(__irqchip_of_table);
|
||||
}
|
28
drivers/irqchip/irqchip.h
Normal file
28
drivers/irqchip/irqchip.h
Normal file
|
@ -0,0 +1,28 @@
|
|||
/*
|
||||
* Copyright (C) 2012 Thomas Petazzoni
|
||||
*
|
||||
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
|
||||
#ifndef _IRQCHIP_H
|
||||
#define _IRQCHIP_H
|
||||
|
||||
#include <linux/of.h>
|
||||
|
||||
/*
|
||||
* This macro must be used by the different irqchip drivers to declare
|
||||
* the association between their DT compatible string and their
|
||||
* initialization function.
|
||||
*
|
||||
* @name: name that must be unique accross all IRQCHIP_DECLARE of the
|
||||
* same file.
|
||||
* @compstr: compatible string of the irqchip driver
|
||||
* @fn: initialization function
|
||||
*/
|
||||
#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
|
||||
|
||||
#endif
|
291
drivers/irqchip/spear-shirq.c
Normal file
291
drivers/irqchip/spear-shirq.c
Normal file
|
@ -0,0 +1,291 @@
|
|||
/*
|
||||
* SPEAr platform shared irq layer source file
|
||||
*
|
||||
* Copyright (C) 2009-2012 ST Microelectronics
|
||||
* Viresh Kumar <viresh.linux@gmail.com>
|
||||
*
|
||||
* Copyright (C) 2012 ST Microelectronics
|
||||
* Shiraz Hashim <shiraz.linux.kernel@gmail.com>
|
||||
*
|
||||
* This file is licensed under the terms of the GNU General Public
|
||||
* License version 2. This program is licensed "as is" without any
|
||||
* warranty of any kind, whether express or implied.
|
||||
*/
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "irqchip.h"
|
||||
|
||||
/*
|
||||
* struct spear_shirq: shared irq structure
|
||||
*
|
||||
* base: Base register address
|
||||
* status_reg: Status register offset for chained interrupt handler
|
||||
* mask_reg: Mask register offset for irq chip
|
||||
* mask: Mask to apply to the status register
|
||||
* virq_base: Base virtual interrupt number
|
||||
* nr_irqs: Number of interrupts handled by this block
|
||||
* offset: Bit offset of the first interrupt
|
||||
* irq_chip: Interrupt controller chip used for this instance,
|
||||
* if NULL group is disabled, but accounted
|
||||
*/
|
||||
struct spear_shirq {
|
||||
void __iomem *base;
|
||||
u32 status_reg;
|
||||
u32 mask_reg;
|
||||
u32 mask;
|
||||
u32 virq_base;
|
||||
u32 nr_irqs;
|
||||
u32 offset;
|
||||
struct irq_chip *irq_chip;
|
||||
};
|
||||
|
||||
/* spear300 shared irq registers offsets and masks */
|
||||
#define SPEAR300_INT_ENB_MASK_REG 0x54
|
||||
#define SPEAR300_INT_STS_MASK_REG 0x58
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(shirq_lock);
|
||||
|
||||
static void shirq_irq_mask(struct irq_data *d)
|
||||
{
|
||||
struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
|
||||
u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
|
||||
u32 __iomem *reg = shirq->base + shirq->mask_reg;
|
||||
|
||||
raw_spin_lock(&shirq_lock);
|
||||
val = readl(reg) & ~(0x1 << shift);
|
||||
writel(val, reg);
|
||||
raw_spin_unlock(&shirq_lock);
|
||||
}
|
||||
|
||||
static void shirq_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
|
||||
u32 val, shift = d->irq - shirq->virq_base + shirq->offset;
|
||||
u32 __iomem *reg = shirq->base + shirq->mask_reg;
|
||||
|
||||
raw_spin_lock(&shirq_lock);
|
||||
val = readl(reg) | (0x1 << shift);
|
||||
writel(val, reg);
|
||||
raw_spin_unlock(&shirq_lock);
|
||||
}
|
||||
|
||||
static struct irq_chip shirq_chip = {
|
||||
.name = "spear-shirq",
|
||||
.irq_mask = shirq_irq_mask,
|
||||
.irq_unmask = shirq_irq_unmask,
|
||||
};
|
||||
|
||||
static struct spear_shirq spear300_shirq_ras1 = {
|
||||
.offset = 0,
|
||||
.nr_irqs = 9,
|
||||
.mask = ((0x1 << 9) - 1) << 0,
|
||||
.irq_chip = &shirq_chip,
|
||||
.status_reg = SPEAR300_INT_STS_MASK_REG,
|
||||
.mask_reg = SPEAR300_INT_ENB_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq *spear300_shirq_blocks[] = {
|
||||
&spear300_shirq_ras1,
|
||||
};
|
||||
|
||||
/* spear310 shared irq registers offsets and masks */
|
||||
#define SPEAR310_INT_STS_MASK_REG 0x04
|
||||
|
||||
static struct spear_shirq spear310_shirq_ras1 = {
|
||||
.offset = 0,
|
||||
.nr_irqs = 8,
|
||||
.mask = ((0x1 << 8) - 1) << 0,
|
||||
.irq_chip = &dummy_irq_chip,
|
||||
.status_reg = SPEAR310_INT_STS_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq spear310_shirq_ras2 = {
|
||||
.offset = 8,
|
||||
.nr_irqs = 5,
|
||||
.mask = ((0x1 << 5) - 1) << 8,
|
||||
.irq_chip = &dummy_irq_chip,
|
||||
.status_reg = SPEAR310_INT_STS_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq spear310_shirq_ras3 = {
|
||||
.offset = 13,
|
||||
.nr_irqs = 1,
|
||||
.mask = ((0x1 << 1) - 1) << 13,
|
||||
.irq_chip = &dummy_irq_chip,
|
||||
.status_reg = SPEAR310_INT_STS_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq spear310_shirq_intrcomm_ras = {
|
||||
.offset = 14,
|
||||
.nr_irqs = 3,
|
||||
.mask = ((0x1 << 3) - 1) << 14,
|
||||
.irq_chip = &dummy_irq_chip,
|
||||
.status_reg = SPEAR310_INT_STS_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq *spear310_shirq_blocks[] = {
|
||||
&spear310_shirq_ras1,
|
||||
&spear310_shirq_ras2,
|
||||
&spear310_shirq_ras3,
|
||||
&spear310_shirq_intrcomm_ras,
|
||||
};
|
||||
|
||||
/* spear320 shared irq registers offsets and masks */
|
||||
#define SPEAR320_INT_STS_MASK_REG 0x04
|
||||
#define SPEAR320_INT_CLR_MASK_REG 0x04
|
||||
#define SPEAR320_INT_ENB_MASK_REG 0x08
|
||||
|
||||
static struct spear_shirq spear320_shirq_ras3 = {
|
||||
.offset = 0,
|
||||
.nr_irqs = 7,
|
||||
.mask = ((0x1 << 7) - 1) << 0,
|
||||
};
|
||||
|
||||
static struct spear_shirq spear320_shirq_ras1 = {
|
||||
.offset = 7,
|
||||
.nr_irqs = 3,
|
||||
.mask = ((0x1 << 3) - 1) << 7,
|
||||
.irq_chip = &dummy_irq_chip,
|
||||
.status_reg = SPEAR320_INT_STS_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq spear320_shirq_ras2 = {
|
||||
.offset = 10,
|
||||
.nr_irqs = 1,
|
||||
.mask = ((0x1 << 1) - 1) << 10,
|
||||
.irq_chip = &dummy_irq_chip,
|
||||
.status_reg = SPEAR320_INT_STS_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq spear320_shirq_intrcomm_ras = {
|
||||
.offset = 11,
|
||||
.nr_irqs = 11,
|
||||
.mask = ((0x1 << 11) - 1) << 11,
|
||||
.irq_chip = &dummy_irq_chip,
|
||||
.status_reg = SPEAR320_INT_STS_MASK_REG,
|
||||
};
|
||||
|
||||
static struct spear_shirq *spear320_shirq_blocks[] = {
|
||||
&spear320_shirq_ras3,
|
||||
&spear320_shirq_ras1,
|
||||
&spear320_shirq_ras2,
|
||||
&spear320_shirq_intrcomm_ras,
|
||||
};
|
||||
|
||||
static void shirq_handler(unsigned irq, struct irq_desc *desc)
|
||||
{
|
||||
struct spear_shirq *shirq = irq_get_handler_data(irq);
|
||||
u32 pend;
|
||||
|
||||
pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
|
||||
pend >>= shirq->offset;
|
||||
|
||||
while (pend) {
|
||||
int irq = __ffs(pend);
|
||||
|
||||
pend &= ~(0x1 << irq);
|
||||
generic_handle_irq(shirq->virq_base + irq);
|
||||
}
|
||||
}
|
||||
|
||||
static void __init spear_shirq_register(struct spear_shirq *shirq,
|
||||
int parent_irq)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!shirq->irq_chip)
|
||||
return;
|
||||
|
||||
irq_set_chained_handler(parent_irq, shirq_handler);
|
||||
irq_set_handler_data(parent_irq, shirq);
|
||||
|
||||
for (i = 0; i < shirq->nr_irqs; i++) {
|
||||
irq_set_chip_and_handler(shirq->virq_base + i,
|
||||
shirq->irq_chip, handle_simple_irq);
|
||||
set_irq_flags(shirq->virq_base + i, IRQF_VALID);
|
||||
irq_set_chip_data(shirq->virq_base + i, shirq);
|
||||
}
|
||||
}
|
||||
|
||||
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
|
||||
struct device_node *np)
|
||||
{
|
||||
int i, parent_irq, virq_base, hwirq = 0, nr_irqs = 0;
|
||||
struct irq_domain *shirq_domain;
|
||||
void __iomem *base;
|
||||
|
||||
base = of_iomap(np, 0);
|
||||
if (!base) {
|
||||
pr_err("%s: failed to map shirq registers\n", __func__);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
for (i = 0; i < block_nr; i++)
|
||||
nr_irqs += shirq_blocks[i]->nr_irqs;
|
||||
|
||||
virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
|
||||
if (IS_ERR_VALUE(virq_base)) {
|
||||
pr_err("%s: irq desc alloc failed\n", __func__);
|
||||
goto err_unmap;
|
||||
}
|
||||
|
||||
shirq_domain = irq_domain_add_legacy(np, nr_irqs, virq_base, 0,
|
||||
&irq_domain_simple_ops, NULL);
|
||||
if (WARN_ON(!shirq_domain)) {
|
||||
pr_warn("%s: irq domain init failed\n", __func__);
|
||||
goto err_free_desc;
|
||||
}
|
||||
|
||||
for (i = 0; i < block_nr; i++) {
|
||||
shirq_blocks[i]->base = base;
|
||||
shirq_blocks[i]->virq_base = irq_find_mapping(shirq_domain,
|
||||
hwirq);
|
||||
|
||||
parent_irq = irq_of_parse_and_map(np, i);
|
||||
spear_shirq_register(shirq_blocks[i], parent_irq);
|
||||
hwirq += shirq_blocks[i]->nr_irqs;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_desc:
|
||||
irq_free_descs(virq_base, nr_irqs);
|
||||
err_unmap:
|
||||
iounmap(base);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static int __init spear300_shirq_of_init(struct device_node *np,
|
||||
struct device_node *parent)
|
||||
{
|
||||
return shirq_init(spear300_shirq_blocks,
|
||||
ARRAY_SIZE(spear300_shirq_blocks), np);
|
||||
}
|
||||
IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);
|
||||
|
||||
static int __init spear310_shirq_of_init(struct device_node *np,
|
||||
struct device_node *parent)
|
||||
{
|
||||
return shirq_init(spear310_shirq_blocks,
|
||||
ARRAY_SIZE(spear310_shirq_blocks), np);
|
||||
}
|
||||
IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);
|
||||
|
||||
static int __init spear320_shirq_of_init(struct device_node *np,
|
||||
struct device_node *parent)
|
||||
{
|
||||
return shirq_init(spear320_shirq_blocks,
|
||||
ARRAY_SIZE(spear320_shirq_blocks), np);
|
||||
}
|
||||
IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);
|
Loading…
Add table
Add a link
Reference in a new issue