Fixed MTP to work with TWRP

commit f6dfaef42e by awab228, 2018-06-19 23:16:04 +02:00
50820 changed files with 20846062 additions and 0 deletions

drivers/pci/host/Kconfig (new file, 98 lines)

@@ -0,0 +1,98 @@
menu "PCI host controller drivers"
depends on PCI
config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
select PCIE_DW
depends on OF && HAS_IOMEM && TI_PIPE3
help
Enables support for the PCIe controller in the DRA7xx SoC. There
are two instances of the PCIe controller in the DRA7xx, and the
controller can act as both EP and RC. The driver reuses the DesignWare core.
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_DOVE
depends on OF
config PCIE_DW
bool
config PCI_EXYNOS
bool "Samsung Exynos PCIe controller"
depends on SOC_EXYNOS8890
select PCIEPORTBUS
select PCIE_DW
config PCI_EXYNOS_TEST
bool "EXYNOS PCIe driver test"
depends on PCI_EXYNOS
config PCI_IMX6
bool "Freescale i.MX6 PCIe controller"
depends on SOC_IMX6Q
select PCIEPORTBUS
select PCIE_DW
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
There are 3 internal PCI controllers available with a single
built-in EHCI/OHCI host controller present on each one.
config PCI_RCAR_GEN2_PCIE
bool "Renesas R-Car PCIe controller"
depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
help
Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
depends on ARM && OF
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX
select PCIEPORTBUS
select PCIE_DW
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
config PCI_KEYSTONE
bool "TI Keystone PCIe controller"
depends on ARCH_KEYSTONE
select PCIE_DW
select PCIEPORTBUS
help
Say Y here if you want to enable PCI controller support on Keystone
SoCs. The PCI controller on Keystone is based on DesignWare hardware,
so the driver reuses the DesignWare core functions.
config PCIE_XILINX
bool "Xilinx AXI PCIe host bridge support"
depends on ARCH_ZYNQ
help
Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
Host Bridge driver.
config PCI_XGENE
bool "X-Gene PCIe controller"
depends on ARCH_XGENE
depends on OF
select PCIEPORTBUS
help
Say Y here if you want internal PCI support on APM X-Gene SoC.
There are 5 internal PCIe ports available. Each port is GEN3 capable
and has lane widths varying from x1 to x8.
endmenu
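Most of these entries select PCIE_DW because the SoC drivers below are thin glue over the shared DesignWare core. As a rough sketch (the "my_pcie" names are illustrative, not part of any driver in this commit), the glue boils down to filling in a pcie_host_ops:

#include "pcie-designware.h"

/* SoC-specific link-up test; the register polled here is hypothetical */
static int my_pcie_link_up(struct pcie_port *pp)
{
        return 0;
}

static void my_pcie_host_init(struct pcie_port *pp)
{
        dw_pcie_setup_rc(pp);           /* common DesignWare RC setup */
        if (IS_ENABLED(CONFIG_PCI_MSI))
                dw_pcie_msi_init(pp);   /* common MSI setup */
}

static struct pcie_host_ops my_pcie_host_ops = {
        .link_up = my_pcie_link_up,
        .host_init = my_pcie_host_init,
};

pci-dra7xx.c and pci-imx6.c below follow exactly this shape.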

drivers/pci/host/Makefile (new file, 13 lines)

@@ -0,0 +1,13 @@
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o

drivers/pci/host/pci-dra7xx.c (new file, 458 lines)

@@ -0,0 +1,458 @@
/*
* pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
*
* Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
*
* Authors: Kishon Vijay Abraham I <kishon@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
/* PCIe controller wrapper DRA7XX configuration registers */
#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
#define ERR_SYS BIT(0)
#define ERR_FATAL BIT(1)
#define ERR_NONFATAL BIT(2)
#define ERR_COR BIT(3)
#define ERR_AXI BIT(4)
#define ERR_ECRC BIT(5)
#define PME_TURN_OFF BIT(8)
#define PME_TO_ACK BIT(9)
#define PM_PME BIT(10)
#define LINK_REQ_RST BIT(11)
#define LINK_UP_EVT BIT(12)
#define CFG_BME_EVT BIT(13)
#define CFG_MSE_EVT BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038
#define INTA BIT(0)
#define INTB BIT(1)
#define INTC BIT(2)
#define INTD BIT(3)
#define MSI BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
#define LTSSM_EN 0x1
#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
#define LINK_UP BIT(16)
struct dra7xx_pcie {
void __iomem *base;
struct phy **phy;
int phy_count;
struct device *dev;
struct pcie_port pp;
};
#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)
static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
return readl(pcie->base + offset);
}
static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
u32 value)
{
writel(value, pcie->base + offset);
}
static int dra7xx_pcie_link_up(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
return !!(reg & LINK_UP);
}
static int dra7xx_pcie_establish_link(struct pcie_port *pp)
{
u32 reg;
unsigned int retries = 1000;
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link is already up\n");
return 0;
}
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
while (retries--) {
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
if (reg & LINK_UP)
break;
usleep_range(10, 20);
}
/* retries is unsigned and wraps past zero on timeout; test the link bit itself */
if (!(reg & LINK_UP)) {
dev_err(pp->dev, "link is not up\n");
return -ETIMEDOUT;
}
return 0;
}
static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
~INTERRUPTS);
dra7xx_pcie_writel(dra7xx,
PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
~LEG_EP_INTERRUPTS & ~MSI);
if (IS_ENABLED(CONFIG_PCI_MSI))
dra7xx_pcie_writel(dra7xx,
PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI);
else
dra7xx_pcie_writel(dra7xx,
PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
LEG_EP_INTERRUPTS);
}
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
dw_pcie_setup_rc(pp);
dra7xx_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(pp);
}
static struct pcie_host_ops dra7xx_pcie_host_ops = {
.link_up = dra7xx_pcie_link_up,
.host_init = dra7xx_pcie_host_init,
};
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
static const struct irq_domain_ops intx_domain_ops = {
.map = dra7xx_pcie_intx_map,
};
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
struct device *dev = pp->dev;
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
return -ENODEV; /* PTR_ERR(NULL) would be 0, masking the failure */
}
pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
&intx_domain_ops, pp);
if (!pp->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return PTR_ERR(pp->irq_domain);
}
return 0;
}
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
switch (reg) {
case MSI:
dw_handle_msi_irq(pp);
break;
case INTA:
case INTB:
case INTC:
case INTD:
generic_handle_irq(irq_find_mapping(pp->irq_domain, ffs(reg)));
break;
}
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
return IRQ_HANDLED;
}
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
struct dra7xx_pcie *dra7xx = arg;
u32 reg;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
if (reg & ERR_SYS)
dev_dbg(dra7xx->dev, "System Error\n");
if (reg & ERR_FATAL)
dev_dbg(dra7xx->dev, "Fatal Error\n");
if (reg & ERR_NONFATAL)
dev_dbg(dra7xx->dev, "Non Fatal Error\n");
if (reg & ERR_COR)
dev_dbg(dra7xx->dev, "Correctable Error\n");
if (reg & ERR_AXI)
dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
if (reg & ERR_ECRC)
dev_dbg(dra7xx->dev, "ECRC Error\n");
if (reg & PME_TURN_OFF)
dev_dbg(dra7xx->dev,
"Power Management Event Turn-Off message received\n");
if (reg & PME_TO_ACK)
dev_dbg(dra7xx->dev,
"Power Management Turn-Off Ack message received\n");
if (reg & PM_PME)
dev_dbg(dra7xx->dev,
"PM Power Management Event message received\n");
if (reg & LINK_REQ_RST)
dev_dbg(dra7xx->dev, "Link Request Reset\n");
if (reg & LINK_UP_EVT)
dev_dbg(dra7xx->dev, "Link-up state change\n");
if (reg & CFG_BME_EVT)
dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
if (reg & CFG_MSE_EVT)
dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
return IRQ_HANDLED;
}
static int add_pcie_port(struct dra7xx_pcie *dra7xx,
struct platform_device *pdev)
{
int ret;
struct pcie_port *pp;
struct resource *res;
struct device *dev = &pdev->dev;
pp = &dra7xx->pp;
pp->dev = dev;
pp->ops = &dra7xx_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 1);
if (pp->irq < 0) {
dev_err(dev, "missing IRQ resource\n");
return -EINVAL;
}
ret = devm_request_irq(&pdev->dev, pp->irq,
dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
"dra7-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
return ret;
}
if (!IS_ENABLED(CONFIG_PCI_MSI)) {
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return ret;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
if (!pp->dbi_base)
return -ENOMEM;
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dra7xx->dev, "failed to initialize host\n");
return ret;
}
return 0;
}
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
u32 reg;
int ret;
int irq;
int i;
int phy_count;
struct phy **phy;
void __iomem *base;
struct resource *res;
struct dra7xx_pcie *dra7xx;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
if (!dra7xx)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "missing IRQ resource\n");
return -EINVAL;
}
ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
base = devm_ioremap_nocache(dev, res->start, resource_size(res));
if (!base)
return -ENOMEM;
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 0) {
dev_err(dev, "unable to find the strings\n");
return phy_count;
}
phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
if (!phy)
return -ENOMEM;
for (i = 0; i < phy_count; i++) {
snprintf(name, sizeof(name), "pcie-phy%d", i);
phy[i] = devm_phy_get(dev, name);
if (IS_ERR(phy[i]))
return PTR_ERR(phy[i]);
ret = phy_init(phy[i]);
if (ret < 0)
goto err_phy;
ret = phy_power_on(phy[i]);
if (ret < 0) {
phy_exit(phy[i]);
goto err_phy;
}
}
dra7xx->base = base;
dra7xx->phy = phy;
dra7xx->dev = dev;
dra7xx->phy_count = phy_count;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (IS_ERR_VALUE(ret)) {
dev_err(dev, "pm_runtime_get_sync failed\n");
goto err_phy;
}
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
platform_set_drvdata(pdev, dra7xx);
ret = add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_add_port;
return 0;
err_add_port:
pm_runtime_put(dev);
pm_runtime_disable(dev);
err_phy:
while (--i >= 0) {
phy_power_off(phy[i]);
phy_exit(phy[i]);
}
return ret;
}
static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
{
struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev);
struct pcie_port *pp = &dra7xx->pp;
struct device *dev = &pdev->dev;
int count = dra7xx->phy_count;
if (pp->irq_domain)
irq_domain_remove(pp->irq_domain);
pm_runtime_put(dev);
pm_runtime_disable(dev);
while (count--) {
phy_power_off(dra7xx->phy[count]);
phy_exit(dra7xx->phy[count]);
}
return 0;
}
static const struct of_device_id of_dra7xx_pcie_match[] = {
{ .compatible = "ti,dra7-pcie", },
{},
};
MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
static struct platform_driver dra7xx_pcie_driver = {
.remove = __exit_p(dra7xx_pcie_remove),
.driver = {
.name = "dra7-pcie",
.owner = THIS_MODULE,
.of_match_table = of_dra7xx_pcie_match,
},
};
module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_DESCRIPTION("TI PCIe controller driver");
MODULE_LICENSE("GPL v2");

(file diff suppressed because it is too large)

drivers/pci/host/pcie-exynos.h (new file, 139 lines)

@@ -0,0 +1,139 @@
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __PCIE_EXYNOS_H
#define __PCIE_EXYNOS_H
#define MAX_TIMEOUT 2000
#define ID_MASK 0xffff
#define MAX_RC_NUM 2
#if defined(CONFIG_SOC_EXYNOS8890)
#define PCI_DEVICE_ID_EXYNOS 0xa544
#define GPIO_DEBUG_SFR 0x15601068
#else
#define PCI_DEVICE_ID_EXYNOS 0xecec
#define GPIO_DEBUG_SFR 0x0
#endif
#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
#define PCIE_BUS_PRIV_DATA(pdev) \
((struct pcie_port *)pdev->bus->sysdata)
struct exynos_pcie_clks {
struct clk *pcie_clks[10];
struct clk *phy_clks[3];
};
enum exynos_pcie_state {
STATE_LINK_DOWN = 0,
STATE_LINK_UP_TRY,
STATE_LINK_DOWN_TRY,
STATE_LINK_UP,
};
struct exynos_pcie {
void __iomem *elbi_base;
void __iomem *phy_base;
void __iomem *block_base;
void __iomem *rc_dbi_base;
void __iomem *phy_pcs_base;
struct regmap *pmureg;
int perst_gpio;
int ch_num;
int pcie_clk_num;
int phy_clk_num;
enum exynos_pcie_state state;
int probe_ok;
int l1ss_enable;
int linkdown_cnt;
int idle_ip_index;
bool use_msi;
bool pcie_changed;
struct workqueue_struct *pcie_wq;
struct exynos_pcie_clks clks;
struct pcie_port pp;
struct pci_dev *pci_dev;
struct pci_saved_state *pci_saved_configs;
struct notifier_block lpa_nb;
struct delayed_work work;
struct exynos_pcie_register_event *event_reg;
#ifdef CONFIG_PCI_EXYNOS_TEST
int wlan_gpio;
int bt_gpio;
#endif
#ifdef CONFIG_PM_DEVFREQ
unsigned int int_min_lock;
#endif
};
/* PCIe ELBI registers */
#define PCIE_IRQ_PULSE 0x000
#define IRQ_INTA_ASSERT (0x1 << 0)
#define IRQ_INTB_ASSERT (0x1 << 2)
#define IRQ_INTC_ASSERT (0x1 << 4)
#define IRQ_INTD_ASSERT (0x1 << 6)
#define IRQ_RADM_PM_TO_ACK (0x1 << 18)
#define IRQ_L1_EXIT (0x1 << 24)
#define PCIE_IRQ_LEVEL 0x004
#define IRQ_MSI_CTRL (0x1 << 1)
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
#define IRQ_MSI_ENABLE (0x1 << 1)
#define IRQ_LINK_DOWN (0x1 << 30)
#define IRQ_LINKDOWN_ENABLE (0x1 << 30)
#define PCIE_IRQ_EN_SPECIAL 0x014
#define PCIE_SW_WAKE 0x018
#define PCIE_IRQ_LEVEL_FOR_READ 0x020
#define L1_2_IDLE_STATE (0x1 << 23)
#define PCIE_APP_LTSSM_ENABLE 0x02c
#define PCIE_L1_BUG_FIX_ENABLE 0x038
#define PCIE_APP_REQ_EXIT_L1 0x040
#define PCIE_CXPL_DEBUG_INFO_H 0x070
#define PCIE_ELBI_RDLH_LINKUP 0x074
#define PCIE_ELBI_LTSSM_DISABLE 0x0
#define PCIE_ELBI_LTSSM_ENABLE 0x1
#define PCIE_PM_DSTATE 0x88
#define PCIE_D0_UNINIT_STATE 0x4
#define PCIE_APP_REQ_EXIT_L1_MODE 0xF4
#define APP_REQ_EXIT_L1_MODE 0x1
#define L1_REQ_NAK_CONTROL (0x3 << 4)
#define PCIE_HISTORY_REG(x) (0x138 + ((x) * 0x4))
#define LTSSM_STATE(x) (((x) >> 16) & 0x3f)
#define PM_DSTATE(x) (((x) >> 8) & 0x7)
#define L1SUB_STATE(x) (((x) >> 0) & 0x7)
#define PCIE_LINKDOWN_RST_CTRL_SEL 0x1B8
#define PCIE_LINKDOWN_RST_MANUAL (0x1 << 1)
#define PCIE_LINKDOWN_RST_FSM (0x1 << 0)
#define PCIE_SOFT_AUXCLK_SEL_CTRL 0x1C4
#define CORE_CLK_GATING (0x1 << 0)
#define PCIE_SOFT_CORE_RESET 0x1D0
#define PCIE_STATE_HISTORY_CHECK 0x274
#define HISTORY_BUFFER_ENABLE (0x1 << 0)
#define HISTORY_BUFFER_CLEAR (0x1 << 1)
#define PCIE_QCH_SEL 0x2C8
#define CLOCK_GATING_IN_L12 0x1
#define CLOCK_NOT_GATING 0x3
#define CLOCK_GATING_MASK 0x3
#define PCIE_DMA_MONITOR1 0x2CC
#define PCIE_DMA_MONITOR2 0x2D0
#define PCIE_DMA_MONITOR3 0x2D4
#define FSYS1_MON_SEL_MASK 0xf
#define PCIE_MON_SEL_MASK 0xff
/* PCIe PMU registers */
#define PCIE_PHY_CONTROL 0x071C
#define PCIE_PHY_CONTROL_MASK 0x1
#endif
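The PCIE_HISTORY_REG() words can be unpacked with the field extractors above; a quick sketch of decoding one entry (the surrounding code is illustrative):

u32 history = readl(exynos_pcie->elbi_base + PCIE_HISTORY_REG(0));

pr_info("LTSSM %#x, D-state %u, L1SS %u\n",
        LTSSM_STATE(history),   /* bits 21:16 */
        PM_DSTATE(history),     /* bits 10:8 */
        L1SUB_STATE(history));  /* bits 2:0 */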


@@ -0,0 +1,77 @@
/*
* PCIe clock control driver for Samsung EXYNOS7420
*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Kyoungil Kim <ki0351.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
static int exynos_pcie_clock_get(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
struct exynos_pcie_clks *clks = &exynos_pcie->clks;
int i;
if (exynos_pcie->ch_num == 0) {
clks->pcie_clks[0] = devm_clk_get(pp->dev, "gate_pciewifi0");
clks->phy_clks[0] = devm_clk_get(pp->dev, "wifi0_dig_refclk");
clks->phy_clks[1] = devm_clk_get(pp->dev, "pcie_wifi0_tx0");
clks->phy_clks[2] = devm_clk_get(pp->dev, "pcie_wifi0_rx0");
} else if (exynos_pcie->ch_num == 1) {
clks->pcie_clks[0] = devm_clk_get(pp->dev, "gate_pciewifi1");
clks->phy_clks[0] = devm_clk_get(pp->dev, "wifi1_dig_refclk");
clks->phy_clks[1] = devm_clk_get(pp->dev, "pcie_wifi1_tx0");
clks->phy_clks[2] = devm_clk_get(pp->dev, "pcie_wifi1_rx0");
}
for (i = 0; i < exynos_pcie->pcie_clk_num; i++) {
if (IS_ERR(clks->pcie_clks[i])) {
dev_err(pp->dev, "Failed to get pcie clock\n");
return -ENODEV;
}
}
for (i = 0; i < exynos_pcie->phy_clk_num; i++) {
if (IS_ERR(clks->phy_clks[i])) {
dev_err(pp->dev, "Failed to get pcie clock\n");
return -ENODEV;
}
}
return 0;
}
static int exynos_pcie_clock_enable(struct pcie_port *pp, int enable)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
struct exynos_pcie_clks *clks = &exynos_pcie->clks;
int i;
if (enable) {
for (i = 0; i < exynos_pcie->pcie_clk_num; i++)
clk_prepare_enable(clks->pcie_clks[i]);
} else {
for (i = 0; i < exynos_pcie->pcie_clk_num; i++)
clk_disable_unprepare(clks->pcie_clks[i]);
}
return 0;
}
static int exynos_pcie_phy_clock_enable(struct pcie_port *pp, int enable)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
struct exynos_pcie_clks *clks = &exynos_pcie->clks;
int i;
if (enable) {
for (i = 0; i < exynos_pcie->phy_clk_num; i++)
clk_prepare_enable(clks->phy_clks[i]);
} else {
for (i = 0; i < exynos_pcie->phy_clk_num; i++)
clk_disable_unprepare(clks->phy_clks[i]);
}
return 0;
}
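The enable paths above ignore the return value of clk_prepare_enable(); a safer variant (a sketch only, reusing the same clks layout) unwinds the clocks already enabled when one fails:

static int exynos_pcie_clock_enable_checked(struct pcie_port *pp)
{
        struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
        struct exynos_pcie_clks *clks = &exynos_pcie->clks;
        int i, ret;

        for (i = 0; i < exynos_pcie->pcie_clk_num; i++) {
                ret = clk_prepare_enable(clks->pcie_clks[i]);
                if (ret)
                        goto err;
        }
        return 0;
err:
        while (--i >= 0)
                clk_disable_unprepare(clks->pcie_clks[i]);
        return ret;
}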


@@ -0,0 +1,69 @@
/*
* PCIe phy driver for Samsung EXYNOS8890
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Kyoungil Kim <ki0351.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
void exynos_pcie_phy_config(void *phy_base_regs, void *phy_pcs_base_regs,
void *sysreg_base_regs, void *elbi_base_regs)
{
/* 26MHz gen1 */
u32 cmn_config_val[26] = {0x01, 0x0F, 0xA6, 0x31, 0x90, 0x62, 0x20, 0x00, 0x00, 0xA7, 0x0A,
0x37, 0x20, 0x08, 0xEF, 0xFC, 0x96, 0x14, 0x00, 0x10, 0x60, 0x01,
0x00, 0x00, 0x04, 0x10};
u32 trsv_config_val[41] = {0x31, 0xF4, 0xF4, 0x80, 0x25, 0x40, 0xD8, 0x03, 0x35, 0x55, 0x4C,
0xC3, 0x10, 0x54, 0x70, 0xC5, 0x00, 0x2F, 0x38, 0xA4, 0x00, 0x3B,
0x30, 0x9A, 0x64, 0x00, 0x1F, 0x83, 0x1B, 0x01, 0xE0, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1D, 0x00};
int i;
writel(readl(sysreg_base_regs) & ~(0x1 << 1), sysreg_base_regs);
writel((((readl(sysreg_base_regs + 0xC) & ~(0xf << 4)) & ~(0xf << 2)) | (0x3 << 2)) & ~(0x1 << 1), sysreg_base_regs + 0xC);
/* pcs_g_rst */
writel(0x1, elbi_base_regs + 0x288);
udelay(10);
writel(0x0, elbi_base_regs + 0x288);
udelay(10);
writel(0x1, elbi_base_regs + 0x288);
udelay(10);
/* PHY Common block Setting */
for (i = 0; i < 26; i++)
writel(cmn_config_val[i], phy_base_regs + (i * 4));
/* PHY Tranceiver/Receiver block Setting */
for (i = 0; i < 41; i++)
writel(trsv_config_val[i], phy_base_regs + ((0x30 + i) * 4));
/* tx amplitude control */
writel(0x14, phy_base_regs + (0x5C * 4));
/* tx latency */
writel(0x70, phy_pcs_base_regs + 0xF8);
/* PRGM_TIMEOUT_L1SS_VAL Setting */
writel(readl(phy_pcs_base_regs + 0xC) | (0x1 << 4), phy_pcs_base_regs + 0xC);
/* PCIE_MAC CMN_RST */
writel(0x1, elbi_base_regs + 0x290);
udelay(10);
writel(0x0, elbi_base_regs + 0x290);
udelay(10);
writel(0x1, elbi_base_regs + 0x290);
udelay(10);
/* PCIE_PHY PCS&PMA(CMN)_RST */
writel(0x1, elbi_base_regs + 0x28C);
udelay(10);
writel(0x0, elbi_base_regs + 0x28C);
udelay(10);
writel(0x1, elbi_base_regs + 0x28C);
udelay(10);
}
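The three reset pulses above repeat the same 1-0-1 write/delay sequence at offsets 0x288, 0x290 and 0x28C; a small helper (illustrative name) states the pattern once:

static void exynos_pcie_pulse_reset(void __iomem *reg)
{
        writel(0x1, reg);
        udelay(10);
        writel(0x0, reg);       /* toggle low... */
        udelay(10);
        writel(0x1, reg);       /* ...and back high, as each sequence above does */
        udelay(10);
}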

drivers/pci/host/pci-host-generic.c (new file, 388 lines)

@@ -0,0 +1,388 @@
/*
* Simple, generic PCI host controller driver targeting firmware-initialised
* systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2014 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
struct gen_pci_cfg_bus_ops {
u32 bus_shift;
void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
};
struct gen_pci_cfg_windows {
struct resource res;
struct resource bus_range;
void __iomem **win;
const struct gen_pci_cfg_bus_ops *ops;
};
struct gen_pci {
struct pci_host_bridge host;
struct gen_pci_cfg_windows cfg;
struct list_head resources;
};
static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
struct pci_sys_data *sys = bus->sysdata;
struct gen_pci *pci = sys->private_data;
resource_size_t idx = bus->number - pci->cfg.bus_range.start;
return pci->cfg.win[idx] + ((devfn << 8) | where);
}
static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.map_bus = gen_pci_map_cfg_bus_cam,
};
static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
unsigned int devfn,
int where)
{
struct pci_sys_data *sys = bus->sysdata;
struct gen_pci *pci = sys->private_data;
resource_size_t idx = bus->number - pci->cfg.bus_range.start;
return pci->cfg.win[idx] + ((devfn << 12) | where);
}
static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
.bus_shift = 20,
.map_bus = gen_pci_map_cfg_bus_ecam,
};
static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
void __iomem *addr;
struct pci_sys_data *sys = bus->sysdata;
struct gen_pci *pci = sys->private_data;
addr = pci->cfg.ops->map_bus(bus, devfn, where);
switch (size) {
case 1:
*val = readb(addr);
break;
case 2:
*val = readw(addr);
break;
default:
*val = readl(addr);
}
return PCIBIOS_SUCCESSFUL;
}
static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
void __iomem *addr;
struct pci_sys_data *sys = bus->sysdata;
struct gen_pci *pci = sys->private_data;
addr = pci->cfg.ops->map_bus(bus, devfn, where);
switch (size) {
case 1:
writeb(val, addr);
break;
case 2:
writew(val, addr);
break;
default:
writel(val, addr);
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops gen_pci_ops = {
.read = gen_pci_config_read,
.write = gen_pci_config_write,
};
static const struct of_device_id gen_pci_of_match[] = {
{ .compatible = "pci-host-cam-generic",
.data = &gen_pci_cfg_cam_bus_ops },
{ .compatible = "pci-host-ecam-generic",
.data = &gen_pci_cfg_ecam_bus_ops },
{ },
};
MODULE_DEVICE_TABLE(of, gen_pci_of_match);
static int gen_pci_calc_io_offset(struct device *dev,
struct of_pci_range *range,
struct resource *res,
resource_size_t *offset)
{
static atomic_t wins = ATOMIC_INIT(0);
int err, idx, max_win;
unsigned int window;
if (!PAGE_ALIGNED(range->cpu_addr))
return -EINVAL;
max_win = (IO_SPACE_LIMIT + 1) / SZ_64K;
idx = atomic_inc_return(&wins);
if (idx > max_win)
return -ENOSPC;
window = (idx - 1) * SZ_64K;
err = pci_ioremap_io(window, range->cpu_addr);
if (err)
return err;
of_pci_range_to_resource(range, dev->of_node, res);
res->start = window;
res->end = res->start + range->size - 1;
*offset = window - range->pci_addr;
return 0;
}
static int gen_pci_calc_mem_offset(struct device *dev,
struct of_pci_range *range,
struct resource *res,
resource_size_t *offset)
{
of_pci_range_to_resource(range, dev->of_node, res);
*offset = range->cpu_addr - range->pci_addr;
return 0;
}
static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
{
struct pci_host_bridge_window *win;
list_for_each_entry(win, &pci->resources, list)
release_resource(win->res);
pci_free_resource_list(&pci->resources);
}
static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
{
struct of_pci_range range;
struct of_pci_range_parser parser;
int err, res_valid = 0;
struct device *dev = pci->host.dev.parent;
struct device_node *np = dev->of_node;
if (of_pci_range_parser_init(&parser, np)) {
dev_err(dev, "missing \"ranges\" property\n");
return -EINVAL;
}
for_each_of_pci_range(&parser, &range) {
struct resource *parent, *res;
resource_size_t offset;
u32 restype = range.flags & IORESOURCE_TYPE_BITS;
res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res) {
err = -ENOMEM;
goto out_release_res;
}
switch (restype) {
case IORESOURCE_IO:
parent = &ioport_resource;
err = gen_pci_calc_io_offset(dev, &range, res, &offset);
break;
case IORESOURCE_MEM:
parent = &iomem_resource;
err = gen_pci_calc_mem_offset(dev, &range, res, &offset);
res_valid |= !(res->flags & IORESOURCE_PREFETCH || err);
break;
default:
err = -EINVAL;
continue;
}
if (err) {
dev_warn(dev,
"error %d: failed to add resource [type 0x%x, %lld bytes]\n",
err, restype, range.size);
continue;
}
err = request_resource(parent, res);
if (err)
goto out_release_res;
pci_add_resource_offset(&pci->resources, res, offset);
}
if (!res_valid) {
dev_err(dev, "non-prefetchable memory resource required\n");
err = -EINVAL;
goto out_release_res;
}
return 0;
out_release_res:
gen_pci_release_of_pci_ranges(pci);
return err;
}
static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
{
int err;
u8 bus_max;
resource_size_t busn;
struct resource *bus_range;
struct device *dev = pci->host.dev.parent;
struct device_node *np = dev->of_node;
if (of_pci_parse_bus_range(np, &pci->cfg.bus_range))
pci->cfg.bus_range = (struct resource) {
.name = np->name,
.start = 0,
.end = 0xff,
.flags = IORESOURCE_BUS,
};
err = of_address_to_resource(np, 0, &pci->cfg.res);
if (err) {
dev_err(dev, "missing \"reg\" property\n");
return err;
}
pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range),
sizeof(*pci->cfg.win), GFP_KERNEL);
if (!pci->cfg.win)
return -ENOMEM;
/* Limit the bus-range to fit within reg */
bus_max = pci->cfg.bus_range.start +
(resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end,
bus_max);
/* Map our Configuration Space windows */
if (!devm_request_mem_region(dev, pci->cfg.res.start,
resource_size(&pci->cfg.res),
"Configuration Space"))
return -ENOMEM;
bus_range = &pci->cfg.bus_range;
for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
u32 idx = busn - bus_range->start;
u32 sz = 1 << pci->cfg.ops->bus_shift;
pci->cfg.win[idx] = devm_ioremap(dev,
pci->cfg.res.start + busn * sz,
sz);
if (!pci->cfg.win[idx])
return -ENOMEM;
}
/* Register bus resource */
pci_add_resource(&pci->resources, bus_range);
return 0;
}
static int gen_pci_setup(int nr, struct pci_sys_data *sys)
{
struct gen_pci *pci = sys->private_data;
list_splice_init(&pci->resources, &sys->resources);
return 1;
}
static int gen_pci_probe(struct platform_device *pdev)
{
int err;
const char *type;
const struct of_device_id *of_id;
const int *prop;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
struct hw_pci hw = {
.nr_controllers = 1,
.private_data = (void **)&pci,
.setup = gen_pci_setup,
.map_irq = of_irq_parse_and_map_pci,
.ops = &gen_pci_ops,
};
if (!pci)
return -ENOMEM;
type = of_get_property(np, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
dev_err(dev, "invalid \"device_type\" %s\n", type);
return -EINVAL;
}
prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL);
if (prop) {
if (*prop)
pci_add_flags(PCI_PROBE_ONLY);
else
pci_clear_flags(PCI_PROBE_ONLY);
}
of_id = of_match_node(gen_pci_of_match, np);
pci->cfg.ops = of_id->data;
pci->host.dev.parent = dev;
INIT_LIST_HEAD(&pci->host.windows);
INIT_LIST_HEAD(&pci->resources);
/* Parse our PCI ranges and request their resources */
err = gen_pci_parse_request_of_pci_ranges(pci);
if (err)
return err;
/* Parse and map our Configuration Space windows */
err = gen_pci_parse_map_cfg_windows(pci);
if (err) {
gen_pci_release_of_pci_ranges(pci);
return err;
}
pci_common_init_dev(dev, &hw);
return 0;
}
static struct platform_driver gen_pci_driver = {
.driver = {
.name = "pci-host-generic",
.owner = THIS_MODULE,
.of_match_table = gen_pci_of_match,
},
.probe = gen_pci_probe,
};
module_platform_driver(gen_pci_driver);
MODULE_DESCRIPTION("Generic PCI host driver");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");

drivers/pci/host/pci-imx6.c (new file, 662 lines)

@@ -0,0 +1,662 @@
/*
* PCIe host controller driver for Freescale i.MX6 SoCs
*
* Copyright (C) 2013 Kosagi
* http://www.kosagi.com
*
* Author: Sean Cross <xobs@kosagi.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include "pcie-designware.h"
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
struct imx6_pcie {
int reset_gpio;
struct clk *pcie_bus;
struct clk *pcie_phy;
struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
void __iomem *mem_base;
};
/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC 16
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
u32 val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
val = readl(dbi_base + PCIE_PHY_STAT);
val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
wait_counter++;
if (val == exp_val)
return 0;
udelay(1);
} while (wait_counter < max_iterations);
return -ETIMEDOUT;
}
static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
u32 val;
int ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
writel(val, dbi_base + PCIE_PHY_CTRL);
ret = pcie_phy_poll_ack(dbi_base, 1);
if (ret)
return ret;
val = addr << PCIE_PHY_CTRL_DATA_LOC;
writel(val, dbi_base + PCIE_PHY_CTRL);
ret = pcie_phy_poll_ack(dbi_base, 0);
if (ret)
return ret;
return 0;
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
u32 val, phy_ctl;
int ret;
ret = pcie_phy_wait_ack(dbi_base, addr);
if (ret)
return ret;
/* assert Read signal */
phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
ret = pcie_phy_poll_ack(dbi_base, 1);
if (ret)
return ret;
val = readl(dbi_base + PCIE_PHY_STAT);
*data = val & 0xffff;
/* deassert Read signal */
writel(0x00, dbi_base + PCIE_PHY_CTRL);
ret = pcie_phy_poll_ack(dbi_base, 0);
if (ret)
return ret;
return 0;
}
static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
u32 var;
int ret;
/* write addr */
/* cap addr */
ret = pcie_phy_wait_ack(dbi_base, addr);
if (ret)
return ret;
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
/* capture data */
var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
writel(var, dbi_base + PCIE_PHY_CTRL);
ret = pcie_phy_poll_ack(dbi_base, 1);
if (ret)
return ret;
/* deassert cap data */
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
/* wait for ack de-assertion */
ret = pcie_phy_poll_ack(dbi_base, 0);
if (ret)
return ret;
/* assert wr signal */
var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
/* wait for ack */
ret = pcie_phy_poll_ack(dbi_base, 1);
if (ret)
return ret;
/* deassert wr signal */
var = data << PCIE_PHY_CTRL_DATA_LOC;
writel(var, dbi_base + PCIE_PHY_CTRL);
/* wait for ack de-assertion */
ret = pcie_phy_poll_ack(dbi_base, 0);
if (ret)
return ret;
writel(0x0, dbi_base + PCIE_PHY_CTRL);
return 0;
}
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
unsigned int fsr, struct pt_regs *regs)
{
return 0;
}
static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
u32 val, gpr1, gpr12;
/*
* If the bootloader already enabled the link we need some special
* handling to get the core back into a state where it is safe to
* touch it for configuration. As there is no dedicated reset signal
* wired up for MX6QDL, we need to manually force LTSSM into "detect"
* state before completely disabling LTSSM, which is a prerequisite
* for core configuration.
*
* If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
* indication that the bootloader activated the link.
*/
regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
(gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
val = readl(pp->dbi_base + PCIE_PL_PFLR);
val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
val |= PCIE_PL_PFLR_FORCE_LINK;
writel(val, pp->dbi_base + PCIE_PL_PFLR);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
}
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
return 0;
}
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
int ret;
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_phy clock\n");
goto err_pcie_phy;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
if (ret) {
dev_err(pp->dev, "unable to enable pcie_bus clock\n");
goto err_pcie_bus;
}
ret = clk_prepare_enable(imx6_pcie->pcie);
if (ret) {
dev_err(pp->dev, "unable to enable pcie clock\n");
goto err_pcie;
}
/* power up core phy and enable ref clock */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
/*
* the async reset input needs the ref clock to sync internally;
* if the ref clock arrives only after reset, the internally synced
* reset window is too short to meet the requirement, so add a
* ~10us delay here.
*/
udelay(10);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
/* allow the clocks to stabilize */
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
gpio_set_value(imx6_pcie->reset_gpio, 0);
msleep(100);
gpio_set_value(imx6_pcie->reset_gpio, 1);
}
return 0;
err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
return ret;
}
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
/* configure constant input signal to the pcie ctrl and phy */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
}
static int imx6_pcie_wait_for_link(struct pcie_port *pp)
{
int count = 200;
while (!dw_pcie_link_up(pp)) {
usleep_range(100, 1000);
if (--count)
continue;
dev_err(pp->dev, "phy link never came up\n");
dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
return -EINVAL;
}
return 0;
}
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
return dw_handle_msi_irq(pp);
}
static int imx6_pcie_start_link(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
uint32_t tmp;
int ret, count;
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
/* Start LTSSM. */
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
ret = imx6_pcie_wait_for_link(pp);
if (ret)
return ret;
/* Allow Gen2 mode after the link is up. */
tmp = readl(pp->dbi_base + PCIE_RC_LCR);
tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
writel(tmp, pp->dbi_base + PCIE_RC_LCR);
/*
* Start Directed Speed Change so the best possible speed both link
* partners support can be negotiated.
*/
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
count = 200;
while (count--) {
tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
/* Test if the speed change finished. */
if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
break;
usleep_range(100, 1000);
}
/* Make sure link training is finished as well! */
if (count)
ret = imx6_pcie_wait_for_link(pp);
else
ret = -EINVAL;
if (ret) {
dev_err(pp->dev, "Failed to bring link up!\n");
} else {
tmp = readl(pp->dbi_base + 0x80);
dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
}
return ret;
}
static void imx6_pcie_host_init(struct pcie_port *pp)
{
imx6_pcie_assert_core_reset(pp);
imx6_pcie_init_phy(pp);
imx6_pcie_deassert_core_reset(pp);
dw_pcie_setup_rc(pp);
imx6_pcie_start_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
}
static void imx6_pcie_reset_phy(struct pcie_port *pp)
{
uint32_t temp;
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
usleep_range(2000, 3000);
pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
}
static int imx6_pcie_link_up(struct pcie_port *pp)
{
u32 rc, debug_r0, rx_valid;
int count = 5;
/*
* Test if the PHY reports that the link is up and also that the LTSSM
* training finished. There are three possible states of the link when
* this code is called:
* 1) The link is DOWN (unlikely)
* The link didn't come up yet for some reason. This usually means
* we have a real problem somewhere. Reset the PHY and exit. This
* state calls for inspection of the DEBUG registers.
* 2) The link is UP, but still in LTSSM training
* Wait for the training to finish, which should take a very short
* time. If the training does not finish, we have a problem and we
* need to inspect the DEBUG registers. If the training does finish,
* the link is up and operating correctly.
* 3) The link is UP and no longer in LTSSM training
* The link is up and operating correctly.
*/
while (1) {
rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
break;
if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
return 1;
if (!count--)
break;
dev_dbg(pp->dev, "Link is up, but still in training\n");
/*
* Wait a little bit, then re-check if the link finished
* the training.
*/
usleep_range(1000, 2000);
}
/*
* From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
* Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
* If (MAC/LTSSM.state == Recovery.RcvrLock)
* && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
* to gen2 is stuck
*/
pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
if (rx_valid & 0x01)
return 0;
if ((debug_r0 & 0x3f) != 0x0d)
return 0;
dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
imx6_pcie_reset_phy(pp);
return 0;
}
static struct pcie_host_ops imx6_pcie_host_ops = {
.link_up = imx6_pcie_link_up,
.host_init = imx6_pcie_host_init,
};
static int __init imx6_add_pcie_port(struct pcie_port *pp,
struct platform_device *pdev)
{
int ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq <= 0) {
dev_err(&pdev->dev, "failed to get MSI irq\n");
return -ENODEV;
}
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
imx6_pcie_msi_handler,
IRQF_SHARED, "mx6-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
return -ENODEV;
}
}
pp->root_bus_nr = -1;
pp->ops = &imx6_pcie_host_ops;
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
return ret;
}
return 0;
}
static int __init imx6_pcie_probe(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie;
struct pcie_port *pp;
struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
int ret;
imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
return -ENOMEM;
pp = &imx6_pcie->pp;
pp->dev = &pdev->dev;
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
/* Fetch GPIOs */
imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
GPIOF_OUT_INIT_LOW, "PCIe reset");
if (ret) {
dev_err(&pdev->dev, "unable to get reset gpio\n");
return ret;
}
}
/* Fetch clocks */
imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
if (IS_ERR(imx6_pcie->pcie_phy)) {
dev_err(&pdev->dev,
"pcie_phy clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_phy);
}
imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
if (IS_ERR(imx6_pcie->pcie_bus)) {
dev_err(&pdev->dev,
"pcie_bus clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie_bus);
}
imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
if (IS_ERR(imx6_pcie->pcie)) {
dev_err(&pdev->dev,
"pcie clock source missing or invalid\n");
return PTR_ERR(imx6_pcie->pcie);
}
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
dev_err(&pdev->dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx6_pcie->iomuxc_gpr);
}
ret = imx6_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
platform_set_drvdata(pdev, imx6_pcie);
return 0;
}
static void imx6_pcie_shutdown(struct platform_device *pdev)
{
struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
imx6_pcie_assert_core_reset(&imx6_pcie->pp);
}
static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", },
{},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
static struct platform_driver imx6_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
.owner = THIS_MODULE,
.of_match_table = imx6_pcie_of_match,
},
.shutdown = imx6_pcie_shutdown,
};
/* Freescale PCIe driver does not allow module unload */
static int __init imx6_pcie_init(void)
{
return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
}
module_init(imx6_pcie_init);
MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
MODULE_LICENSE("GPL v2");

drivers/pci/host/pci-keystone-dw.c (new file, 516 lines)

@@ -0,0 +1,516 @@
/*
* Designware application register space functions for Keystone PCI controller
*
* Copyright (C) 2013-2014 Texas Instruments., Ltd.
* http://www.ti.com
*
* Author: Murali Karicheri <m-karicheri2@ti.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include "pcie-designware.h"
#include "pci-keystone.h"
/* Application register defines */
#define LTSSM_EN_VAL 1
#define LTSSM_STATE_MASK 0x1f
#define LTSSM_STATE_L0 0x11
#define DBI_CS2_EN_VAL 0x20
#define OB_XLAT_EN_VAL 2
/* Application registers */
#define CMD_STATUS 0x004
#define CFG_SETUP 0x008
#define OB_SIZE 0x030
#define CFG_PCIM_WIN_SZ_IDX 3
#define CFG_PCIM_WIN_CNT 32
#define SPACE0_REMOTE_CFG_OFFSET 0x1000
#define OB_OFFSET_INDEX(n) (0x200 + (8 * n))
#define OB_OFFSET_HI(n) (0x204 + (8 * n))
/* IRQ register defines */
#define IRQ_EOI 0x050
#define IRQ_STATUS 0x184
#define IRQ_ENABLE_SET 0x188
#define IRQ_ENABLE_CLR 0x18c
#define MSI_IRQ 0x054
#define MSI0_IRQ_STATUS 0x104
#define MSI0_IRQ_ENABLE_SET 0x108
#define MSI0_IRQ_ENABLE_CLR 0x10c
#define MSI_IRQ_OFFSET 4
/* Config space registers */
#define DEBUG0 0x728
#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
return sys->private_data;
}
static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
u32 *bit_pos)
{
*reg_offset = offset % 8;
*bit_pos = offset >> 3;
}
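/*
 * Worked example of the mapping above: MSI vector 17 gives
 * reg_offset = 17 % 8 = 1 (the MSI1 register) and bit_pos = 17 >> 3 = 2,
 * which matches ks_dw_pcie_handle_msi_irq() below, where the MSI1 status
 * bits show vectors 1, 9, 17, 25.
 */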
u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
return ks_pcie->app.start + MSI_IRQ;
}
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
u32 pending, vector;
int src, virq;
pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
/*
* MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
* show vectors 1, 9, 17, 25; and so forth
*/
for (src = 0; src < 4; src++) {
if (BIT(src) & pending) {
vector = offset + (src << 3);
virq = irq_linear_revmap(pp->irq_domain, vector);
dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
src, vector, virq);
generic_handle_irq(virq);
}
}
}
static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
{
u32 offset, reg_offset, bit_pos;
struct keystone_pcie *ks_pcie;
unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
msi = irq_get_msi_desc(irq);
pp = sys_to_pcie(msi->dev->bus->sysdata);
ks_pcie = to_keystone_pcie(pp);
offset = irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
}
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
u32 reg_offset, bit_pos;
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
}
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
u32 reg_offset, bit_pos;
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
writel(BIT(bit_pos),
ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
}
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
msi = irq_get_msi_desc(irq);
pp = sys_to_pcie(msi->dev->bus->sysdata);
ks_pcie = to_keystone_pcie(pp);
offset = irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (msi->msi_attrib.maskbit)
mask_msi_irq(d);
}
ks_dw_pcie_msi_clear_irq(pp, offset);
}
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
struct keystone_pcie *ks_pcie;
unsigned int irq = d->irq;
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
msi = irq_get_msi_desc(irq);
pp = sys_to_pcie(msi->dev->bus->sysdata);
ks_pcie = to_keystone_pcie(pp);
offset = irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (msi->msi_attrib.maskbit)
unmask_msi_irq(d);
}
ks_dw_pcie_msi_set_irq(pp, offset);
}
static struct irq_chip ks_dw_pcie_msi_irq_chip = {
.name = "Keystone-PCIe-MSI-IRQ",
.irq_ack = ks_dw_pcie_msi_irq_ack,
.irq_mask = ks_dw_pcie_msi_irq_mask,
.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};
static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
.map = ks_dw_pcie_msi_map,
};
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
int i;
pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
MAX_MSI_IRQS,
&ks_dw_pcie_msi_domain_ops,
chip);
if (!pp->irq_domain) {
dev_err(pp->dev, "irq domain init failed\n");
return -ENXIO;
}
for (i = 0; i < MAX_MSI_IRQS; i++)
irq_create_mapping(pp->irq_domain, i);
return 0;
}
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
{
int i;
for (i = 0; i < MAX_LEGACY_IRQS; i++)
writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
}
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
{
struct pcie_port *pp = &ks_pcie->pp;
u32 pending;
int virq;
pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
if (BIT(0) & pending) {
virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
virq);
generic_handle_irq(virq);
}
/* EOI the INTx interrupt */
writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}
static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
{
}
static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
{
}
static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
.name = "Keystone-PCI-Legacy-IRQ",
.irq_ack = ks_dw_pcie_ack_legacy_irq,
.irq_mask = ks_dw_pcie_mask_legacy_irq,
.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
};
static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hw_irq)
{
irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, d->host_data);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
.map = ks_dw_pcie_init_legacy_irq_map,
.xlate = irq_domain_xlate_onetwocell,
};
/**
* ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
* registers
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
{
u32 val;
writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
reg_virt + CMD_STATUS);
do {
val = readl(reg_virt + CMD_STATUS);
} while (!(val & DBI_CS2_EN_VAL));
}
/**
* ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
*
* Since modification of dbi_cs2 involves different clock domain, read the
* status back to ensure the transition is complete.
*/
static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
{
u32 val;
writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
reg_virt + CMD_STATUS);
do {
val = readl(reg_virt + CMD_STATUS);
} while (val & DBI_CS2_EN_VAL);
}
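/*
 * Typical use of the pair above (see ks_dw_pcie_setup_rc_app_regs() and
 * ks_dw_pcie_v3_65_scan_bus() below): bracket BAR updates between set and
 * clear so the writes land in the overlaid BAR mask registers:
 *
 *	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
 *	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
 *	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
 */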
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
u32 start = pp->mem.start, end = pp->mem.end;
int i, tr_size;
/* Disable BARs for inbound access */
ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
/* Set outbound translation size per window division */
writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
/* Using Direct 1:1 mapping of RC <-> PCI memory space */
for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
start += tr_size;
}
/* Enable OB translation */
writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
ks_pcie->va_app_base + CMD_STATUS);
}
/**
* ks_pcie_cfg_setup() - Set up configuration space address for a device
*
* @ks_pcie: ptr to keystone_pcie structure
* @bus: Bus number the device is residing on
* @devfn: device, function number info
*
* Forms and returns the address of configuration space mapped in PCIESS
* address space 0. Also configures CFG_SETUP for remote configuration space
* access.
*
 * The address space has two regions for configuration access - local and
 * remote. The local region is used for bus 0, where the RC itself sits.
 * Devices on bus 1 are on our (logical) secondary bus and get TYPE 0
 * access, while anything on bus > 1 is reached through the remote region
 * with TYPE 1 access. CFG_SETUP is needed only for remote configuration
 * access.
*/
static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
unsigned int devfn)
{
u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
struct pcie_port *pp = &ks_pcie->pp;
u32 regval;
if (bus == 0)
return pp->dbi_base;
regval = (bus << 16) | (device << 8) | function;
	/*
	 * Bus 1 is a virtual bus directly behind the RC, so it only ever
	 * needs TYPE 0 access; anything further downstream (bus > 1) needs
	 * TYPE 1 access, selected by setting bit 24 of CFG_SETUP.
	 */
if (bus != 1)
regval |= BIT(24);
writel(regval, ks_pcie->va_app_base + CFG_SETUP);
return pp->va_cfg0_base;
}
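/*
 * Illustrative note (added): for bus 2, device 3, function 1 the CFG_SETUP
 * value computed above is (2 << 16) | (3 << 8) | 1 = 0x20301; since
 * bus != 1, bit 24 is also set to request TYPE 1 access, giving 0x1020301.
 */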
int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 *val)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
u8 bus_num = bus->number;
void __iomem *addr;
addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
}
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 val)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
u8 bus_num = bus->number;
void __iomem *addr;
addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
}
/**
* ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
*
* This sets BAR0 to enable inbound access for MSI_IRQ register
*/
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
/* Configure and set up BAR0 */
ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
/* Enable BAR0 */
writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
/*
* For BAR0, just setting bus address for inbound writes (MSI) should
* be sufficient. Use physical address to avoid any conflicts.
*/
writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
}
/**
* ks_dw_pcie_link_up() - Check if link up
*/
int ks_dw_pcie_link_up(struct pcie_port *pp)
{
u32 val = readl(pp->dbi_base + DEBUG0);
return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
}
void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
{
u32 val;
/* Disable Link training */
val = readl(ks_pcie->va_app_base + CMD_STATUS);
val &= ~LTSSM_EN_VAL;
	writel(val, ks_pcie->va_app_base + CMD_STATUS);
/* Initiate Link Training */
val = readl(ks_pcie->va_app_base + CMD_STATUS);
writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
}
/**
* ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
*
* Ioremap the register resources, initialize legacy irq domain
* and call dw_pcie_v3_65_host_init() API to initialize the Keystone
* PCI host controller.
*/
int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np)
{
struct pcie_port *pp = &ks_pcie->pp;
struct platform_device *pdev = to_platform_device(pp->dev);
struct resource *res;
/* Index 0 is the config reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pp->dbi_base = devm_ioremap_resource(pp->dev, res);
if (IS_ERR(pp->dbi_base))
return PTR_ERR(pp->dbi_base);
	/*
	 * va_cfg0_base and va_cfg1_base are set to the same address; both
	 * are used by the pcie rd/wr_other_conf functions.
	 */
pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
pp->va_cfg1_base = pp->va_cfg0_base;
/* Index 1 is the application reg. space address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ks_pcie->app = *res;
ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
if (IS_ERR(ks_pcie->va_app_base))
return PTR_ERR(ks_pcie->va_app_base);
/* Create legacy IRQ domain */
ks_pcie->legacy_irq_domain =
irq_domain_add_linear(ks_pcie->legacy_intc_np,
MAX_LEGACY_IRQS,
&ks_dw_pcie_legacy_irq_domain_ops,
NULL);
if (!ks_pcie->legacy_irq_domain) {
dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
return -EINVAL;
}
return dw_pcie_host_init(pp);
}

@ -0,0 +1,415 @@
/*
* PCIe host controller driver for Texas Instruments Keystone SoCs
*
* Copyright (C) 2013-2014 Texas Instruments., Ltd.
* http://www.ti.com
*
* Author: Murali Karicheri <m-karicheri2@ti.com>
* Implementation based on pci-exynos.c and pcie-designware.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include "pcie-designware.h"
#include "pci-keystone.h"
#define DRIVER_NAME "keystone-pcie"
/* driver specific constants */
#define MAX_MSI_HOST_IRQS 8
#define MAX_LEGACY_HOST_IRQS 4
/* DEV_STAT_CTRL */
#define PCIE_CAP_BASE 0x70
/* PCIE controller device IDs */
#define PCIE_RC_K2HK 0xb008
#define PCIE_RC_K2E 0xb009
#define PCIE_RC_K2L 0xb00a
#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
static void quirk_limit_mrrs(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct pci_dev *bridge = bus->self;
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
if (pci_is_root_bus(bus))
return;
/* look for the host bridge */
while (!pci_is_root_bus(bus)) {
bridge = bus->self;
bus = bus->parent;
}
if (bridge) {
		/*
		 * The Keystone PCI controller has a h/w limitation of
		 * 256 bytes maximum read request size. It can't handle
		 * anything larger, so force this limit on all downstream
		 * devices.
		 */
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
dev_info(&dev->dev, "limiting MRRS to 256\n");
pcie_set_readrq(dev, 256);
}
}
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct pcie_port *pp = &ks_pcie->pp;
int count = 200;
dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "Link already up\n");
return 0;
}
ks_dw_pcie_initiate_link_train(ks_pcie);
/* check if the link is up or not */
while (!dw_pcie_link_up(pp)) {
usleep_range(100, 1000);
if (--count) {
ks_dw_pcie_initiate_link_train(ks_pcie);
continue;
}
dev_err(pp->dev, "phy link never came up\n");
return -EINVAL;
}
return 0;
}
static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
u32 offset = irq - ks_pcie->msi_host_irqs[0];
struct pcie_port *pp = &ks_pcie->pp;
struct irq_chip *chip = irq_desc_get_chip(desc);
dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
* interrupt driver handler so we need to take care of mask/unmask and
* ack operation.
*/
chained_irq_enter(chip, desc);
ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
chained_irq_exit(chip, desc);
}
/**
* ks_pcie_legacy_irq_handler() - Handle legacy interrupt
* @irq: IRQ line for legacy interrupts
* @desc: Pointer to irq descriptor
*
* Traverse through pending legacy interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct pcie_port *pp = &ks_pcie->pp;
u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
* interrupt driver handler so we need to take care of mask/unmask and
* ack operation.
*/
chained_irq_enter(chip, desc);
ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
char *controller, int *num_irqs)
{
int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
struct device *dev = ks_pcie->pp.dev;
struct device_node *np_pcie = dev->of_node, **np_temp;
if (!strcmp(controller, "msi-interrupt-controller"))
legacy = 0;
if (legacy) {
np_temp = &ks_pcie->legacy_intc_np;
max_host_irqs = MAX_LEGACY_HOST_IRQS;
host_irqs = &ks_pcie->legacy_host_irqs[0];
} else {
np_temp = &ks_pcie->msi_intc_np;
max_host_irqs = MAX_MSI_HOST_IRQS;
host_irqs = &ks_pcie->msi_host_irqs[0];
}
/* interrupt controller is in a child node */
*np_temp = of_find_node_by_name(np_pcie, controller);
if (!(*np_temp)) {
dev_err(dev, "Node for %s is absent\n", controller);
goto out;
}
temp = of_irq_count(*np_temp);
if (!temp)
goto out;
	if (temp > max_host_irqs)
		dev_warn(dev, "Too many %s interrupts defined %d\n",
			(legacy ? "legacy" : "MSI"), temp);
	/*
	 * Support up to max_host_irqs; in the DT these are at index 0 to 3
	 * (legacy) or 0 to 7 (MSI).
	 */
for (temp = 0; temp < max_host_irqs; temp++) {
host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
		if (host_irqs[temp] <= 0)
break;
}
if (temp) {
*num_irqs = temp;
ret = 0;
}
out:
return ret;
}
static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
{
int i;
/* Legacy IRQ */
for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
ks_pcie_legacy_irq_handler);
}
ks_dw_pcie_enable_legacy_irqs(ks_pcie);
/* MSI IRQ */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
ks_pcie_msi_irq_handler);
irq_set_handler_data(ks_pcie->msi_host_irqs[i],
ks_pcie);
}
}
}
/*
 * When a PCI device does not exist during config cycles, the Keystone host
 * gets a bus error instead of returning 0xffffffff. This handler always
 * returns 0 for this kind of fault.
 */
static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
unsigned long instr = *(unsigned long *) instruction_pointer(regs);
if ((instr & 0x0e100090) == 0x00100090) {
int reg = (instr >> 12) & 15;
regs->uregs[reg] = -1;
regs->ARM_pc += 4;
}
return 0;
}
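/*
 * Illustrative note (added): the mask above matches ARM load encodings with
 * bits 7 and 4 set (the "extra load" forms). For example, a faulting
 * ldrh r3, [r0] (0xe1d030b0) matches, so r3 (bits 15:12 of the instruction)
 * is forced to 0xffffffff and the PC is advanced past the faulting
 * instruction, emulating the 0xffffffff a PCI host normally returns.
 */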
static void __init ks_pcie_host_init(struct pcie_port *pp)
{
struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
u32 val;
ks_pcie_establish_link(ks_pcie);
ks_dw_pcie_setup_rc_app_regs(ks_pcie);
ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pp->dbi_base + PCI_IO_BASE);
	/* update the Device ID */
writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);
/* update the DEV_STAT_CTRL to publish right mrrs */
val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
val &= ~PCI_EXP_DEVCTL_READRQ;
/* set the mrrs to 256 bytes */
val |= BIT(12);
writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
/*
* PCIe access errors that result into OCP errors are caught by ARM as
* "External aborts"
*/
hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
}
static struct pcie_host_ops keystone_pcie_host_ops = {
.rd_other_conf = ks_dw_pcie_rd_other_conf,
.wr_other_conf = ks_dw_pcie_wr_other_conf,
.link_up = ks_dw_pcie_link_up,
.host_init = ks_pcie_host_init,
.msi_set_irq = ks_dw_pcie_msi_set_irq,
.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
.get_msi_addr = ks_dw_pcie_get_msi_addr,
.msi_host_init = ks_dw_pcie_msi_host_init,
.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
struct pcie_port *pp = &ks_pcie->pp;
int ret;
ret = ks_pcie_get_irq_controller_info(ks_pcie,
"legacy-interrupt-controller",
&ks_pcie->num_legacy_host_irqs);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
ret = ks_pcie_get_irq_controller_info(ks_pcie,
"msi-interrupt-controller",
&ks_pcie->num_msi_host_irqs);
if (ret)
return ret;
}
pp->root_bus_nr = -1;
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
if (ret) {
dev_err(&pdev->dev, "failed to initialize host\n");
return ret;
}
return ret;
}
static const struct of_device_id ks_pcie_of_match[] = {
{
.type = "pci",
.compatible = "ti,keystone-pcie",
},
{ },
};
MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
static int __exit ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
clk_disable_unprepare(ks_pcie->clk);
return 0;
}
static int __init ks_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct keystone_pcie *ks_pcie;
struct pcie_port *pp;
struct resource *res;
void __iomem *reg_p;
struct phy *phy;
int ret = 0;
ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
GFP_KERNEL);
if (!ks_pcie) {
dev_err(dev, "no memory for keystone pcie\n");
return -ENOMEM;
}
pp = &ks_pcie->pp;
/* initialize SerDes Phy if present */
phy = devm_phy_get(dev, "pcie-phy");
if (!IS_ERR_OR_NULL(phy)) {
ret = phy_init(phy);
if (ret < 0)
return ret;
}
/* index 2 is to read PCI DEVICE_ID */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
reg_p = devm_ioremap_resource(dev, res);
if (IS_ERR(reg_p))
return PTR_ERR(reg_p);
ks_pcie->device_id = readl(reg_p) >> 16;
devm_iounmap(dev, reg_p);
devm_release_mem_region(dev, res->start, resource_size(res));
pp->dev = dev;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(ks_pcie->clk)) {
dev_err(dev, "Failed to get pcie rc clock\n");
return PTR_ERR(ks_pcie->clk);
}
ret = clk_prepare_enable(ks_pcie->clk);
if (ret)
return ret;
ret = ks_add_pcie_port(ks_pcie, pdev);
if (ret < 0)
goto fail_clk;
return 0;
fail_clk:
clk_disable_unprepare(ks_pcie->clk);
return ret;
}
static struct platform_driver ks_pcie_driver __refdata = {
.probe = ks_pcie_probe,
.remove = __exit_p(ks_pcie_remove),
.driver = {
.name = "keystone-pcie",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(ks_pcie_of_match),
},
};
module_platform_driver(ks_pcie_driver);
MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
MODULE_DESCRIPTION("Keystone PCIe host controller driver");
MODULE_LICENSE("GPL v2");

@ -0,0 +1,58 @@
/*
* Keystone PCI Controller's common includes
*
* Copyright (C) 2013-2014 Texas Instruments., Ltd.
* http://www.ti.com
*
* Author: Murali Karicheri <m-karicheri2@ti.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define MAX_LEGACY_IRQS 4
#define MAX_MSI_HOST_IRQS 8
#define MAX_LEGACY_HOST_IRQS 4
struct keystone_pcie {
struct clk *clk;
struct pcie_port pp;
/* PCI Device ID */
u32 device_id;
int num_legacy_host_irqs;
int legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
struct device_node *legacy_intc_np;
int num_msi_host_irqs;
int msi_host_irqs[MAX_MSI_HOST_IRQS];
struct device_node *msi_intc_np;
struct irq_domain *legacy_irq_domain;
/* Application register space */
void __iomem *va_app_base;
struct resource app;
};
/* Keystone DW specific MSI controller APIs/definitions */
void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 val);
int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 *val);
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
int ks_dw_pcie_link_up(struct pcie_port *pp);
void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
struct msi_chip *chip);

drivers/pci/host/pci-mvebu.c (1097 lines): diff suppressed because it is too large

@ -0,0 +1,426 @@
/*
* pci-rcar-gen2: internal PCI bus support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
/* AHB-PCI Bridge PCI communication registers */
#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
#define RCAR_PCIAHB_PREFETCH0 0x0
#define RCAR_PCIAHB_PREFETCH4 0x1
#define RCAR_PCIAHB_PREFETCH8 0x2
#define RCAR_PCIAHB_PREFETCH16 0x3
#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1)
#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1)
#define RCAR_AHBPCI_WIN1_HOST (1 << 30)
#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31)
#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
#define RCAR_PCI_INT_SIGTABORT (1 << 0)
#define RCAR_PCI_INT_SIGRETABORT (1 << 1)
#define RCAR_PCI_INT_REMABORT (1 << 2)
#define RCAR_PCI_INT_PERR (1 << 3)
#define RCAR_PCI_INT_SIGSERR (1 << 4)
#define RCAR_PCI_INT_RESERR (1 << 5)
#define RCAR_PCI_INT_WIN1ERR (1 << 12)
#define RCAR_PCI_INT_WIN2ERR (1 << 13)
#define RCAR_PCI_INT_A (1 << 16)
#define RCAR_PCI_INT_B (1 << 17)
#define RCAR_PCI_INT_PME (1 << 19)
#define RCAR_PCI_INT_ALLERRORS	(RCAR_PCI_INT_SIGTABORT | \
				RCAR_PCI_INT_SIGRETABORT | \
				RCAR_PCI_INT_REMABORT | \
				RCAR_PCI_INT_PERR | \
				RCAR_PCI_INT_SIGSERR | \
				RCAR_PCI_INT_RESERR | \
				RCAR_PCI_INT_WIN1ERR | \
				RCAR_PCI_INT_WIN2ERR)
#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1)
#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2)
#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7)
#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17)
#define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \
RCAR_AHB_BUS_MMODE_BYTE_BURST | \
RCAR_AHB_BUS_MMODE_WR_INCR | \
RCAR_AHB_BUS_MMODE_HBUS_REQ | \
RCAR_AHB_BUS_SMODE_READYCTR)
#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
#define RCAR_USBCTR_USBH_RST (1 << 0)
#define RCAR_USBCTR_PCICLK_MASK (1 << 1)
#define RCAR_USBCTR_PLL_RST (1 << 2)
#define RCAR_USBCTR_DIRPD (1 << 8)
#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9)
#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10)
#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10)
#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10)
#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10)
#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10)
#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0)
#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1)
#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12)
#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
struct rcar_pci_priv {
struct device *dev;
void __iomem *reg;
struct resource io_res;
struct resource mem_res;
struct resource *cfg_res;
unsigned busnr;
int irq;
unsigned long window_size;
};
/* PCI configuration space operations */
static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct pci_sys_data *sys = bus->sysdata;
struct rcar_pci_priv *priv = sys->private_data;
int slot, val;
if (sys->busnr != bus->number || PCI_FUNC(devfn))
return NULL;
/* Only one EHCI/OHCI device built-in */
slot = PCI_SLOT(devfn);
if (slot > 2)
return NULL;
/* bridge logic only has registers to 0x40 */
if (slot == 0x0 && where >= 0x40)
return NULL;
val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
return priv->reg + (slot >> 1) * 0x100 + where;
}
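/*
 * Illustrative note (added): slot 0 is the AHB-PCI bridge itself, slots 1
 * and 2 the built-in OHCI/EHCI functions. The WIN1 control write above
 * selects host (bridge) or device configuration space, and
 * (slot >> 1) * 0x100 then picks the 256-byte block within it, so slot 1
 * maps to +0x000 and slot 2 to +0x100.
 */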
static int rcar_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
if (!reg)
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
*val = ioread8(reg);
break;
case 2:
*val = ioread16(reg);
break;
default:
*val = ioread32(reg);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int rcar_pci_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
void __iomem *reg = rcar_pci_cfg_base(bus, devfn, where);
if (!reg)
return PCIBIOS_DEVICE_NOT_FOUND;
switch (size) {
case 1:
iowrite8(val, reg);
break;
case 2:
iowrite16(val, reg);
break;
default:
iowrite32(val, reg);
break;
}
return PCIBIOS_SUCCESSFUL;
}
/* PCI interrupt mapping */
static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pci_sys_data *sys = dev->bus->sysdata;
struct rcar_pci_priv *priv = sys->private_data;
int irq;
irq = of_irq_parse_and_map_pci(dev, slot, pin);
if (!irq)
irq = priv->irq;
return irq;
}
#ifdef CONFIG_PCI_DEBUG
/* if debug enabled, then attach an error handler irq to the bridge */
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
struct rcar_pci_priv *priv = pw;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
if (status & RCAR_PCI_INT_ALLERRORS) {
dev_err(priv->dev, "error irq: status %08x\n", status);
/* clear the error(s) */
iowrite32(status & RCAR_PCI_INT_ALLERRORS,
priv->reg + RCAR_PCI_INT_STATUS_REG);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
{
int ret;
u32 val;
ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
IRQF_SHARED, "error irq", priv);
if (ret) {
dev_err(priv->dev, "cannot claim IRQ for error handling\n");
return;
}
val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG);
val |= RCAR_PCI_INT_ALLERRORS;
iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
}
#else
static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
#endif
/* PCI host controller setup */
static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
{
struct rcar_pci_priv *priv = sys->private_data;
void __iomem *reg = priv->reg;
u32 val;
pm_runtime_enable(priv->dev);
pm_runtime_get_sync(priv->dev);
val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
/* Disable Direct Power Down State and assert reset */
val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
iowrite32(val, reg + RCAR_USBCTR_REG);
udelay(4);
/* De-assert reset and reset PCIAHB window1 size */
val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
/* Setup PCIAHB window1 size */
switch (priv->window_size) {
case SZ_2G:
val |= RCAR_USBCTR_PCIAHB_WIN1_2G;
break;
case SZ_1G:
val |= RCAR_USBCTR_PCIAHB_WIN1_1G;
break;
case SZ_512M:
val |= RCAR_USBCTR_PCIAHB_WIN1_512M;
break;
default:
pr_warn("unknown window size %ld - defaulting to 256M\n",
priv->window_size);
priv->window_size = SZ_256M;
/* fall-through */
case SZ_256M:
val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
break;
}
iowrite32(val, reg + RCAR_USBCTR_REG);
/* Configure AHB master and slave modes */
iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
/* Configure PCI arbiter */
val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
RCAR_PCI_ARBITER_PCIBP_MODE;
iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
/* PCI-AHB mapping: 0x40000000 base */
iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
reg + RCAR_PCIAHB_WIN1_CTR_REG);
/* AHB-PCI mapping: OHCI/EHCI registers */
val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
/* Enable AHB-PCI bridge PCI configuration access */
iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
reg + RCAR_AHBPCI_WIN1_CTR_REG);
/* Set PCI-AHB Window1 address */
iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH,
reg + PCI_BASE_ADDRESS_1);
/* Set AHB-PCI bridge PCI communication area address */
val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
iowrite32(val, reg + PCI_BASE_ADDRESS_0);
val = ioread32(reg + PCI_COMMAND);
val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
iowrite32(val, reg + PCI_COMMAND);
/* Enable PCI interrupts */
iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
reg + RCAR_PCI_INT_ENABLE_REG);
if (priv->irq > 0)
rcar_pci_setup_errirq(priv);
/* Add PCI resources */
pci_add_resource(&sys->resources, &priv->io_res);
pci_add_resource(&sys->resources, &priv->mem_res);
/* Setup bus number based on platform device id / of bus-range */
sys->busnr = priv->busnr;
return 1;
}
static struct pci_ops rcar_pci_ops = {
.read = rcar_pci_read_config,
.write = rcar_pci_write_config,
};
static int rcar_pci_probe(struct platform_device *pdev)
{
struct resource *cfg_res, *mem_res;
struct rcar_pci_priv *priv;
void __iomem *reg;
struct hw_pci hw;
void *hw_private[1];
cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(&pdev->dev, cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!mem_res || !mem_res->start)
return -ENODEV;
priv = devm_kzalloc(&pdev->dev,
sizeof(struct rcar_pci_priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->mem_res = *mem_res;
/*
* The controller does not support/use port I/O,
* so setup a dummy port I/O region here.
*/
priv->io_res.start = priv->mem_res.start;
priv->io_res.end = priv->mem_res.end;
priv->io_res.flags = IORESOURCE_IO;
priv->cfg_res = cfg_res;
priv->irq = platform_get_irq(pdev, 0);
priv->reg = reg;
priv->dev = &pdev->dev;
if (priv->irq < 0) {
dev_err(&pdev->dev, "no valid irq found\n");
return priv->irq;
}
priv->window_size = SZ_1G;
if (pdev->dev.of_node) {
struct resource busnr;
int ret;
ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse bus-range\n");
return ret;
}
priv->busnr = busnr.start;
if (busnr.end != busnr.start)
dev_warn(&pdev->dev, "only one bus number supported\n");
} else {
priv->busnr = pdev->id;
}
hw_private[0] = priv;
memset(&hw, 0, sizeof(hw));
hw.nr_controllers = ARRAY_SIZE(hw_private);
hw.private_data = hw_private;
hw.map_irq = rcar_pci_map_irq;
hw.ops = &rcar_pci_ops;
hw.setup = rcar_pci_setup;
pci_common_init_dev(&pdev->dev, &hw);
return 0;
}
static const struct of_device_id rcar_pci_of_match[] = {
{ .compatible = "renesas,pci-r8a7790", },
{ .compatible = "renesas,pci-r8a7791", },
{ },
};
MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
static struct platform_driver rcar_pci_driver = {
.driver = {
.name = "pci-rcar-gen2",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
.of_match_table = rcar_pci_of_match,
},
.probe = rcar_pci_probe,
};
module_platform_driver(rcar_pci_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");

drivers/pci/host/pci-tegra.c (2147 lines): diff suppressed because it is too large

@ -0,0 +1,664 @@
/*
* APM X-Gene PCIe Driver
*
* Copyright (c) 2014 Applied Micro Circuits Corporation.
*
* Author: Tanmay Inamdar <tinamdar@apm.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/clk-private.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define PCIECORE_CTLANDSTATUS 0x50
#define PIM1_1L 0x80
#define IBAR2 0x98
#define IR2MSK 0x9c
#define PIM2_1L 0xa0
#define IBAR3L 0xb4
#define IR3MSKL 0xbc
#define PIM3_1L 0xc4
#define OMR1BARL 0x100
#define OMR2BARL 0x118
#define OMR3BARL 0x130
#define CFGBARL 0x154
#define CFGBARH 0x158
#define CFGCTL 0x15c
#define RTDID 0x160
#define BRIDGE_CFG_0 0x2000
#define BRIDGE_CFG_4 0x2010
#define BRIDGE_STATUS_0 0x2600
#define LINK_UP_MASK 0x00000100
#define AXI_EP_CFG_ACCESS 0x10000
#define EN_COHERENCY 0xF0000000
#define EN_REG 0x00000001
#define OB_LO_IO 0x00000002
#define XGENE_PCIE_VENDORID 0x10E8
#define XGENE_PCIE_DEVICEID 0xE004
#define SZ_1T (SZ_1G*1024ULL)
#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
struct xgene_pcie_port {
struct device_node *node;
struct device *dev;
struct clk *clk;
void __iomem *csr_base;
void __iomem *cfg_base;
unsigned long cfg_addr;
bool link_up;
};
static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
{
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}
/* PCIe Configuration Out/In */
static inline void xgene_pcie_cfg_out32(void __iomem *addr, int offset, u32 val)
{
writel(val, addr + offset);
}
static inline void xgene_pcie_cfg_out16(void __iomem *addr, int offset, u16 val)
{
u32 val32 = readl(addr + (offset & ~0x3));
switch (offset & 0x3) {
case 2:
val32 &= ~0xFFFF0000;
val32 |= (u32)val << 16;
break;
case 0:
default:
val32 &= ~0xFFFF;
val32 |= val;
break;
}
writel(val32, addr + (offset & ~0x3));
}
static inline void xgene_pcie_cfg_out8(void __iomem *addr, int offset, u8 val)
{
u32 val32 = readl(addr + (offset & ~0x3));
switch (offset & 0x3) {
case 0:
val32 &= ~0xFF;
val32 |= val;
break;
case 1:
val32 &= ~0xFF00;
val32 |= (u32)val << 8;
break;
case 2:
val32 &= ~0xFF0000;
val32 |= (u32)val << 16;
break;
case 3:
default:
val32 &= ~0xFF000000;
val32 |= (u32)val << 24;
break;
}
writel(val32, addr + (offset & ~0x3));
}
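/*
 * Illustrative sketch (added; not part of the original driver): the three
 * cfg_out helpers above are instances of one read-modify-write pattern on a
 * naturally aligned 32-bit register. A hypothetical generic form, assuming
 * 'size' is 1, 2 or 4 and 'offset' is size-aligned:
 */
static inline void __maybe_unused xgene_pcie_cfg_out(void __iomem *addr,
		int offset, int size, u32 val)
{
	int shift = (offset & 0x3) * 8;
	u32 mask = (size == 4) ? ~0U : (((1U << (size * 8)) - 1) << shift);
	u32 val32 = readl(addr + (offset & ~0x3));

	val32 = (val32 & ~mask) | ((val << shift) & mask);
	writel(val32, addr + (offset & ~0x3));
}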
static inline void xgene_pcie_cfg_in32(void __iomem *addr, int offset, u32 *val)
{
*val = readl(addr + offset);
}
static inline void xgene_pcie_cfg_in16(void __iomem *addr, int offset, u32 *val)
{
*val = readl(addr + (offset & ~0x3));
switch (offset & 0x3) {
case 2:
*val >>= 16;
break;
}
*val &= 0xFFFF;
}
static inline void xgene_pcie_cfg_in8(void __iomem *addr, int offset, u32 *val)
{
*val = readl(addr + (offset & ~0x3));
switch (offset & 0x3) {
case 3:
*val = *val >> 24;
break;
case 2:
*val = *val >> 16;
break;
case 1:
*val = *val >> 8;
break;
}
*val &= 0xFF;
}
/*
* When the address bit [17:16] is 2'b01, the Configuration access will be
* treated as Type 1 and it will be forwarded to external PCIe device.
*/
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
struct xgene_pcie_port *port = bus->sysdata;
if (bus->number >= (bus->primary + 1))
return port->cfg_base + AXI_EP_CFG_ACCESS;
return port->cfg_base;
}
/*
* For Configuration request, RTDID register is used as Bus Number,
* Device Number and Function number of the header fields.
*/
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
struct xgene_pcie_port *port = bus->sysdata;
unsigned int b, d, f;
u32 rtdid_val = 0;
b = bus->number;
d = PCI_SLOT(devfn);
f = PCI_FUNC(devfn);
if (!pci_is_root_bus(bus))
rtdid_val = (b << 8) | (d << 3) | f;
writel(rtdid_val, port->csr_base + RTDID);
/* read the register back to ensure flush */
readl(port->csr_base + RTDID);
}
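/*
 * Illustrative note (added): for bus 1, device 2, function 0 the RTDID
 * value written above is (1 << 8) | (2 << 3) | 0 = 0x110; for the root
 * bus it stays 0 so the request targets the RC itself.
 */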
/*
 * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space for
 * the translation from PCI bus addresses to the native bus. The entire DDR
 * region is mapped into PCIe space through these registers so that it can
 * be reached by DMA from EP devices. The bridge's BAR0/1 must therefore be
 * hidden during enumeration to avoid sizing and resource allocation by the
 * PCI core.
 */
static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
{
if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
(offset == PCI_BASE_ADDRESS_1)))
return true;
return false;
}
static int xgene_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct xgene_pcie_port *port = bus->sysdata;
void __iomem *addr;
if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (xgene_pcie_hide_rc_bars(bus, offset)) {
*val = 0;
return PCIBIOS_SUCCESSFUL;
}
xgene_pcie_set_rtdid_reg(bus, devfn);
addr = xgene_pcie_get_cfg_base(bus);
switch (len) {
case 1:
xgene_pcie_cfg_in8(addr, offset, val);
break;
case 2:
xgene_pcie_cfg_in16(addr, offset, val);
break;
default:
xgene_pcie_cfg_in32(addr, offset, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int xgene_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct xgene_pcie_port *port = bus->sysdata;
void __iomem *addr;
if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (xgene_pcie_hide_rc_bars(bus, offset))
return PCIBIOS_SUCCESSFUL;
xgene_pcie_set_rtdid_reg(bus, devfn);
addr = xgene_pcie_get_cfg_base(bus);
switch (len) {
case 1:
xgene_pcie_cfg_out8(addr, offset, (u8)val);
break;
case 2:
xgene_pcie_cfg_out16(addr, offset, (u16)val);
break;
default:
xgene_pcie_cfg_out32(addr, offset, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops xgene_pcie_ops = {
.read = xgene_pcie_read_config,
.write = xgene_pcie_write_config
};
static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
u32 val32 = 0;
u32 val;
val32 = readl(csr_base + addr);
val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
writel(val, csr_base + addr);
val32 = readl(csr_base + addr + 0x04);
val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x04);
val32 = readl(csr_base + addr + 0x04);
val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
writel(val, csr_base + addr + 0x04);
val32 = readl(csr_base + addr + 0x08);
val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
writel(val, csr_base + addr + 0x08);
return mask;
}
static void xgene_pcie_linkup(struct xgene_pcie_port *port,
u32 *lanes, u32 *speed)
{
void __iomem *csr_base = port->csr_base;
u32 val32;
port->link_up = false;
val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
if (val32 & LINK_UP_MASK) {
port->link_up = true;
*speed = PIPE_PHY_RATE_RD(val32);
val32 = readl(csr_base + BRIDGE_STATUS_0);
*lanes = val32 >> 26;
}
}
static int xgene_pcie_init_port(struct xgene_pcie_port *port)
{
int rc;
port->clk = clk_get(port->dev, NULL);
if (IS_ERR(port->clk)) {
dev_err(port->dev, "clock not available\n");
return -ENODEV;
}
rc = clk_prepare_enable(port->clk);
if (rc) {
dev_err(port->dev, "clock enable failed\n");
return rc;
}
return 0;
}
static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
struct platform_device *pdev)
{
struct resource *res;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
port->csr_base = devm_ioremap_resource(port->dev, res);
if (IS_ERR(port->csr_base))
return PTR_ERR(port->csr_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
port->cfg_base = devm_ioremap_resource(port->dev, res);
if (IS_ERR(port->cfg_base))
return PTR_ERR(port->cfg_base);
port->cfg_addr = res->start;
return 0;
}
static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
void __iomem *base = port->csr_base + offset;
resource_size_t size = resource_size(res);
u64 restype = resource_type(res);
u64 mask = 0;
u32 min_size;
u32 flag = EN_REG;
if (restype == IORESOURCE_MEM) {
min_size = SZ_128M;
} else {
min_size = 128;
flag |= OB_LO_IO;
}
if (size >= min_size)
mask = ~(size - 1) | flag;
else
dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
(u64)size, min_size);
writel(lower_32_bits(cpu_addr), base);
writel(upper_32_bits(cpu_addr), base + 0x04);
writel(lower_32_bits(mask), base + 0x08);
writel(upper_32_bits(mask), base + 0x0c);
writel(lower_32_bits(pci_addr), base + 0x10);
writel(upper_32_bits(pci_addr), base + 0x14);
}
static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
{
writel(lower_32_bits(addr), csr_base + CFGBARL);
writel(upper_32_bits(addr), csr_base + CFGBARH);
writel(EN_REG, csr_base + CFGCTL);
}
static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
struct pci_host_bridge_window *window;
struct device *dev = port->dev;
int ret;
list_for_each_entry(window, res, list) {
struct resource *res = window->res;
u64 restype = resource_type(res);
dev_dbg(port->dev, "%pR\n", res);
switch (restype) {
case IORESOURCE_IO:
xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
res->start - window->offset);
ret = pci_remap_iospace(res, io_base);
if (ret < 0)
return ret;
break;
case IORESOURCE_MEM:
xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
res->start - window->offset);
break;
case IORESOURCE_BUS:
break;
default:
dev_err(dev, "invalid resource %pR\n", res);
return -EINVAL;
}
}
xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
return 0;
}
static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
{
writel(lower_32_bits(pim), addr);
writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
writel(lower_32_bits(size), addr + 0x10);
writel(upper_32_bits(size), addr + 0x14);
}
/*
 * X-Gene PCIe supports a maximum of 3 inbound memory regions. This
 * function helps to select a region based on the size of the region.
 */
static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
{
if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
*ib_reg_mask |= (1 << 1);
return 1;
}
if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
*ib_reg_mask |= (1 << 0);
return 0;
}
if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
*ib_reg_mask |= (1 << 2);
return 2;
}
return -EINVAL;
}
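/*
 * Illustrative note (added): with the size windows above, an 8 MB dma-range
 * claims region 1 first, a 2 GB range claims region 0, and a further large
 * range falls through to region 2; any additional range then fails with
 * -EINVAL.
 */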
static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
struct of_pci_range *range, u8 *ib_reg_mask)
{
void __iomem *csr_base = port->csr_base;
void __iomem *cfg_base = port->cfg_base;
void *bar_addr;
void *pim_addr;
u64 cpu_addr = range->cpu_addr;
u64 pci_addr = range->pci_addr;
u64 size = range->size;
u64 mask = ~(size - 1) | EN_REG;
u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
u32 bar_low;
int region;
region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
if (region < 0) {
dev_warn(port->dev, "invalid pcie dma-range config\n");
return;
}
if (range->flags & IORESOURCE_PREFETCH)
flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
switch (region) {
case 0:
xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
pim_addr = csr_base + PIM1_1L;
break;
case 1:
bar_addr = csr_base + IBAR2;
writel(bar_low, bar_addr);
writel(lower_32_bits(mask), csr_base + IR2MSK);
pim_addr = csr_base + PIM2_1L;
break;
case 2:
bar_addr = csr_base + IBAR3L;
writel(bar_low, bar_addr);
writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
writel(lower_32_bits(mask), csr_base + IR3MSKL);
writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
pim_addr = csr_base + PIM3_1L;
break;
}
xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
}
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
const int na = 3, ns = 2;
int rlen;
parser->node = node;
parser->pna = of_n_addr_cells(node);
parser->np = parser->pna + na + ns;
parser->range = of_get_property(node, "dma-ranges", &rlen);
if (!parser->range)
return -ENOENT;
parser->end = parser->range + rlen / sizeof(__be32);
return 0;
}
static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
{
struct device_node *np = port->node;
struct of_pci_range range;
struct of_pci_range_parser parser;
struct device *dev = port->dev;
u8 ib_reg_mask = 0;
if (pci_dma_range_parser_init(&parser, np)) {
dev_err(dev, "missing dma-ranges property\n");
return -EINVAL;
}
/* Get the dma-ranges from DT */
for_each_of_pci_range(&parser, &range) {
u64 end = range.cpu_addr + range.size - 1;
dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
range.flags, range.cpu_addr, end, range.pci_addr);
xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
}
return 0;
}
/* clear BAR configuration which was done by firmware */
static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
{
int i;
for (i = PIM1_1L; i <= CFGCTL; i += 4)
writel(0x0, port->csr_base + i);
}
static int xgene_pcie_setup(struct xgene_pcie_port *port,
struct list_head *res,
resource_size_t io_base)
{
u32 val, lanes = 0, speed = 0;
int ret;
xgene_pcie_clear_config(port);
/* setup the vendor and device IDs correctly */
val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
writel(val, port->csr_base + BRIDGE_CFG_0);
ret = xgene_pcie_map_ranges(port, res, io_base);
if (ret)
return ret;
ret = xgene_pcie_parse_map_dma_ranges(port);
if (ret)
return ret;
xgene_pcie_linkup(port, &lanes, &speed);
if (!port->link_up)
dev_info(port->dev, "(rc) link down\n");
else
dev_info(port->dev, "(rc) x%d gen-%d link up\n",
lanes, speed + 1);
return 0;
}
static int xgene_pcie_probe_bridge(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus;
int ret;
LIST_HEAD(res);
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->node = of_node_get(pdev->dev.of_node);
port->dev = &pdev->dev;
ret = xgene_pcie_map_reg(port, pdev);
if (ret)
return ret;
ret = xgene_pcie_init_port(port);
if (ret)
return ret;
ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
if (ret)
return ret;
ret = xgene_pcie_setup(port, &res, iobase);
if (ret)
return ret;
bus = pci_create_root_bus(&pdev->dev, 0,
&xgene_pcie_ops, port, &res);
if (!bus)
return -ENOMEM;
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
platform_set_drvdata(pdev, port);
return 0;
}
static const struct of_device_id xgene_pcie_match_table[] = {
{.compatible = "apm,xgene-pcie",},
{},
};
static struct platform_driver xgene_pcie_driver = {
.driver = {
.name = "xgene-pcie",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(xgene_pcie_match_table),
},
.probe = xgene_pcie_probe_bridge,
};
module_platform_driver(xgene_pcie_driver);
MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
MODULE_DESCRIPTION("APM X-Gene PCIe driver");
MODULE_LICENSE("GPL v2");

@ -0,0 +1,917 @@
/*
* Synopsys Designware PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/hardirq.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include "pci-exynos.h"
/* Synopsys-specific PCIe configuration registers */
#define PM_CAP_ID_OFFSET 0x40
#define EXP_CAP_ID_OFFSET 0x70
#define PCI_EXP_LNKCAP_MLW_X1 (0x1 << 4)
#define PCI_EXP_LNKCAP_L1EL_64USEC (0x7 << 15)
#define PCI_EXP_LNKCTL2_TLS 0xf
#define PCI_EXP_LNKCTL2_TLS_2_5GB 0x1
#define PCIE_LINK_L1SS_CONTROL 0x158
#define PORT_LINK_TCOMMON_32US (0x20 << 8)
#define PCIE_LINK_L1SS_CONTROL2 0x15C
#define PORT_LINK_L1SS_ENABLE (0xf << 0)
#define PORT_LINK_TPOWERON_130US (0x69 << 0)
#define PORT_LINK_TPOWERON_3100US (0xfa << 0)
#define PCIE_LINK_L1SS_OFF 0xb44
#define PORT_LINK_L1SS_T_PCLKACK (0x3 << 6)
#define PORT_LINK_L1SS_T_L1_2 (0x4 << 2)
#define PORT_LINK_L1SS_T_POWER_OFF (0x2 << 0)
#define PCIE_ACK_F_ASPM_CONTROL 0x70C
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_MODE_MASK (0x3f << 16)
#define PORT_LINK_MODE_1_LANES (0x1 << 16)
#define PORT_LINK_MODE_2_LANES (0x3 << 16)
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
#define PCIE_MSI_INTR0_ENABLE 0x828
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830
#define PCIE_MISC_CONTROL 0x8BC
#define DBI_RO_WR_EN 0x1
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
#define PCIE_ATU_CR1 0x904
#define PCIE_ATU_TYPE_MEM (0x0 << 0)
#define PCIE_ATU_TYPE_IO (0x2 << 0)
#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE (0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
#define PCIE_ATU_LOWER_BASE 0x90C
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
#define PCIE_ATU_LOWER_TARGET 0x918
#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
#define PCIE_AUX_CLK_FREQ_OFF 0xB40
#define PCIE_AUX_CLK_FREQ_24MHZ 0x18
#define PCIE_AUX_CLK_FREQ_26MHZ 0x1A
#define PCIE_L1_SUBSTATES_OFF 0xB44
static struct pci_ops dw_pcie_ops;
static unsigned long global_io_offset;
int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
{
*val = readl(addr);
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
else if (size == 2)
*val = (*val >> (8 * (where & 3))) & 0xffff;
else if (size != 4)
return PCIBIOS_BAD_REGISTER_NUMBER;
return PCIBIOS_SUCCESSFUL;
}
int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
{
if (size == 4)
writel(val, addr);
else if (size == 2)
writew(val, addr + (where & 2));
else if (size == 1)
writeb(val, addr + (where & 3));
else
return PCIBIOS_BAD_REGISTER_NUMBER;
return PCIBIOS_SUCCESSFUL;
}
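/*
 * Illustrative usage (added; hypothetical caller): reading the 16-bit
 * Vendor ID through the helper above. The word-aligned address is passed in
 * and the byte-lane shift is derived from 'where':
 */
static int __maybe_unused dw_pcie_read_vendor_id(void __iomem *dbi_base,
						 u32 *vendor)
{
	return dw_pcie_cfg_read(dbi_base + (PCI_VENDOR_ID & ~0x3),
				PCI_VENDOR_ID, 2, vendor);
}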
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
int ret;
if (pp->ops->rd_own_conf)
ret = pp->ops->rd_own_conf(pp, where, size, val);
else
ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
size, val);
return ret;
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
int ret;
if (pp->ops->wr_own_conf)
ret = pp->ops->wr_own_conf(pp, where, size, val);
else
ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
size, val);
return ret;
}
static struct irq_chip dw_msi_irq_chip = {
.name = "PCI-MSI",
.irq_enable = unmask_msi_irq,
.irq_disable = mask_msi_irq,
.irq_mask = mask_msi_irq,
.irq_unmask = unmask_msi_irq,
};
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
unsigned long val;
unsigned long flags;
int i, pos, irq;
irqreturn_t ret = IRQ_NONE;
for (i = 0; i < MAX_MSI_CTRLS; i++) {
spin_lock_irqsave(&pp->conf_lock, flags);
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
(u32 *)&val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
if (val) {
ret = IRQ_HANDLED;
pos = 0;
while ((pos = find_next_bit(&val, 32, pos)) != 32) {
irq = irq_find_mapping(pp->irq_domain,
i * 32 + pos);
spin_lock_irqsave(&pp->conf_lock, flags);
dw_pcie_wr_own_conf(pp,
PCIE_MSI_INTR0_STATUS + i * 12,
4, 1 << pos);
spin_unlock_irqrestore(&pp->conf_lock, flags);
generic_handle_irq(irq);
pos++;
}
}
}
return ret;
}
void dw_pcie_msi_init(struct pcie_port *pp)
{
unsigned long flags;
pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
/* program the msi_data */
spin_lock_irqsave(&pp->conf_lock, flags);
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
virt_to_phys((void *)pp->msi_data));
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
spin_unlock_irqrestore(&pp->conf_lock, flags);
}
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
unsigned int res, bit, val;
unsigned long flags;
res = (irq / 32) * 12;
bit = irq % 32;
spin_lock_irqsave(&pp->conf_lock, flags);
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
val &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
}
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
unsigned int nvec, unsigned int pos)
{
unsigned int i;
for (i = 0; i < nvec; i++) {
irq_set_msi_desc_off(irq_base, i, NULL);
/* Disable corresponding interrupt on MSI controller */
if (pp->ops->msi_clear_irq)
pp->ops->msi_clear_irq(pp, pos + i);
else
dw_pcie_msi_clear_irq(pp, pos + i);
}
bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
unsigned int res, bit, val;
unsigned long flags;
res = (irq / 32) * 12;
bit = irq % 32;
spin_lock_irqsave(&pp->conf_lock, flags);
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
val |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
}
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int irq, pos0, i;
struct pcie_port *pp = desc->dev->bus->sysdata;
pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
order_base_2(no_irqs));
if (pos0 < 0)
goto no_valid_irq;
irq = irq_find_mapping(pp->irq_domain, pos0);
if (!irq)
goto no_valid_irq;
/*
* irq_create_mapping (called from dw_pcie_host_init) pre-allocates
* descs so there is no need to allocate descs here. We can therefore
* assume that if irq_find_mapping above returns non-zero, then the
* descs are also successfully allocated.
*/
for (i = 0; i < no_irqs; i++) {
if (irq_set_msi_desc_off(irq, i, desc) != 0) {
clear_irq_range(pp, irq, i, pos0);
goto no_valid_irq;
}
		/* Enable corresponding interrupt in MSI interrupt controller */
if (pp->ops->msi_set_irq)
pp->ops->msi_set_irq(pp, pos0 + i);
else
dw_pcie_msi_set_irq(pp, pos0 + i);
}
*pos = pos0;
return irq;
no_valid_irq:
*pos = pos0;
return -ENOSPC;
}
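/*
 * Illustrative note (added): bitmap_find_free_region() works on
 * power-of-2 blocks, so a request for e.g. 3 vectors reserves a naturally
 * aligned region of order_base_2(3) = 2, i.e. 4 MSI slots.
 */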
static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
struct msi_desc *desc)
{
int irq, pos;
struct msi_msg msg;
struct pcie_port *pp = pdev->bus->sysdata;
if (desc->msi_attrib.is_msix)
return -EINVAL;
irq = assign_irq(1, desc, &pos);
if (irq < 0)
return irq;
if (pp->ops->get_msi_addr)
msg.address_lo = pp->ops->get_msi_addr(pp);
else
msg.address_lo = virt_to_phys((void *)pp->msi_data);
msg.address_hi = 0x0;
if (pp->ops->get_msi_data)
msg.data = pp->ops->get_msi_data(pp, pos);
else
msg.data = pos;
#ifdef CONFIG_PCI_MSI
write_msi_msg(irq, &msg);
#endif
return 0;
}
static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
struct msi_desc *msi = irq_data_get_msi(data);
struct pcie_port *pp = msi->dev->bus->sysdata;
clear_irq_range(pp, irq, 1, data->hwirq);
}
static struct msi_chip dw_pcie_msi_chip = {
.setup_irq = dw_msi_setup_irq,
.teardown_irq = dw_msi_teardown_irq,
};
int dw_pcie_link_up(struct pcie_port *pp)
{
	if (!pp || !pp->ops)
		return 0;
	if (pp->ops->link_up)
		return pp->ops->link_up(pp);
	return 0;
}
void dw_pcie_config_l1ss(struct pcie_port *pp)
{
u32 val;
void __iomem *ep_dbi_base = pp->va_cfg0_base;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
/* Enable L1SS on Root Complex */
val = readl(ep_dbi_base + 0xbc);
val &= ~0x3;
val |= 0x142;
writel(val, ep_dbi_base + 0xBC);
val = readl(ep_dbi_base + 0x248);
writel(val | 0xa0f, ep_dbi_base + 0x248);
writel(PORT_LINK_TPOWERON_130US, ep_dbi_base + 0x24C);
writel(0x10031003, ep_dbi_base + 0x1B4);
val = readl(ep_dbi_base + 0xD4);
writel(val | (1 << 10), ep_dbi_base + 0xD4);
dw_pcie_rd_own_conf(pp, PCIE_LINK_L1SS_CONTROL, 4, &val);
val |= PORT_LINK_TCOMMON_32US | PORT_LINK_L1SS_ENABLE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_L1SS_CONTROL, 4, val);
dw_pcie_wr_own_conf(pp, PCIE_LINK_L1SS_CONTROL2, 4, PORT_LINK_TPOWERON_130US);
val = PORT_LINK_L1SS_T_PCLKACK | PORT_LINK_L1SS_T_L1_2 |
PORT_LINK_L1SS_T_POWER_OFF;
dw_pcie_wr_own_conf(pp, PCIE_LINK_L1SS_OFF, 4, val);
dw_pcie_rd_own_conf(pp, exp_cap_off + PCI_EXP_LNKCTL, 4, &val);
val &= ~PCI_EXP_LNKCTL_ASPMC;
val |= PCI_EXP_LNKCTL_CCC | PCI_EXP_LNKCTL_ASPM_L1;
dw_pcie_wr_own_conf(pp, exp_cap_off + PCI_EXP_LNKCTL, 4, val);
dw_pcie_wr_own_conf(pp, exp_cap_off + PCI_EXP_DEVCTL2, 4, PCI_EXP_DEVCTL2_LTR_EN);
}
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
static const struct irq_domain_ops msi_domain_ops = {
.map = dw_pcie_msi_map,
};
int dw_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
struct platform_device *pdev = to_platform_device(pp->dev);
struct of_pci_range range;
struct of_pci_range_parser parser;
struct resource *cfg_res;
u32 na, ns;
const __be32 *addrp;
int i, index, ret;
	/*
	 * Find the address cell size and the number of cells in order to get
	 * the untranslated address.
	 */
of_property_read_u32(np, "#address-cells", &na);
ns = of_n_size_cells(np);
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
pp->cfg0_size = resource_size(cfg_res)/2;
pp->cfg1_size = resource_size(cfg_res)/2;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
/* Find the untranslated configuration space address */
index = of_property_match_string(np, "reg-names", "config");
addrp = of_get_address(np, index, NULL, NULL);
pp->cfg0_mod_base = of_read_number(addrp, of_n_addr_cells(np));
pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
} else {
dev_err(pp->dev, "missing *config* reg space\n");
}
if (of_pci_range_parser_init(&parser, np)) {
dev_err(pp->dev, "missing ranges property\n");
return -EINVAL;
}
/* Get the I/O and memory ranges from DT */
for_each_of_pci_range(&parser, &range) {
unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
if (restype == IORESOURCE_IO) {
of_pci_range_to_resource(&range, np, &pp->io);
pp->io.name = "I/O";
pp->io.start = max_t(resource_size_t,
PCIBIOS_MIN_IO,
range.pci_addr + global_io_offset);
pp->io.end = min_t(resource_size_t,
IO_SPACE_LIMIT,
range.pci_addr + range.size
+ global_io_offset - 1);
pp->io_size = resource_size(&pp->io);
pp->io_bus_addr = range.pci_addr;
pp->io_base = range.cpu_addr;
/* Find the untranslated IO space address */
pp->io_mod_base = of_read_number(parser.range -
parser.np + na,
of_n_addr_cells(np));
}
if (restype == IORESOURCE_MEM) {
of_pci_range_to_resource(&range, np, &pp->mem);
pp->mem.name = "MEM";
pp->mem_size = resource_size(&pp->mem);
pp->mem_bus_addr = range.pci_addr;
/* Find the untranslated MEM space address */
pp->mem_mod_base = of_read_number(parser.range -
parser.np + na,
of_n_addr_cells(np));
}
if (restype == 0) {
of_pci_range_to_resource(&range, np, &pp->cfg);
pp->cfg0_size = resource_size(&pp->cfg)/2;
pp->cfg1_size = resource_size(&pp->cfg)/2;
pp->cfg0_base = pp->cfg.start;
pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
/* Find the untranslated configuration space address */
pp->cfg0_mod_base = of_read_number(parser.range -
parser.np + na, ns);
pp->cfg1_mod_base = pp->cfg0_mod_base +
pp->cfg0_size;
}
}
ret = of_pci_parse_bus_range(np, &pp->busn);
if (ret < 0) {
pp->busn.name = np->name;
pp->busn.start = 0;
pp->busn.end = 0xff;
pp->busn.flags = IORESOURCE_BUS;
dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
ret, &pp->busn);
}
if (!pp->dbi_base) {
pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
resource_size(&pp->cfg));
if (!pp->dbi_base) {
dev_err(pp->dev, "failed to map DBI registers\n");
return -ENOMEM;
}
}
pp->mem_base = pp->mem.start;
if (!pp->va_cfg0_base) {
pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(pp->dev, "failed to map cfg0 space\n");
return -ENOMEM;
}
}
if (!pp->va_cfg1_base) {
pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(pp->dev, "failed to map cfg1 space\n");
return -ENOMEM;
}
}
if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
dev_err(pp->dev, "Failed to parse the number of lanes\n");
return -EINVAL;
}
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
MAX_MSI_IRQS, &msi_domain_ops,
&dw_pcie_msi_chip);
if (!pp->irq_domain) {
dev_err(pp->dev, "irq domain init failed\n");
return -ENXIO;
}
for (i = 0; i < MAX_MSI_IRQS; i++)
irq_create_mapping(pp->irq_domain, i);
} else {
ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
if (ret < 0)
return ret;
}
}
return 0;
}
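/*
* Create and scan the root bus: program the RC class code, request a link
* speed change, hand the bridge windows to the PCI core, enumerate child
* devices, assign resources and finally enable L1 substates.
*/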
int dw_pcie_scan(struct pcie_port *pp)
{
struct pci_bus *bus;
LIST_HEAD(res);
struct device_node *np = pp->dev->of_node;
u32 val;
int ret;
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
/* program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
#ifdef CONFIG_ARM
/*
* FIXME: we should really be able to use
* of_pci_get_host_bridge_resources on arm32 as well,
* but the conversion needs some more testing
*/
if (global_io_offset < SZ_1M && pp->io_size > 0) {
pci_ioremap_io(global_io_offset, pp->io_base);
global_io_offset += SZ_64K;
pci_add_resource_offset(&res, &pp->io,
global_io_offset - pp->io_bus_addr);
}
pci_add_resource_offset(&res, &pp->mem,
pp->mem.start - pp->mem_bus_addr);
pci_add_resource(&res, &pp->busn);
#else
ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
if (ret)
return ret;
#endif
bus = pci_create_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
pp, &res);
if (!bus)
return -ENOMEM;
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
bus->msi = container_of(&pp->irq_domain, struct msi_controller, domain);
#else
bus->msi = &dw_pcie_msi_chip;
#endif
pci_scan_child_bus(bus);
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
#ifdef CONFIG_ARM
/* support old dtbs that incorrectly describe IRQs */
pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
dw_pcie_config_l1ss(pp);
return 0;
}
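/*
* iATU outbound viewport programming: each helper selects a region index
* in PCIE_ATU_VIEWPORT and then writes the base/limit/target addresses
* and the TLP type. Region 0 carries CFG0, region 1 carries MEM, and
* region 2 is shared between CFG1 and IO.
*/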
void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
{
/* Program viewport 0 : OUTBOUND : CFG0 */
dw_pcie_wr_own_conf(pp, PCIE_ATU_VIEWPORT, 4,
PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_BASE, 4, pp->cfg0_mod_base);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_BASE, 4, (pp->cfg0_mod_base >> 32));
dw_pcie_wr_own_conf(pp, PCIE_ATU_LIMIT, 4, pp->cfg0_mod_base + pp->cfg0_size - 1);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_TARGET, 4, busdev);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_TARGET, 4, 0);
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR1, 4, PCIE_ATU_TYPE_CFG0);
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR2, 4, PCIE_ATU_ENABLE);
}
static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
{
/* Program viewport 2 : OUTBOUND : CFG1 */
dw_pcie_wr_own_conf(pp, PCIE_ATU_VIEWPORT, 4,
PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX2);
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR1, 4, PCIE_ATU_TYPE_CFG1);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_BASE, 4, pp->cfg1_mod_base);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_BASE, 4, (pp->cfg1_mod_base >> 32));
dw_pcie_wr_own_conf(pp, PCIE_ATU_LIMIT, 4, pp->cfg1_mod_base + pp->cfg1_size - 1);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_TARGET, 4, busdev);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_TARGET, 4, 0);
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR2, 4, PCIE_ATU_ENABLE);
}
void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
{
/* Program viewport 1 : OUTBOUND : MEM */
dw_pcie_wr_own_conf(pp, PCIE_ATU_VIEWPORT, 4,
PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1);
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR1, 4, PCIE_ATU_TYPE_MEM);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_BASE, 4, pp->mem_mod_base);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_BASE, 4, (pp->mem_mod_base >> 32));
dw_pcie_wr_own_conf(pp, PCIE_ATU_LIMIT, 4, pp->mem_mod_base + pp->mem_size - 1);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_TARGET, 4, pp->mem_bus_addr);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_TARGET, 4, upper_32_bits(pp->mem_bus_addr));
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR2, 4, PCIE_ATU_ENABLE);
}
static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
{
/* Program viewport 2 : OUTBOUND : IO */
dw_pcie_wr_own_conf(pp, PCIE_ATU_VIEWPORT, 4,
PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX2);
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR1, 4, PCIE_ATU_TYPE_IO);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_BASE, 4, pp->io_mod_base);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_BASE, 4, (pp->io_mod_base >> 32));
dw_pcie_wr_own_conf(pp, PCIE_ATU_LIMIT, 4, pp->io_mod_base + pp->io_size - 1);
dw_pcie_wr_own_conf(pp, PCIE_ATU_LOWER_TARGET, 4, pp->io_bus_addr);
dw_pcie_wr_own_conf(pp, PCIE_ATU_UPPER_TARGET, 4, upper_32_bits(pp->io_bus_addr));
dw_pcie_wr_own_conf(pp, PCIE_ATU_CR2, 4, PCIE_ATU_ENABLE);
}
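/*
* Config accesses to devices behind the root port borrow an ATU region
* for the duration of the access: CFG1 shares region 2 with IO, so the IO
* viewport is restored afterwards, and the MEM viewport is reprogrammed
* the same way after CFG0 accesses.
*/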
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
int ret = PCIBIOS_SUCCESSFUL;
u32 address, busdev;
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
dw_pcie_prog_viewport_cfg0(pp, busdev);
ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
val);
dw_pcie_prog_viewport_mem_outbound(pp);
} else {
dw_pcie_prog_viewport_cfg1(pp, busdev);
ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
val);
dw_pcie_prog_viewport_io_outbound(pp);
}
return ret;
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
int ret = PCIBIOS_SUCCESSFUL;
u32 address, busdev;
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
dw_pcie_prog_viewport_cfg0(pp, busdev);
ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
val);
dw_pcie_prog_viewport_mem_outbound(pp);
} else {
dw_pcie_prog_viewport_cfg1(pp, busdev);
ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
val);
dw_pcie_prog_viewport_io_outbound(pp);
}
return ret;
}
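/*
* Filter out config accesses that cannot reach a device: the Exynos glue
* must report the link as up, downstream buses additionally require
* dw_pcie_link_up(), and only device 0 is accepted on the root bus and on
* the bus directly behind the root port.
*/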
static int dw_pcie_valid_config(struct pcie_port *pp,
struct pci_bus *bus, int dev)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
if (exynos_pcie->state != STATE_LINK_UP)
return 0;
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
if (!dw_pcie_link_up(pp))
return 0;
}
/* access only one slot on each root port */
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
/*
* do not read more than one device on the bus directly attached
* to RC's (Virtual Bridge's) DS side.
*/
if (bus->primary == pp->root_bus_nr && dev > 0)
return 0;
return 1;
}
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = bus->sysdata;
unsigned long flags;
int ret;
spin_lock_irqsave(&pp->conf_lock, flags);
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
spin_unlock_irqrestore(&pp->conf_lock, flags);
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (bus->number != pp->root_bus_nr)
if (pp->ops->rd_other_conf)
ret = pp->ops->rd_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_rd_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_rd_own_conf(pp, where, size, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = bus->sysdata;
unsigned long flags;
int ret;
spin_lock_irqsave(&pp->conf_lock, flags);
if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
spin_unlock_irqrestore(&pp->conf_lock, flags);
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (bus->number != pp->root_bus_nr)
if (pp->ops->wr_other_conf)
ret = pp->ops->wr_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_wr_other_conf(pp, bus, devfn,
where, size, val);
else
ret = dw_pcie_wr_own_conf(pp, where, size, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
static struct pci_ops dw_pcie_ops = {
.read = dw_pcie_rd_conf,
.write = dw_pcie_wr_conf,
};
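/*
* One-time root complex setup: unlock the read-only DBI registers,
* program the Samsung vendor/device IDs, lane count, link capabilities
* and L1ss timings, set up the RC BARs, bus numbers, memory window and
* command register, then retrain the link with the target speed held at
* Gen1.
*/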
void dw_pcie_setup_rc(struct pcie_port *pp)
{
struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
u32 val;
u32 membase;
u32 memlimit;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
u32 pm_cap_off = PM_CAP_ID_OFFSET;
/* enable writing to DBI read-only registers */
dw_pcie_wr_own_conf(pp, PCIE_MISC_CONTROL, 4, DBI_RO_WR_EN);
/* change vendor ID and device ID for PCIe */
dw_pcie_wr_own_conf(pp, PCI_VENDOR_ID, 2, PCI_VENDOR_ID_SAMSUNG);
dw_pcie_wr_own_conf(pp, PCI_DEVICE_ID, 2,
PCI_DEVICE_ID_EXYNOS + exynos_pcie->ch_num);
/* set the number of lanes */
dw_pcie_rd_own_conf(pp, PCIE_PORT_LINK_CONTROL, 4, &val);
val &= ~PORT_LINK_MODE_MASK;
switch (pp->lanes) {
case 1:
val |= PORT_LINK_MODE_1_LANES;
break;
case 2:
val |= PORT_LINK_MODE_2_LANES;
break;
case 4:
val |= PORT_LINK_MODE_4_LANES;
break;
}
dw_pcie_wr_own_conf(pp, PCIE_PORT_LINK_CONTROL, 4, val);
/* set link width speed control register */
dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
switch (pp->lanes) {
case 1:
val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
break;
case 2:
val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
break;
case 4:
val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
break;
}
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
/* set max link width & speed : Gen2, Lane1 */
dw_pcie_rd_own_conf(pp, exp_cap_off + PCI_EXP_LNKCAP, 4, &val);
val &= ~(PCI_EXP_LNKCAP_L1EL | PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
val |= PCI_EXP_LNKCAP_L1EL_64USEC | PCI_EXP_LNKCAP_MLW_X1 |
PCI_EXP_LNKCAP_SLS_5_0GB;
dw_pcie_wr_own_conf(pp, exp_cap_off + PCI_EXP_LNKCAP, 4, val);
/* set auxiliary clock frequency: 26MHz */
dw_pcie_wr_own_conf(pp, PCIE_AUX_CLK_FREQ_OFF, 4, PCIE_AUX_CLK_FREQ_26MHZ);
/* set duration of L1.2 & L1.2.Entry */
dw_pcie_wr_own_conf(pp, PCIE_L1_SUBSTATES_OFF, 4, 0xD2);
/* clear power management control and status register */
dw_pcie_wr_own_conf(pp, pm_cap_off + PCI_PM_CTRL, 4, 0x0);
/* setup RC BARs */
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0x00000004);
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_1, 4, 0x00000000);
/* setup bus numbers */
dw_pcie_rd_own_conf(pp, PCI_PRIMARY_BUS, 4, &val);
val &= 0xff000000;
val |= 0x00010100;
dw_pcie_wr_own_conf(pp, PCI_PRIMARY_BUS, 4, val);
/* setup memory base, memory limit */
membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
val = memlimit | membase;
dw_pcie_wr_own_conf(pp, PCI_MEMORY_BASE, 4, val);
/* setup command register */
dw_pcie_rd_own_conf(pp, PCI_COMMAND, 4, &val);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_wr_own_conf(pp, PCI_COMMAND, 4, val);
/* initiate link retraining */
dw_pcie_rd_own_conf(pp, exp_cap_off + PCI_EXP_LNKCTL, 4, &val);
val |= PCI_EXP_LNKCTL_RL;
dw_pcie_wr_own_conf(pp, exp_cap_off + PCI_EXP_LNKCTL, 4, val);
/* set target speed to GEN1 only */
dw_pcie_rd_own_conf(pp, exp_cap_off + PCI_EXP_LNKCTL2, 4, &val);
val &= ~PCI_EXP_LNKCTL2_TLS;
val |= PCI_EXP_LNKCTL2_TLS_2_5GB;
dw_pcie_wr_own_conf(pp, exp_cap_off + PCI_EXP_LNKCTL2, 4, val);
}
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,95 @@
/*
* Synopsys Designware PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _PCIE_DESIGNWARE_H
#define _PCIE_DESIGNWARE_H
/*
* Maximum number of MSI IRQs can be 256 per controller, but keep
* it at 32 for now; we will probably never need more. If we do,
* increment it in multiples of 32.
*/
#define MAX_MSI_IRQS 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
struct pcie_port {
struct device *dev;
u8 root_bus_nr;
void __iomem *dbi_base;
u64 cfg0_base;
u64 cfg0_mod_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
u64 cfg1_base;
u64 cfg1_mod_base;
void __iomem *va_cfg1_base;
u32 cfg1_size;
u64 io_base;
u64 io_mod_base;
phys_addr_t io_bus_addr;
u32 io_size;
u64 mem_base;
u64 mem_mod_base;
phys_addr_t mem_bus_addr;
u32 mem_size;
spinlock_t conf_lock;
struct resource cfg;
struct resource io;
struct resource mem;
struct resource busn;
int irq;
u32 lanes;
struct pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
unsigned long msi_data;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
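/*
* Per-SoC glue callbacks. A host driver fills in only the hooks its
* hardware needs; the core checks each pointer and falls back to its
* generic implementation when a hook is left NULL.
*/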
struct pcie_host_ops {
void (*readl_rc)(struct pcie_port *pp,
void __iomem *dbi_base, u32 *val);
void (*writel_rc)(struct pcie_port *pp,
u32 val, void __iomem *dbi_base);
int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 *val);
int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 val);
int (*link_up)(struct pcie_port *pp);
void (*msi_set_irq)(struct pcie_port *pp, int irq);
void (*msi_clear_irq)(struct pcie_port *pp, int irq);
u32 (*get_msi_addr)(struct pcie_port *pp);
u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
};
int exynos_pcie_poweron(int);
void exynos_pcie_poweroff(int);
int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
int dw_pcie_link_up(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev);
void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp);
void dw_pcie_set_tpoweron(struct pcie_port *pp, int max);
void dw_pcie_config_l1ss(struct pcie_port *pp);
int dw_pcie_scan(struct pcie_port *pp);
#endif /* _PCIE_DESIGNWARE_H */

1001
drivers/pci/host/pcie-rcar.c Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,393 @@
/*
* PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
*
* SPEAr13xx PCIe Glue Layer Source Code
*
* Copyright (C) 2010-2014 ST Microelectronics
* Pratyush Anand <pratyush.anand@st.com>
* Mohit Kumar <mohit.kumar@st.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include "pcie-designware.h"
struct spear13xx_pcie {
void __iomem *app_base;
struct phy *phy;
struct clk *clk;
struct pcie_port pp;
bool is_gen1;
};
struct pcie_app_reg {
u32 app_ctrl_0; /* cr0 */
u32 app_ctrl_1; /* cr1 */
u32 app_status_0; /* cr2 */
u32 app_status_1; /* cr3 */
u32 msg_status; /* cr4 */
u32 msg_payload; /* cr5 */
u32 int_sts; /* cr6 */
u32 int_clr; /* cr7 */
u32 int_mask; /* cr8 */
u32 mst_bmisc; /* cr9 */
u32 phy_ctrl; /* cr10 */
u32 phy_status; /* cr11 */
u32 cxpl_debug_info_0; /* cr12 */
u32 cxpl_debug_info_1; /* cr13 */
u32 ven_msg_ctrl_0; /* cr14 */
u32 ven_msg_ctrl_1; /* cr15 */
u32 ven_msg_data_0; /* cr16 */
u32 ven_msg_data_1; /* cr17 */
u32 ven_msi_0; /* cr18 */
u32 ven_msi_1; /* cr19 */
u32 mst_rmisc; /* cr20 */
};
/* CR0 ID */
#define RX_LANE_FLIP_EN_ID 0
#define TX_LANE_FLIP_EN_ID 1
#define SYS_AUX_PWR_DET_ID 2
#define APP_LTSSM_ENABLE_ID 3
#define SYS_ATTEN_BUTTON_PRESSED_ID 4
#define SYS_MRL_SENSOR_STATE_ID 5
#define SYS_PWR_FAULT_DET_ID 6
#define SYS_MRL_SENSOR_CHGED_ID 7
#define SYS_PRE_DET_CHGED_ID 8
#define SYS_CMD_CPLED_INT_ID 9
#define APP_INIT_RST_0_ID 11
#define APP_REQ_ENTR_L1_ID 12
#define APP_READY_ENTR_L23_ID 13
#define APP_REQ_EXIT_L1_ID 14
#define DEVICE_TYPE_EP (0 << 25)
#define DEVICE_TYPE_LEP (1 << 25)
#define DEVICE_TYPE_RC (4 << 25)
#define SYS_INT_ID 29
#define MISCTRL_EN_ID 30
#define REG_TRANSLATION_ENABLE 31
/* CR1 ID */
#define APPS_PM_XMT_TURNOFF_ID 2
#define APPS_PM_XMT_PME_ID 5
/* CR3 ID */
#define XMLH_LTSSM_STATE_DETECT_QUIET 0x00
#define XMLH_LTSSM_STATE_DETECT_ACT 0x01
#define XMLH_LTSSM_STATE_POLL_ACTIVE 0x02
#define XMLH_LTSSM_STATE_POLL_COMPLIANCE 0x03
#define XMLH_LTSSM_STATE_POLL_CONFIG 0x04
#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET 0x05
#define XMLH_LTSSM_STATE_DETECT_WAIT 0x06
#define XMLH_LTSSM_STATE_CFG_LINKWD_START 0x07
#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT 0x08
#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT 0x09
#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT 0x0A
#define XMLH_LTSSM_STATE_CFG_COMPLETE 0x0B
#define XMLH_LTSSM_STATE_CFG_IDLE 0x0C
#define XMLH_LTSSM_STATE_RCVRY_LOCK 0x0D
#define XMLH_LTSSM_STATE_RCVRY_SPEED 0x0E
#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG 0x0F
#define XMLH_LTSSM_STATE_RCVRY_IDLE 0x10
#define XMLH_LTSSM_STATE_L0 0x11
#define XMLH_LTSSM_STATE_L0S 0x12
#define XMLH_LTSSM_STATE_L123_SEND_EIDLE 0x13
#define XMLH_LTSSM_STATE_L1_IDLE 0x14
#define XMLH_LTSSM_STATE_L2_IDLE 0x15
#define XMLH_LTSSM_STATE_L2_WAKE 0x16
#define XMLH_LTSSM_STATE_DISABLED_ENTRY 0x17
#define XMLH_LTSSM_STATE_DISABLED_IDLE 0x18
#define XMLH_LTSSM_STATE_DISABLED 0x19
#define XMLH_LTSSM_STATE_LPBK_ENTRY 0x1A
#define XMLH_LTSSM_STATE_LPBK_ACTIVE 0x1B
#define XMLH_LTSSM_STATE_LPBK_EXIT 0x1C
#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT 0x1D
#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY 0x1E
#define XMLH_LTSSM_STATE_HOT_RESET 0x1F
#define XMLH_LTSSM_STATE_MASK 0x3F
#define XMLH_LINK_UP (1 << 6)
/* CR4 ID */
#define CFG_MSI_EN_ID 18
/* CR6 */
#define INTA_CTRL_INT (1 << 7)
#define INTB_CTRL_INT (1 << 8)
#define INTC_CTRL_INT (1 << 9)
#define INTD_CTRL_INT (1 << 10)
#define MSI_CTRL_INT (1 << 26)
/* CR19 ID */
#define VEN_MSI_REQ_ID 11
#define VEN_MSI_FUN_NUM_ID 8
#define VEN_MSI_TC_ID 5
#define VEN_MSI_VECTOR_ID 0
#define VEN_MSI_REQ_EN ((u32)0x1 << VEN_MSI_REQ_ID)
#define VEN_MSI_FUN_NUM_MASK ((u32)0x7 << VEN_MSI_FUN_NUM_ID)
#define VEN_MSI_TC_MASK ((u32)0x7 << VEN_MSI_TC_ID)
#define VEN_MSI_VECTOR_MASK ((u32)0x1F << VEN_MSI_VECTOR_ID)
#define EXP_CAP_ID_OFFSET 0x70
#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp)
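/*
* Bring up the link: run the common RC setup, clamp the max read request
* size to 128 bytes, program the ST vendor/device IDs, optionally pin the
* link to Gen1 for buggy cards, then enable the LTSSM and poll for up to
* a second for link-up.
*/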
static int spear13xx_pcie_establish_link(struct pcie_port *pp)
{
u32 val;
int count = 0;
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
u32 exp_cap_off = EXP_CAP_ID_OFFSET;
if (dw_pcie_link_up(pp)) {
dev_err(pp->dev, "link already up\n");
return 0;
}
dw_pcie_setup_rc(pp);
/*
* this controller supports only a 128-byte read request size, however
* its default value in the capability register is 512 bytes, so force
* it to 128 here.
*/
dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val);
val &= ~PCI_EXP_DEVCTL_READRQ;
dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val);
dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A);
dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80);
/*
* if is_gen1 is set then limit the link to Gen1, so that some buggy
* cards also work
*/
if (spear13xx_pcie->is_gen1) {
dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4,
&val);
if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
val &= ~((u32)PCI_EXP_LNKCAP_SLS);
val |= PCI_EXP_LNKCAP_SLS_2_5GB;
dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
PCI_EXP_LNKCAP, 4, val);
}
dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4,
&val);
if ((val & PCI_EXP_LNKCTL2_TLS) != PCI_EXP_LNKCTL2_TLS_2_5GB) {
val &= ~((u32)PCI_EXP_LNKCTL2_TLS);
val |= PCI_EXP_LNKCTL2_TLS_2_5GB;
dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
PCI_EXP_LNKCTL2, 4, val);
}
}
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
| (1 << APP_LTSSM_ENABLE_ID)
| ((u32)1 << REG_TRANSLATION_ENABLE),
&app_reg->app_ctrl_0);
/* check if the link is up or not */
while (!dw_pcie_link_up(pp)) {
mdelay(100);
count++;
if (count == 10) {
dev_err(pp->dev, "link up failed\n");
return -EINVAL;
}
}
dev_info(pp->dev, "link up\n");
return 0;
}
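/*
* Single muxed interrupt for the port: read the application-register
* status, let the DesignWare core dispatch any pending MSIs, then ack
* everything through int_clr.
*/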
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
unsigned int status;
status = readl(&app_reg->int_sts);
if (status & MSI_CTRL_INT) {
if (!IS_ENABLED(CONFIG_PCI_MSI))
BUG();
dw_handle_msi_irq(pp);
}
writel(status, &app_reg->int_clr);
return IRQ_HANDLED;
}
static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
dw_pcie_msi_init(pp);
writel(readl(&app_reg->int_mask) |
MSI_CTRL_INT, &app_reg->int_mask);
}
}
static int spear13xx_pcie_link_up(struct pcie_port *pp)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
return 1;
return 0;
}
static void spear13xx_pcie_host_init(struct pcie_port *pp)
{
spear13xx_pcie_establish_link(pp);
spear13xx_pcie_enable_interrupts(pp);
}
static struct pcie_host_ops spear13xx_pcie_host_ops = {
.link_up = spear13xx_pcie_link_up,
.host_init = spear13xx_pcie_host_init,
};
static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
pp->irq = platform_get_irq(pdev, 0);
if (pp->irq < 0) {
dev_err(dev, "failed to get irq\n");
return pp->irq;
}
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
IRQF_SHARED, "spear1340-pcie", pp);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
}
pp->root_bus_nr = -1;
pp->ops = &spear13xx_pcie_host_ops;
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
}
return 0;
}
static int spear13xx_pcie_probe(struct platform_device *pdev)
{
struct spear13xx_pcie *spear13xx_pcie;
struct pcie_port *pp;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
struct resource *dbi_base;
int ret;
spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
if (!spear13xx_pcie) {
dev_err(dev, "no memory for SPEAr13xx pcie\n");
return -ENOMEM;
}
spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(spear13xx_pcie->phy)) {
ret = PTR_ERR(spear13xx_pcie->phy);
if (ret == -EPROBE_DEFER)
dev_info(dev, "probe deferred\n");
else
dev_err(dev, "couldn't get pcie-phy\n");
return ret;
}
phy_init(spear13xx_pcie->phy);
spear13xx_pcie->clk = devm_clk_get(dev, NULL);
if (IS_ERR(spear13xx_pcie->clk)) {
dev_err(dev, "couldn't get clk for pcie\n");
return PTR_ERR(spear13xx_pcie->clk);
}
ret = clk_prepare_enable(spear13xx_pcie->clk);
if (ret) {
dev_err(dev, "couldn't enable clk for pcie\n");
return ret;
}
pp = &spear13xx_pcie->pp;
pp->dev = dev;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
if (IS_ERR(pp->dbi_base)) {
dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
ret = PTR_ERR(pp->dbi_base);
goto fail_clk;
}
spear13xx_pcie->app_base = pp->dbi_base + 0x2000;
if (of_property_read_bool(np, "st,pcie-is-gen1"))
spear13xx_pcie->is_gen1 = true;
ret = add_pcie_port(pp, pdev);
if (ret < 0)
goto fail_clk;
platform_set_drvdata(pdev, spear13xx_pcie);
return 0;
fail_clk:
clk_disable_unprepare(spear13xx_pcie->clk);
return ret;
}
static const struct of_device_id spear13xx_pcie_of_match[] = {
{ .compatible = "st,spear1340-pcie", },
{},
};
MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
.driver = {
.name = "spear-pcie",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(spear13xx_pcie_of_match),
},
};
/* SPEAr13xx PCIe driver does not allow module unload */
static int __init pcie_init(void)
{
return platform_driver_register(&spear13xx_pcie_driver);
}
module_init(pcie_init);
MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
MODULE_LICENSE("GPL v2");

View file

@ -0,0 +1,970 @@
/*
* PCIe host controller driver for Xilinx AXI PCIe Bridge
*
* Copyright (c) 2012 - 2014 Xilinx, Inc.
*
* Based on the Tegra PCIe driver
*
* Bits taken from Synopsys Designware Host controller driver and
* ARM PCI Host generic driver.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
/* Register definitions */
#define XILINX_PCIE_REG_BIR 0x00000130
#define XILINX_PCIE_REG_IDR 0x00000138
#define XILINX_PCIE_REG_IMR 0x0000013c
#define XILINX_PCIE_REG_PSCR 0x00000144
#define XILINX_PCIE_REG_RPSC 0x00000148
#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
#define XILINX_PCIE_REG_MSIBASE2 0x00000150
#define XILINX_PCIE_REG_RPEFR 0x00000154
#define XILINX_PCIE_REG_RPIFR1 0x00000158
#define XILINX_PCIE_REG_RPIFR2 0x0000015c
/* Interrupt registers definitions */
#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
#define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
#define XILINX_PCIE_INTR_STR_ERR BIT(2)
#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
#define XILINX_PCIE_INTR_NONFATAL BIT(10)
#define XILINX_PCIE_INTR_FATAL BIT(11)
#define XILINX_PCIE_INTR_INTX BIT(16)
#define XILINX_PCIE_INTR_MSI BIT(17)
#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
/* Root Port Error FIFO Read Register definitions */
#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
/* Root Port Interrupt FIFO Read Register 1 definitions */
#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
/* Bridge Info Register definitions */
#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
/* Root Port Interrupt FIFO Read Register 2 definitions */
#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
/* Root Port Status/control Register definitions */
#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
/* Phy Status/Control Register definitions */
#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
/* ECAM definitions */
#define ECAM_BUS_NUM_SHIFT 20
#define ECAM_DEV_NUM_SHIFT 12
/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS 128
/* Number of Memory Resources */
#define XILINX_MAX_NUM_RESOURCES 3
/**
* struct xilinx_pcie_port - PCIe port information
* @reg_base: IO Mapped Register Base
* @irq: Interrupt number
* @msi_pages: MSI pages
* @root_busno: Root Bus number
* @dev: Device pointer
* @irq_domain: IRQ domain pointer
* @bus_range: Bus range
* @resources: Bus Resources
*/
struct xilinx_pcie_port {
void __iomem *reg_base;
u32 irq;
unsigned long msi_pages;
u8 root_busno;
struct device *dev;
struct irq_domain *irq_domain;
struct resource bus_range;
struct list_head resources;
};
static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
return sys->private_data;
}
static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
{
return readl(port->reg_base + reg);
}
static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
{
writel(val, port->reg_base + reg);
}
static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
{
return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
}
/**
* xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
* @port: PCIe port information
*/
static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
{
u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
dev_dbg(port->dev, "Requester ID %d\n",
val & XILINX_PCIE_RPEFR_REQ_ID);
pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
XILINX_PCIE_REG_RPEFR);
}
}
/**
* xilinx_pcie_valid_device - Check if a valid device is present on bus
* @bus: PCI Bus structure
* @devfn: device/function
*
* Return: 'true' on success and 'false' if invalid device is found
*/
static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
/* Check if link is up when trying to access downstream ports */
if (bus->number != port->root_busno)
if (!xilinx_pcie_link_is_up(port))
return false;
/* Only one device down on each root port */
if (bus->number == port->root_busno && devfn > 0)
return false;
/*
* Do not read more than one device on the bus directly attached
* to RC.
*/
if (bus->primary == port->root_busno && devfn > 0)
return false;
return true;
}
/**
* xilinx_pcie_config_base - Get configuration base
* @bus: PCI Bus structure
* @devfn: Device/function
* @where: Offset from base
*
* Return: Base address of the configuration space needed to be
* accessed.
*/
static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
int relbus;
relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
(devfn << ECAM_DEV_NUM_SHIFT);
return port->reg_base + relbus + where;
}
/**
* xilinx_pcie_read_config - Read configuration space
* @bus: PCI Bus structure
* @devfn: Device/function
* @where: Offset from base
* @size: Byte/word/dword
* @val: Value to be read
*
* Return: PCIBIOS_SUCCESSFUL on success
* PCIBIOS_DEVICE_NOT_FOUND on failure
*/
static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
void __iomem *addr;
if (!xilinx_pcie_valid_device(bus, devfn)) {
*val = 0xFFFFFFFF;
return PCIBIOS_DEVICE_NOT_FOUND;
}
addr = xilinx_pcie_config_base(bus, devfn, where);
switch (size) {
case 1:
*val = readb(addr);
break;
case 2:
*val = readw(addr);
break;
default:
*val = readl(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
/**
* xilinx_pcie_write_config - Write configuration space
* @bus: PCI Bus structure
* @devfn: Device/function
* @where: Offset from base
* @size: Byte/word/dword
* @val: Value to be written to device
*
* Return: PCIBIOS_SUCCESSFUL on success
* PCIBIOS_DEVICE_NOT_FOUND on failure
*/
static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
void __iomem *addr;
if (!xilinx_pcie_valid_device(bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
addr = xilinx_pcie_config_base(bus, devfn, where);
switch (size) {
case 1:
writeb(val, addr);
break;
case 2:
writew(val, addr);
break;
default:
writel(val, addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
/* PCIe operations */
static struct pci_ops xilinx_pcie_ops = {
.read = xilinx_pcie_read_config,
.write = xilinx_pcie_write_config,
};
/* MSI functions */
/**
* xilinx_pcie_destroy_msi - Free MSI number
* @irq: IRQ to be freed
*/
static void xilinx_pcie_destroy_msi(unsigned int irq)
{
struct irq_desc *desc;
struct msi_desc *msi;
struct xilinx_pcie_port *port;
desc = irq_to_desc(irq);
msi = irq_desc_get_msi_desc(desc);
port = sys_to_pcie(msi->dev->bus->sysdata);
if (!test_bit(irq, msi_irq_in_use))
dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
else
clear_bit(irq, msi_irq_in_use);
}
/**
* xilinx_pcie_assign_msi - Allocate MSI number
* @port: PCIe port structure
*
* Return: A valid IRQ on success and error value on failure.
*/
static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
{
int pos;
pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
if (pos < XILINX_NUM_MSI_IRQS)
set_bit(pos, msi_irq_in_use);
else
return -ENOSPC;
return pos;
}
/**
* xilinx_msi_teardown_irq - Destroy the MSI
* @chip: MSI Chip descriptor
* @irq: MSI IRQ to destroy
*/
static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
xilinx_pcie_destroy_msi(irq);
}
/**
* xilinx_pcie_msi_setup_irq - Setup MSI request
* @chip: MSI chip pointer
* @pdev: PCIe device pointer
* @desc: MSI descriptor pointer
*
* Return: '0' on success and error value on failure
*/
static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
struct pci_dev *pdev,
struct msi_desc *desc)
{
struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
unsigned int irq;
int hwirq;
struct msi_msg msg;
phys_addr_t msg_addr;
hwirq = xilinx_pcie_assign_msi(port);
if (hwirq < 0)
return hwirq;
irq = irq_create_mapping(port->irq_domain, hwirq);
if (!irq)
return -EINVAL;
irq_set_msi_desc(irq, desc);
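/*
* The MSI doorbell address is a dummy page owned by the driver (set up in
* xilinx_pcie_enable_msi()); the message data carries the Linux virq
* number itself, which the interrupt handler feeds straight back into
* generic_handle_irq().
*/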
msg_addr = virt_to_phys((void *)port->msi_pages);
msg.address_hi = 0;
msg.address_lo = msg_addr;
msg.data = irq;
write_msi_msg(irq, &msg);
return 0;
}
/* MSI Chip Descriptor */
static struct msi_chip xilinx_pcie_msi_chip = {
.setup_irq = xilinx_pcie_msi_setup_irq,
.teardown_irq = xilinx_msi_teardown_irq,
};
/* HW Interrupt Chip Descriptor */
static struct irq_chip xilinx_msi_irq_chip = {
.name = "Xilinx PCIe MSI",
.irq_enable = unmask_msi_irq,
.irq_disable = mask_msi_irq,
.irq_mask = mask_msi_irq,
.irq_unmask = unmask_msi_irq,
};
/**
* xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
* @domain: IRQ domain
* @irq: Virtual IRQ number
* @hwirq: HW interrupt number
*
* Return: Always returns 0.
*/
static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
/* IRQ Domain operations */
static const struct irq_domain_ops msi_domain_ops = {
.map = xilinx_pcie_msi_map,
};
/**
* xilinx_pcie_enable_msi - Enable MSI support
* @port: PCIe port information
*/
static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
{
phys_addr_t msg_addr;
port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
msg_addr = virt_to_phys((void *)port->msi_pages);
pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
}
/**
* xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
* @bus: PCIe bus
*/
static void xilinx_pcie_add_bus(struct pci_bus *bus)
{
if (IS_ENABLED(CONFIG_PCI_MSI)) {
struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
xilinx_pcie_msi_chip.dev = port->dev;
bus->msi = &xilinx_pcie_msi_chip;
}
}
/* INTx Functions */
/**
* xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
* @domain: IRQ domain
* @irq: Virtual IRQ number
* @hwirq: HW interrupt number
*
* Return: Always returns 0.
*/
static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
set_irq_flags(irq, IRQF_VALID);
return 0;
}
/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
.map = xilinx_pcie_intx_map,
};
/* PCIe HW Functions */
/**
* xilinx_pcie_intr_handler - Interrupt Service Handler
* @irq: IRQ number
* @data: PCIe port information
*
* Return: IRQ_HANDLED on success and IRQ_NONE on failure
*/
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
u32 val, mask, status, msi_data;
/* Read interrupt decode and mask registers */
val = pcie_read(port, XILINX_PCIE_REG_IDR);
mask = pcie_read(port, XILINX_PCIE_REG_IMR);
status = val & mask;
if (!status)
return IRQ_NONE;
if (status & XILINX_PCIE_INTR_LINK_DOWN)
dev_warn(port->dev, "Link Down\n");
if (status & XILINX_PCIE_INTR_ECRC_ERR)
dev_warn(port->dev, "ECRC failed\n");
if (status & XILINX_PCIE_INTR_STR_ERR)
dev_warn(port->dev, "Streaming error\n");
if (status & XILINX_PCIE_INTR_HOT_RESET)
dev_info(port->dev, "Hot reset\n");
if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
dev_warn(port->dev, "ECAM access timeout\n");
if (status & XILINX_PCIE_INTR_CORRECTABLE) {
dev_warn(port->dev, "Correctable error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_NONFATAL) {
dev_warn(port->dev, "Non fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_FATAL) {
dev_warn(port->dev, "Fatal error message\n");
xilinx_pcie_clear_err_interrupts(port);
}
if (status & XILINX_PCIE_INTR_INTX) {
/* INTx interrupt received */
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
/* Check whether interrupt valid */
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
dev_warn(port->dev, "RP Intr FIFO1 read error\n");
return IRQ_HANDLED;
}
/* Clear interrupt FIFO register 1 */
pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
/* Handle INTx Interrupt */
val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
generic_handle_irq(irq_find_mapping(port->irq_domain, val));
}
if (status & XILINX_PCIE_INTR_MSI) {
/* MSI Interrupt */
val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
dev_warn(port->dev, "RP Intr FIFO1 read error\n");
return IRQ_HANDLED;
}
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
/* Clear interrupt FIFO register 1 */
pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
/* Handle MSI Interrupt */
generic_handle_irq(msi_data);
}
}
}
if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
dev_warn(port->dev, "Slave unsupported request\n");
if (status & XILINX_PCIE_INTR_SLV_UNEXP)
dev_warn(port->dev, "Slave unexpected completion\n");
if (status & XILINX_PCIE_INTR_SLV_COMPL)
dev_warn(port->dev, "Slave completion timeout\n");
if (status & XILINX_PCIE_INTR_SLV_ERRP)
dev_warn(port->dev, "Slave Error Poison\n");
if (status & XILINX_PCIE_INTR_SLV_CMPABT)
dev_warn(port->dev, "Slave Completer Abort\n");
if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
dev_warn(port->dev, "Slave Illegal Burst\n");
if (status & XILINX_PCIE_INTR_MST_DECERR)
dev_warn(port->dev, "Master decode error\n");
if (status & XILINX_PCIE_INTR_MST_SLVERR)
dev_warn(port->dev, "Master slave error\n");
if (status & XILINX_PCIE_INTR_MST_ERRP)
dev_warn(port->dev, "Master error poison\n");
/* Clear the Interrupt Decode register */
pcie_write(port, status, XILINX_PCIE_REG_IDR);
return IRQ_HANDLED;
}
/**
* xilinx_pcie_free_irq_domain - Free IRQ domain
* @port: PCIe port information
*/
static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
{
int i;
u32 irq, num_irqs;
/* Free IRQ Domain */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
free_pages(port->msi_pages, 0);
num_irqs = XILINX_NUM_MSI_IRQS;
} else {
/* INTx */
num_irqs = 4;
}
for (i = 0; i < num_irqs; i++) {
irq = irq_find_mapping(port->irq_domain, i);
if (irq > 0)
irq_dispose_mapping(irq);
}
irq_domain_remove(port->irq_domain);
}
/**
* xilinx_pcie_init_irq_domain - Initialize IRQ domain
* @port: PCIe port information
*
* Return: '0' on success and error value on failure
*/
static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
/* Setup INTx */
pcie_intc_node = of_get_next_child(node, NULL);
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
return -ENODEV;
}
port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
&intx_domain_ops,
port);
if (!port->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
}
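/*
* Note: when CONFIG_PCI_MSI is enabled, the INTx domain set up above is
* overwritten below, so INTx decode in the interrupt handler resolves
* through the MSI domain.
*/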
/* Setup MSI */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
port->irq_domain = irq_domain_add_linear(node,
XILINX_NUM_MSI_IRQS,
&msi_domain_ops,
&xilinx_pcie_msi_chip);
if (!port->irq_domain) {
dev_err(dev, "Failed to get a MSI IRQ domain\n");
return -ENODEV;
}
xilinx_pcie_enable_msi(port);
}
return 0;
}
/**
* xilinx_pcie_init_port - Initialize hardware
* @port: PCIe port information
*/
static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
{
if (xilinx_pcie_link_is_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
dev_info(port->dev, "PCIe Link is DOWN\n");
/* Disable all interrupts */
pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
XILINX_PCIE_REG_IMR);
/* Clear pending interrupts */
pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
XILINX_PCIE_IMR_ALL_MASK,
XILINX_PCIE_REG_IDR);
/* Enable all interrupts */
pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
/* Enable the Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
XILINX_PCIE_REG_RPSC_BEN,
XILINX_PCIE_REG_RPSC);
}
/**
* xilinx_pcie_setup - Setup memory resources
* @nr: Bus number
* @sys: Per controller structure
*
* Return: Always returns '1'
*/
static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct xilinx_pcie_port *port = sys_to_pcie(sys);
list_splice_init(&port->resources, &sys->resources);
return 1;
}
/**
* xilinx_pcie_scan_bus - Scan PCIe bus for devices
* @nr: Bus number
* @sys: Per controller structure
*
* Return: Valid Bus pointer on success and NULL on failure
*/
static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
struct xilinx_pcie_port *port = sys_to_pcie(sys);
struct pci_bus *bus;
port->root_busno = sys->busnr;
bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
sys, &sys->resources);
return bus;
}
/**
* xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
* @port: PCIe port information
*
* Return: '0' on success and error value on failure
*/
static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct resource *mem;
resource_size_t offset;
struct of_pci_range_parser parser;
struct of_pci_range range;
struct pci_host_bridge_window *win;
int err = 0, mem_resno = 0;
/* Get the ranges */
if (of_pci_range_parser_init(&parser, node)) {
dev_err(dev, "missing \"ranges\" property\n");
return -EINVAL;
}
/* Parse the ranges and add the resources found to the list */
for_each_of_pci_range(&parser, &range) {
if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
dev_err(dev, "Maximum memory resources exceeded\n");
return -EINVAL;
}
mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
if (!mem) {
err = -ENOMEM;
goto free_resources;
}
of_pci_range_to_resource(&range, node, mem);
switch (mem->flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_MEM:
offset = range.cpu_addr - range.pci_addr;
mem_resno++;
break;
default:
err = -EINVAL;
break;
}
if (err < 0) {
dev_warn(dev, "Invalid resource found %pR\n", mem);
continue;
}
err = request_resource(&iomem_resource, mem);
if (err)
goto free_resources;
pci_add_resource_offset(&port->resources, mem, offset);
}
/* Get the bus range */
if (of_pci_parse_bus_range(node, &port->bus_range)) {
u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
u8 last;
last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
port->bus_range = (struct resource) {
.name = node->name,
.start = 0,
.end = last,
.flags = IORESOURCE_BUS,
};
}
/* Register bus resource */
pci_add_resource(&port->resources, &port->bus_range);
return 0;
free_resources:
release_child_resources(&iomem_resource);
list_for_each_entry(win, &port->resources, list)
devm_kfree(dev, win->res);
pci_free_resource_list(&port->resources);
return err;
}
/**
* xilinx_pcie_parse_dt - Parse Device tree
* @port: PCIe port information
*
* Return: '0' on success and error value on failure
*/
static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct resource regs;
const char *type;
int err;
type = of_get_property(node, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
dev_err(dev, "invalid \"device_type\" %s\n", type);
return -EINVAL;
}
err = of_address_to_resource(node, 0, &regs);
if (err) {
dev_err(dev, "missing \"reg\" property\n");
return err;
}
port->reg_base = devm_ioremap_resource(dev, &regs);
if (IS_ERR(port->reg_base))
return PTR_ERR(port->reg_base);
port->irq = irq_of_parse_and_map(node, 0);
err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
IRQF_SHARED, "xilinx-pcie", port);
if (err) {
dev_err(dev, "unable to request irq %d\n", port->irq);
return err;
}
return 0;
}
/**
* xilinx_pcie_probe - Probe function
* @pdev: Platform device pointer
*
* Return: '0' on success and error value on failure
*/
static int xilinx_pcie_probe(struct platform_device *pdev)
{
struct xilinx_pcie_port *port;
struct hw_pci hw;
struct device *dev = &pdev->dev;
int err;
if (!dev->of_node)
return -ENODEV;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->dev = dev;
err = xilinx_pcie_parse_dt(port);
if (err) {
dev_err(dev, "Parsing DT failed\n");
return err;
}
xilinx_pcie_init_port(port);
err = xilinx_pcie_init_irq_domain(port);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
return err;
}
/*
* Parse PCI ranges, configuration bus range and
* request their resources
*/
INIT_LIST_HEAD(&port->resources);
err = xilinx_pcie_parse_and_add_res(port);
if (err) {
dev_err(dev, "Failed adding resources\n");
return err;
}
platform_set_drvdata(pdev, port);
/* Register the device */
memset(&hw, 0, sizeof(hw));
hw = (struct hw_pci) {
.nr_controllers = 1,
.private_data = (void **)&port,
.setup = xilinx_pcie_setup,
.map_irq = of_irq_parse_and_map_pci,
.add_bus = xilinx_pcie_add_bus,
.scan = xilinx_pcie_scan_bus,
.ops = &xilinx_pcie_ops,
};
pci_common_init_dev(dev, &hw);
return 0;
}
/**
* xilinx_pcie_remove - Remove function
* @pdev: Platform device pointer
*
* Return: '0' always
*/
static int xilinx_pcie_remove(struct platform_device *pdev)
{
struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
xilinx_pcie_free_irq_domain(port);
return 0;
}
static const struct of_device_id xilinx_pcie_of_match[] = {
{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
{}
};
static struct platform_driver xilinx_pcie_driver = {
.driver = {
.name = "xilinx-pcie",
.owner = THIS_MODULE,
.of_match_table = xilinx_pcie_of_match,
.suppress_bind_attrs = true,
},
.probe = xilinx_pcie_probe,
.remove = xilinx_pcie_remove,
};
module_platform_driver(xilinx_pcie_driver);
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
MODULE_LICENSE("GPL v2");