Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

arch/ia64/sn/pci/Makefile

@@ -0,0 +1,12 @@
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn pci general routines.
ccflags-y := -Iarch/ia64/sn/include
obj-y := pci_dma.o tioca_provider.o tioce_provider.o pcibr/

arch/ia64/sn/pci/pci_dma.c

@@ -0,0 +1,487 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
*
* Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
* a description of how these routines should be used.
*/
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/**
* sn_dma_supported - test a DMA mask
* @dev: device to test
* @mask: DMA mask to test
*
* Return whether the given PCI device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during PCI bus mastering, then you would pass 0x00ffffff as the mask to
* this function. Of course, SN only supports devices that have 32 or more
* address bits when using the PMU.
*/
static int sn_dma_supported(struct device *dev, u64 mask)
{
BUG_ON(!dev_is_pci(dev));
if (mask < 0x7fffffff)
return 0;
return 1;
}
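/*
 * Example: a 24-bit mask such as 0x00ffffff is rejected by the check
 * above, while 0xffffffff (32 bits) or anything wider is accepted.
 */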
/**
* sn_dma_set_mask - set the DMA mask
* @dev: device to set
* @dma_mask: new mask
*
* Set @dev's DMA mask if the hw supports it.
*/
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG_ON(!dev_is_pci(dev));
if (!sn_dma_supported(dev, dma_mask))
return 0;
*dev->dma_mask = dma_mask;
return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
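/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * mask negotiation normally goes through the generic DMA API, which
 * is backed on SN by sn_dma_supported()/sn_dma_set_mask():
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	-- SN rejects masks narrower than 32 bits
 */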
/**
* sn_dma_alloc_coherent - allocate memory for coherent DMA
* @dev: device to allocate for
* @size: size of the region
* @dma_handle: DMA (bus) address
* @flags: memory allocation flags
*
* dma_alloc_coherent() returns a pointer to a memory region suitable for
* coherent DMA traffic to/from a PCI device. On SN platforms, this means
* that @dma_handle will have the %PCIIO_DMA_CMD flag set.
*
* This interface is usually used for "command" streams (e.g. the command
* queue for a SCSI controller). See Documentation/DMA-API.txt for
* more information.
*/
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
void *cpuaddr;
unsigned long phys_addr;
int node;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(!dev_is_pci(dev));
/*
* Allocate the memory.
*/
node = pcibus_to_node(pdev->bus);
	if (likely(node >= 0)) {
struct page *p = alloc_pages_exact_node(node,
flags, get_order(size));
if (likely(p))
cpuaddr = page_address(p);
else
return NULL;
} else
cpuaddr = (void *)__get_free_pages(flags, get_order(size));
if (unlikely(!cpuaddr))
return NULL;
memset(cpuaddr, 0x0, size);
/* physical addr. of the memory we just got */
phys_addr = __pa(cpuaddr);
/*
* 64 bit address translations should never fail.
* 32 bit translations can fail if there are insufficient mapping
* resources.
*/
*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
SN_DMA_ADDR_PHYS);
if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
free_pages((unsigned long)cpuaddr, get_order(size));
return NULL;
}
return cpuaddr;
}
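/*
 * Usage sketch (hypothetical; RING_BYTES is a made-up driver constant):
 * a driver allocating a command ring reaches this routine through the
 * generic API and sn_dma_ops:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 */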
/**
 * sn_dma_free_coherent - free memory associated with a coherent DMAable region
* @dev: device to free for
* @size: size to free
* @cpu_addr: kernel virtual address to free
* @dma_handle: DMA address associated with this region
*
* Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
* any associated IOMMU mappings.
*/
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(!dev_is_pci(dev));
provider->dma_unmap(pdev, dma_handle, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page to map
 * @offset: offset into @page at which the region starts
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region starting at @offset within @page for DMA and return the
 * DMA address.
*
* We map this to the one step pcibr_dmamap_trans interface rather than
* the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
* no way of saving the dmamap handle from the alloc to later free
* (which is pretty much unacceptable).
*
* mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
* dma_map_consistent() so that writes force a flush of pending DMA.
* (See "SGI Altix Architecture Considerations for Linux Device Drivers",
* Document Number: 007-4763-001)
*
* TODO: simplify our interface;
* figure out how to save dmamap handle so can use two step.
*/
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
void *cpu_addr = page_address(page) + offset;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int dmabarr;
dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
BUG_ON(!dev_is_pci(dev));
phys_addr = __pa(cpu_addr);
if (dmabarr)
dma_addr = provider->dma_map_consistent(pdev, phys_addr,
size, SN_DMA_ADDR_PHYS);
else
dma_addr = provider->dma_map(pdev, phys_addr, size,
SN_DMA_ADDR_PHYS);
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
return 0;
}
return dma_addr;
}
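/*
 * Usage sketch (hypothetical driver code) for the write-barrier
 * behaviour described above, using the dma_attrs interface of this
 * kernel generation:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	dma_addr = dma_map_single_attrs(&pdev->dev, buf, len,
 *					DMA_TO_DEVICE, &attrs);
 */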
/**
 * sn_dma_unmap_page - unmap a DMA mapped page
* @dev: device to sync
* @dma_addr: DMA address to sync
* @size: size of region
* @direction: DMA direction
* @attrs: optional dma attributes
*
* This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain. On SN, we're always cache
* coherent, so we just need to free any ATEs associated with this mapping.
*/
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(!dev_is_pci(dev));
provider->dma_unmap(pdev, dma_addr, dir);
}
/**
* sn_dma_unmap_sg - unmap a DMA scatterlist
* @dev: device to unmap
* @sg: scatterlist to unmap
* @nhwentries: number of scatterlist entries
* @direction: DMA direction
* @attrs: optional dma attributes
*
* Unmap a set of streaming mode DMA translations.
*/
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
struct scatterlist *sg;
BUG_ON(!dev_is_pci(dev));
for_each_sg(sgl, sg, nhwentries, i) {
provider->dma_unmap(pdev, sg->dma_address, dir);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
}
/**
* sn_dma_map_sg - map a scatterlist for DMA
* @dev: device to map for
* @sg: scatterlist to map
* @nhwentries: number of entries
* @direction: direction of the DMA transaction
* @attrs: optional dma attributes
*
* mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
* dma_map_consistent() so that writes force a flush of pending DMA.
* (See "SGI Altix Architecture Considerations for Linux Device Drivers",
* Document Number: 007-4763-001)
*
* Maps each entry of @sg for DMA.
*/
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
unsigned long phys_addr;
struct scatterlist *saved_sg = sgl, *sg;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
int dmabarr;
dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
BUG_ON(!dev_is_pci(dev));
/*
* Setup a DMA address for each entry in the scatterlist.
*/
for_each_sg(sgl, sg, nhwentries, i) {
dma_addr_t dma_addr;
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
if (dmabarr)
dma_addr = provider->dma_map_consistent(pdev,
phys_addr,
sg->length,
SN_DMA_ADDR_PHYS);
else
dma_addr = provider->dma_map(pdev, phys_addr,
sg->length,
SN_DMA_ADDR_PHYS);
sg->dma_address = dma_addr;
if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
/*
* Free any successfully allocated entries.
*/
if (i > 0)
sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
return 0;
}
sg->dma_length = sg->length;
}
return nhwentries;
}
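/*
 * Usage sketch (hypothetical; program_hw() is a made-up helper):
 * callers reach this through dma_map_sg() and must treat a return of
 * 0 as failure -- on SN the mapping is all-or-nothing:
 *
 *	n = dma_map_sg(&pdev->dev, sgl, nents, DMA_FROM_DEVICE);
 *	for_each_sg(sgl, sg, n, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 */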
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
BUG_ON(!dev_is_pci(dev));
}
static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction dir)
{
BUG_ON(!dev_is_pci(dev));
}
static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
BUG_ON(!dev_is_pci(dev));
}
static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
BUG_ON(!dev_is_pci(dev));
}
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
u64 sn_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
if (!SN_PCIBUS_BUSSOFT(bus))
return ERR_PTR(-ENODEV);
return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
}
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
unsigned long addr;
int ret;
struct ia64_sal_retval isrv;
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
* 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
pci_domain_nr(bus), bus->number,
0, /* io */
0, /* read */
port, size, __pa(val));
if (isrv.status == 0)
return size;
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work round PCI chipset
* bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
if (!SN_PCIBUS_BUSSOFT(bus))
return -ENODEV;
addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
addr += port;
ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
if (ret == 2)
return -EINVAL;
if (ret == 1)
*val = -1;
return size;
}
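/*
 * Example (illustrative): a single-byte read of the VGA input status
 * port would arrive here as sn_pci_legacy_read(bus, 0x3da, &val, 1),
 * trying the SAL fast path first and falling back to a direct
 * uncached read of the bus's legacy I/O window.
 */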
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
int ret = size;
unsigned long paddr;
unsigned long *addr;
struct ia64_sal_retval isrv;
/*
* First, try the SN_SAL_IOIF_PCI_SAFE SAL call which can work
* around hw issues at the pci bus level. SGI proms older than
* 4.10 don't implement this.
*/
SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
pci_domain_nr(bus), bus->number,
0, /* io */
1, /* write */
port, size, __pa(&val));
if (isrv.status == 0)
return size;
/*
* If the above failed, retry using the SAL_PROBE call which should
* be present in all proms (but which cannot work round PCI chipset
* bugs). This code is retained for compatibility with old
* pre-4.10 proms, and should be removed at some point in the future.
*/
if (!SN_PCIBUS_BUSSOFT(bus)) {
ret = -ENODEV;
goto out;
}
/* Put the phys addr in uncached space */
paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
paddr += port;
addr = (unsigned long *)paddr;
switch (size) {
case 1:
*(volatile u8 *)(addr) = (u8)(val);
break;
case 2:
*(volatile u16 *)(addr) = (u16)(val);
break;
case 4:
*(volatile u32 *)(addr) = (u32)(val);
break;
default:
ret = -EINVAL;
break;
}
out:
return ret;
}
static struct dma_map_ops sn_dma_ops = {
.alloc = sn_dma_alloc_coherent,
.free = sn_dma_free_coherent,
.map_page = sn_dma_map_page,
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
.unmap_sg = sn_dma_unmap_sg,
.sync_single_for_cpu = sn_dma_sync_single_for_cpu,
.sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
.sync_single_for_device = sn_dma_sync_single_for_device,
.sync_sg_for_device = sn_dma_sync_sg_for_device,
.mapping_error = sn_dma_mapping_error,
.dma_supported = sn_dma_supported,
};
void sn_dma_init(void)
{
dma_ops = &sn_dma_ops;
}

arch/ia64/sn/pci/pcibr/Makefile

@@ -0,0 +1,13 @@
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 io routines.
ccflags-y := -Iarch/ia64/sn/include
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o

arch/ia64/sn/pci/pcibr/pcibr_ate.c

@@ -0,0 +1,177 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
int pcibr_invalidate_ate; /* by default don't invalidate ATE on free */
/*
* mark_ate: Mark the ate as either free or inuse.
*/
static void mark_ate(struct ate_resource *ate_resource, int start, int number,
u64 value)
{
u64 *ate = ate_resource->ate;
int index;
int length = 0;
for (index = start; length < number; index++, length++)
ate[index] = value;
}
/*
* find_free_ate: Find the first free ate index starting from the given
* index for the desired consecutive count.
*/
static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
{
u64 *ate = ate_resource->ate;
int index;
int start_free;
for (index = start; index < ate_resource->num_ate;) {
if (!ate[index]) {
int i;
int free;
free = 0;
start_free = index; /* Found start free ate */
for (i = start_free; i < ate_resource->num_ate; i++) {
if (!ate[i]) { /* This is free */
if (++free == count)
return start_free;
} else {
index = i + 1;
break;
}
}
if (i >= ate_resource->num_ate)
return -1;
} else
index++; /* Try next ate */
}
return -1;
}
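/*
 * Worked example (assuming an 8-entry map): with ate[] =
 * {1,1,0,0,1,0,0,0}, find_free_ate(res, 0, 3) skips the two-entry
 * free run at index 2 (cut short by the in-use entry at 4) and
 * returns 5, the first index with three consecutive free entries.
 */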
/*
* free_ate_resource: Free the requested number of ATEs.
*/
static inline void free_ate_resource(struct ate_resource *ate_resource,
int start)
{
mark_ate(ate_resource, start, ate_resource->ate[start], 0);
if ((ate_resource->lowest_free_index > start) ||
(ate_resource->lowest_free_index < 0))
ate_resource->lowest_free_index = start;
}
/*
* alloc_ate_resource: Allocate the requested number of ATEs.
*/
static inline int alloc_ate_resource(struct ate_resource *ate_resource,
int ate_needed)
{
int start_index;
/*
* Check for ate exhaustion.
*/
if (ate_resource->lowest_free_index < 0)
return -1;
/*
* Find the required number of free consecutive ates.
*/
start_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index,
ate_needed);
if (start_index >= 0)
mark_ate(ate_resource, start_index, ate_needed, ate_needed);
ate_resource->lowest_free_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);
return start_index;
}
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
* Indices in rm map range from 1..num_entries. Indices returned
* to caller range from 0..num_entries-1.
*
* Return the start index on success, -1 on failure.
*/
int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
int status;
unsigned long flags;
spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
return status;
}
/*
* Setup an Address Translation Entry as specified. Use either the Bridge
* internal maps or the external map RAM, as appropriate.
*/
static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info,
int ate_index)
{
if (ate_index < pcibus_info->pbi_int_ate_size) {
return pcireg_int_ate_addr(pcibus_info, ate_index);
}
panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
}
/*
* Update the ate.
*/
inline void
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
volatile u64 ate)
{
while (count-- > 0) {
if (ate_index < pcibus_info->pbi_int_ate_size) {
pcireg_int_ate_set(pcibus_info, ate_index, ate);
} else {
panic("ate_write: invalid ate_index 0x%x", ate_index);
}
ate_index++;
ate += IOPGSIZE;
}
pcireg_tflush_get(pcibus_info); /* wait until Bridge PIO complete */
}
void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
{
volatile u64 ate;
int count;
unsigned long flags;
if (pcibr_invalidate_ate) {
/* For debugging purposes, clear the valid bit in the ATE */
ate = *pcibr_ate_addr(pcibus_info, index);
count = pcibus_info->pbi_int_ate_resource.ate[index];
ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
}
spin_lock_irqsave(&pcibus_info->pbi_lock, flags);
free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
spin_unlock_irqrestore(&pcibus_info->pbi_lock, flags);
}

arch/ia64/sn/pci/pcibr/pcibr_dma.c

@@ -0,0 +1,413 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tiocp.h>
#include "tio.h"
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
extern int sn_ioif_inited;
/* =====================================================================
* DMA MANAGEMENT
*
* The Bridge ASIC provides three methods of doing DMA: via a "direct map"
* register available in 32-bit PCI space (which selects a contiguous 2G
* address space on some other widget), via "direct" addressing via 64-bit
* PCI space (all destination information comes from the PCI address,
* including transfer attributes), and via a "mapped" region that allows
* a bunch of different small mappings to be established with the PMU.
*
* For efficiency, we most prefer to use the 32bit direct mapping facility,
* since it requires no resource allocations. The advantage of using the
* PMU over the 64-bit direct is that single-cycle PCI addressing can be
* used; the advantage of using 64-bit direct over PMU addressing is that
* we do not have to allocate entries in the PMU.
*/
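/*
 * In the routines below this corresponds, roughly, to:
 *   32-bit direct map -> pcibr_dmatrans_direct32()
 *   64-bit direct     -> pcibr_dmatrans_direct64()
 *   PMU (ATE) mapped  -> pcibr_dmamap_ate32()
 */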
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
u8 internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
pdi_linux_pcidev->devfn)) - 1;
int ate_count;
int ate_index;
u64 ate_flags = flags | PCI32_ATE_V;
u64 ate;
u64 pci_addr;
u64 xio_addr;
u64 offset;
	/* PIC in PCI-X mode does not support 32bit PageMap mode */
if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
return 0;
}
/* Calculate the number of ATEs needed. */
if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
+req_size /* max mapping bytes */
- 1) + 1; /* round UP */
} else { /* assume requested target is page aligned */
ate_count = IOPG(req_size /* max mapping bytes */
- 1) + 1; /* round UP */
}
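	/*
	 * Worked example (assuming 16KB I/O pages): an unaligned 20KB
	 * request must budget for up to IOPGSIZE - 1 leading bytes, so
	 * ate_count = IOPG(16383 + 20480 - 1) + 1 = 3; the same request
	 * page-aligned needs only IOPG(20480 - 1) + 1 = 2.
	 */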
	/* Allocate the ATEs. */
ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
if (ate_index < 0)
return 0;
/* In PCI-X mode, Prefetch not supported */
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
	if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
/* If PIC, put the targetid in the ATE */
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
/*
* If we're mapping for MSI, set the MSI bit in the ATE. If it's a
* TIOCP based pci bus, we also need to set the PIO bit in the ATE.
*/
if (dma_flags & SN_DMA_MSI) {
ate |= PCI32_ATE_MSI;
if (IS_TIOCP_SOFT(pcibus_info))
ate |= PCI32_ATE_PIO;
}
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
* Set up the DMA mapped Address.
*/
pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
/*
* If swap was set in device in pcibr_endian_set()
* we need to turn swapping on.
*/
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
u64 dma_attributes, int dma_flags)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
pci_addr = IS_PIC_SOFT(pcibus_info) ?
PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
pci_addr = paddr;
pci_addr |= dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
pci_addr &= ~PCI64_ATTR_PREF;
/* Handle Bridge Chipset differences */
if (IS_PIC_SOFT(pcibus_info)) {
pci_addr |=
((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
pci_addr |= (dma_flags & SN_DMA_MSI) ?
TIOCP_PCI64_CMDTYPE_MSI :
TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
pci_addr |= PCI64_ATTR_VIRTUAL;
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
u64 xio_addr;
u64 xio_base;
u64 offset;
u64 endoff;
if (IS_PCIX(pcibus_info)) {
return 0;
}
if (dma_flags & SN_DMA_MSI)
return 0;
if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
else
xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
endoff = req_size + offset;
if ((req_size > (1ULL << 31)) || /* Too Big */
(xio_addr < xio_base) || /* Out of range for mappings */
(endoff > (1ULL << 31))) { /* Too Big */
return 0;
}
return PCI32_DIRECT_BASE | offset;
}
/*
* Wrapper routine for freeing DMA maps
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
{
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_info *pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_pcibus_info;
if (IS_PCI32_MAPPED(dma_handle)) {
int ate_index;
ate_index =
IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
pcibr_ate_free(pcibus_info, ate_index);
}
}
/*
* On SN systems there is a race condition between a PIO read response and
* DMA's. In rare cases, the read response may beat the DMA, causing the
* driver to think that data in memory is complete and meaningful. This code
* eliminates that race. This routine is called by the PIO read routines
* after doing the read. For PIC this routine then forces a fake interrupt
* on another line, which is logically associated with the slot that the PIO
* is addressed to. It then spins while watching the memory location that
* the interrupt is targeted to. When the interrupt response arrives, we
* are sure that the DMA has landed in memory and it is safe for the driver
* to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
* Bridge register since it ensures the data has entered the coherence domain,
* unlike the PIC Device(x) Write Request Buffer Flush register.
*/
void sn_dma_flush(u64 addr)
{
nasid_t nasid;
int is_tio;
int wid_num;
int i, j;
unsigned long flags;
u64 itte;
struct hubdev_info *hubinfo;
struct sn_flush_device_kernel *p;
struct sn_flush_device_common *common;
struct sn_flush_nasid_entry *flush_nasid_list;
if (!sn_ioif_inited)
return;
nasid = NASID_GET(addr);
if (-1 == nasid_to_cnodeid(nasid))
return;
hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
BUG_ON(!hubinfo);
flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL)
return;
is_tio = (nasid & 1);
if (is_tio) {
int itte_index;
if (TIO_HWIN(addr))
itte_index = 0;
else if (TIO_BWIN_WINDOWNUM(addr))
itte_index = TIO_BWIN_WINDOWNUM(addr);
else
itte_index = -1;
if (itte_index >= 0) {
itte = flush_nasid_list->iio_itte[itte_index];
if (! TIO_ITTE_VALID(itte))
return;
wid_num = TIO_ITTE_WIDGET(itte);
} else
wid_num = TIO_SWIN_WIDGETNUM(addr);
} else {
if (BWIN_WINDOWNUM(addr)) {
itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
wid_num = IIO_ITTE_WIDGET(itte);
} else
wid_num = SWIN_WIDGETNUM(addr);
}
if (flush_nasid_list->widget_p[wid_num] == NULL)
return;
p = &flush_nasid_list->widget_p[wid_num][0];
/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
common = p->common;
for (j = 0; j < PCI_ROM_RESOURCE; j++) {
if (common->sfdl_bar_list[j].start == 0)
break;
if (addr >= common->sfdl_bar_list[j].start
&& addr <= common->sfdl_bar_list[j].end)
break;
}
if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
break;
}
/* if no matching BAR, return without doing anything. */
if (i == DEV_PER_WIDGET)
return;
/*
* For TIOCP use the Device(x) Write Request Buffer Flush Bridge
* register since it ensures the data has entered the coherence
* domain, unlike PIC.
*/
if (is_tio) {
/*
* Note: devices behind TIOCE should never be matched in the
* above code, and so the following code is PIC/CP centric.
* If CE ever needs the sn_dma_flush mechanism, we will have
* to account for that here and in tioce_bus_fixup().
*/
u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
u32 revnum = XWIDGET_PART_REV_NUM(tio_id);
/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
return;
} else {
pcireg_wrb_flush_get(common->sfdl_pcibus_info,
(common->sfdl_slot - 1));
}
} else {
spin_lock_irqsave(&p->sfdl_flush_lock, flags);
*common->sfdl_flush_addr = 0;
/* force an interrupt. */
*(volatile u32 *)(common->sfdl_force_int_addr) = 1;
/* wait for the interrupt to come back. */
while (*(common->sfdl_flush_addr) != 0x10f)
cpu_relax();
/* okay, everything is synched up. */
spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
}
return;
}
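/*
 * Example (illustrative): the PIO read path is expected to call this
 * right after the read data arrives, e.g.
 *
 *	val = *(volatile u32 *)addr;
 *	sn_dma_flush((u64)addr);
 *
 * so a driver polling a "DMA done" flag cannot observe the flag
 * before the DMA data itself has landed in memory.
 */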
/*
* DMA interfaces. Called from pci_dma.c routines.
*/
dma_addr_t
pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
/* SN cannot support DMA addresses smaller than 32 bits. */
if (hwdev->dma_mask < 0x7fffffff) {
return 0;
}
if (hwdev->dma_mask == ~0UL) {
/*
* Handle the most common case: 64 bit cards. This
* call should always succeed.
*/
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_PREF, dma_flags);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
size, 0, dma_flags);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
* so we use an ATE.
*/
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
size, PCI32_ATE_PREF,
dma_flags);
}
}
return dma_handle;
}
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_BAR, dma_flags);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
PCI32_ATE_BAR, dma_flags);
}
return dma_handle;
}
EXPORT_SYMBOL(sn_dma_flush);

arch/ia64/sn/pci/pcibr/pcibr_provider.c

@@ -0,0 +1,265 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
int
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
char **ssdt)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
0, 0);
return (int)ret_stuff.v0;
}
int
sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
void *resp)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
u64 segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
segment, busnum, (u64) device, (u64) action,
(u64) resp, 0, 0);
return (int)ret_stuff.v0;
}
static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
{
struct ia64_sal_retval ret_stuff;
u64 busnum;
int segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->pbi_buscommon.bs_persist_segment;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
(u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
return (int)ret_stuff.v0;
}
u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
{
long rc;
u16 uninitialized_var(ioboard); /* GCC be quiet */
nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
if (rc) {
printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
rc);
return 0;
}
return ioboard;
}
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
*/
static irqreturn_t
pcibr_error_intr_handler(int irq, void *arg)
{
struct pcibus_info *soft = arg;
if (sal_pcibr_error_interrupt(soft) < 0)
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
return IRQ_HANDLED;
}
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
int nasid, cnode, j;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
struct sn_flush_device_kernel *sn_flush_device_kernel;
struct sn_flush_device_common *common;
if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
return NULL;
}
/*
* Allocate kernel bus soft and copy from prom.
*/
soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL);
if (!soft) {
return NULL;
}
soft->pbi_buscommon.bs_base = (unsigned long)
ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
sizeof(struct pic));
spin_lock_init(&soft->pbi_lock);
/*
* register the bridge's error interrupt handler
*/
if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
IRQF_SHARED, "PCIBR error", (void *)(soft))) {
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
irq_set_handler(SGI_PCIASIC_ERROR, handle_level_irq);
sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);
/*
* Update the Bridge with the "kernel" pagesize
*/
if (PAGE_SIZE < 16384) {
pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
} else {
pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
}
nasid = NASID_GET(soft->pbi_buscommon.bs_base);
cnode = nasid_to_cnodeid(nasid);
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
if (hubdev_info->hdi_flush_nasid_list.widget_p) {
sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
widget_p[(int)soft->pbi_buscommon.bs_xid];
if (sn_flush_device_kernel) {
for (j = 0; j < DEV_PER_WIDGET;
j++, sn_flush_device_kernel++) {
common = sn_flush_device_kernel->common;
if (common->sfdl_slot == -1)
continue;
if ((common->sfdl_persistent_segment ==
soft->pbi_buscommon.bs_persist_segment) &&
(common->sfdl_persistent_busnum ==
soft->pbi_buscommon.bs_persist_busnum))
common->sfdl_pcibus_info =
soft;
}
}
}
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);
if (!soft->pbi_int_ate_resource.ate) {
kfree(soft);
return NULL;
}
return soft;
}
void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
if (! sn_irq_info->irq_bridge)
return;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
pcireg_force_intr_set(pcibus_info, bit);
}
}
void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
u64 xtalk_addr = sn_irq_info->irq_xtalkaddr;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
/* Disable the device's IRQ */
pcireg_intr_enable_bit_clr(pcibus_info, (1 << bit));
/* Change the device's IRQ */
pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
/* Re-enable the device's IRQ */
pcireg_intr_enable_bit_set(pcibus_info, (1 << bit));
pcibr_force_interrupt(sn_irq_info);
}
}
/*
* Provider entries for PIC/CP
*/
struct sn_pcibus_provider pcibr_provider = {
.dma_map = pcibr_dma_map,
.dma_map_consistent = pcibr_dma_map_consistent,
.dma_unmap = pcibr_dma_unmap,
.bus_fixup = pcibr_bus_fixup,
.force_interrupt = pcibr_force_interrupt,
.target_interrupt = pcibr_target_interrupt
};
int
pcibr_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
return 0;
}
EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);

arch/ia64/sn/pci/pcibr/pcibr_reg.c

@@ -0,0 +1,285 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <asm/sn/io.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pic.h>
#include <asm/sn/tiocp.h>
union br_ptr {
struct tiocp tio;
struct pic pic;
};
/*
* Control Register Access -- Read/Write 0000_0020
*/
void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_setq_relaxed(&ptr->tio.cp_control, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_setq_relaxed(&ptr->pic.p_wid_control, bits);
break;
default:
panic
("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
*/
u64 pcireg_tflush_get(struct pcibus_info *pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = __sn_readq_relaxed(&ptr->tio.cp_tflush);
break;
case PCIBR_BRIDGETYPE_PIC:
ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush);
break;
default:
panic
("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
/* Read of the Target Flush should always return zero */
if (ret != 0)
panic("pcireg_tflush_get:Target Flush failed\n");
return ret;
}
/*
* Interrupt Status Register Access -- Read Only 0000_0100
*/
u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = __sn_readq_relaxed(&ptr->tio.cp_int_status);
break;
case PCIBR_BRIDGETYPE_PIC:
ret = __sn_readq_relaxed(&ptr->pic.p_int_status);
break;
default:
panic
("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
return ret;
}
/*
* Interrupt Enable Register Access -- Read/Write 0000_0108
*/
void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_setq_relaxed(&ptr->tio.cp_int_enable, bits);
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_setq_relaxed(&ptr->pic.p_int_enable, bits);
break;
default:
panic
("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
*/
void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
u64 addr)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
__sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n],
TIOCP_HOST_INTR_ADDR);
__sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n],
(addr & TIOCP_HOST_INTR_ADDR));
break;
case PCIBR_BRIDGETYPE_PIC:
__sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n],
PIC_HOST_INTR_ADDR);
__sn_setq_relaxed(&ptr->pic.p_int_addr[int_n],
(addr & PIC_HOST_INTR_ADDR));
break;
default:
panic
("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
*/
void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
writeq(1, &ptr->tio.cp_force_pin[int_n]);
break;
case PCIBR_BRIDGETYPE_PIC:
writeq(1, &ptr->pic.p_force_pin[int_n]);
break;
default:
panic
("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
/*
* Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
*/
u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret =
__sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]);
break;
case PCIBR_BRIDGETYPE_PIC:
ret =
__sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]);
break;
default:
panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr);
}
}
/* Read of the Write Buffer Flush should always return zero */
return ret;
}
void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
u64 val)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]);
break;
case PCIBR_BRIDGETYPE_PIC:
writeq(val, &ptr->pic.p_int_ate_ram[ate_index]);
break;
default:
panic
("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
ptr);
}
}
}
u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
{
union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base;
u64 __iomem *ret = NULL;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = &ptr->tio.cp_int_ate_ram[ate_index];
break;
case PCIBR_BRIDGETYPE_PIC:
ret = &ptr->pic.p_int_ate_ram[ate_index];
break;
default:
panic
("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
ptr);
}
}
return ret;
}

arch/ia64/sn/pci/tioca_provider.c

@@ -0,0 +1,677 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found); /* used by agp-sgi */
LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list); /* used by agp-sgi */
static int tioca_gart_init(struct tioca_kernel *);
/**
* tioca_gart_init - Initialize SGI TIOCA GART
 * @tioca_kern: kernel struct identifying the CA to initialize
*
* If the indicated tioca has devices present, initialize its associated
* GART MMR's and kernel memory.
*/
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
u64 ap_reg;
u64 offset;
struct page *tmp;
struct tioca_common *tioca_common;
struct tioca __iomem *ca_base;
tioca_common = tioca_kern->ca_common;
ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
if (list_empty(tioca_kern->ca_devices))
return 0;
ap_reg = 0;
/*
	 * Validate aperture size
*/
switch (CA_APERATURE_SIZE >> 20) {
case 4:
ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT); /* 4MB */
break;
case 8:
ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT); /* 8MB */
break;
case 16:
ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT); /* 16MB */
break;
case 32:
ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT); /* 32 MB */
break;
case 64:
ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT); /* 64 MB */
break;
case 128:
ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT); /* 128 MB */
break;
case 256:
ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT); /* 256 MB */
break;
case 512:
ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT); /* 512 MB */
break;
case 1024:
ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT); /* 1GB */
break;
case 2048:
ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT); /* 2GB */
break;
case 4096:
ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT); /* 4 GB */
break;
default:
printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
"0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
return -1;
}
/*
	 * Set up other aperture parameters
*/
if (PAGE_SIZE >= 16384) {
tioca_kern->ca_ap_pagesize = 16384;
ap_reg |= CA_GART_PAGE_SIZE;
} else {
tioca_kern->ca_ap_pagesize = 4096;
}
tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
tioca_kern->ca_gart_entries =
tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;
ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
ap_reg |= tioca_kern->ca_ap_bus_base;
/*
* Allocate and set up the GART
*/
tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
tmp =
alloc_pages_node(tioca_kern->ca_closest_node,
GFP_KERNEL | __GFP_ZERO,
get_order(tioca_kern->ca_gart_size));
if (!tmp) {
printk(KERN_ERR "%s: Could not allocate "
"%llu bytes (order %d) for GART\n",
__func__,
tioca_kern->ca_gart_size,
get_order(tioca_kern->ca_gart_size));
return -ENOMEM;
}
tioca_kern->ca_gart = page_address(tmp);
tioca_kern->ca_gart_coretalk_addr =
PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
/*
* Compute PCI/AGP convenience fields
*/
offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_pcigart_base =
tioca_kern->ca_gart_coretalk_addr + offset;
tioca_kern->ca_pcigart =
&tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
tioca_kern->ca_pcigart_entries =
tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_pcigart_pagemap =
kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
if (!tioca_kern->ca_pcigart_pagemap) {
free_pages((unsigned long)tioca_kern->ca_gart,
get_order(tioca_kern->ca_gart_size));
return -1;
}
offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
tioca_kern->ca_gfxgart_base =
tioca_kern->ca_gart_coretalk_addr + offset;
tioca_kern->ca_gfxgart =
&tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
tioca_kern->ca_gfxgart_entries =
tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
/*
* various control settings:
* use agp op-combining
* use GET semantics to fetch memory
* participate in coherency domain
* DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
*/
__sn_setq_relaxed(&ca_base->ca_control1,
CA_AGPDMA_OP_ENB_COMBDELAY); /* PV895469 ? */
__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
__sn_setq_relaxed(&ca_base->ca_control2,
(0x2ull << CA_GART_MEM_PARAM_SHFT));
tioca_kern->ca_gart_iscoherent = 1;
__sn_clrq_relaxed(&ca_base->ca_control2,
(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
/*
* Unmask GART fetch error interrupts. Clear residual errors first.
*/
writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
/*
	 * Program the aperture and gart registers in TIOCA
*/
writeq(ap_reg, &ca_base->ca_gart_aperature);
writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);
return 0;
}
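/*
 * Sizing example (hypothetical numbers): a 1GB aperture with 16KB
 * pages yields 65536 GART entries, i.e. a 512KB table allocated above
 * via alloc_pages_node().
 */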
/**
* tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
 * @tioca_kern: structure representing the CA
*
* Given a CA, scan all attached functions making sure they all support
* FastWrite. If so, enable FastWrite for all functions and the CA itself.
*/
void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
int cap_ptr;
u32 reg;
struct tioca __iomem *tioca_base;
struct pci_dev *pdev;
struct tioca_common *common;
common = tioca_kern->ca_common;
/*
* Scan all vga controllers on this bus making sure they all
* support FW. If not, return.
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
continue;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return; /* no AGP CAP means no FW */
pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
if (!(reg & PCI_AGP_STATUS_FW))
return; /* function doesn't support FW */
}
/*
* Set fw for all vga fn's
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
continue;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
reg |= PCI_AGP_COMMAND_FW;
pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
}
/*
* Set ca's fw to match
*/
tioca_base = (struct tioca __iomem*)common->ca_common.bs_base;
__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
}
EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
/**
* tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
* @paddr: system physical address
*
* Map @paddr into 64-bit CA bus space. No device context is necessary.
* Bits 53:0 come from the coretalk address. We just need to mask in the
* following optional bits of the 64-bit pci address:
*
* 63:60 - Coretalk Packet Type - 0x1 for Mem Get/Put (coherent)
* 0x2 for PIO (non-coherent)
* We will always use 0x1
* 55:55 - Swap bytes Currently unused
*/
static u64
tioca_dma_d64(unsigned long paddr)
{
dma_addr_t bus_addr;
bus_addr = PHYS_TO_TIODMA(paddr);
BUG_ON(!bus_addr);
BUG_ON(bus_addr >> 54);
/* Set upper nibble to Cache Coherent Memory op */
bus_addr |= (1UL << 60);
return bus_addr;
}
/**
* tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
* @pdev: linux pci_dev representing the function
* @paddr: system physical address
*
* Map @paddr into 64-bit bus space of the CA associated with @pcidev_info.
*
* The CA agp 48 bit direct address falls out as follows:
*
* When direct mapping AGP addresses, the 48 bit AGP address is
* constructed as follows:
*
* [47:40] - Low 8 bits of the page Node ID extracted from coretalk
* address [47:40]. The upper 8 node bits are fixed
* and come from the xxx register bits [5:0]
* [39:38] - Chiplet ID extracted from coretalk address [39:38]
* [37:00] - node offset extracted from coretalk address [37:00]
*
* Since the node id in general will be non-zero, and the chiplet id
* will always be non-zero, it follows that the device must support
* a dma mask of at least 0xffffffffff (40 bits) to target node 0
* and in general should be 0xffffffffffff (48 bits) to target nodes
* up to 255. Nodes above 255 need the support of the xxx register,
* and so a given CA can only directly target nodes in the range
* xxx - xxx+255.
*/
static u64
tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
struct tioca_common *tioca_common;
struct tioca __iomem *ca_base;
u64 ct_addr;
dma_addr_t bus_addr;
u32 node_upper;
u64 agp_dma_extn;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;
ct_addr = PHYS_TO_TIODMA(paddr);
if (!ct_addr)
return 0;
bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
node_upper = ct_addr >> 48;
if (node_upper > 64) {
printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
"of range\n", __func__, (void *)ct_addr);
return 0;
}
agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
printk(KERN_ERR "%s: coretalk upper node (%u) "
"mismatch with ca_agp_dma_addr_extn (%llu)\n",
__func__,
node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
return 0;
}
return bus_addr;
}
/**
* tioca_dma_mapped - create a DMA mapping using a CA GART
* @pdev: linux pci_dev representing the function
* @paddr: host physical address to map
* @req_size: len (bytes) to map
*
* Map @paddr into CA address space using the GART mechanism. The mapped
* dma_addr_t is guaranteed to be contiguous in CA bus space.
*/
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
dma_addr_t bus_addr = 0;
struct tioca_dmamap *ca_dmamap;
void *map;
unsigned long flags;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
xio_addr = PHYS_TO_TIODMA(paddr);
if (!xio_addr)
return 0;
spin_lock_irqsave(&tioca_kern->ca_lock, flags);
/*
* allocate a map struct
*/
ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
if (!ca_dmamap)
goto map_return;
/*
* Locate free entries that can hold req_size. Account for
* unaligned start/length when allocating.
*/
ps = tioca_kern->ca_ap_pagesize; /* will be power of 2 */
ps_shift = ffs(ps) - 1;
end_xio_addr = xio_addr + req_size - 1;
entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;
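	/*
	 * Worked example (assuming ps = 4096): xio_addr = 0x1010 and
	 * req_size = 0x2000 give end_xio_addr = 0x300f, so entries =
	 * (3 - 1) + 1 = 3 GART pages to cover the unaligned span.
	 */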
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
if (entry >= mapsize) {
kfree(ca_dmamap);
goto map_return;
}
bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
ca_dmamap->cad_dma_addr = bus_addr;
ca_dmamap->cad_gart_size = entries;
ca_dmamap->cad_gart_entry = entry;
list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
if (xio_addr % ps) {
tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
bus_addr += xio_addr & (ps - 1);
xio_addr &= ~(ps - 1);
xio_addr += ps;
entry++;
}
while (xio_addr < end_xio_addr) {
tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
xio_addr += ps;
entry++;
}
tioca_tlbflush(tioca_kern);
map_return:
spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
return bus_addr;
}
/**
* tioca_dma_unmap - release CA mapping resources
* @pdev: linux pci_dev representing the function
* @bus_addr: bus address returned by an earlier tioca_dma_map
* @dir: mapping direction (unused)
*
* Locate mapping resources associated with @bus_addr and release them.
* For mappings created using the direct modes (64 or 48) there are no
* resources to release.
*/
static void
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
int i, entry;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
struct tioca_dmamap *map;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
unsigned long flags;
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
	/* return straight away if this isn't a mapped address */
if (bus_addr < tioca_kern->ca_pciap_base ||
bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
return;
spin_lock_irqsave(&tioca_kern->ca_lock, flags);
list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
if (map->cad_dma_addr == bus_addr)
break;
BUG_ON(map == NULL);
entry = map->cad_gart_entry;
for (i = 0; i < map->cad_gart_size; i++, entry++) {
clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
tioca_kern->ca_pcigart[entry] = 0;
}
tioca_tlbflush(tioca_kern);
list_del(&map->cad_list);
spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
kfree(map);
}
/**
* tioca_dma_map - map pages for PCI DMA
* @pdev: linux pci_dev representing the function
* @paddr: host physical address to map
* @byte_count: bytes to map
*
* This is the main wrapper for mapping host physical pages to CA PCI space.
 * The mapping mode used is based on the device's dma_mask. As a last resort
* use the GART mapped mode.
*/
static u64
tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
u64 mapaddr;
/*
* Not supported for now ...
*/
if (dma_flags & SN_DMA_MSI)
return 0;
/*
* If card is 64 or 48 bit addressable, use a direct mapping. 32
* bit direct is so restrictive w.r.t. where the memory resides that
* we don't use it even though CA has some support.
*/
if (pdev->dma_mask == ~0UL)
mapaddr = tioca_dma_d64(paddr);
else if (pdev->dma_mask == 0xffffffffffffUL)
mapaddr = tioca_dma_d48(pdev, paddr);
else
mapaddr = 0;
/* Last resort ... use PCI portion of CA GART */
if (mapaddr == 0)
mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);
return mapaddr;
}
/**
* tioca_error_intr_handler - SGI TIO CA error interrupt handler
* @irq: unused
* @arg: pointer to tioca_common struct for the given CA
*
* Handle a CA error interrupt. Simply a wrapper around a SAL call which
* defers processing to the SGI prom.
*/
static irqreturn_t
tioca_error_intr_handler(int irq, void *arg)
{
struct tioca_common *soft = arg;
struct ia64_sal_retval ret_stuff;
u64 segment;
u64 busnum;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = soft->ca_common.bs_persist_segment;
busnum = soft->ca_common.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
segment, busnum, 0, 0, 0, 0, 0);
return IRQ_HANDLED;
}
/**
* tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
* @prom_bussoft: Common prom/kernel struct representing the bus
*
* Replicates the tioca_common pointed to by @prom_bussoft in kernel
* space. Allocates and initializes a kernel-only area for a given CA,
* and sets up an irq for handling CA error interrupts.
*
* On successful setup, returns the kernel version of tioca_common back to
* the caller.
*/
static void *
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
struct pci_bus *bus;
/* sanity check prom rev */
if (is_shub1() && sn_sal_rev() < 0x0406) {
printk
(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
"for tioca support\n", __func__);
return NULL;
}
/*
* Allocate kernel bus soft and copy from prom.
*/
tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
GFP_KERNEL);
if (!tioca_common)
return NULL;
tioca_common->ca_common.bs_base = (unsigned long)
ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
sizeof(struct tioca_common));
/* init kernel-private area */
tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
if (!tioca_kern) {
kfree(tioca_common);
return NULL;
}
tioca_kern->ca_common = tioca_common;
spin_lock_init(&tioca_kern->ca_lock);
INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
tioca_kern->ca_closest_node =
nasid_to_cnodeid(tioca_common->ca_closest_nasid);
tioca_common->ca_kernel_private = (u64) tioca_kern;
bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
tioca_common->ca_common.bs_persist_busnum);
BUG_ON(!bus);
tioca_kern->ca_devices = &bus->devices;
/* init GART */
if (tioca_gart_init(tioca_kern) < 0) {
kfree(tioca_kern);
kfree(tioca_common);
return NULL;
}
tioca_gart_found++;
list_add(&tioca_kern->ca_list, &tioca_list);
if (request_irq(SGI_TIOCA_ERROR,
tioca_error_intr_handler,
IRQF_SHARED, "TIOCA error", (void *)tioca_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for TIOCA bus %d\n",
__func__, SGI_TIOCA_ERROR,
(int)tioca_common->ca_common.bs_persist_busnum);
irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq);
sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
/* Setup locality information */
controller->node = tioca_kern->ca_closest_node;
return tioca_common;
}
static struct sn_pcibus_provider tioca_pci_interfaces = {
.dma_map = tioca_dma_map,
.dma_map_consistent = tioca_dma_map,
.dma_unmap = tioca_dma_unmap,
.bus_fixup = tioca_bus_fixup,
.force_interrupt = NULL,
.target_interrupt = NULL
};
/**
* tioca_init_provider - init SN PCI provider ops for TIO CA
*/
int
tioca_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
return 0;
}

File diff suppressed because it is too large