Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

162
drivers/char/agp/Kconfig Normal file

@@ -0,0 +1,162 @@
menuconfig AGP
tristate "/dev/agpgart (AGP Support)"
depends on ALPHA || IA64 || PARISC || PPC || X86
depends on PCI
---help---
AGP (Accelerated Graphics Port) is a bus system mainly used to
connect graphics cards to the rest of the system.
If you have an AGP system and you say Y here, it will be possible to
use the AGP features of your 3D rendering video card. This code acts
as a sort of "AGP driver" for the motherboard's chipset.
If you need more texture memory than you can get with the AGP GART
(theoretically up to 256 MB, but in practice usually 64 or 128 MB
due to kernel allocation issues), you could use PCI accesses
and have up to a couple gigs of texture space.
Note that this is the only means to have X/GLX use
write-combining with MTRR support on the AGP bus. Without it, OpenGL
direct rendering will be a lot slower but still faster than PIO.
To compile this driver as a module, choose M here: the
module will be called agpgart.
You should say Y here if you want to use GLX or DRI.
If unsure, say N.
config AGP_ALI
tristate "ALI chipset support"
depends on AGP && X86_32
---help---
This option gives you AGP support for the GLX component of
X on the following ALi chipsets. The supported chipsets
include M1541, M1621, M1631, M1632, M1641, M1647, and M1651.
For the ALi-chipset question, ALi suggests you refer to
<http://www.ali.com.tw/>.
The M1541 chipset can do AGP 1x and 2x, but note that there is an
acknowledged incompatibility with Matrox G200 cards. Due to
timing issues, this chipset cannot do AGP 2x with the G200.
This is a hardware limitation. AGP 1x seems to be fine, though.
config AGP_ATI
tristate "ATI chipset support"
depends on AGP && X86_32
---help---
This option gives you AGP support for the GLX component of
X on the ATI RadeonIGP family of chipsets.
config AGP_AMD
tristate "AMD Irongate, 761, and 762 chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
X on AMD Irongate, 761, and 762 chipsets.
config AGP_AMD64
tristate "AMD Opteron/Athlon64 on-CPU GART support"
depends on AGP && X86 && AMD_NB
help
This option gives you AGP support for the GLX component of
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
You still need an external AGP bridge like the AMD 8151, VIA
K8T400M, SiS755. It may also support other AGP bridges when loaded
with agp_try_unsupported=1.
config AGP_INTEL
tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
depends on AGP && X86
select INTEL_GTT
help
This option gives you AGP support for the GLX component of X
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
E7205 and E7505 chipsets and full support for the 810, 815, 830M,
845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets.
config AGP_NVIDIA
tristate "NVIDIA nForce/nForce2 chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
X on NVIDIA chipsets including nForce and nForce2.
config AGP_SIS
tristate "SiS chipset support"
depends on AGP && X86
help
This option gives you AGP support for the GLX component of
X on Silicon Integrated Systems [SiS] chipsets.
Note that 5591/5592 AGP chipsets are NOT supported.
config AGP_SWORKS
tristate "Serverworks LE/HE chipset support"
depends on AGP && X86_32
help
Say Y here to support the Serverworks AGP card. See
<http://www.serverworks.com/> for product descriptions and images.
config AGP_VIA
tristate "VIA chipset support"
depends on AGP && X86
help
This option gives you AGP support for the GLX component of
X on VIA MVP3/Apollo Pro chipsets.
config AGP_I460
tristate "Intel 460GX chipset support"
depends on AGP && (IA64_DIG || IA64_GENERIC)
help
This option gives you AGP GART support for the Intel 460GX chipset
for IA64 processors.
config AGP_HP_ZX1
tristate "HP ZX1 chipset AGP support"
depends on AGP && (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
help
This option gives you AGP GART support for the HP ZX1 chipset
for IA64 processors.
config AGP_PARISC
tristate "HP Quicksilver AGP support"
depends on AGP && PARISC && 64BIT
help
This option gives you AGP GART support for the HP Quicksilver
AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
workstation...)
config AGP_ALPHA_CORE
tristate "Alpha AGP support"
depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
default AGP
config AGP_UNINORTH
tristate "Apple UniNorth & U3 AGP support"
depends on AGP && PPC_PMAC
help
This option gives you AGP support for Apple machines with a
UniNorth or U3 (Apple G5) bridge.
config AGP_EFFICEON
tristate "Transmeta Efficeon support"
depends on AGP && X86_32
help
This option gives you AGP support for the Transmeta Efficeon
series processors with integrated northbridges.
config AGP_SGI_TIOCA
tristate "SGI TIO chipset AGP support"
depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)
help
This option gives you AGP GART support for the SGI TIO chipset
for IA64 processors.
config INTEL_GTT
tristate
depends on X86 && PCI

22
drivers/char/agp/Makefile Normal file

@@ -0,0 +1,22 @@
agpgart-y := backend.o frontend.o generic.o isoch.o
agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
obj-$(CONFIG_AGP_ATI) += ati-agp.o
obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_INTEL_GTT) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
obj-$(CONFIG_AGP_SGI_TIOCA) += sgi-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
obj-$(CONFIG_AGP_SWORKS) += sworks-agp.o
obj-$(CONFIG_AGP_UNINORTH) += uninorth-agp.o
obj-$(CONFIG_AGP_VIA) += via-agp.o

287
drivers/char/agp/agp.h Normal file

@@ -0,0 +1,287 @@
/*
* AGPGART
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2004 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _AGP_BACKEND_PRIV_H
#define _AGP_BACKEND_PRIV_H 1
#include <asm/agp.h> /* for flush_agp_cache() */
#define PFX "agpgart: "
//#define AGP_DEBUG 1
#ifdef AGP_DEBUG
#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
#else
#define DBG(x,y...) do { } while (0)
#endif
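/*
 * Illustrative usage (added for clarity, not part of the original
 * header): with AGP_DEBUG defined, a call such as
 *
 * DBG("inserting %d pages", mem->page_count);
 *
 * expands to printk(KERN_DEBUG "agpgart: %s: inserting %d pages\n",
 * __func__, mem->page_count), picking up the PFX prefix and the calling
 * function's name; with AGP_DEBUG undefined it compiles away to an
 * empty do { } while (0).
 */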
extern struct agp_bridge_data *agp_bridge;
enum aper_size_type {
U8_APER_SIZE,
U16_APER_SIZE,
U32_APER_SIZE,
LVL2_APER_SIZE,
FIXED_APER_SIZE
};
struct gatt_mask {
unsigned long mask;
u32 type;
/* totally device specific, for integrated chipsets that
* might have different types of memory masks. For other
* devices this will probably be ignored */
};
#define AGP_PAGE_DESTROY_UNMAP 1
#define AGP_PAGE_DESTROY_FREE 2
struct aper_size_info_8 {
int size;
int num_entries;
int page_order;
u8 size_value;
};
struct aper_size_info_16 {
int size;
int num_entries;
int page_order;
u16 size_value;
};
struct aper_size_info_32 {
int size;
int num_entries;
int page_order;
u32 size_value;
};
struct aper_size_info_lvl2 {
int size;
int num_entries;
u32 size_value;
};
struct aper_size_info_fixed {
int size;
int num_entries;
int page_order;
};
struct agp_bridge_driver {
struct module *owner;
const void *aperture_sizes;
int num_aperture_sizes;
enum aper_size_type size_type;
bool cant_use_aperture;
bool needs_scratch_page;
const struct gatt_mask *masks;
int (*fetch_size)(void);
int (*configure)(void);
void (*agp_enable)(struct agp_bridge_data *, u32);
void (*cleanup)(void);
void (*tlb_flush)(struct agp_memory *);
unsigned long (*mask_memory)(struct agp_bridge_data *, dma_addr_t, int);
void (*cache_flush)(void);
int (*create_gatt_table)(struct agp_bridge_data *);
int (*free_gatt_table)(struct agp_bridge_data *);
int (*insert_memory)(struct agp_memory *, off_t, int);
int (*remove_memory)(struct agp_memory *, off_t, int);
struct agp_memory *(*alloc_by_type) (size_t, int);
void (*free_by_type)(struct agp_memory *);
struct page *(*agp_alloc_page)(struct agp_bridge_data *);
int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
void (*agp_destroy_page)(struct page *, int flags);
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
};
struct agp_bridge_data {
const struct agp_version *version;
const struct agp_bridge_driver *driver;
const struct vm_operations_struct *vm_ops;
void *previous_size;
void *current_size;
void *dev_private_data;
struct pci_dev *dev;
u32 __iomem *gatt_table;
u32 *gatt_table_real;
unsigned long scratch_page;
struct page *scratch_page_page;
dma_addr_t scratch_page_dma;
unsigned long gart_bus_addr;
unsigned long gatt_bus_addr;
u32 mode;
enum chipset_type type;
unsigned long *key_list;
atomic_t current_memory_agp;
atomic_t agp_in_use;
int max_memory_agp; /* in number of pages */
int aperture_size_idx;
int capndx;
int flags;
char major_version;
char minor_version;
struct list_head list;
u32 apbase_config;
/* list of agp_memory mapped to the aperture */
struct list_head mapped_list;
spinlock_t mapped_lock;
};
#define KB(x) ((x) * 1024)
#define MB(x) (KB (KB (x)))
#define GB(x) (MB (KB (x)))
#define A_SIZE_8(x) ((struct aper_size_info_8 *) x)
#define A_SIZE_16(x) ((struct aper_size_info_16 *) x)
#define A_SIZE_32(x) ((struct aper_size_info_32 *) x)
#define A_SIZE_LVL2(x) ((struct aper_size_info_lvl2 *) x)
#define A_SIZE_FIX(x) ((struct aper_size_info_fixed *) x)
#define A_IDX8(bridge) (A_SIZE_8((bridge)->driver->aperture_sizes) + i)
#define A_IDX16(bridge) (A_SIZE_16((bridge)->driver->aperture_sizes) + i)
#define A_IDX32(bridge) (A_SIZE_32((bridge)->driver->aperture_sizes) + i)
#define MAXKEY (4096 * 32)
#define PGE_EMPTY(b, p) (!(p) || (p) == (unsigned long) (b)->scratch_page)
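/*
 * Reading note (added for clarity): a GATT entry counts as "empty" when
 * it is zero or still points at the bridge's scratch page, i.e. when no
 * real AGP memory has been bound at that offset yet; insert_memory
 * implementations below use this to refuse overlapping binds.
 */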
struct agp_device_ids {
unsigned short device_id; /* first, to make table easier to read */
enum chipset_type chipset;
const char *chipset_name;
int (*chipset_setup) (struct pci_dev *pdev); /* used to override generic */
};
/* Driver registration */
struct agp_bridge_data *agp_alloc_bridge(void);
void agp_put_bridge(struct agp_bridge_data *bridge);
int agp_add_bridge(struct agp_bridge_data *bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge);
/* Frontend routines. */
int agp_frontend_initialize(void);
void agp_frontend_cleanup(void);
/* Generic routines. */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
struct agp_memory *agp_create_memory(int scratch_pages);
int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
void agp_generic_free_by_type(struct agp_memory *curr);
struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
struct agp_memory *memory, size_t page_count);
void agp_generic_destroy_page(struct page *page, int flags);
void agp_generic_destroy_pages(struct agp_memory *memory);
void agp_free_key(int key);
int agp_num_entries(void);
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
void agp_device_command(u32 command, bool agp_v3);
int agp_3_5_enable(struct agp_bridge_data *bridge);
void global_cache_flush(void);
void get_agp_version(struct agp_bridge_data *bridge);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
dma_addr_t phys, int type);
int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
int type);
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
/* generic functions for user-populated AGP memory types */
struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
void agp_alloc_page_array(size_t size, struct agp_memory *mem);
void agp_free_page_array(struct agp_memory *mem);
/* generic routines for agp>=3 */
int agp3_generic_fetch_size(void);
void agp3_generic_tlbflush(struct agp_memory *mem);
int agp3_generic_configure(void);
void agp3_generic_cleanup(void);
/* aperture sizes have been standardised since v3 */
#define AGP_GENERIC_SIZES_ENTRIES 11
extern const struct aper_size_info_16 agp3_generic_sizes[];
extern int agp_off;
extern int agp_try_unsupported_boot;
long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/* Chipset independent registers (from AGP Spec) */
#define AGP_APBASE 0x10
#define AGP_APERTURE_BAR 0
#define AGPSTAT 0x4
#define AGPCMD 0x8
#define AGPNISTAT 0xc
#define AGPCTRL 0x10
#define AGPAPSIZE 0x14
#define AGPNEPG 0x16
#define AGPGARTLO 0x18
#define AGPGARTHI 0x1c
#define AGPNICMD 0x20
#define AGP_MAJOR_VERSION_SHIFT (20)
#define AGP_MINOR_VERSION_SHIFT (16)
#define AGPSTAT_RQ_DEPTH (0xff000000)
#define AGPSTAT_RQ_DEPTH_SHIFT 24
#define AGPSTAT_CAL_MASK (1<<12|1<<11|1<<10)
#define AGPSTAT_ARQSZ (1<<15|1<<14|1<<13)
#define AGPSTAT_ARQSZ_SHIFT 13
#define AGPSTAT_SBA (1<<9)
#define AGPSTAT_AGP_ENABLE (1<<8)
#define AGPSTAT_FW (1<<4)
#define AGPSTAT_MODE_3_0 (1<<3)
#define AGPSTAT2_1X (1<<0)
#define AGPSTAT2_2X (1<<1)
#define AGPSTAT2_4X (1<<2)
#define AGPSTAT3_RSVD (1<<2)
#define AGPSTAT3_8X (1<<1)
#define AGPSTAT3_4X (1)
#define AGPCTRL_APERENB (1<<8)
#define AGPCTRL_GTLBEN (1<<7)
#define AGP2_RESERVED_MASK 0x00fffcc8
#define AGP3_RESERVED_MASK 0x00ff00c4
#define AGP_ERRATA_FASTWRITES 1<<0
#define AGP_ERRATA_SBA 1<<1
#define AGP_ERRATA_1X 1<<2
#endif /* _AGP_BACKEND_PRIV_H */

422
drivers/char/agp/ali-agp.c Normal file

@@ -0,0 +1,422 @@
/*
* ALi AGPGART routines.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <asm/page.h> /* PAGE_SIZE */
#include "agp.h"
#define ALI_AGPCTRL 0xb8
#define ALI_ATTBASE 0xbc
#define ALI_TLBCTRL 0xc0
#define ALI_TAGCTRL 0xc4
#define ALI_CACHE_FLUSH_CTRL 0xD0
#define ALI_CACHE_FLUSH_ADDR_MASK 0xFFFFF000
#define ALI_CACHE_FLUSH_EN 0x100
static int ali_fetch_size(void)
{
int i;
u32 temp;
struct aper_size_info_32 *values;
pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
temp &= ~(0xfffffff0);
values = A_SIZE_32(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static void ali_tlbflush(struct agp_memory *mem)
{
u32 temp;
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
temp &= 0xfffffff0;
temp |= (1<<0 | 1<<1);
pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL, temp);
}
static void ali_cleanup(void)
{
struct aper_size_info_32 *previous_size;
u32 temp;
previous_size = A_SIZE_32(agp_bridge->previous_size);
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
// clear tag
pci_write_config_dword(agp_bridge->dev, ALI_TAGCTRL,
((temp & 0xffffff00) | 0x00000001|0x00000002));
pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE,
((temp & 0x00000ff0) | previous_size->size_value));
}
static int ali_configure(void)
{
u32 temp;
struct aper_size_info_32 *current_size;
current_size = A_SIZE_32(agp_bridge->current_size);
/* aperture size and gatt addr */
pci_read_config_dword(agp_bridge->dev, ALI_ATTBASE, &temp);
temp = (((temp & 0x00000ff0) | (agp_bridge->gatt_bus_addr & 0xfffff000))
| (current_size->size_value & 0xf));
pci_write_config_dword(agp_bridge->dev, ALI_ATTBASE, temp);
/* tlb control */
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, ((temp & 0xffffff00) | 0x00000010));
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
#if 0
if (agp_bridge->type == ALI_M1541) {
u32 nlvm_addr = 0;
switch (current_size->size_value) {
case 0: break;
case 1: nlvm_addr = 0x100000;break;
case 2: nlvm_addr = 0x200000;break;
case 3: nlvm_addr = 0x400000;break;
case 4: nlvm_addr = 0x800000;break;
case 6: nlvm_addr = 0x1000000;break;
case 7: nlvm_addr = 0x2000000;break;
case 8: nlvm_addr = 0x4000000;break;
case 9: nlvm_addr = 0x8000000;break;
case 10: nlvm_addr = 0x10000000;break;
default: break;
}
nlvm_addr--;
nlvm_addr&=0xfff00000;
nlvm_addr+= agp_bridge->gart_bus_addr;
nlvm_addr|=(agp_bridge->gart_bus_addr>>12);
dev_info(&agp_bridge->dev->dev, "nlvm top &base = %8x\n",
nlvm_addr);
}
#endif
pci_read_config_dword(agp_bridge->dev, ALI_TLBCTRL, &temp);
temp &= 0xffffff7f; //enable TLB
pci_write_config_dword(agp_bridge->dev, ALI_TLBCTRL, temp);
return 0;
}
static void m1541_cache_flush(void)
{
int i, page_count;
u32 temp;
global_cache_flush();
page_count = 1 << A_SIZE_32(agp_bridge->current_size)->page_order;
for (i = 0; i < PAGE_SIZE * page_count; i += PAGE_SIZE) {
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
&temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
(agp_bridge->gatt_bus_addr + i)) |
ALI_CACHE_FLUSH_EN));
}
}
static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
{
struct page *page = agp_generic_alloc_page(agp_bridge);
u32 temp;
if (!page)
return NULL;
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
page_to_phys(page)) | ALI_CACHE_FLUSH_EN ));
return page;
}
static void ali_destroy_page(struct page *page, int flags)
{
if (page) {
if (flags & AGP_PAGE_DESTROY_UNMAP) {
global_cache_flush(); /* is this really needed? --hch */
agp_generic_destroy_page(page, flags);
} else
agp_generic_destroy_page(page, flags);
}
}
static void m1541_destroy_page(struct page *page, int flags)
{
u32 temp;
if (page == NULL)
return;
if (flags & AGP_PAGE_DESTROY_UNMAP) {
global_cache_flush();
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
page_to_phys(page)) | ALI_CACHE_FLUSH_EN));
}
agp_generic_destroy_page(page, flags);
}
/* Setup function */
static const struct aper_size_info_32 ali_generic_sizes[7] =
{
{256, 65536, 6, 10},
{128, 32768, 5, 9},
{64, 16384, 4, 8},
{32, 8192, 3, 7},
{16, 4096, 2, 6},
{8, 2048, 1, 4},
{4, 1024, 0, 3}
};
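/*
 * Worked example (added for clarity): in the first entry a 256 MB
 * aperture divided into 4 KiB pages needs 65536 GATT entries; at 4
 * bytes per entry that is 256 KiB of table, i.e. 64 pages, hence
 * page_order 6.  The last field (10) is the chipset-specific size code
 * that ali_configure() writes into the low nibble of ALI_ATTBASE.
 */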
static const struct agp_bridge_driver ali_generic_bridge = {
.owner = THIS_MODULE,
.aperture_sizes = ali_generic_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = ali_configure,
.fetch_size = ali_fetch_size,
.cleanup = ali_cleanup,
.tlb_flush = ali_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = ali_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver ali_m1541_bridge = {
.owner = THIS_MODULE,
.aperture_sizes = ali_generic_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
.configure = ali_configure,
.fetch_size = ali_fetch_size,
.cleanup = ali_cleanup,
.tlb_flush = ali_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.agp_enable = agp_generic_enable,
.cache_flush = m1541_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = m1541_alloc_page,
.agp_destroy_page = m1541_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids ali_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_AL_M1541,
.chipset_name = "M1541",
},
{
.device_id = PCI_DEVICE_ID_AL_M1621,
.chipset_name = "M1621",
},
{
.device_id = PCI_DEVICE_ID_AL_M1631,
.chipset_name = "M1631",
},
{
.device_id = PCI_DEVICE_ID_AL_M1632,
.chipset_name = "M1632",
},
{
.device_id = PCI_DEVICE_ID_AL_M1641,
.chipset_name = "M1641",
},
{
.device_id = PCI_DEVICE_ID_AL_M1644,
.chipset_name = "M1644",
},
{
.device_id = PCI_DEVICE_ID_AL_M1647,
.chipset_name = "M1647",
},
{
.device_id = PCI_DEVICE_ID_AL_M1651,
.chipset_name = "M1651",
},
{
.device_id = PCI_DEVICE_ID_AL_M1671,
.chipset_name = "M1671",
},
{
.device_id = PCI_DEVICE_ID_AL_M1681,
.chipset_name = "M1681",
},
{
.device_id = PCI_DEVICE_ID_AL_M1683,
.chipset_name = "M1683",
},
{ }, /* dummy final entry, always present */
};
static int agp_ali_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ali_agp_device_ids;
struct agp_bridge_data *bridge;
u8 hidden_1621_id, cap_ptr;
int j;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
/* probe for known chipsets */
for (j = 0; devs[j].chipset_name; j++) {
if (pdev->device == devs[j].device_id)
goto found;
}
dev_err(&pdev->dev, "unsupported ALi chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
return -ENODEV;
found:
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
switch (pdev->device) {
case PCI_DEVICE_ID_AL_M1541:
bridge->driver = &ali_m1541_bridge;
break;
case PCI_DEVICE_ID_AL_M1621:
pci_read_config_byte(pdev, 0xFB, &hidden_1621_id);
switch (hidden_1621_id) {
case 0x31:
devs[j].chipset_name = "M1631";
break;
case 0x32:
devs[j].chipset_name = "M1632";
break;
case 0x41:
devs[j].chipset_name = "M1641";
break;
case 0x43:
devs[j].chipset_name = "M1621";
break;
case 0x47:
devs[j].chipset_name = "M1647";
break;
case 0x51:
devs[j].chipset_name = "M1651";
break;
default:
break;
}
/*FALLTHROUGH*/
default:
bridge->driver = &ali_generic_bridge;
}
dev_info(&pdev->dev, "ALi %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_ali_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
static struct pci_device_id agp_ali_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AL,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
static struct pci_driver agp_ali_pci_driver = {
.name = "agpgart-ali",
.id_table = agp_ali_pci_table,
.probe = agp_ali_probe,
.remove = agp_ali_remove,
};
static int __init agp_ali_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_ali_pci_driver);
}
static void __exit agp_ali_cleanup(void)
{
pci_unregister_driver(&agp_ali_pci_driver);
}
module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");

222
drivers/char/agp/alpha-agp.c Normal file

@@ -0,0 +1,222 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/machvec.h>
#include <asm/agp_backend.h>
#include "../../../arch/alpha/kernel/pci_impl.h"
#include "agp.h"
static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
unsigned long pa;
struct page *page;
dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
+ agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL)
return VM_FAULT_SIGBUS; /* no translation */
/*
* Get the page, inc the use count, and return it
*/
page = virt_to_page(__va(pa));
get_page(page);
vmf->page = page;
return 0;
}
static struct aper_size_info_fixed alpha_core_agp_sizes[] =
{
{ 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
};
static const struct vm_operations_struct alpha_core_agp_vm_ops = {
.fault = alpha_core_agp_vm_fault,
};
static int alpha_core_agp_fetch_size(void)
{
return alpha_core_agp_sizes[0].size;
}
static int alpha_core_agp_configure(void)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
agp_bridge->gart_bus_addr = agp->aperture.bus_base;
return 0;
}
static void alpha_core_agp_cleanup(void)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
agp->ops->cleanup(agp);
}
static void alpha_core_agp_tlbflush(struct agp_memory *mem)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
}
static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
alpha_agp_info *agp = bridge->dev_private_data;
agp->mode.lw = agp_collect_device_status(bridge, mode,
agp->capability.lw);
agp->mode.bits.enable = 1;
agp->ops->configure(agp);
agp_device_command(agp->mode.lw, false);
}
static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
int type)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
int num_entries, status;
void *temp;
if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
return -EINVAL;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
status = agp->ops->bind(agp, pg_start, mem);
mb();
alpha_core_agp_tlbflush(mem);
return status;
}
static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start,
int type)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
int status;
status = agp->ops->unbind(agp, pg_start, mem);
alpha_core_agp_tlbflush(mem);
return status;
}
static int alpha_core_agp_create_free_gatt_table(struct agp_bridge_data *a)
{
return 0;
}
struct agp_bridge_driver alpha_core_agp_driver = {
.owner = THIS_MODULE,
.aperture_sizes = alpha_core_agp_sizes,
.num_aperture_sizes = 1,
.size_type = FIXED_APER_SIZE,
.cant_use_aperture = true,
.masks = NULL,
.fetch_size = alpha_core_agp_fetch_size,
.configure = alpha_core_agp_configure,
.agp_enable = alpha_core_agp_enable,
.cleanup = alpha_core_agp_cleanup,
.tlb_flush = alpha_core_agp_tlbflush,
.mask_memory = agp_generic_mask_memory,
.cache_flush = global_cache_flush,
.create_gatt_table = alpha_core_agp_create_free_gatt_table,
.free_gatt_table = alpha_core_agp_create_free_gatt_table,
.insert_memory = alpha_core_agp_insert_memory,
.remove_memory = alpha_core_agp_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
struct agp_bridge_data *alpha_bridge;
int __init
alpha_core_agp_setup(void)
{
alpha_agp_info *agp = alpha_mv.agp_info();
struct pci_dev *pdev; /* faked */
struct aper_size_info_fixed *aper_size;
if (!agp)
return -ENODEV;
if (agp->ops->setup(agp))
return -ENODEV;
/*
* Build the aperture size descriptor
*/
aper_size = alpha_core_agp_sizes;
aper_size->size = agp->aperture.size / (1024 * 1024);
aper_size->num_entries = agp->aperture.size / PAGE_SIZE;
aper_size->page_order = __ffs(aper_size->num_entries / 1024);
/*
* Build a fake pci_dev struct
*/
pdev = pci_alloc_dev(NULL);
if (!pdev)
return -ENOMEM;
pdev->vendor = 0xffff;
pdev->device = 0xffff;
pdev->sysdata = agp->hose;
alpha_bridge = agp_alloc_bridge();
if (!alpha_bridge)
goto fail;
alpha_bridge->driver = &alpha_core_agp_driver;
alpha_bridge->vm_ops = &alpha_core_agp_vm_ops;
alpha_bridge->current_size = aper_size; /* only 1 size */
alpha_bridge->dev_private_data = agp;
alpha_bridge->dev = pdev;
alpha_bridge->mode = agp->capability.lw;
printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index);
return agp_add_bridge(alpha_bridge);
fail:
kfree(pdev);
return -ENOMEM;
}
static int __init agp_alpha_core_init(void)
{
if (agp_off)
return -EINVAL;
if (alpha_mv.agp_info)
return alpha_core_agp_setup();
return -ENODEV;
}
static void __exit agp_alpha_core_cleanup(void)
{
agp_remove_bridge(alpha_bridge);
agp_put_bridge(alpha_bridge);
}
module_init(agp_alpha_core_init);
module_exit(agp_alpha_core_cleanup);
MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>");
MODULE_LICENSE("GPL and additional rights");

566
drivers/char/agp/amd-k7-agp.c Normal file

@@ -0,0 +1,566 @@
/*
* AMD K7 AGPGART routines.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "agp.h"
#define AMD_MMBASE_BAR 1
#define AMD_APSIZE 0xac
#define AMD_MODECNTL 0xb0
#define AMD_MODECNTL2 0xb2
#define AMD_GARTENABLE 0x02 /* In mmio region (16-bit register) */
#define AMD_ATTBASE 0x04 /* In mmio region (32-bit register) */
#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
static struct pci_device_id agp_amdk7_pci_table[];
struct amd_page_map {
unsigned long *real;
unsigned long __iomem *remapped;
};
static struct _amd_irongate_private {
volatile u8 __iomem *registers;
struct amd_page_map **gatt_pages;
int num_tables;
} amd_irongate_private;
static int amd_create_page_map(struct amd_page_map *page_map)
{
int i;
page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
if (page_map->real == NULL)
return -ENOMEM;
set_memory_uc((unsigned long)page_map->real, 1);
page_map->remapped = page_map->real;
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
writel(agp_bridge->scratch_page, page_map->remapped+i);
readl(page_map->remapped+i); /* PCI Posting. */
}
return 0;
}
static void amd_free_page_map(struct amd_page_map *page_map)
{
set_memory_wb((unsigned long)page_map->real, 1);
free_page((unsigned long) page_map->real);
}
static void amd_free_gatt_pages(void)
{
int i;
struct amd_page_map **tables;
struct amd_page_map *entry;
tables = amd_irongate_private.gatt_pages;
for (i = 0; i < amd_irongate_private.num_tables; i++) {
entry = tables[i];
if (entry != NULL) {
if (entry->real != NULL)
amd_free_page_map(entry);
kfree(entry);
}
}
kfree(tables);
amd_irongate_private.gatt_pages = NULL;
}
static int amd_create_gatt_pages(int nr_tables)
{
struct amd_page_map **tables;
struct amd_page_map *entry;
int retval = 0;
int i;
tables = kzalloc((nr_tables + 1) * sizeof(struct amd_page_map *), GFP_KERNEL);
if (tables == NULL)
return -ENOMEM;
for (i = 0; i < nr_tables; i++) {
entry = kzalloc(sizeof(struct amd_page_map), GFP_KERNEL);
tables[i] = entry;
if (entry == NULL) {
retval = -ENOMEM;
break;
}
retval = amd_create_page_map(entry);
if (retval != 0)
break;
}
amd_irongate_private.num_tables = i;
amd_irongate_private.gatt_pages = tables;
if (retval != 0)
amd_free_gatt_pages();
return retval;
}
/* Since we don't need contiguous memory we just try
* to get the gatt table once
*/
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
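/*
 * Illustrative sketch (added for clarity; assumes a 4 KiB PAGE_SIZE and
 * a 4 MiB-aligned aperture base): a GART address is resolved in two
 * steps, a 4 MiB page-directory slot and a 4 KiB GATT entry within it.
 * For addr = gart_bus_addr + 0x00543000:
 *
 * GET_PAGE_DIR_IDX(addr) == 1 (0x00543000 >> 22)
 * GET_GATT_OFF(addr) == 0x143 ((0x00543000 & 0x003ff000) >> 12)
 *
 * so the PTE lives at gatt_pages[1]->remapped[0x143].
 */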
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct amd_page_map page_dir;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
int i;
value = A_SIZE_LVL2(agp_bridge->current_size);
retval = amd_create_page_map(&page_dir);
if (retval != 0)
return retval;
retval = amd_create_gatt_pages(value->num_entries / 1024);
if (retval != 0) {
amd_free_page_map(&page_dir);
return retval;
}
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c it's
* used to program the agp master not the cpu
*/
addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
agp_bridge->gart_bus_addr = addr;
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
for (i = 0; i < value->num_entries; i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
return 0;
}
static int amd_free_gatt_table(struct agp_bridge_data *bridge)
{
struct amd_page_map page_dir;
page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
amd_free_gatt_pages();
amd_free_page_map(&page_dir);
return 0;
}
static int amd_irongate_fetch_size(void)
{
int i;
u32 temp;
struct aper_size_info_lvl2 *values;
pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
temp = (temp & 0x0000000e);
values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static int amd_irongate_configure(void)
{
struct aper_size_info_lvl2 *current_size;
phys_addr_t reg;
u32 temp;
u16 enable_reg;
current_size = A_SIZE_LVL2(agp_bridge->current_size);
if (!amd_irongate_private.registers) {
/* Get the memory mapped registers */
reg = pci_resource_start(agp_bridge->dev, AMD_MMBASE_BAR);
amd_irongate_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
if (!amd_irongate_private.registers)
return -ENOMEM;
}
/* Write out the address of the gatt table */
writel(agp_bridge->gatt_bus_addr, amd_irongate_private.registers+AMD_ATTBASE);
readl(amd_irongate_private.registers+AMD_ATTBASE); /* PCI Posting. */
/* Write the Sync register */
pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL, 0x80);
/* Set indexing mode */
pci_write_config_byte(agp_bridge->dev, AMD_MODECNTL2, 0x00);
/* Write the enable register */
enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
enable_reg = (enable_reg | 0x0004);
writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
/* Write out the size register */
pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
temp = (((temp & ~(0x0000000e)) | current_size->size_value) | 1);
pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
/* Flush the tlb */
writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting.*/
return 0;
}
static void amd_irongate_cleanup(void)
{
struct aper_size_info_lvl2 *previous_size;
u32 temp;
u16 enable_reg;
previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
enable_reg = readw(amd_irongate_private.registers+AMD_GARTENABLE);
enable_reg = (enable_reg & ~(0x0004));
writew(enable_reg, amd_irongate_private.registers+AMD_GARTENABLE);
readw(amd_irongate_private.registers+AMD_GARTENABLE); /* PCI Posting. */
/* Write back the previous size and disable gart translation */
pci_read_config_dword(agp_bridge->dev, AMD_APSIZE, &temp);
temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
pci_write_config_dword(agp_bridge->dev, AMD_APSIZE, temp);
iounmap((void __iomem *) amd_irongate_private.registers);
}
/*
* This routine could be implemented by taking the addresses
* written to the GATT, and flushing them individually. However
* currently it just flushes the whole table. Which is probably
* more efficient, since agp_memory blocks can be a large number of
* entries.
*/
static void amd_irongate_tlbflush(struct agp_memory *temp)
{
writel(1, amd_irongate_private.registers+AMD_TLBFLUSH);
readl(amd_irongate_private.registers+AMD_TLBFLUSH); /* PCI Posting. */
}
static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
int i, j, num_entries;
unsigned long __iomem *cur_gatt;
unsigned long addr;
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
return -EINVAL;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
j = pg_start;
while (j < (pg_start + mem->page_count)) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
return -EBUSY;
j++;
}
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_generic_mask_memory(agp_bridge,
page_to_phys(mem->pages[i]),
mem->type),
cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
amd_irongate_tlbflush(mem);
return 0;
}
static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
int i;
unsigned long __iomem *cur_gatt;
unsigned long addr;
if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
return -EINVAL;
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
amd_irongate_tlbflush(mem);
return 0;
}
static const struct aper_size_info_lvl2 amd_irongate_sizes[7] =
{
{2048, 524288, 0x0000000c},
{1024, 262144, 0x0000000a},
{512, 131072, 0x00000008},
{256, 65536, 0x00000006},
{128, 32768, 0x00000004},
{64, 16384, 0x00000002},
{32, 8192, 0x00000000}
};
static const struct gatt_mask amd_irongate_masks[] =
{
{.mask = 1, .type = 0}
};
static const struct agp_bridge_driver amd_irongate_driver = {
.owner = THIS_MODULE,
.aperture_sizes = amd_irongate_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = amd_irongate_configure,
.fetch_size = amd_irongate_fetch_size,
.cleanup = amd_irongate_cleanup,
.tlb_flush = amd_irongate_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = amd_irongate_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = amd_create_gatt_table,
.free_gatt_table = amd_free_gatt_table,
.insert_memory = amd_insert_memory,
.remove_memory = amd_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids amd_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006,
.chipset_name = "Irongate",
},
{
.device_id = PCI_DEVICE_ID_AMD_FE_GATE_700E,
.chipset_name = "761",
},
{
.device_id = PCI_DEVICE_ID_AMD_FE_GATE_700C,
.chipset_name = "760MP",
},
{ }, /* dummy final entry, always present */
};
static int agp_amdk7_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
int j;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
j = ent - agp_amdk7_pci_table;
dev_info(&pdev->dev, "AMD %s chipset\n",
amd_agp_device_ids[j].chipset_name);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &amd_irongate_driver;
bridge->dev_private_data = &amd_irongate_private;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
/* 751 Errata (22564_B-1.PDF)
erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
system controller may experience noise due to strong drive strengths
*/
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
struct pci_dev *gfxcard=NULL;
cap_ptr = 0;
while (!cap_ptr) {
gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
if (!gfxcard) {
dev_info(&pdev->dev, "no AGP VGA controller\n");
return -ENODEV;
}
cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
}
/* With so many variants of NVidia cards, it's simpler just
to blacklist them all, and then whitelist them as needed
(if necessary at all). */
if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
agp_bridge->flags |= AGP_ERRATA_1X;
dev_info(&pdev->dev, "AMD 751 chipset with NVidia GeForce; forcing 1X due to errata\n");
}
pci_dev_put(gfxcard);
}
/* 761 Errata (23613_F.pdf)
* Revisions B0/B1 were a disaster.
* erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
* erratum 45: Timing problem prevents fast writes -- Disable fast write.
* erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
* With this lot disabled, we should prevent lockups. */
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
if (pdev->revision == 0x10 || pdev->revision == 0x11) {
agp_bridge->flags = AGP_ERRATA_FASTWRITES;
agp_bridge->flags |= AGP_ERRATA_SBA;
agp_bridge->flags |= AGP_ERRATA_1X;
dev_info(&pdev->dev, "AMD 761 chipset with errata; disabling AGP fast writes & SBA and forcing to 1X\n");
}
}
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_amdk7_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
#ifdef CONFIG_PM
static int agp_amdk7_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int agp_amdk7_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return amd_irongate_driver.configure();
}
#endif /* CONFIG_PM */
/* must be the same order as name table above */
static struct pci_device_id agp_amdk7_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_FE_GATE_7006,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_FE_GATE_700E,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_FE_GATE_700C,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
static struct pci_driver agp_amdk7_pci_driver = {
.name = "agpgart-amdk7",
.id_table = agp_amdk7_pci_table,
.probe = agp_amdk7_probe,
.remove = agp_amdk7_remove,
#ifdef CONFIG_PM
.suspend = agp_amdk7_suspend,
.resume = agp_amdk7_resume,
#endif
};
static int __init agp_amdk7_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_amdk7_pci_driver);
}
static void __exit agp_amdk7_cleanup(void)
{
pci_unregister_driver(&agp_amdk7_pci_driver);
}
module_init(agp_amdk7_init);
module_exit(agp_amdk7_cleanup);
MODULE_LICENSE("GPL and additional rights");

818
drivers/char/agp/amd64-agp.c Normal file

@@ -0,0 +1,818 @@
/*
* Copyright 2001-2003 SuSE Labs.
* Distributed under the GNU public license, v2.
*
* This is a GART driver for the AMD Opteron/Athlon64 on-CPU northbridge.
* It also includes support for the AMD 8151 AGP bridge,
* although it doesn't actually do much, as all the real
* work is done in the northbridge(s).
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */
#include <asm/e820.h>
#include <asm/amd_nb.h>
#include <asm/gart.h>
#include "agp.h"
/* NVIDIA K8 registers */
#define NVIDIA_X86_64_0_APBASE 0x10
#define NVIDIA_X86_64_1_APBASE1 0x50
#define NVIDIA_X86_64_1_APLIMIT1 0x54
#define NVIDIA_X86_64_1_APSIZE 0xa8
#define NVIDIA_X86_64_1_APBASE2 0xd8
#define NVIDIA_X86_64_1_APLIMIT2 0xdc
/* ULi K8 registers */
#define ULI_X86_64_BASE_ADDR 0x10
#define ULI_X86_64_HTT_FEA_REG 0x50
#define ULI_X86_64_ENU_SCR_REG 0x54
static struct resource *aperture_resource;
static bool __initdata agp_try_unsupported = 1;
static int agp_bridges_found;
static void amd64_tlbflush(struct agp_memory *temp)
{
amd_flush_garts();
}
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
int i, j, num_entries;
long long tmp;
int mask_type;
struct agp_bridge_data *bridge = mem->bridge;
u32 pte;
num_entries = agp_num_entries();
if (type != mem->type)
return -EINVAL;
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0)
return -EINVAL;
/* Make sure we can fit the range in the gatt table. */
/* FIXME: could wrap */
if (((unsigned long)pg_start + mem->page_count) > num_entries)
return -EINVAL;
j = pg_start;
/* gatt table should be empty. */
while (j < (pg_start + mem->page_count)) {
if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
return -EBUSY;
j++;
}
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
tmp = agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]),
mask_type);
BUG_ON(tmp & 0xffffff0000000ffcULL);
pte = (tmp & 0x000000ff00000000ULL) >> 28;
pte |= (tmp & 0x00000000fffff000ULL);
pte |= GPTE_VALID | GPTE_COHERENT;
writel(pte, agp_bridge->gatt_table+j);
readl(agp_bridge->gatt_table+j); /* PCI Posting. */
}
amd64_tlbflush(mem);
return 0;
}
/*
* This hack alters the order element according
* to the size of a long. It sucks. I totally disown this, even
* though it does appear to work for the most part.
*/
static struct aper_size_info_32 amd64_aperture_sizes[7] =
{
{32, 8192, 3+(sizeof(long)/8), 0 },
{64, 16384, 4+(sizeof(long)/8), 1<<1 },
{128, 32768, 5+(sizeof(long)/8), 1<<2 },
{256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 },
{512, 131072, 7+(sizeof(long)/8), 1<<3 },
{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};
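/*
 * Worked example of the sizeof(long) adjustment above (added for
 * clarity): sizeof(long)/8 evaluates to 0 on a 32-bit kernel and 1 on a
 * 64-bit one, so the 32 MB entry gets page_order 3 or 4 respectively --
 * the GATT allocation for a given aperture size doubles on 64-bit.
 */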
/*
* Get the current Aperture size from the x86-64.
* Note, that there may be multiple x86-64's, but we just return
* the value from the first one we find. The set_size functions
* keep the rest coherent anyway. Or at least should do.
*/
static int amd64_fetch_size(void)
{
struct pci_dev *dev;
int i;
u32 temp;
struct aper_size_info_32 *values;
dev = node_to_amd_nb(0)->misc;
if (dev==NULL)
return 0;
pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &temp);
temp = (temp & 0xe);
values = A_SIZE_32(amd64_aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
/*
* In a multiprocessor x86-64 system, this function gets
* called once for each CPU.
*/
static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
{
u64 aperturebase;
u32 tmp;
u64 aper_base;
/* Address to map to */
pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
aperturebase = tmp << 25;
aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
enable_gart_translation(hammer, gatt_table);
return aper_base;
}
static const struct aper_size_info_32 amd_8151_sizes[7] =
{
{2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */
{1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */
{512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */
{256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */
{128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */
{64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */
{32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */
};
static int amd_8151_configure(void)
{
unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
int i;
if (!amd_nb_has_feature(AMD_NB_GART))
return 0;
/* Configure AGP regs in each x86-64 host bridge. */
for (i = 0; i < amd_nb_num(); i++) {
agp_bridge->gart_bus_addr =
amd64_configure(node_to_amd_nb(i)->misc, gatt_bus);
}
amd_flush_garts();
return 0;
}
static void amd64_cleanup(void)
{
u32 tmp;
int i;
if (!amd_nb_has_feature(AMD_NB_GART))
return;
for (i = 0; i < amd_nb_num(); i++) {
struct pci_dev *dev = node_to_amd_nb(i)->misc;
/* disable gart translation */
pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp);
tmp &= ~GARTEN;
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp);
}
}
static const struct agp_bridge_driver amd_8151_driver = {
.owner = THIS_MODULE,
.aperture_sizes = amd_8151_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = amd_8151_configure,
.fetch_size = amd64_fetch_size,
.cleanup = amd64_cleanup,
.tlb_flush = amd64_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = amd64_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
/* Some basic sanity checks for the aperture. */
static int agp_aperture_valid(u64 aper, u32 size)
{
if (!aperture_valid(aper, size, 32*1024*1024))
return 0;
/* Request the Aperture. This catches cases when someone else
already put a mapping in there - happens with some very broken BIOS
Maybe better to use pci_assign_resource/pci_enable_device instead
trusting the bridges? */
if (!aperture_resource &&
!(aperture_resource = request_mem_region(aper, size, "aperture"))) {
printk(KERN_ERR PFX "Aperture conflicts with PCI mapping.\n");
return 0;
}
return 1;
}
/*
* A W*s-centric BIOS sometimes only sets up the aperture in the AGP
* bridge, not the northbridge. On AMD64 this is handled early
* in aperture.c, but when IOMMU is not enabled or we run
* on a 32bit kernel this needs to be redone.
* Unfortunately it is impossible to fix the aperture here because it's too late
* to allocate that much memory. But at least error out cleanly instead of
* crashing.
*/
static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
{
u64 aper, nb_aper;
int order = 0;
u32 nb_order, nb_base;
u16 apsize;
pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
nb_order = (nb_order >> 1) & 7;
pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
nb_aper = nb_base << 25;
/* Northbridge seems to contain crap. Try the AGP bridge. */
pci_read_config_word(agp, cap+0x14, &apsize);
if (apsize == 0xffff) {
if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
return 0;
return -1;
}
apsize &= 0xfff;
/* Some BIOS use weird encodings not in the AGPv3 table. */
if (apsize & 0xff)
apsize |= 0xf00;
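/*
 * Decoding note (added for clarity): each cleared bit in the 12-bit
 * APSIZE field doubles the aperture, so the next line maps e.g.
 * apsize == 0xf00 (4 bits set) to order 3, i.e. 32 MB << 3 = 256 MB,
 * while the "sick" apsize == 0 case noted below yields order 7, 4 GB.
 */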
order = 7 - hweight16(apsize);
aper = pci_bus_address(agp, AGP_APERTURE_BAR);
/*
* On some sick chips APSIZE is 0. This means it wants 4G
* so let's double-check that order, and let's trust the AMD NB settings
*/
if (order >=0 && aper + (32ULL<<(20 + order)) > 0x100000000ULL) {
dev_info(&agp->dev, "aperture size %u MB is not right, using settings from NB\n",
32 << order);
order = nb_order;
}
if (nb_order >= order) {
if (agp_aperture_valid(nb_aper, (32*1024*1024)<<nb_order))
return 0;
}
dev_info(&agp->dev, "aperture from AGP @ %Lx size %u MB\n",
aper, 32 << order);
if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order))
return -1;
gart_set_size_and_enable(nb, order);
pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25);
return 0;
}
static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
int i;
if (amd_cache_northbridges() < 0)
return -ENODEV;
if (!amd_nb_has_feature(AMD_NB_GART))
return -ENODEV;
i = 0;
for (i = 0; i < amd_nb_num(); i++) {
struct pci_dev *dev = node_to_amd_nb(i)->misc;
if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
dev_err(&dev->dev, "no usable aperture found\n");
#ifdef __x86_64__
/* should port this to i386 */
dev_err(&dev->dev, "consider rebooting with iommu=memaper=2 to get a good aperture\n");
#endif
return -1;
}
}
return 0;
}
/* Handle AMD 8151 quirks */
static void amd8151_init(struct pci_dev *pdev, struct agp_bridge_data *bridge)
{
char *revstring;
switch (pdev->revision) {
case 0x01: revstring="A0"; break;
case 0x02: revstring="A1"; break;
case 0x11: revstring="B0"; break;
case 0x12: revstring="B1"; break;
case 0x13: revstring="B2"; break;
case 0x14: revstring="B3"; break;
default: revstring="??"; break;
}
dev_info(&pdev->dev, "AMD 8151 AGP Bridge rev %s\n", revstring);
/*
* Work around errata.
* Chips before B2 stepping incorrectly reporting v3.5
*/
if (pdev->revision < 0x13) {
dev_info(&pdev->dev, "correcting AGP revision (reports 3.5, is really 3.0)\n");
bridge->major_version = 3;
bridge->minor_version = 0;
}
}
static const struct aper_size_info_32 uli_sizes[7] =
{
{256, 65536, 6, 10},
{128, 32768, 5, 9},
{64, 16384, 4, 8},
{32, 8192, 3, 7},
{16, 4096, 2, 6},
{8, 2048, 1, 4},
{4, 1024, 0, 3}
};
static int uli_agp_init(struct pci_dev *pdev)
{
u32 httfea,baseaddr,enuscr;
struct pci_dev *dev1;
int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up ULi AGP\n");
dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(0, 0));
if (dev1 == NULL) {
dev_info(&pdev->dev, "can't find ULi secondary device\n");
return -ENODEV;
}
for (i = 0; i < ARRAY_SIZE(uli_sizes); i++)
if (uli_sizes[i].size == size)
break;
if (i == ARRAY_SIZE(uli_sizes)) {
dev_info(&pdev->dev, "no ULi size found for %d\n", size);
ret = -ENODEV;
goto put;
}
/* shadow x86-64 registers into ULi registers */
pci_read_config_dword (node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
&httfea);
/* if x86-64 aperture base is beyond 4G, exit here */
if ((httfea & 0x7fff) >> (32 - 25)) {
ret = -ENODEV;
goto put;
}
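/*
 * Note on the check above (added for clarity): the register's low 15
 * bits hold address bits [39:25], so any of bits [14:7] being set would
 * place the shifted base at or above 4 GB (e.g. a field value of 0x0080
 * decodes to 0x100000000), which the 32-bit ULi shadow registers below
 * cannot express.
 */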
httfea = (httfea & 0x7fff) << 25;
pci_read_config_dword(pdev, ULI_X86_64_BASE_ADDR, &baseaddr);
baseaddr &= ~PCI_BASE_ADDRESS_MEM_MASK;
baseaddr |= httfea;
pci_write_config_dword(pdev, ULI_X86_64_BASE_ADDR, baseaddr);
enuscr = httfea + (size * 1024 * 1024) - 1;
pci_write_config_dword(dev1, ULI_X86_64_HTT_FEA_REG, httfea);
pci_write_config_dword(dev1, ULI_X86_64_ENU_SCR_REG, enuscr);
ret = 0;
put:
pci_dev_put(dev1);
return ret;
}
static const struct aper_size_info_32 nforce3_sizes[5] =
{
{512, 131072, 7, 0x00000000 },
{256, 65536, 6, 0x00000008 },
{128, 32768, 5, 0x0000000C },
{64, 16384, 4, 0x0000000E },
{32, 8192, 3, 0x0000000F }
};
/* Handle shadow device of the Nvidia NForce3 */
/* CHECK-ME original 2.4 version set up some IORRs. Check if that is needed. */
static int nforce3_agp_init(struct pci_dev *pdev)
{
u32 tmp, apbase, apbar, aplimit;
struct pci_dev *dev1;
int i, ret;
unsigned size = amd64_fetch_size();
dev_info(&pdev->dev, "setting up Nforce3 AGP\n");
dev1 = pci_get_slot(pdev->bus, PCI_DEVFN(11, 0));
if (dev1 == NULL) {
dev_info(&pdev->dev, "can't find Nforce3 secondary device\n");
return -ENODEV;
}
for (i = 0; i < ARRAY_SIZE(nforce3_sizes); i++)
if (nforce3_sizes[i].size == size)
break;
if (i == ARRAY_SIZE(nforce3_sizes)) {
dev_info(&pdev->dev, "no NForce3 size found for %d\n", size);
ret = -ENODEV;
goto put;
}
pci_read_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, &tmp);
tmp &= ~(0xf);
tmp |= nforce3_sizes[i].size_value;
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
/* shadow x86-64 registers into NVIDIA registers */
pci_read_config_dword(node_to_amd_nb(0)->misc, AMD64_GARTAPERTUREBASE,
&apbase);
/* if x86-64 aperture base is beyond 4G, exit here */
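/* Same 32-bit shadow-register limitation as in the ULi path above. */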
if ((apbase & 0x7fff) >> (32 - 25)) {
dev_info(&pdev->dev, "aperture base > 4G\n");
ret = -ENODEV;
goto put;
}
apbase = (apbase & 0x7fff) << 25;
pci_read_config_dword(pdev, NVIDIA_X86_64_0_APBASE, &apbar);
apbar &= ~PCI_BASE_ADDRESS_MEM_MASK;
apbar |= apbase;
pci_write_config_dword(pdev, NVIDIA_X86_64_0_APBASE, apbar);
aplimit = apbase + (size * 1024 * 1024) - 1;
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE1, apbase);
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT1, aplimit);
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APBASE2, apbase);
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APLIMIT2, aplimit);
ret = 0;
put:
pci_dev_put(dev1);
return ret;
}
static int agp_amd64_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
int err;
/* The Highlander principle */
if (agp_bridges_found)
return -ENODEV;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
/* Could check for AGPv3 here */
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_8151_0) {
amd8151_init(pdev, bridge);
} else {
dev_info(&pdev->dev, "AGP bridge [%04x/%04x]\n",
pdev->vendor, pdev->device);
}
bridge->driver = &amd_8151_driver;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
/* Fill in the mode register */
pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
if (cache_nbs(pdev, cap_ptr) == -1) {
agp_put_bridge(bridge);
return -ENODEV;
}
if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
int ret = nforce3_agp_init(pdev);
if (ret) {
agp_put_bridge(bridge);
return ret;
}
}
if (pdev->vendor == PCI_VENDOR_ID_AL) {
int ret = uli_agp_init(pdev);
if (ret) {
agp_put_bridge(bridge);
return ret;
}
}
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
if (err < 0)
return err;
agp_bridges_found++;
return 0;
}
static void agp_amd64_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
release_mem_region(virt_to_phys(bridge->gatt_table_real),
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
agp_bridges_found--;
}
#ifdef CONFIG_PM
static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int agp_amd64_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
nforce3_agp_init(pdev);
return amd_8151_configure();
}
#endif /* CONFIG_PM */
static struct pci_device_id agp_amd64_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_8151_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* ULi M1689 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AL,
.device = PCI_DEVICE_ID_AL_M1689,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* VIA K8T800Pro */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
.device = PCI_DEVICE_ID_VIA_K8T800PRO_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* VIA K8T800 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
.device = PCI_DEVICE_ID_VIA_8385_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* VIA K8M800 / K8N800 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
.device = PCI_DEVICE_ID_VIA_8380_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* VIA K8M890 / K8N890 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
.device = PCI_DEVICE_ID_VIA_VT3336,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* VIA K8T890 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
.device = PCI_DEVICE_ID_VIA_3238_0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* VIA K8T800/K8M800/K8N800 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_VIA,
.device = PCI_DEVICE_ID_VIA_838X_1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* NForce3 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_DEVICE_ID_NVIDIA_NFORCE3,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_DEVICE_ID_NVIDIA_NFORCE3S,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* SIS 755 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_755,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* SIS 760 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_760,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
/* ALI/ULI M1695 */
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_AL,
.device = 0x1695,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
{ PCI_DEVICE_CLASS(0, 0) },
{ }
};
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
.probe = agp_amd64_probe,
.remove = agp_amd64_remove,
#ifdef CONFIG_PM
.suspend = agp_amd64_suspend,
.resume = agp_amd64_resume,
#endif
};
/* Not static due to IOMMU code calling it early. */
int __init agp_amd64_init(void)
{
int err = 0;
if (agp_off)
return -EINVAL;
err = pci_register_driver(&agp_amd64_pci_driver);
if (err < 0)
return err;
if (agp_bridges_found == 0) {
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
#ifdef MODULE
printk(KERN_INFO PFX "You can try agp_try_unsupported=1\n");
#else
printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
/* First check that we have at least one AMD64 NB */
if (!pci_dev_present(amd_nb_misc_ids)) {
pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
/* Look for any AGP bridge */
agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
err = driver_attach(&agp_amd64_pci_driver.driver);
if (err == 0 && agp_bridges_found == 0) {
pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
}
}
return err;
}
static int __init agp_amd64_mod_init(void)
{
#ifndef MODULE
if (gart_iommu_aperture)
return agp_bridges_found ? 0 : -ENODEV;
#endif
return agp_amd64_init();
}
static void __exit agp_amd64_cleanup(void)
{
#ifndef MODULE
if (gart_iommu_aperture)
return;
#endif
if (aperture_resource)
release_resource(aperture_resource);
pci_unregister_driver(&agp_amd64_pci_driver);
}
module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");

584
drivers/char/agp/ati-agp.c Normal file
View file

@ -0,0 +1,584 @@
/*
* ATi AGPGART routines.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include <asm/agp.h>
#include "agp.h"
#define ATI_GART_MMBASE_BAR 1
#define ATI_RS100_APSIZE 0xac
#define ATI_RS100_IG_AGPMODE 0xb0
#define ATI_RS300_APSIZE 0xf8
#define ATI_RS300_IG_AGPMODE 0xfc
#define ATI_GART_FEATURE_ID 0x00
#define ATI_GART_BASE 0x04
#define ATI_GART_CACHE_SZBASE 0x08
#define ATI_GART_CACHE_CNTRL 0x0c
#define ATI_GART_CACHE_ENTRY_CNTRL 0x10
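/*
 * Each aper_size_info_lvl2 entry below is {aperture size in MB, number
 * of GATT entries, APSIZE register encoding}; e.g. a 256 MB aperture
 * holds 65536 4 KB pages.
 */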
static const struct aper_size_info_lvl2 ati_generic_sizes[7] =
{
{2048, 524288, 0x0000000c},
{1024, 262144, 0x0000000a},
{512, 131072, 0x00000008},
{256, 65536, 0x00000006},
{128, 32768, 0x00000004},
{64, 16384, 0x00000002},
{32, 8192, 0x00000000}
};
static struct gatt_mask ati_generic_masks[] =
{
{ .mask = 1, .type = 0}
};
struct ati_page_map {
unsigned long *real;
unsigned long __iomem *remapped;
};
static struct _ati_generic_private {
volatile u8 __iomem *registers;
struct ati_page_map **gatt_pages;
int num_tables;
} ati_generic_private;
static int ati_create_page_map(struct ati_page_map *page_map)
{
int i, err = 0;
page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
if (page_map->real == NULL)
return -ENOMEM;
set_memory_uc((unsigned long)page_map->real, 1);
err = map_page_into_agp(virt_to_page(page_map->real));
if (err) {
set_memory_wb((unsigned long)page_map->real, 1);
free_page((unsigned long) page_map->real);
return err;
}
page_map->remapped = page_map->real;
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
writel(agp_bridge->scratch_page, page_map->remapped+i);
readl(page_map->remapped+i); /* PCI Posting. */
}
return 0;
}
static void ati_free_page_map(struct ati_page_map *page_map)
{
unmap_page_from_agp(virt_to_page(page_map->real));
set_memory_wb((unsigned long)page_map->real, 1);
free_page((unsigned long) page_map->real);
}
static void ati_free_gatt_pages(void)
{
int i;
struct ati_page_map **tables;
struct ati_page_map *entry;
tables = ati_generic_private.gatt_pages;
for (i = 0; i < ati_generic_private.num_tables; i++) {
entry = tables[i];
if (entry != NULL) {
if (entry->real != NULL)
ati_free_page_map(entry);
kfree(entry);
}
}
kfree(tables);
}
static int ati_create_gatt_pages(int nr_tables)
{
struct ati_page_map **tables;
struct ati_page_map *entry;
int retval = 0;
int i;
tables = kzalloc((nr_tables + 1) * sizeof(struct ati_page_map *), GFP_KERNEL);
if (tables == NULL)
return -ENOMEM;
for (i = 0; i < nr_tables; i++) {
entry = kzalloc(sizeof(struct ati_page_map), GFP_KERNEL);
tables[i] = entry;
if (entry == NULL) {
retval = -ENOMEM;
break;
}
retval = ati_create_page_map(entry);
if (retval != 0)
break;
}
ati_generic_private.num_tables = i;
ati_generic_private.gatt_pages = tables;
if (retval != 0)
ati_free_gatt_pages();
return retval;
}
static int is_r200(void)
{
if ((agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS100) ||
(agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200) ||
(agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS200_B) ||
(agp_bridge->dev->device == PCI_DEVICE_ID_ATI_RS250))
return 1;
return 0;
}
static int ati_fetch_size(void)
{
int i;
u32 temp;
struct aper_size_info_lvl2 *values;
if (is_r200())
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
else
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
temp = (temp & 0x0000000e);
values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static void ati_tlbflush(struct agp_memory * mem)
{
writel(1, ati_generic_private.registers+ATI_GART_CACHE_CNTRL);
readl(ati_generic_private.registers+ATI_GART_CACHE_CNTRL); /* PCI Posting. */
}
static void ati_cleanup(void)
{
struct aper_size_info_lvl2 *previous_size;
u32 temp;
previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
/* Write back the previous size and disable gart translation */
if (is_r200()) {
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
} else {
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
temp = ((temp & ~(0x0000000f)) | previous_size->size_value);
pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
}
iounmap((volatile u8 __iomem *)ati_generic_private.registers);
}
static int ati_configure(void)
{
phys_addr_t reg;
u32 temp;
/* Get the memory mapped registers */
reg = pci_resource_start(agp_bridge->dev, ATI_GART_MMBASE_BAR);
ati_generic_private.registers = (volatile u8 __iomem *) ioremap(reg, 4096);
if (!ati_generic_private.registers)
return -ENOMEM;
if (is_r200())
pci_write_config_dword(agp_bridge->dev, ATI_RS100_IG_AGPMODE, 0x20000);
else
pci_write_config_dword(agp_bridge->dev, ATI_RS300_IG_AGPMODE, 0x20000);
/* address to map to */
/*
agp_bridge.gart_bus_addr = pci_bus_address(agp_bridge.dev,
AGP_APERTURE_BAR);
printk(KERN_INFO PFX "IGP320 gart_bus_addr: %x\n", agp_bridge.gart_bus_addr);
*/
writel(0x60000, ati_generic_private.registers+ATI_GART_FEATURE_ID);
readl(ati_generic_private.registers+ATI_GART_FEATURE_ID); /* PCI Posting.*/
/* SIGNALED_SYSTEM_ERROR @ NB_STATUS */
pci_read_config_dword(agp_bridge->dev, PCI_COMMAND, &temp);
pci_write_config_dword(agp_bridge->dev, PCI_COMMAND, temp | (1<<14));
/* Write out the address of the gatt table */
writel(agp_bridge->gatt_bus_addr, ati_generic_private.registers+ATI_GART_BASE);
readl(ati_generic_private.registers+ATI_GART_BASE); /* PCI Posting. */
return 0;
}
#ifdef CONFIG_PM
static int agp_ati_suspend(struct pci_dev *dev, pm_message_t state)
{
pci_save_state(dev);
pci_set_power_state(dev, PCI_D3hot);
return 0;
}
static int agp_ati_resume(struct pci_dev *dev)
{
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
return ati_configure();
}
#endif
/*
* Since we don't need contiguous memory we just try
* to get the gatt table once.
*/
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef GET_GATT
#define GET_GATT(addr) (ati_generic_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
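/*
 * Example: for a GART bus address "addr", bits 31:22 select the 4 MB
 * page-directory slot (1024 PTEs * 4 KB per table), and GET_GATT_OFF
 * picks the PTE within that 1024-entry table from bits 21:12.
 */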
static int ati_insert_memory(struct agp_memory * mem,
off_t pg_start, int type)
{
int i, j, num_entries;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int mask_type;
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
if (mask_type != 0 || type != mem->type)
return -EINVAL;
if (mem->page_count == 0)
return 0;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
j = pg_start;
while (j < (pg_start + mem->page_count)) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
return -EBUSY;
j++;
}
if (!mem->is_flushed) {
/*CACHE_FLUSH(); */
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]),
mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
int type)
{
int i;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int mask_type;
mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
if (mask_type != 0 || type != mem->type)
return -EINVAL;
if (mem->page_count == 0)
return 0;
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
}
readl(GET_GATT(agp_bridge->gart_bus_addr)); /* PCI posting */
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct ati_page_map page_dir;
unsigned long __iomem *cur_gatt;
unsigned long addr;
int retval;
u32 temp;
int i;
struct aper_size_info_lvl2 *current_size;
value = A_SIZE_LVL2(agp_bridge->current_size);
retval = ati_create_page_map(&page_dir);
if (retval != 0)
return retval;
retval = ati_create_gatt_pages(value->num_entries / 1024);
if (retval != 0) {
ati_free_page_map(&page_dir);
return retval;
}
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size);
if (is_r200()) {
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
temp = (((temp & ~(0x0000000e)) | current_size->size_value)
| 0x00000001);
pci_write_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, temp);
pci_read_config_dword(agp_bridge->dev, ATI_RS100_APSIZE, &temp);
} else {
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
temp = (((temp & ~(0x0000000e)) | current_size->size_value)
| 0x00000001);
pci_write_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, temp);
pci_read_config_dword(agp_bridge->dev, ATI_RS300_APSIZE, &temp);
}
/*
* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
* used to program the agp master not the cpu
*/
addr = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
agp_bridge->gart_bus_addr = addr;
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
writel(virt_to_phys(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
for (i = 0; i < value->num_entries; i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
}
return 0;
}
static int ati_free_gatt_table(struct agp_bridge_data *bridge)
{
struct ati_page_map page_dir;
page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
ati_free_gatt_pages();
ati_free_page_map(&page_dir);
return 0;
}
static const struct agp_bridge_driver ati_generic_bridge = {
.owner = THIS_MODULE,
.aperture_sizes = ati_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = ati_configure,
.fetch_size = ati_fetch_size,
.cleanup = ati_cleanup,
.tlb_flush = ati_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = ati_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = ati_create_gatt_table,
.free_gatt_table = ati_free_gatt_table,
.insert_memory = ati_insert_memory,
.remove_memory = ati_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids ati_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_ATI_RS100,
.chipset_name = "IGP320/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS200,
.chipset_name = "IGP330/340/345/350/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS200_B,
.chipset_name = "IGP345M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS250,
.chipset_name = "IGP7000/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_100,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_133,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_166,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS300_200,
.chipset_name = "IGP9100/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS350_133,
.chipset_name = "IGP9000/M",
},
{
.device_id = PCI_DEVICE_ID_ATI_RS350_200,
.chipset_name = "IGP9100/M",
},
{ }, /* dummy final entry, always present */
};
static int agp_ati_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = ati_agp_device_ids;
struct agp_bridge_data *bridge;
u8 cap_ptr;
int j;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
/* probe for known chipsets */
for (j = 0; devs[j].chipset_name; j++) {
if (pdev->device == devs[j].device_id)
goto found;
}
dev_err(&pdev->dev, "unsupported ATI chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
return -ENODEV;
found:
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
bridge->driver = &ati_generic_bridge;
dev_info(&pdev->dev, "ATI %s chipset\n", devs[j].chipset_name);
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_ati_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
static struct pci_device_id agp_ati_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_ATI,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
static struct pci_driver agp_ati_pci_driver = {
.name = "agpgart-ati",
.id_table = agp_ati_pci_table,
.probe = agp_ati_probe,
.remove = agp_ati_remove,
#ifdef CONFIG_PM
.suspend = agp_ati_suspend,
.resume = agp_ati_resume,
#endif
};
static int __init agp_ati_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_ati_pci_driver);
}
static void __exit agp_ati_cleanup(void)
{
pci_unregister_driver(&agp_ati_pci_driver);
}
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");

366
drivers/char/agp/backend.c Normal file
View file

@ -0,0 +1,366 @@
/*
* AGPGART driver backend routines.
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2003 Dave Jones.
* Copyright (C) 1999 Jeff Hartmann.
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* TODO:
* - Allocate more than order 0 pages to avoid too much linear map splitting.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/pm.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "agp.h"
/* Due to XFree86 brain-damage, we can't go to 1.0 until they
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 103
static const struct agp_version agp_current_version =
{
.major = AGPGART_VERSION_MAJOR,
.minor = AGPGART_VERSION_MINOR,
};
struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
&agp_generic_find_bridge;
struct agp_bridge_data *agp_bridge;
LIST_HEAD(agp_bridges);
EXPORT_SYMBOL(agp_bridge);
EXPORT_SYMBOL(agp_bridges);
EXPORT_SYMBOL(agp_find_bridge);
/**
* agp_backend_acquire - attempt to acquire an agp backend.
*
*/
struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge;
bridge = agp_find_bridge(pdev);
if (!bridge)
return NULL;
if (atomic_read(&bridge->agp_in_use))
return NULL;
atomic_inc(&bridge->agp_in_use);
return bridge;
}
EXPORT_SYMBOL(agp_backend_acquire);
/**
* agp_backend_release - release the lock on the agp backend.
*
* The caller must ensure that the graphics aperture translation table
* is ready for use by another entity.
*
* (Ensure that all memory it bound is unbound.)
*/
void agp_backend_release(struct agp_bridge_data *bridge)
{
if (bridge)
atomic_dec(&bridge->agp_in_use);
}
EXPORT_SYMBOL(agp_backend_release);
static const struct { int mem, agp; } maxes_table[] = {
{0, 0},
{32, 4},
{64, 28},
{128, 96},
{256, 204},
{512, 440},
{1024, 942},
{2048, 1920},
{4096, 3932}
};
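/*
 * agp_find_max() interpolates linearly between the two bracketing rows
 * above; e.g. with 384 MB of system RAM the limit works out to
 * 204 + (384 - 256) * (440 - 204) / (512 - 256) = 322 MB of AGP memory
 * before the final conversion to pages.
 */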
static int agp_find_max(void)
{
long memory, index, result;
#if PAGE_SHIFT < 20
memory = totalram_pages >> (20 - PAGE_SHIFT);
#else
memory = totalram_pages << (PAGE_SHIFT - 20);
#endif
index = 1;
while ((memory > maxes_table[index].mem) && (index < 8))
index++;
result = maxes_table[index - 1].agp +
( (memory - maxes_table[index - 1].mem) *
(maxes_table[index].agp - maxes_table[index - 1].agp)) /
(maxes_table[index].mem - maxes_table[index - 1].mem);
result = result << (20 - PAGE_SHIFT);
return result;
}
static int agp_backend_initialize(struct agp_bridge_data *bridge)
{
int size_value, rc, got_gatt=0, got_keylist=0;
bridge->max_memory_agp = agp_find_max();
bridge->version = &agp_current_version;
if (bridge->driver->needs_scratch_page) {
struct page *page = bridge->driver->agp_alloc_page(bridge);
if (!page) {
dev_err(&bridge->dev->dev,
"can't get memory for scratch page\n");
return -ENOMEM;
}
bridge->scratch_page_page = page;
bridge->scratch_page_dma = page_to_phys(page);
bridge->scratch_page = bridge->driver->mask_memory(bridge,
bridge->scratch_page_dma, 0);
}
size_value = bridge->driver->fetch_size();
if (size_value == 0) {
dev_err(&bridge->dev->dev, "can't determine aperture size\n");
rc = -EINVAL;
goto err_out;
}
if (bridge->driver->create_gatt_table(bridge)) {
dev_err(&bridge->dev->dev,
"can't get memory for graphics translation table\n");
rc = -ENOMEM;
goto err_out;
}
got_gatt = 1;
bridge->key_list = vzalloc(PAGE_SIZE * 4);
if (bridge->key_list == NULL) {
dev_err(&bridge->dev->dev,
"can't allocate memory for key lists\n");
rc = -ENOMEM;
goto err_out;
}
got_keylist = 1;
/* FIXME vmalloc'd memory not guaranteed contiguous */
if (bridge->driver->configure()) {
dev_err(&bridge->dev->dev, "error configuring host chipset\n");
rc = -EINVAL;
goto err_out;
}
INIT_LIST_HEAD(&bridge->mapped_list);
spin_lock_init(&bridge->mapped_lock);
return 0;
err_out:
if (bridge->driver->needs_scratch_page) {
struct page *page = bridge->scratch_page_page;
bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
}
if (got_gatt)
bridge->driver->free_gatt_table(bridge);
if (got_keylist) {
vfree(bridge->key_list);
bridge->key_list = NULL;
}
return rc;
}
/* cannot be __exit because it could be called from __init code */
static void agp_backend_cleanup(struct agp_bridge_data *bridge)
{
if (bridge->driver->cleanup)
bridge->driver->cleanup();
if (bridge->driver->free_gatt_table)
bridge->driver->free_gatt_table(bridge);
vfree(bridge->key_list);
bridge->key_list = NULL;
if (bridge->driver->agp_destroy_page &&
bridge->driver->needs_scratch_page) {
struct page *page = bridge->scratch_page_page;
bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
}
}
/* Once the global variable agp_bridge is removed from all drivers,
 * agp_alloc_bridge and agp_generic_find_bridge will need to be updated
*/
struct agp_bridge_data *agp_alloc_bridge(void)
{
struct agp_bridge_data *bridge;
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return NULL;
atomic_set(&bridge->agp_in_use, 0);
atomic_set(&bridge->current_memory_agp, 0);
if (list_empty(&agp_bridges))
agp_bridge = bridge;
return bridge;
}
EXPORT_SYMBOL(agp_alloc_bridge);
void agp_put_bridge(struct agp_bridge_data *bridge)
{
kfree(bridge);
if (list_empty(&agp_bridges))
agp_bridge = NULL;
}
EXPORT_SYMBOL(agp_put_bridge);
int agp_add_bridge(struct agp_bridge_data *bridge)
{
int error;
if (agp_off) {
error = -ENODEV;
goto err_put_bridge;
}
if (!bridge->dev) {
printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
error = -EINVAL;
goto err_put_bridge;
}
/* Grab reference on the chipset driver. */
if (!try_module_get(bridge->driver->owner)) {
dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
error = -EINVAL;
goto err_put_bridge;
}
error = agp_backend_initialize(bridge);
if (error) {
dev_info(&bridge->dev->dev,
"agp_backend_initialize() failed\n");
goto err_out;
}
if (list_empty(&agp_bridges)) {
error = agp_frontend_initialize();
if (error) {
dev_info(&bridge->dev->dev,
"agp_frontend_initialize() failed\n");
goto frontend_err;
}
dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n",
bridge->driver->fetch_size(), bridge->gart_bus_addr);
}
list_add(&bridge->list, &agp_bridges);
return 0;
frontend_err:
agp_backend_cleanup(bridge);
err_out:
module_put(bridge->driver->owner);
err_put_bridge:
agp_put_bridge(bridge);
return error;
}
EXPORT_SYMBOL_GPL(agp_add_bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge)
{
agp_backend_cleanup(bridge);
list_del(&bridge->list);
if (list_empty(&agp_bridges))
agp_frontend_cleanup();
module_put(bridge->driver->owner);
}
EXPORT_SYMBOL_GPL(agp_remove_bridge);
int agp_off;
int agp_try_unsupported_boot;
EXPORT_SYMBOL(agp_off);
EXPORT_SYMBOL(agp_try_unsupported_boot);
static int __init agp_init(void)
{
if (!agp_off)
printk(KERN_INFO "Linux agpgart interface v%d.%d\n",
AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR);
return 0;
}
static void __exit agp_exit(void)
{
}
#ifndef MODULE
static __init int agp_setup(char *s)
{
if (!strcmp(s,"off"))
agp_off = 1;
if (!strcmp(s,"try_unsupported"))
agp_try_unsupported_boot = 1;
return 1;
}
__setup("agp=", agp_setup);
#endif
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
module_init(agp_init);
module_exit(agp_exit);

287
drivers/char/agp/compat_ioctl.c Normal file
View file

@ -0,0 +1,287 @@
/*
* AGPGART driver frontend compatibility ioctls
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2003 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/agpgart.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "agp.h"
#include "compat_ioctl.h"
static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_info32 userinfo;
struct agp_kern_info kerninfo;
/* Zero userinfo first so padding bytes can't leak kernel stack to userspace. */
memset(&userinfo, 0, sizeof(userinfo));
agp_copy_info(agp_bridge, &kerninfo);
userinfo.version.major = kerninfo.version.major;
userinfo.version.minor = kerninfo.version.minor;
userinfo.bridge_id = kerninfo.device->vendor |
(kerninfo.device->device << 16);
userinfo.agp_mode = kerninfo.mode;
userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
userinfo.aper_size = kerninfo.aper_size;
userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
userinfo.pg_used = kerninfo.current_memory;
if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
return -EFAULT;
return 0;
}
static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_region32 ureserve;
struct agp_region kreserve;
struct agp_client *client;
struct agp_file_private *client_priv;
DBG("");
if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
return -EFAULT;
if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
return -EFAULT;
kreserve.pid = ureserve.pid;
kreserve.seg_count = ureserve.seg_count;
client = agp_find_client_by_pid(kreserve.pid);
if (kreserve.seg_count == 0) {
/* remove a client */
client_priv = agp_find_private(kreserve.pid);
if (client_priv != NULL) {
set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
}
if (client == NULL) {
/* client is already removed */
return 0;
}
return agp_remove_client(kreserve.pid);
} else {
struct agp_segment32 *usegment;
struct agp_segment *ksegment;
int seg;
if (ureserve.seg_count >= 16384)
return -EINVAL;
usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL);
if (!usegment)
return -ENOMEM;
ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL);
if (!ksegment) {
kfree(usegment);
return -ENOMEM;
}
if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
sizeof(*usegment) * ureserve.seg_count)) {
kfree(usegment);
kfree(ksegment);
return -EFAULT;
}
for (seg = 0; seg < ureserve.seg_count; seg++) {
ksegment[seg].pg_start = usegment[seg].pg_start;
ksegment[seg].pg_count = usegment[seg].pg_count;
ksegment[seg].prot = usegment[seg].prot;
}
kfree(usegment);
kreserve.seg_list = ksegment;
if (client == NULL) {
/* Create the client and add the segment */
client = agp_create_client(kreserve.pid);
if (client == NULL) {
kfree(ksegment);
return -ENOMEM;
}
client_priv = agp_find_private(kreserve.pid);
if (client_priv != NULL) {
set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
}
}
return agp_create_segment(client, &kreserve);
}
/* Will never really happen */
return -EINVAL;
}
static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_memory *memory;
struct agp_allocate32 alloc;
DBG("");
if (copy_from_user(&alloc, arg, sizeof(alloc)))
return -EFAULT;
memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
if (memory == NULL)
return -ENOMEM;
alloc.key = memory->key;
alloc.physical = memory->physical;
if (copy_to_user(arg, &alloc, sizeof(alloc))) {
agp_free_memory_wrap(memory);
return -EFAULT;
}
return 0;
}
static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_bind32 bind_info;
struct agp_memory *memory;
DBG("");
if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
return -EFAULT;
memory = agp_find_mem_by_key(bind_info.key);
if (memory == NULL)
return -EINVAL;
return agp_bind_memory(memory, bind_info.pg_start);
}
static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_memory *memory;
struct agp_unbind32 unbind;
DBG("");
if (copy_from_user(&unbind, arg, sizeof(unbind)))
return -EFAULT;
memory = agp_find_mem_by_key(unbind.key);
if (memory == NULL)
return -EINVAL;
return agp_unbind_memory(memory);
}
long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct agp_file_private *curr_priv = file->private_data;
int ret_val = -ENOTTY;
mutex_lock(&(agp_fe.agp_mutex));
if ((agp_fe.current_controller == NULL) &&
(cmd != AGPIOC_ACQUIRE32)) {
ret_val = -EINVAL;
goto ioctl_out;
}
if ((agp_fe.backend_acquired != true) &&
(cmd != AGPIOC_ACQUIRE32)) {
ret_val = -EBUSY;
goto ioctl_out;
}
if (cmd != AGPIOC_ACQUIRE32) {
if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
ret_val = -EPERM;
goto ioctl_out;
}
/* Use the original pid of the controller,
* in case it's threaded */
if (agp_fe.current_controller->pid != curr_priv->my_pid) {
ret_val = -EBUSY;
goto ioctl_out;
}
}
switch (cmd) {
case AGPIOC_INFO32:
ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_ACQUIRE32:
ret_val = agpioc_acquire_wrap(curr_priv);
break;
case AGPIOC_RELEASE32:
ret_val = agpioc_release_wrap(curr_priv);
break;
case AGPIOC_SETUP32:
ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_RESERVE32:
ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_PROTECT32:
ret_val = agpioc_protect_wrap(curr_priv);
break;
case AGPIOC_ALLOCATE32:
ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_DEALLOCATE32:
ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
break;
case AGPIOC_BIND32:
ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_UNBIND32:
ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_CHIPSET_FLUSH32:
break;
}
ioctl_out:
DBG("ioctl returns %d\n", ret_val);
mutex_unlock(&(agp_fe.agp_mutex));
return ret_val;
}

106
drivers/char/agp/compat_ioctl.h Normal file
View file

@ -0,0 +1,106 @@
/*
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _AGP_COMPAT_IOCTL_H
#define _AGP_COMPAT_IOCTL_H
#include <linux/compat.h>
#include <linux/agpgart.h>
#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
struct agp_info32 {
struct agp_version version; /* version of the driver */
u32 bridge_id; /* bridge vendor/device */
u32 agp_mode; /* mode info of bridge */
compat_long_t aper_base; /* base of aperture */
compat_size_t aper_size; /* size of aperture */
compat_size_t pg_total; /* max pages (swap + system) */
compat_size_t pg_system; /* max pages (system) */
compat_size_t pg_used; /* current pages used */
};
/*
* The "prot" down below needs still a "sleep" flag somehow ...
*/
struct agp_segment32 {
compat_off_t pg_start; /* starting page to populate */
compat_size_t pg_count; /* number of pages */
compat_int_t prot; /* prot flags for mmap */
};
struct agp_region32 {
compat_pid_t pid; /* pid of process */
compat_size_t seg_count; /* number of segments */
struct agp_segment32 *seg_list;
};
struct agp_allocate32 {
compat_int_t key; /* tag of allocation */
compat_size_t pg_count; /* number of pages */
u32 type; /* 0 == normal, other devspec */
u32 physical; /* device specific (some devices
* need a phys address of the
* actual page behind the gatt
* table) */
};
struct agp_bind32 {
compat_int_t key; /* tag of allocation */
compat_off_t pg_start; /* starting page to populate */
};
struct agp_unbind32 {
compat_int_t key; /* tag of allocation */
u32 priority; /* priority for paging out */
};
extern struct agp_front_data agp_fe;
int agpioc_acquire_wrap(struct agp_file_private *priv);
int agpioc_release_wrap(struct agp_file_private *priv);
int agpioc_protect_wrap(struct agp_file_private *priv);
int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
struct agp_file_private *agp_find_private(pid_t pid);
struct agp_client *agp_create_client(pid_t id);
int agp_remove_client(pid_t id);
int agp_create_segment(struct agp_client *client, struct agp_region *region);
void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
#endif /* _AGP_COMPAT_IOCTL_H */

478
drivers/char/agp/efficeon-agp.c Normal file
View file

@ -0,0 +1,478 @@
/*
* Transmeta's Efficeon AGPGART driver.
*
* Based upon a diff by Linus around November '02.
*
* Ported to the 2.6 kernel by Carlos Puchol <cpglinux@puchol.com>
* and H. Peter Anvin <hpa@transmeta.com>.
*/
/*
* NOTE-cpg-040217:
*
* - when compiled as a module, after loading the module,
* it will refuse to unload, indicating it is in use,
* when it is not.
* - no s3 (suspend to ram) testing.
* - tested on the efficeon integrated northbridge for tens
* of iterations of starting X and glxgears.
* - tested with radeon 9000 and radeon mobility m9 cards
* - tested with c3/c4 enabled (with the mobility m9 card)
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/gfp.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include "agp.h"
#include "intel-agp.h"
/*
* The real differences to the generic AGP code is
* in the GART mappings - a two-level setup with the
* first level being an on-chip 64-entry table.
*
* The page array is filled through the ATTPAGE register
* (Aperture Translation Table Page Register) at 0xB8. Bits:
* 31:20: physical page address
* 11:9: Page Attribute Table Index (PATI)
* must match the PAT index for the
* mapped pages (the 2nd level page table pages
* themselves should be just regular WB-cacheable,
* so this is normally zero.)
* 8: Present
* 7:6: reserved, write as zero
* 5:0: GATT directory index: which 1st-level entry
*
* The Efficeon AGP spec requires pages to be WB-cacheable
* but to be explicitly CLFLUSH'd after any changes.
*/
#define EFFICEON_ATTPAGE 0xb8
#define EFFICEON_L1_SIZE 64 /* Number of PDE pages */
#define EFFICEON_PATI (0 << 9)
#define EFFICEON_PRESENT (1 << 8)
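/*
 * efficeon_create_gatt_table() below installs each PDE page by writing
 * (page_phys | EFFICEON_PATI | EFFICEON_PRESENT | index) to ATTPAGE,
 * matching the bit layout described above.
 */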
static struct _efficeon_private {
unsigned long l1_table[EFFICEON_L1_SIZE];
} efficeon_private;
static const struct gatt_mask efficeon_generic_masks[] =
{
{.mask = 0x00000001, .type = 0}
};
/* This function does the same thing as mask_memory() for this chipset... */
static inline unsigned long efficeon_mask_memory(struct page *page)
{
unsigned long addr = page_to_phys(page);
return addr | 0x00000001;
}
static const struct aper_size_info_lvl2 efficeon_generic_sizes[4] =
{
{256, 65536, 0},
{128, 32768, 32},
{64, 16384, 48},
{32, 8192, 56}
};
/*
* Control interfaces are largely identical to
* the legacy Intel 440BX.
*/
static int efficeon_fetch_size(void)
{
int i;
u16 temp;
struct aper_size_info_lvl2 *values;
pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static void efficeon_tlbflush(struct agp_memory * mem)
{
printk(KERN_DEBUG PFX "efficeon_tlbflush()\n");
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}
static void efficeon_cleanup(void)
{
u16 temp;
struct aper_size_info_lvl2 *previous_size;
printk(KERN_DEBUG PFX "efficeon_cleanup()\n");
previous_size = A_SIZE_LVL2(agp_bridge->previous_size);
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
previous_size->size_value);
}
static int efficeon_configure(void)
{
u16 temp2;
struct aper_size_info_lvl2 *current_size;
printk(KERN_DEBUG PFX "efficeon_configure()\n");
current_size = A_SIZE_LVL2(agp_bridge->current_size);
/* aperture size */
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE,
current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
/* paccfg/nbxcfg */
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
(temp2 & ~(1 << 10)) | (1 << 9) | (1 << 11));
/* clear any possible error conditions */
pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
return 0;
}
static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
{
int index, freed = 0;
for (index = 0; index < EFFICEON_L1_SIZE; index++) {
unsigned long page = efficeon_private.l1_table[index];
if (page) {
efficeon_private.l1_table[index] = 0;
ClearPageReserved(virt_to_page((char *)page));
free_page(page);
freed++;
}
printk(KERN_DEBUG PFX "efficeon_free_gatt_table(%p, %02x, %08x)\n",
agp_bridge->dev, EFFICEON_ATTPAGE, index);
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, index);
}
printk(KERN_DEBUG PFX "efficeon_free_gatt_table() freed %d pages\n", freed);
return 0;
}
/*
* Since we don't need contiguous memory we just try
* to get the gatt table once
*/
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#undef GET_GATT
#define GET_GATT(addr) (efficeon_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
{
int index;
const int pati = EFFICEON_PATI;
const int present = EFFICEON_PRESENT;
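/* CPUID leaf 1, EBX bits 15:8, gives the CLFLUSH line size in 8-byte units. */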
const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
int num_entries, l1_pages;
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
printk(KERN_DEBUG PFX "efficeon_create_gatt_table(%d)\n", num_entries);
/* There are 2^10 PTE pages per PDE page */
BUG_ON(num_entries & 0x3ff);
l1_pages = num_entries >> 10;
for (index = 0 ; index < l1_pages ; index++) {
int offset;
unsigned long page;
unsigned long value;
page = efficeon_private.l1_table[index];
BUG_ON(page);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
efficeon_free_gatt_table(agp_bridge);
return -ENOMEM;
}
SetPageReserved(virt_to_page((char *)page));
for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk)
clflush((char *)page+offset);
efficeon_private.l1_table[index] = page;
value = virt_to_phys((unsigned long *)page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value);
}
return 0;
}
static int efficeon_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
{
int i, count = mem->page_count, num_entries;
unsigned int *page, *last_page;
const int clflush_chunk = ((cpuid_ebx(1) >> 8) & 0xff) << 3;
const unsigned long clflush_mask = ~(clflush_chunk-1);
printk(KERN_DEBUG PFX "efficeon_insert_memory(%lx, %d)\n", pg_start, count);
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
if (type != 0 || mem->type != 0)
return -EINVAL;
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
last_page = NULL;
for (i = 0; i < count; i++) {
int index = pg_start + i;
unsigned long insert = efficeon_mask_memory(mem->pages[i]);
page = (unsigned int *) efficeon_private.l1_table[index >> 10];
if (!page)
continue;
page += (index & 0x3ff);
*page = insert;
/* clflush is slow, so don't clflush until we have to */
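/*
 * (page ^ last_page) & clflush_mask is nonzero exactly when the new PTE
 * landed in a different clflush-line-sized chunk, so each chunk is
 * flushed only once, after it is complete.
 */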
if (last_page &&
(((unsigned long)page^(unsigned long)last_page) &
clflush_mask))
clflush(last_page);
last_page = page;
}
if (last_page)
clflush(last_page);
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int efficeon_remove_memory(struct agp_memory * mem, off_t pg_start, int type)
{
int i, count = mem->page_count, num_entries;
printk(KERN_DEBUG PFX "efficeon_remove_memory(%lx, %d)\n", pg_start, count);
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
if (type != 0 || mem->type != 0)
return -EINVAL;
for (i = 0; i < count; i++) {
int index = pg_start + i;
unsigned int *page = (unsigned int *) efficeon_private.l1_table[index >> 10];
if (!page)
continue;
page += (index & 0x3ff);
*page = 0;
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static const struct agp_bridge_driver efficeon_driver = {
.owner = THIS_MODULE,
.aperture_sizes = efficeon_generic_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 4,
.configure = efficeon_configure,
.fetch_size = efficeon_fetch_size,
.cleanup = efficeon_cleanup,
.tlb_flush = efficeon_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = efficeon_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
// Efficeon-specific GATT table setup / populate / teardown
.create_gatt_table = efficeon_create_gatt_table,
.free_gatt_table = efficeon_free_gatt_table,
.insert_memory = efficeon_insert_memory,
.remove_memory = efficeon_remove_memory,
.cant_use_aperture = false, // true might be faster?
// Generic
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int agp_efficeon_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
struct resource *r;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
/* Probe for Efficeon controller */
if (pdev->device != PCI_DEVICE_ID_EFFICEON) {
printk(KERN_ERR PFX "Unsupported Efficeon chipset (device id: %04x)\n",
pdev->device);
return -ENODEV;
}
printk(KERN_INFO PFX "Detected Transmeta Efficeon TM8000 series chipset\n");
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &efficeon_driver;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
/*
* If the device has not been properly setup, the following will catch
* the problem and should stop the system from crashing.
* 20030610 - hamish@zot.org
*/
if (pci_enable_device(pdev)) {
printk(KERN_ERR PFX "Unable to Enable PCI device\n");
agp_put_bridge(bridge);
return -ENODEV;
}
/*
* The following fixes the case where the BIOS has "forgotten" to
* provide an address range for the GART.
* 20030610 - hamish@zot.org
*/
r = &pdev->resource[0];
if (!r->start && r->end) {
if (pci_assign_resource(pdev, 0)) {
printk(KERN_ERR PFX "could not assign resource 0\n");
agp_put_bridge(bridge);
return -ENODEV;
}
}
/* Fill in the mode register */
if (cap_ptr) {
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
}
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_efficeon_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
#ifdef CONFIG_PM
static int agp_efficeon_suspend(struct pci_dev *dev, pm_message_t state)
{
return 0;
}
static int agp_efficeon_resume(struct pci_dev *pdev)
{
printk(KERN_DEBUG PFX "agp_efficeon_resume()\n");
return efficeon_configure();
}
#endif
static struct pci_device_id agp_efficeon_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_TRANSMETA,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
static struct pci_driver agp_efficeon_pci_driver = {
.name = "agpgart-efficeon",
.id_table = agp_efficeon_pci_table,
.probe = agp_efficeon_probe,
.remove = agp_efficeon_remove,
#ifdef CONFIG_PM
.suspend = agp_efficeon_suspend,
.resume = agp_efficeon_resume,
#endif
};
static int __init agp_efficeon_init(void)
{
static int agp_initialised;
if (agp_off)
return -EINVAL;
if (agp_initialised == 1)
return 0;
agp_initialised = 1;
return pci_register_driver(&agp_efficeon_pci_driver);
}
static void __exit agp_efficeon_cleanup(void)
{
pci_unregister_driver(&agp_efficeon_pci_driver);
}
module_init(agp_efficeon_init);
module_exit(agp_efficeon_cleanup);
MODULE_AUTHOR("Carlos Puchol <cpglinux@puchol.com>");
MODULE_LICENSE("GPL and additional rights");

1068
drivers/char/agp/frontend.c Normal file

File diff suppressed because it is too large

1435
drivers/char/agp/generic.c Normal file

File diff suppressed because it is too large

553
drivers/char/agp/hp-agp.c Normal file
View file

@ -0,0 +1,553 @@
/*
* HP zx1 AGPGART routines.
*
* (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <asm/acpi-ext.h>
#include "agp.h"
#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
/* HP ZX1 IOC registers */
#define HP_ZX1_IBASE 0x300
#define HP_ZX1_IMASK 0x308
#define HP_ZX1_PCOM 0x310
#define HP_ZX1_TCNFG 0x318
#define HP_ZX1_PDIR_BASE 0x320
#define HP_ZX1_IOVA_BASE GB(1UL)
#define HP_ZX1_IOVA_SIZE GB(1UL)
#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
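/*
 * e.g. with 4 KB I/O pages (io_tlb_shift == 12), an IOVA 1 MB past
 * iova_base maps to PDIR entry 256.
 */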
#define AGP8X_MODE_BIT 3
#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
/* The AGP bridge need not be a PCI device, but DRM thinks it is. */
static struct pci_dev fake_bridge_dev;
static int hp_zx1_gart_found;
static struct aper_size_info_fixed hp_zx1_sizes[] =
{
{0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
};
static struct gatt_mask hp_zx1_masks[] =
{
{.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
};
static struct _hp_private {
volatile u8 __iomem *ioc_regs;
volatile u8 __iomem *lba_regs;
int lba_cap_offset;
u64 *io_pdir; // PDIR for entire IOVA
u64 *gatt; // PDIR just for GART (subset of above)
u64 gatt_entries;
u64 iova_base;
u64 gart_base;
u64 gart_size;
u64 io_pdir_size;
int io_pdir_owner; // do we own it, or share it with sba_iommu?
int io_page_size;
int io_tlb_shift;
int io_tlb_ps; // IOC ps config
int io_pages_per_kpage;
} hp_private;
static int __init hp_zx1_ioc_shared(void)
{
struct _hp_private *hp = &hp_private;
printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
/*
* IOC already configured by sba_iommu module; just use
* its setup. We assume:
* - IOVA space is 1 GB in size
* - first 512 MB is IOMMU, second 512 MB is GART
*/
hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
switch (hp->io_tlb_ps) {
case 0: hp->io_tlb_shift = 12; break;
case 1: hp->io_tlb_shift = 13; break;
case 2: hp->io_tlb_shift = 14; break;
case 3: hp->io_tlb_shift = 16; break;
default:
printk(KERN_ERR PFX "Invalid IOTLB page size "
"configuration 0x%x\n", hp->io_tlb_ps);
hp->gatt = NULL;
hp->gatt_entries = 0;
return -ENODEV;
}
hp->io_page_size = 1 << hp->io_tlb_shift;
hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
/* Normal case when no AGP device is in the system */
hp->gatt = NULL;
hp->gatt_entries = 0;
printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
"GART disabled\n");
return -ENODEV;
}
return 0;
}
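/*
 * Editor's sketch (not part of the driver): the shared-IOC path above relies
 * on two bits of arithmetic -- the GART is the upper half of the 1GB IOVA
 * window, and HP_ZX1_IOVA_TO_PDIR() maps an IOVA to a PDIR index by shifting
 * out the IOTLB page size. The stand-alone user-space program below walks
 * through that math with assumed values (4KB IOTLB pages, io_tlb_ps == 0).
 */
#if 0 /* compile separately as a normal C program */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t iova_base = 1ULL << 30;	/* HP_ZX1_IOVA_BASE: 1GB */
	uint64_t iova_size = 1ULL << 30;	/* HP_ZX1_IOVA_SIZE: 1GB */
	uint64_t gart_size = iova_size / 2;	/* upper half is the GART */
	uint64_t gart_base = iova_base + iova_size - gart_size;
	int io_tlb_shift = 12;			/* 4KB IOTLB pages */

	/* Same computation as HP_ZX1_IOVA_TO_PDIR(gart_base). */
	uint64_t pdir_idx = (gart_base - iova_base) >> io_tlb_shift;

	printf("gart_base=%#llx, first GART PDIR index=%llu\n",
	       (unsigned long long)gart_base, (unsigned long long)pdir_idx);
	return 0;
}
#endif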
static int __init
hp_zx1_ioc_owner (void)
{
struct _hp_private *hp = &hp_private;
printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
/*
* Select an IOV page size no larger than the system page size.
*/
if (PAGE_SIZE >= KB(64)) {
hp->io_tlb_shift = 16;
hp->io_tlb_ps = 3;
} else if (PAGE_SIZE >= KB(16)) {
hp->io_tlb_shift = 14;
hp->io_tlb_ps = 2;
} else if (PAGE_SIZE >= KB(8)) {
hp->io_tlb_shift = 13;
hp->io_tlb_ps = 1;
} else {
hp->io_tlb_shift = 12;
hp->io_tlb_ps = 0;
}
hp->io_page_size = 1 << hp->io_tlb_shift;
hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
hp->iova_base = HP_ZX1_IOVA_BASE;
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
return 0;
}
static int __init
hp_zx1_ioc_init (u64 hpa)
{
struct _hp_private *hp = &hp_private;
hp->ioc_regs = ioremap(hpa, 1024);
if (!hp->ioc_regs)
return -ENOMEM;
/*
* If the IOTLB is currently disabled, we can take it over.
* Otherwise, we have to share with sba_iommu.
*/
hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
if (hp->io_pdir_owner)
return hp_zx1_ioc_owner();
return hp_zx1_ioc_shared();
}
static int
hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
{
u16 status;
u8 pos, id;
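/* Bound the walk so a malformed or circular capability list can't loop forever. */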
int ttl = 48;
status = readw(hpa+PCI_STATUS);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
pos = readb(hpa+PCI_CAPABILITY_LIST);
while (ttl-- && pos >= 0x40) {
pos &= ~3;
id = readb(hpa+pos+PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
}
return 0;
}
static int __init
hp_zx1_lba_init (u64 hpa)
{
struct _hp_private *hp = &hp_private;
int cap;
hp->lba_regs = ioremap(hpa, 256);
if (!hp->lba_regs)
return -ENOMEM;
hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
if (cap != PCI_CAP_ID_AGP) {
printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
cap, hp->lba_cap_offset);
iounmap(hp->lba_regs);
return -ENODEV;
}
return 0;
}
static int
hp_zx1_fetch_size(void)
{
int size;
size = hp_private.gart_size / MB(1);
hp_zx1_sizes[0].size = size;
agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
return size;
}
static int
hp_zx1_configure (void)
{
struct _hp_private *hp = &hp_private;
agp_bridge->gart_bus_addr = hp->gart_base;
agp_bridge->capndx = hp->lba_cap_offset;
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) {
writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);
writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
readl(hp->ioc_regs+HP_ZX1_IMASK);
writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
readl(hp->ioc_regs+HP_ZX1_IBASE);
writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
readl(hp->ioc_regs+HP_ZX1_PCOM);
}
return 0;
}
static void
hp_zx1_cleanup (void)
{
struct _hp_private *hp = &hp_private;
if (hp->ioc_regs) {
if (hp->io_pdir_owner) {
writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
readq(hp->ioc_regs+HP_ZX1_IBASE);
}
iounmap(hp->ioc_regs);
}
if (hp->lba_regs)
iounmap(hp->lba_regs);
}
static void
hp_zx1_tlbflush (struct agp_memory *mem)
{
struct _hp_private *hp = &hp_private;
writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
readq(hp->ioc_regs+HP_ZX1_PCOM);
}
static int
hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
{
struct _hp_private *hp = &hp_private;
int i;
if (hp->io_pdir_owner) {
hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
get_order(hp->io_pdir_size));
if (!hp->io_pdir) {
printk(KERN_ERR PFX "Couldn't allocate contiguous "
"memory for I/O PDIR\n");
hp->gatt = NULL;
hp->gatt_entries = 0;
return -ENOMEM;
}
memset(hp->io_pdir, 0, hp->io_pdir_size);
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
}
for (i = 0; i < hp->gatt_entries; i++) {
hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
}
return 0;
}
static int
hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
{
struct _hp_private *hp = &hp_private;
if (hp->io_pdir_owner)
free_pages((unsigned long) hp->io_pdir,
get_order(hp->io_pdir_size));
else
hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
return 0;
}
static int
hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
{
struct _hp_private *hp = &hp_private;
int i, k;
off_t j, io_pg_start;
int io_pg_count;
if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
io_pg_start = hp->io_pages_per_kpage * pg_start;
io_pg_count = hp->io_pages_per_kpage * mem->page_count;
if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
return -EINVAL;
}
j = io_pg_start;
while (j < (io_pg_start + io_pg_count)) {
if (hp->gatt[j]) {
return -EBUSY;
}
j++;
}
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
unsigned long paddr;
paddr = page_to_phys(mem->pages[i]);
for (k = 0;
k < hp->io_pages_per_kpage;
k++, j++, paddr += hp->io_page_size) {
hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
}
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
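/*
 * Editor's sketch (not part of the driver): when kernel pages are larger than
 * IOC pages, each kernel page fans out to io_pages_per_kpage PDIR entries, so
 * the insert/remove paths above scale pg_start and page_count by that factor.
 * A stand-alone illustration with assumed sizes (16KB kernel pages over 4KB
 * IO pages); the request values are hypothetical.
 */
#if 0 /* compile separately as a normal C program */
#include <stdio.h>

int main(void)
{
	int page_size = 16384;			/* assumed PAGE_SIZE */
	int io_page_size = 4096;		/* assumed IOC page size */
	int io_pages_per_kpage = page_size / io_page_size;	/* 4 */
	int pg_start = 3, page_count = 2;	/* sample AGP bind request */

	int io_pg_start = io_pages_per_kpage * pg_start;	/* 12 */
	int io_pg_count = io_pages_per_kpage * page_count;	/* 8 */

	printf("PDIR entries %d..%d cover the request\n",
	       io_pg_start, io_pg_start + io_pg_count - 1);
	return 0;
}
#endif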
static int
hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
{
struct _hp_private *hp = &hp_private;
int i, io_pg_start, io_pg_count;
if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
io_pg_start = hp->io_pages_per_kpage * pg_start;
io_pg_count = hp->io_pages_per_kpage * mem->page_count;
for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
hp->gatt[i] = agp_bridge->scratch_page;
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static unsigned long
hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
{
return HP_ZX1_PDIR_VALID_BIT | addr;
}
static void
hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
{
struct _hp_private *hp = &hp_private;
u32 command;
command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
command = agp_collect_device_status(bridge, mode, command);
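/* Set the AGP enable bit (bit 8) before writing the command register back. */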
command |= 0x00000100;
writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
agp_device_command(command, (mode & AGP8X_MODE) != 0);
}
const struct agp_bridge_driver hp_zx1_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.configure = hp_zx1_configure,
.fetch_size = hp_zx1_fetch_size,
.cleanup = hp_zx1_cleanup,
.tlb_flush = hp_zx1_tlbflush,
.mask_memory = hp_zx1_mask_memory,
.masks = hp_zx1_masks,
.agp_enable = hp_zx1_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = hp_zx1_create_gatt_table,
.free_gatt_table = hp_zx1_free_gatt_table,
.insert_memory = hp_zx1_insert_memory,
.remove_memory = hp_zx1_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
};
static int __init
hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
{
struct agp_bridge_data *bridge;
int error = 0;
error = hp_zx1_ioc_init(ioc_hpa);
if (error)
goto fail;
error = hp_zx1_lba_init(lba_hpa);
if (error)
goto fail;
bridge = agp_alloc_bridge();
if (!bridge) {
error = -ENOMEM;
goto fail;
}
bridge->driver = &hp_zx1_driver;
fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
bridge->dev = &fake_bridge_dev;
error = agp_add_bridge(bridge);
fail:
if (error)
hp_zx1_cleanup();
return error;
}
static acpi_status __init
zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
{
acpi_handle handle, parent;
acpi_status status;
struct acpi_device_info *info;
u64 lba_hpa, sba_hpa, length;
int match;
status = hp_acpi_csr_space(obj, &lba_hpa, &length);
if (ACPI_FAILURE(status))
return AE_OK; /* keep looking for another bridge */
/* Look for an enclosing IOC scope and find its CSR space */
handle = obj;
do {
status = acpi_get_object_info(handle, &info);
if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
/* TBD check _CID also */
match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
kfree(info);
if (match) {
status = hp_acpi_csr_space(handle, &sba_hpa, &length);
if (ACPI_SUCCESS(status))
break;
else {
printk(KERN_ERR PFX "Detected HP ZX1 "
"AGP LBA but no IOC.\n");
return AE_OK;
}
}
}
status = acpi_get_parent(handle, &parent);
handle = parent;
} while (ACPI_SUCCESS(status));
if (ACPI_FAILURE(status))
return AE_OK; /* found no enclosing IOC */
if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
return AE_OK;
printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
"(ioc=%llx, lba=%llx)\n", (char *)context,
sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
hp_zx1_gart_found = 1;
return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
}
static int __init
agp_hp_init (void)
{
if (agp_off)
return -EINVAL;
acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
if (hp_zx1_gart_found)
return 0;
acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
if (hp_zx1_gart_found)
return 0;
return -ENODEV;
}
static void __exit
agp_hp_cleanup (void)
{
}
module_init(agp_hp_init);
module_exit(agp_hp_cleanup);
MODULE_LICENSE("GPL and additional rights");

659
drivers/char/agp/i460-agp.c Normal file

@ -0,0 +1,659 @@
/*
* For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
* the "Intel 460GTX Chipset Software Developer's Manual":
* http://www.intel.com/design/archives/itanium/downloads/248704.htm
*/
/*
* 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
* Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include <linux/log2.h>
#include "agp.h"
#define INTEL_I460_BAPBASE 0x98
#define INTEL_I460_GXBCTL 0xa0
#define INTEL_I460_AGPSIZ 0xa2
#define INTEL_I460_ATTBASE 0xfe200000
#define INTEL_I460_GATT_VALID (1UL << 24)
#define INTEL_I460_GATT_COHERENT (1UL << 25)
/*
* The i460 can operate with large (4MB) pages, but there is no sane way to support this
* within the current kernel/DRM environment, so we disable the relevant code for now.
* See also comments in ia64_alloc_page()...
*/
#define I460_LARGE_IO_PAGES 0
#if I460_LARGE_IO_PAGES
# define I460_IO_PAGE_SHIFT i460.io_page_shift
#else
# define I460_IO_PAGE_SHIFT 12
#endif
#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT)
#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
#define I460_SRAM_IO_DISABLE (1 << 4)
#define I460_BAPBASE_ENABLE (1 << 3)
#define I460_AGPSIZ_MASK 0x7
#define I460_4M_PS (1 << 1)
/* Control bits for Out-Of-GART coherency and Burst Write Combining */
#define I460_GXBCTL_OOG (1UL << 0)
#define I460_GXBCTL_BWC (1UL << 2)
/*
 * gatt_table entries are 32 bits wide on the i460; the generic code ought to declare the
 * gatt_table and gatt_table_real pointers as "void *"...
 */
#define RD_GATT(index) readl((u32 *) i460.gatt + (index))
#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index))
/*
* The 460 spec says we have to read the last location written to make sure that all
* writes have taken effect
*/
#define WR_FLUSH_GATT(index) RD_GATT(index)
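/*
 * Editor's sketch (not part of the driver): the write-then-read-back idiom
 * that WR_FLUSH_GATT() encodes, demonstrated on an ordinary volatile buffer.
 * On real hardware the read forces earlier posted writes to the GATT SRAM to
 * complete; a plain array can only mimic the access pattern.
 */
#if 0 /* compile separately as a normal C program */
#include <stdio.h>
#include <stdint.h>

static volatile uint32_t fake_gatt[8];	/* stand-in for the ioremapped GATT */

#define FAKE_RD_GATT(i)		(fake_gatt[i])
#define FAKE_WR_GATT(i, v)	(fake_gatt[i] = (v))
#define FAKE_WR_FLUSH_GATT(i)	((void)FAKE_RD_GATT(i))	/* read back last write */

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		FAKE_WR_GATT(i, 0);
	FAKE_WR_FLUSH_GATT(i - 1);	/* same pattern as i460_create_gatt_table() */
	printf("cleared and flushed %d entries\n", i);
	return 0;
}
#endif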
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
dma_addr_t addr, int type);
static struct {
void *gatt; /* ioremap'd GATT area */
/* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
u8 io_page_shift;
/* BIOS configures chipset to one of 2 possible apbase values: */
u8 dynamic_apbase;
/* structure for tracking partial use of 4MB GART pages: */
struct lp_desc {
unsigned long *alloced_map; /* bitmap of kernel-pages in use */
int refcount; /* number of kernel pages using the large page */
u64 paddr; /* physical address of large page */
struct page *page; /* page pointer */
} *lp_desc;
} i460;
static const struct aper_size_info_8 i460_sizes[3] =
{
/*
* The 32GB aperture is only available with a 4M GART page size. Due to the
* dynamic GART page size, we can't figure out page_order or num_entries until
* runtime.
*/
{32768, 0, 0, 4},
{1024, 0, 0, 2},
{256, 0, 0, 1}
};
static struct gatt_mask i460_masks[] =
{
{
.mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
.type = 0
}
};
static int i460_fetch_size (void)
{
int i;
u8 temp;
struct aper_size_info_8 *values;
/* Determine the GART page size */
pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
printk(KERN_ERR PFX
"I/O (GART) page-size %luKB doesn't match expected "
"size %luKB\n",
1UL << (i460.io_page_shift - 10),
1UL << (I460_IO_PAGE_SHIFT));
return 0;
}
values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
/* Exit now if the IO drivers for the GART SRAMS are turned off */
if (temp & I460_SRAM_IO_DISABLE) {
printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
printk(KERN_ERR PFX "AGPGART operation not possible\n");
return 0;
}
/* Make sure we don't try to create a 2^23-entry GATT */
if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
return 0;
}
/* Determine the proper APBASE register */
if (temp & I460_BAPBASE_ENABLE)
i460.dynamic_apbase = INTEL_I460_BAPBASE;
else
i460.dynamic_apbase = AGP_APBASE;
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
/*
* Dynamically calculate the proper num_entries and page_order values for
* the defined aperture sizes. Take care not to shift off the end of
* values[i].size.
*/
values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
}
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
/* Neglect control bits when matching up size_value */
if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
/* There isn't anything to do here since 460 has no GART TLB. */
static void i460_tlb_flush (struct agp_memory *mem)
{
return;
}
/*
* This utility function is needed to prevent corruption of the control bits
* which are stored along with the aperture size in 460's AGPSIZ register
*/
static void i460_write_agpsiz (u8 size_value)
{
u8 temp;
pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
((temp & ~I460_AGPSIZ_MASK) | size_value));
}
static void i460_cleanup (void)
{
struct aper_size_info_8 *previous_size;
previous_size = A_SIZE_8(agp_bridge->previous_size);
i460_write_agpsiz(previous_size->size_value);
if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
kfree(i460.lp_desc);
}
static int i460_configure (void)
{
union {
u32 small[2];
u64 large;
} temp;
size_t size;
u8 scratch;
struct aper_size_info_8 *current_size;
temp.large = 0;
current_size = A_SIZE_8(agp_bridge->current_size);
i460_write_agpsiz(current_size->size_value);
/*
* Do the necessary rigmarole to read all eight bytes of APBASE.
* This has to be done since the AGP aperture can be above 4GB on
* 460 based systems.
*/
pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));
/* Clear BAR control bits */
agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);
pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
(scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
/*
* Initialize partial allocation trackers if a GART page is bigger than a kernel
* page.
*/
if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
size = current_size->num_entries * sizeof(i460.lp_desc[0]);
i460.lp_desc = kzalloc(size, GFP_KERNEL);
if (!i460.lp_desc)
return -ENOMEM;
}
return 0;
}
static int i460_create_gatt_table (struct agp_bridge_data *bridge)
{
int page_order, num_entries, i;
void *temp;
/*
* Load up the fixed address of the GART SRAMS which hold our GATT table.
*/
temp = agp_bridge->current_size;
page_order = A_SIZE_8(temp)->page_order;
num_entries = A_SIZE_8(temp)->num_entries;
i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
if (!i460.gatt) {
printk(KERN_ERR PFX "ioremap failed\n");
return -ENOMEM;
}
/* These are no good; they should be removed from the agp_bridge structure... */
agp_bridge->gatt_table_real = NULL;
agp_bridge->gatt_table = NULL;
agp_bridge->gatt_bus_addr = 0;
for (i = 0; i < num_entries; ++i)
WR_GATT(i, 0);
WR_FLUSH_GATT(i - 1);
return 0;
}
static int i460_free_gatt_table (struct agp_bridge_data *bridge)
{
int num_entries, i;
void *temp;
temp = agp_bridge->current_size;
num_entries = A_SIZE_8(temp)->num_entries;
for (i = 0; i < num_entries; ++i)
WR_GATT(i, 0);
WR_FLUSH_GATT(num_entries - 1);
iounmap(i460.gatt);
return 0;
}
/*
* The following functions are called when the I/O (GART) page size is smaller than
* PAGE_SIZE.
*/
static int i460_insert_memory_small_io_page (struct agp_memory *mem,
off_t pg_start, int type)
{
unsigned long paddr, io_pg_start, io_page_size;
int i, j, k, num_entries;
void *temp;
pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
mem, pg_start, type, page_to_phys(mem->pages[0]));
if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
return -EINVAL;
io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
temp = agp_bridge->current_size;
num_entries = A_SIZE_8(temp)->num_entries;
if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
return -EINVAL;
}
j = io_pg_start;
while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
j, RD_GATT(j));
return -EBUSY;
}
j++;
}
io_page_size = 1UL << I460_IO_PAGE_SHIFT;
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
paddr = page_to_phys(mem->pages[i]);
for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
}
WR_FLUSH_GATT(j - 1);
return 0;
}
static int i460_remove_memory_small_io_page(struct agp_memory *mem,
off_t pg_start, int type)
{
int i;
pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
mem, pg_start, type);
pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
WR_GATT(i, 0);
WR_FLUSH_GATT(i - 1);
return 0;
}
#if I460_LARGE_IO_PAGES
/*
* These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
*
* This situation is interesting since AGP memory allocations that are smaller than a
* single GART page are possible. The i460.lp_desc array tracks partial allocation of the
* large GART pages to work around this issue.
*
* i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
* pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
* i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
*/
static int i460_alloc_large_page (struct lp_desc *lp)
{
unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
size_t map_size;
lp->page = alloc_pages(GFP_KERNEL, order);
if (!lp->page) {
printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
return -ENOMEM;
}
map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
if (!lp->alloced_map) {
__free_pages(lp->page, order);
printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
return -ENOMEM;
}
lp->paddr = page_to_phys(lp->page);
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0;
}
static void i460_free_large_page (struct lp_desc *lp)
{
kfree(lp->alloced_map);
lp->alloced_map = NULL;
__free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}
static int i460_insert_memory_large_io_page (struct agp_memory *mem,
off_t pg_start, int type)
{
int i, start_offset, end_offset, idx, pg, num_entries;
struct lp_desc *start, *end, *lp;
void *temp;
if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
return -EINVAL;
temp = agp_bridge->current_size;
num_entries = A_SIZE_8(temp)->num_entries;
/* Figure out what pg_start means in terms of our large GART pages */
start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
if (end > i460.lp_desc + num_entries) {
printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
return -EINVAL;
}
/* Check if the requested region of the aperture is free */
for (lp = start; lp <= end; ++lp) {
if (!lp->alloced_map)
continue; /* OK, the entire large page is available... */
for (idx = ((lp == start) ? start_offset : 0);
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++)
{
if (test_bit(idx, lp->alloced_map))
return -EBUSY;
}
}
for (lp = start, i = 0; lp <= end; ++lp) {
if (!lp->alloced_map) {
/* Allocate new GART pages... */
if (i460_alloc_large_page(lp) < 0)
return -ENOMEM;
pg = lp - i460.lp_desc;
WR_GATT(pg, i460_mask_memory(agp_bridge,
lp->paddr, 0));
WR_FLUSH_GATT(pg);
}
for (idx = ((lp == start) ? start_offset : 0);
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++, i++)
{
mem->pages[i] = lp->page;
__set_bit(idx, lp->alloced_map);
++lp->refcount;
}
}
return 0;
}
static int i460_remove_memory_large_io_page (struct agp_memory *mem,
off_t pg_start, int type)
{
int i, pg, start_offset, end_offset, idx, num_entries;
struct lp_desc *start, *end, *lp;
void *temp;
temp = agp_bridge->current_size;
num_entries = A_SIZE_8(temp)->num_entries;
/* Figure out what pg_start means in terms of our large GART pages */
start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
for (i = 0, lp = start; lp <= end; ++lp) {
for (idx = ((lp == start) ? start_offset : 0);
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++, i++)
{
mem->pages[i] = NULL;
__clear_bit(idx, lp->alloced_map);
--lp->refcount;
}
/* Free GART pages if they are unused */
if (lp->refcount == 0) {
pg = lp - i460.lp_desc;
WR_GATT(pg, 0);
WR_FLUSH_GATT(pg);
i460_free_large_page(lp);
}
}
return 0;
}
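/*
 * Editor's sketch (not part of the driver): both large-page paths above
 * reduce (pg_start, page_count) to a range of lp_desc slots plus offsets
 * within the first and last large page. A stand-alone illustration under
 * assumed sizes (4MB GART pages over 4KB kernel pages, i.e. 1024 kernel
 * pages per GART page); the request values are hypothetical.
 */
#if 0 /* compile separately as a normal C program */
#include <stdio.h>

int main(void)
{
	int kpages_per_iopage = 1 << (22 - 12);	/* 4MB / 4KB = 1024 */
	long pg_start = 1000, page_count = 50;	/* sample bind request */

	long start_lp = pg_start / kpages_per_iopage;
	long end_lp = (pg_start + page_count - 1) / kpages_per_iopage;
	int start_off = pg_start % kpages_per_iopage;
	int end_off = (pg_start + page_count - 1) % kpages_per_iopage;

	/* Prints "lp_desc[0..1], offsets 1000..25": the request straddles
	 * two large pages. */
	printf("lp_desc[%ld..%ld], offsets %d..%d\n",
	       start_lp, end_lp, start_off, end_off);
	return 0;
}
#endif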
/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
static int i460_insert_memory (struct agp_memory *mem,
off_t pg_start, int type)
{
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
return i460_insert_memory_small_io_page(mem, pg_start, type);
else
return i460_insert_memory_large_io_page(mem, pg_start, type);
}
static int i460_remove_memory (struct agp_memory *mem,
off_t pg_start, int type)
{
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
return i460_remove_memory_small_io_page(mem, pg_start, type);
else
return i460_remove_memory_large_io_page(mem, pg_start, type);
}
/*
* If the I/O (GART) page size is bigger than the kernel page size, we don't want to
* allocate memory until we know where it is to be bound in the aperture (a
* multi-kernel-page alloc might fit inside of an already allocated GART page).
*
* Let's just hope nobody counts on the allocated AGP memory being there before bind time
* (I don't think current drivers do)...
*/
static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
{
void *page;
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
page = agp_generic_alloc_page(agp_bridge);
} else
/* Returning NULL would cause problems */
/* AK: really dubious code. */
page = (void *)~0UL;
return page;
}
static void i460_destroy_page (struct page *page, int flags)
{
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
agp_generic_destroy_page(page, flags);
}
}
#endif /* I460_LARGE_IO_PAGES */
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
dma_addr_t addr, int type)
{
/* Make sure the returned address is a valid GATT entry */
return bridge->driver->masks[0].mask
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}
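/*
 * Editor's sketch (not part of the driver): i460_mask_memory() packs a
 * physical address into a 32-bit GATT entry -- truncate to a GART-page
 * boundary, keep bits 31..12, shift down by 12, and OR in the valid and
 * coherent control bits. A worked stand-alone example with a sample address.
 */
#if 0 /* compile separately as a normal C program */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t valid = 1ULL << 24;	/* INTEL_I460_GATT_VALID */
	uint64_t coherent = 1ULL << 25;	/* INTEL_I460_GATT_COHERENT */
	int io_page_shift = 12;		/* assumed 4KB GART pages */
	uint64_t addr = 0x12345678;	/* sample physical address */

	uint64_t entry = (valid | coherent)
		| (((addr & ~((1ULL << io_page_shift) - 1)) & 0xfffff000) >> 12);

	printf("GATT entry = %#llx\n", (unsigned long long)entry); /* 0x3012345 */
	return 0;
}
#endif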
const struct agp_bridge_driver intel_i460_driver = {
.owner = THIS_MODULE,
.aperture_sizes = i460_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 3,
.configure = i460_configure,
.fetch_size = i460_fetch_size,
.cleanup = i460_cleanup,
.tlb_flush = i460_tlb_flush,
.mask_memory = i460_mask_memory,
.masks = i460_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = i460_create_gatt_table,
.free_gatt_table = i460_free_gatt_table,
#if I460_LARGE_IO_PAGES
.insert_memory = i460_insert_memory,
.remove_memory = i460_remove_memory,
.agp_alloc_page = i460_alloc_page,
.agp_destroy_page = i460_destroy_page,
#else
.insert_memory = i460_insert_memory_small_io_page,
.remove_memory = i460_remove_memory_small_io_page,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
#endif
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
};
static int agp_intel_i460_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &intel_i460_driver;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_intel_i460_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
static struct pci_device_id agp_intel_i460_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_84460GX,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
static struct pci_driver agp_intel_i460_pci_driver = {
.name = "agpgart-intel-i460",
.id_table = agp_intel_i460_pci_table,
.probe = agp_intel_i460_probe,
.remove = agp_intel_i460_remove,
};
static int __init agp_intel_i460_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_intel_i460_pci_driver);
}
static void __exit agp_intel_i460_cleanup(void)
{
pci_unregister_driver(&agp_intel_i460_pci_driver);
}
module_init(agp_intel_i460_init);
module_exit(agp_intel_i460_cleanup);
MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
MODULE_LICENSE("GPL and additional rights");

924
drivers/char/agp/intel-agp.c Normal file

@ -0,0 +1,924 @@
/*
* Intel AGPGART routines.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
static int intel_fetch_size(void)
{
int i;
u16 temp;
struct aper_size_info_16 *values;
pci_read_config_word(agp_bridge->dev, INTEL_APSIZE, &temp);
values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static int __intel_8xx_fetch_size(u8 temp)
{
int i;
struct aper_size_info_8 *values;
values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static int intel_8xx_fetch_size(void)
{
u8 temp;
pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
return __intel_8xx_fetch_size(temp);
}
static int intel_815_fetch_size(void)
{
u8 temp;
/* Intel 815 chipsets have a _weird_ APSIZE register with only
* one non-reserved bit, so mask the others out ... */
pci_read_config_byte(agp_bridge->dev, INTEL_APSIZE, &temp);
temp &= (1 << 3);
return __intel_8xx_fetch_size(temp);
}
static void intel_tlbflush(struct agp_memory *mem)
{
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2200);
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
}
static void intel_8xx_tlbflush(struct agp_memory *mem)
{
u32 temp;
pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp & ~(1 << 7));
pci_read_config_dword(agp_bridge->dev, INTEL_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, temp | (1 << 7));
}
static void intel_cleanup(void)
{
u16 temp;
struct aper_size_info_16 *previous_size;
previous_size = A_SIZE_16(agp_bridge->previous_size);
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
}
static void intel_8xx_cleanup(void)
{
u16 temp;
struct aper_size_info_8 *previous_size;
previous_size = A_SIZE_8(agp_bridge->previous_size);
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp & ~(1 << 9));
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, previous_size->size_value);
}
static int intel_configure(void)
{
u16 temp2;
struct aper_size_info_16 *current_size;
current_size = A_SIZE_16(agp_bridge->current_size);
/* aperture size */
pci_write_config_word(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x2280);
/* paccfg/nbxcfg */
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG,
(temp2 & ~(1 << 10)) | (1 << 9));
/* clear any possible error conditions */
pci_write_config_byte(agp_bridge->dev, INTEL_ERRSTS + 1, 7);
return 0;
}
static int intel_815_configure(void)
{
u32 addr;
u8 temp2;
struct aper_size_info_8 *current_size;
/* attbase - aperture base */
/* the Intel 815 chipset spec says that bits 29-31 of the
 * ATTBASE register are reserved, so try not to write them */
if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
dev_emerg(&agp_bridge->dev->dev, "gatt bus addr too high");
return -EINVAL;
}
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
pci_read_config_dword(agp_bridge->dev, INTEL_ATTBASE, &addr);
addr &= INTEL_815_ATTBASE_MASK;
addr |= agp_bridge->gatt_bus_addr;
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* apcont */
pci_read_config_byte(agp_bridge->dev, INTEL_815_APCONT, &temp2);
pci_write_config_byte(agp_bridge->dev, INTEL_815_APCONT, temp2 | (1 << 1));
/* clear any possible error conditions */
/* Oddness: this chipset seems to have no ERRSTS register! */
return 0;
}
static void intel_820_tlbflush(struct agp_memory *mem)
{
return;
}
static void intel_820_cleanup(void)
{
u8 temp;
struct aper_size_info_8 *previous_size;
previous_size = A_SIZE_8(agp_bridge->previous_size);
pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp);
pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR,
temp & ~(1 << 1));
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE,
previous_size->size_value);
}
static int intel_820_configure(void)
{
u8 temp2;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* globally enable aperture access */
/* Unlike on the i850, this flag is not accessed */
/* through the MCHCFG register. */
pci_read_config_byte(agp_bridge->dev, INTEL_I820_RDCR, &temp2);
pci_write_config_byte(agp_bridge->dev, INTEL_I820_RDCR, temp2 | (1 << 1));
/* clear any possible AGP-related error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I820_ERRSTS, 0x001c);
return 0;
}
static int intel_840_configure(void)
{
u16 temp2;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* mcgcfg */
pci_read_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_I840_MCHCFG, temp2 | (1 << 9));
/* clear any possible error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I840_ERRSTS, 0xc000);
return 0;
}
static int intel_845_configure(void)
{
u8 temp2;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
if (agp_bridge->apbase_config != 0) {
pci_write_config_dword(agp_bridge->dev, AGP_APBASE,
agp_bridge->apbase_config);
} else {
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
agp_bridge->apbase_config = agp_bridge->gart_bus_addr;
}
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* agpm */
pci_read_config_byte(agp_bridge->dev, INTEL_I845_AGPM, &temp2);
pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
/* clear any possible error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
return 0;
}
static int intel_850_configure(void)
{
u16 temp2;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* mcgcfg */
pci_read_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_I850_MCHCFG, temp2 | (1 << 9));
/* clear any possible AGP-related error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I850_ERRSTS, 0x001c);
return 0;
}
static int intel_860_configure(void)
{
u16 temp2;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* mcgcfg */
pci_read_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_I860_MCHCFG, temp2 | (1 << 9));
/* clear any possible AGP-related error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I860_ERRSTS, 0xf700);
return 0;
}
static int intel_830mp_configure(void)
{
u16 temp2;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* gmch */
pci_read_config_word(agp_bridge->dev, INTEL_NBXCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_NBXCFG, temp2 | (1 << 9));
/* clear any possible AGP-related error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I830_ERRSTS, 0x1c);
return 0;
}
static int intel_7505_configure(void)
{
u16 temp2;
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, INTEL_APSIZE, current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture base */
pci_write_config_dword(agp_bridge->dev, INTEL_ATTBASE, agp_bridge->gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge->dev, INTEL_AGPCTRL, 0x0000);
/* mchcfg */
pci_read_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, &temp2);
pci_write_config_word(agp_bridge->dev, INTEL_I7505_MCHCFG, temp2 | (1 << 9));
return 0;
}
/* Setup function */
static const struct gatt_mask intel_generic_masks[] =
{
{.mask = 0x00000017, .type = 0}
};
static const struct aper_size_info_8 intel_815_sizes[2] =
{
{64, 16384, 4, 0},
{32, 8192, 3, 8},
};
static const struct aper_size_info_8 intel_8xx_sizes[7] =
{
{256, 65536, 6, 0},
{128, 32768, 5, 32},
{64, 16384, 4, 48},
{32, 8192, 3, 56},
{16, 4096, 2, 60},
{8, 2048, 1, 62},
{4, 1024, 0, 63}
};
static const struct aper_size_info_16 intel_generic_sizes[7] =
{
{256, 65536, 6, 0},
{128, 32768, 5, 32},
{64, 16384, 4, 48},
{32, 8192, 3, 56},
{16, 4096, 2, 60},
{8, 2048, 1, 62},
{4, 1024, 0, 63}
};
static const struct aper_size_info_8 intel_830mp_sizes[4] =
{
{256, 65536, 6, 0},
{128, 32768, 5, 32},
{64, 16384, 4, 48},
{32, 8192, 3, 56}
};
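/*
 * Editor's sketch (not part of the driver): each entry in the tables above
 * pairs an aperture size in MB with the number of 4KB GATT entries needed to
 * map it (and the page order of the GATT allocation). A quick stand-alone
 * check of that arithmetic, e.g. 256MB / 4KB = 65536 entries.
 */
#if 0 /* compile separately as a normal C program */
#include <stdio.h>

int main(void)
{
	int sizes_mb[] = { 256, 128, 64, 32, 16, 8, 4 };
	int i;

	for (i = 0; i < 7; i++) {
		/* entries = aperture bytes / 4KB page, as in the tables above */
		long entries = (long)sizes_mb[i] * 1024 * 1024 / 4096;
		printf("%3d MB aperture -> %6ld GATT entries\n",
		       sizes_mb[i], entries);
	}
	return 0;
}
#endif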
static const struct agp_bridge_driver intel_generic_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_generic_sizes,
.size_type = U16_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = intel_configure,
.fetch_size = intel_fetch_size,
.cleanup = intel_cleanup,
.tlb_flush = intel_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_815_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_815_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 2,
.needs_scratch_page = true,
.configure = intel_815_configure,
.fetch_size = intel_815_fetch_size,
.cleanup = intel_8xx_cleanup,
.tlb_flush = intel_8xx_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_820_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = intel_820_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_820_cleanup,
.tlb_flush = intel_820_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_830mp_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_830mp_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 4,
.needs_scratch_page = true,
.configure = intel_830mp_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
.tlb_flush = intel_8xx_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_840_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = intel_840_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
.tlb_flush = intel_8xx_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_845_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = intel_845_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
.tlb_flush = intel_8xx_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_850_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = intel_850_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
.tlb_flush = intel_8xx_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_860_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = intel_860_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
.tlb_flush = intel_8xx_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_7505_driver = {
.owner = THIS_MODULE,
.aperture_sizes = intel_8xx_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = intel_7505_configure,
.fetch_size = intel_8xx_fetch_size,
.cleanup = intel_8xx_cleanup,
.tlb_flush = intel_8xx_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = intel_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
/* Table describing the Intel AGP chipset drivers. The probe routine scans
 * this table by host-bridge PCI device ID to pick the matching driver;
 * integrated GMCH parts are handled first via intel_gmch_probe().
 */
static const struct intel_agp_driver_description {
unsigned int chip_id;
char *name;
const struct agp_bridge_driver *driver;
} intel_agp_chipsets[] = {
{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
{ PCI_DEVICE_ID_INTEL_82845_HB, "i845", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82845G_HB, "845G", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
{ 0, NULL, NULL }
};
static int agp_intel_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr = 0;
struct resource *r;
int i, err;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->capndx = cap_ptr;
if (intel_gmch_probe(pdev, NULL, bridge))
goto found_gmch;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
/* Multiple graphics chip models may sit behind
 the same host-bridge type, so match the exact
 device ID to be sure we detect the right IGD. */
if (pdev->device == intel_agp_chipsets[i].chip_id) {
bridge->driver = intel_agp_chipsets[i].driver;
break;
}
}
if (!bridge->driver) {
if (cap_ptr)
dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
agp_put_bridge(bridge);
return -ENODEV;
}
bridge->dev = pdev;
bridge->dev_private_data = NULL;
dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
/*
* The following fixes the case where the BIOS has "forgotten" to
* provide an address range for the GART.
* 20030610 - hamish@zot.org
* This happens before pci_enable_device() intentionally;
* calling pci_enable_device() before assigning the resource
* will result in the GART being disabled on machines with such
* BIOSs (the GART ends up with a BAR starting at 0, which
* conflicts with a lot of other devices).
*/
r = &pdev->resource[0];
if (!r->start && r->end) {
if (pci_assign_resource(pdev, 0)) {
dev_err(&pdev->dev, "can't assign resource 0\n");
agp_put_bridge(bridge);
return -ENODEV;
}
}
/*
* If the device has not been properly setup, the following will catch
* the problem and should stop the system from crashing.
* 20030610 - hamish@zot.org
*/
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "can't enable PCI device\n");
agp_put_bridge(bridge);
return -ENODEV;
}
/* Fill in the mode register */
if (cap_ptr) {
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
}
found_gmch:
pci_set_drvdata(pdev, bridge);
err = agp_add_bridge(bridge);
return err;
}
static void agp_intel_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
intel_gmch_remove();
agp_put_bridge(bridge);
}
#ifdef CONFIG_PM
static int agp_intel_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
bridge->driver->configure();
return 0;
}
#endif
static struct pci_device_id agp_intel_pci_table[] = {
#define ID(x) \
{ \
.class = (PCI_CLASS_BRIDGE_HOST << 8), \
.class_mask = ~0, \
.vendor = PCI_VENDOR_ID_INTEL, \
.device = x, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
}
ID(PCI_DEVICE_ID_INTEL_82441), /* for HAS2 support */
ID(PCI_DEVICE_ID_INTEL_82443LX_0),
ID(PCI_DEVICE_ID_INTEL_82443BX_0),
ID(PCI_DEVICE_ID_INTEL_82443GX_0),
ID(PCI_DEVICE_ID_INTEL_82810_MC1),
ID(PCI_DEVICE_ID_INTEL_82810_MC3),
ID(PCI_DEVICE_ID_INTEL_82810E_MC),
ID(PCI_DEVICE_ID_INTEL_82815_MC),
ID(PCI_DEVICE_ID_INTEL_82820_HB),
ID(PCI_DEVICE_ID_INTEL_82820_UP_HB),
ID(PCI_DEVICE_ID_INTEL_82830_HB),
ID(PCI_DEVICE_ID_INTEL_82840_HB),
ID(PCI_DEVICE_ID_INTEL_82845_HB),
ID(PCI_DEVICE_ID_INTEL_82845G_HB),
ID(PCI_DEVICE_ID_INTEL_82850_HB),
ID(PCI_DEVICE_ID_INTEL_82854_HB),
ID(PCI_DEVICE_ID_INTEL_82855PM_HB),
ID(PCI_DEVICE_ID_INTEL_82855GM_HB),
ID(PCI_DEVICE_ID_INTEL_82860_HB),
ID(PCI_DEVICE_ID_INTEL_82865_HB),
ID(PCI_DEVICE_ID_INTEL_82875_HB),
ID(PCI_DEVICE_ID_INTEL_7505_0),
ID(PCI_DEVICE_ID_INTEL_7205_0),
ID(PCI_DEVICE_ID_INTEL_E7221_HB),
ID(PCI_DEVICE_ID_INTEL_82915G_HB),
ID(PCI_DEVICE_ID_INTEL_82915GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
ID(PCI_DEVICE_ID_INTEL_82G35_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
ID(PCI_DEVICE_ID_INTEL_82965G_HB),
ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
ID(PCI_DEVICE_ID_INTEL_82965GME_HB),
ID(PCI_DEVICE_ID_INTEL_G33_HB),
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
ID(PCI_DEVICE_ID_INTEL_GM45_HB),
ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
ID(PCI_DEVICE_ID_INTEL_Q45_HB),
ID(PCI_DEVICE_ID_INTEL_G45_HB),
ID(PCI_DEVICE_ID_INTEL_G41_HB),
ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
{ }
};
MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
static struct pci_driver agp_intel_pci_driver = {
.name = "agpgart-intel",
.id_table = agp_intel_pci_table,
.probe = agp_intel_probe,
.remove = agp_intel_remove,
#ifdef CONFIG_PM
.resume = agp_intel_resume,
#endif
};
static int __init agp_intel_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_intel_pci_driver);
}
static void __exit agp_intel_cleanup(void)
{
pci_unregister_driver(&agp_intel_pci_driver);
}
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");

193
drivers/char/agp/intel-agp.h Normal file

@ -0,0 +1,193 @@
/*
* Common Intel AGPGART and GTT definitions.
*/
#ifndef _INTEL_AGP_H
#define _INTEL_AGP_H
/* Intel registers */
#define INTEL_APSIZE 0xb4
#define INTEL_ATTBASE 0xb8
#define INTEL_AGPCTRL 0xb0
#define INTEL_NBXCFG 0x50
#define INTEL_ERRSTS 0x91
/* Intel i830 registers */
#define I830_GMCH_CTRL 0x52
#define I830_GMCH_ENABLED 0x4
#define I830_GMCH_MEM_MASK 0x1
#define I830_GMCH_MEM_64M 0x1
#define I830_GMCH_MEM_128M 0
#define I830_GMCH_GMS_MASK 0x70
#define I830_GMCH_GMS_DISABLED 0x00
#define I830_GMCH_GMS_LOCAL 0x10
#define I830_GMCH_GMS_STOLEN_512 0x20
#define I830_GMCH_GMS_STOLEN_1024 0x30
#define I830_GMCH_GMS_STOLEN_8192 0x40
#define I830_RDRAM_CHANNEL_TYPE 0x03010
#define I830_RDRAM_ND(x) (((x) & 0x20) >> 5)
#define I830_RDRAM_DDT(x) (((x) & 0x18) >> 3)
/* This one is for I830MP w. an external graphic card */
#define INTEL_I830_ERRSTS 0x92
/* Intel 855GM/852GM registers */
#define I855_GMCH_GMS_MASK 0xF0
#define I855_GMCH_GMS_STOLEN_0M 0x0
#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
#define I85X_CAPID 0x44
#define I85X_VARIANT_MASK 0x7
#define I85X_VARIANT_SHIFT 5
#define I855_GME 0x0
#define I855_GM 0x4
#define I852_GME 0x2
#define I852_GM 0x5
/* Intel i845 registers */
#define INTEL_I845_AGPM 0x51
#define INTEL_I845_ERRSTS 0xc8
/* Intel i860 registers */
#define INTEL_I860_MCHCFG 0x50
#define INTEL_I860_ERRSTS 0xc8
/* Intel i810 registers */
#define I810_GMADR_BAR 0
#define I810_MMADR_BAR 1
#define I810_PTE_BASE 0x10000
#define I810_PTE_MAIN_UNCACHED 0x00000000
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
#define I810_GFX_MEM_WIN_32M 0x00010000
#define I810_GMS 0x000000c0
#define I810_GMS_DISABLE 0x00000000
#define I810_PGETBL_CTL 0x2020
#define I810_PGETBL_ENABLED 0x00000001
/* Note: PGETBL_CTL2 has a different offset on G33. */
#define I965_PGETBL_CTL2 0x20c4
#define I965_PGETBL_SIZE_MASK 0x0000000e
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
#define I965_PGETBL_SIZE_128KB (2 << 1)
#define I965_PGETBL_SIZE_1MB (3 << 1)
#define I965_PGETBL_SIZE_2MB (4 << 1)
#define I965_PGETBL_SIZE_1_5MB (5 << 1)
#define G33_GMCH_SIZE_MASK (3 << 8)
#define G33_GMCH_SIZE_1M (1 << 8)
#define G33_GMCH_SIZE_2M (2 << 8)
#define G4x_GMCH_SIZE_MASK (0xf << 8)
#define G4x_GMCH_SIZE_1M (0x1 << 8)
#define G4x_GMCH_SIZE_2M (0x3 << 8)
#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
#define I810_DRAM_ROW_0_SDRAM 0x00000001
/* Intel 815 register */
#define INTEL_815_APCONT 0x51
#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
/* Intel i820 registers */
#define INTEL_I820_RDCR 0x51
#define INTEL_I820_ERRSTS 0xc8
/* Intel i840 registers */
#define INTEL_I840_MCHCFG 0x50
#define INTEL_I840_ERRSTS 0xc8
/* Intel i850 registers */
#define INTEL_I850_MCHCFG 0x50
#define INTEL_I850_ERRSTS 0xc8
/* intel 915G registers */
#define I915_GMADR_BAR 2
#define I915_MMADR_BAR 0
#define I915_PTE_BAR 3
#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62
#define I965_IFPADDR 0x70
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
#define INTEL_I7505_NCAPID 0x60
#define INTEL_I7505_NISTAT 0x6c
#define INTEL_I7505_ATTBASE 0x78
#define INTEL_I7505_ERRSTS 0x42
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB 0xA010
#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG 0xA011
#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB 0xA000
#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG 0xA001
#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG 0x2E02
#define PCI_DEVICE_ID_INTEL_Q45_HB 0x2E10
#define PCI_DEVICE_ID_INTEL_Q45_IG 0x2E12
#define PCI_DEVICE_ID_INTEL_G45_HB 0x2E20
#define PCI_DEVICE_ID_INTEL_G45_IG 0x2E22
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
#endif

1446
drivers/char/agp/intel-gtt.c Normal file

File diff suppressed because it is too large

470
drivers/char/agp/isoch.c Normal file
View file

@ -0,0 +1,470 @@
/*
* Setup routines for AGP 3.5 compliant bridges.
*/
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "agp.h"
/* Generic AGP 3.5 enabling routines */
struct agp_3_5_dev {
struct list_head list;
u8 capndx;
u32 maxbw;
struct pci_dev *dev;
};
static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
{
struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
struct list_head *pos;
list_for_each(pos, head) {
cur = list_entry(pos, struct agp_3_5_dev, list);
if (cur->maxbw > n->maxbw)
break;
}
list_add_tail(new, pos);
}
static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
{
struct agp_3_5_dev *cur;
struct pci_dev *dev;
struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
u32 nistat;
INIT_LIST_HEAD(head);
for (pos=start; pos!=head; ) {
cur = list_entry(pos, struct agp_3_5_dev, list);
dev = cur->dev;
pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
cur->maxbw = (nistat >> 16) & 0xff;
tmp = pos;
pos = pos->next;
agp_3_5_dev_list_insert(head, tmp);
}
}
/*
* Initialize all isochronous transfer parameters for an AGP 3.0
* node (i.e. a host bridge in combination with the adapters
* lying behind it...)
*/
static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
/*
* Convenience structure to make the calculations clearer
* here. The field names come straight from the AGP 3.0 spec.
*/
struct isoch_data {
u32 maxbw;
u32 n;
u32 y;
u32 l;
u32 rq;
struct agp_3_5_dev *dev;
};
struct pci_dev *td = bridge->dev, *dev;
struct list_head *head = &dev_list->list, *pos;
struct agp_3_5_dev *cur;
struct isoch_data *master, target;
unsigned int cdev = 0;
u32 mnistat, tnistat, tstatus, mcmd;
u16 tnicmd, mnicmd;
u8 mcapndx;
u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
u32 step, rem, rem_isoch, rem_async;
int ret = 0;
/*
* We'll work with an array of isoch_data's (one for each
* device in dev_list) throughout this function.
*/
if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto get_out;
}
/*
* Sort the device list by maxbw. We need to do this because the
* spec suggests that the devices with the smallest requirements
* have their resources allocated first, with all remaining resources
* falling to the device with the largest requirement.
*
* We don't do exactly this; we divide target resources by ndevs
* and split them evenly amongst the AGP 3.0 devices. The remainder
* of each such division is given to the last device, roughly as
* the spec says it should be done.
*
* We can't do this sort when we initially construct the dev_list
* because we don't know until this function whether isochronous
* transfers are enabled and consequently whether maxbw will mean
* anything.
*/
agp_3_5_dev_list_sort(dev_list, ndevs);
pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
/* Extract power-on defaults from the target */
target.maxbw = (tnistat >> 16) & 0xff;
target.n = (tnistat >> 8) & 0xff;
target.y = (tnistat >> 6) & 0x3;
target.l = (tnistat >> 3) & 0x7;
target.rq = (tstatus >> 24) & 0xff;
y_max = target.y;
/*
* Extract power-on defaults for each device in dev_list. Along
* the way, calculate the total isochronous bandwidth required
* by these devices and the largest requested payload size.
*/
list_for_each(pos, head) {
cur = list_entry(pos, struct agp_3_5_dev, list);
dev = cur->dev;
mcapndx = cur->capndx;
pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);
master[cdev].maxbw = (mnistat >> 16) & 0xff;
master[cdev].n = (mnistat >> 8) & 0xff;
master[cdev].y = (mnistat >> 6) & 0x3;
master[cdev].dev = cur;
tot_bw += master[cdev].maxbw;
y_max = max(y_max, master[cdev].y);
cdev++;
}
/* Check if this configuration has any chance of working */
if (tot_bw > target.maxbw) {
dev_err(&td->dev, "isochronous bandwidth required "
"by AGP 3.0 devices exceeds that which is supported by "
"the AGP 3.0 bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
target.y = y_max;
/*
* Write the calculated payload size into the target's NICMD
* register. Doing this directly effects the ISOCH_N value
* in the target's NISTAT register, so we need to do this now
* to get an accurate value for ISOCH_N later.
*/
pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
tnicmd &= ~(0x3 << 6);
tnicmd |= target.y << 6;
pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);
/* Reread the target's ISOCH_N */
pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
target.n = (tnistat >> 8) & 0xff;
/* Calculate the minimum ISOCH_N needed by each master */
for (cdev=0; cdev<ndevs; cdev++) {
master[cdev].y = target.y;
master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
tot_n += master[cdev].n;
}
/* Exit if the minimal ISOCH_N allocation among the masters is more
* than the target can handle. */
if (tot_n > target.n) {
dev_err(&td->dev, "number of isochronous "
"transactions per period required by AGP 3.0 devices "
"exceeds that which is supported by the AGP 3.0 "
"bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
/* Calculate the leftover ISOCH_N capability in the target. We'll give
* this to the hungriest device (as per the spec) */
rem = target.n - tot_n;
/*
* Calculate the minimum isochronous RQ depth needed by each master.
* Along the way, distribute the extra ISOCH_N capability calculated
* above.
*/
for (cdev=0; cdev<ndevs; cdev++) {
/*
* This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
* byte isochronous writes will be broken into 64B pieces.
* This means we need to budget more RQ depth to account for
* these kinds of writes (each isochronous write is actually
* many writes on the AGP bus).
*/
master[cdev].rq = master[cdev].n;
if (master[cdev].y > 0x1)
master[cdev].rq *= (1 << (master[cdev].y - 1));
tot_rq += master[cdev].rq;
}
master[ndevs-1].n += rem;
/* Figure the number of isochronous and asynchronous RQ slots the
* target is providing. */
rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
rq_async = target.rq - rq_isoch;
/* Exit if the minimal RQ needs of the masters exceeds what the target
* can provide. */
if (tot_rq > rq_isoch) {
dev_err(&td->dev, "number of request queue slots "
"required by the isochronous bandwidth requested by "
"AGP 3.0 devices exceeds the number provided by the "
"AGP 3.0 bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
/* Calculate asynchronous RQ capability in the target (per master) as
* well as the total number of leftover isochronous RQ slots. */
step = rq_async / ndevs;
rem_async = step + (rq_async % ndevs);
rem_isoch = rq_isoch - tot_rq;
/* Distribute the extra RQ slots calculated above and write our
* isochronous settings out to the actual devices. */
for (cdev=0; cdev<ndevs; cdev++) {
cur = master[cdev].dev;
dev = cur->dev;
mcapndx = cur->capndx;
master[cdev].rq += (cdev == ndevs - 1)
? (rem_async + rem_isoch) : step;
pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);
mnicmd &= ~(0xff << 8);
mnicmd &= ~(0x3 << 6);
mcmd &= ~(0xff << 24);
mnicmd |= master[cdev].n << 8;
mnicmd |= master[cdev].y << 6;
mcmd |= master[cdev].rq << 24;
pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
}
free_and_exit:
kfree(master);
get_out:
return ret;
}
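/*
 * A worked sketch of the RQ budgeting rule applied in the function
 * above (illustrative helper, not driver code): with ISOCH_Y = 2
 * (128 byte payloads) each isochronous transaction is split into two
 * 64 byte requests on the bus, so the RQ budget grows accordingly.
 */
static inline u32 isoch_rq_budget(u32 n, u32 y)
{
	u32 rq = n;
	if (y > 0x1)
		rq <<= y - 1;	/* e.g. n = 4, y = 2  ->  rq = 8 */
	return rq;
}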
/*
* This function basically allocates request queue slots among the
* AGP 3.0 systems in nonisochronous nodes. The algorithm is
* pretty stupid: divide the total number of RQ slots provided by the
* target by ndevs. Distribute this many slots to each AGP 3.0 device,
* giving any left over slots to the last device in dev_list.
*/
static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
struct agp_3_5_dev *cur;
struct list_head *head = &dev_list->list, *pos;
u32 tstatus, mcmd;
u32 trq, mrq, rem;
unsigned int cdev = 0;
pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);
trq = (tstatus >> 24) & 0xff;
mrq = trq / ndevs;
rem = mrq + (trq % ndevs);
for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
cur = list_entry(pos, struct agp_3_5_dev, list);
pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
mcmd &= ~(0xff << 24);
mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
}
}
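/*
 * Worked example of the split above (illustrative numbers): with
 * trq = 32 slots provided by the target and ndevs = 3, mrq = 10 and
 * rem = 10 + (32 % 3) = 12, so the masters receive 10, 10 and 12
 * request queue slots respectively and the total stays 32.
 */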
/*
* Fully configure and enable an AGP 3.0 host bridge and all the devices
* lying behind it.
*/
int agp_3_5_enable(struct agp_bridge_data *bridge)
{
struct pci_dev *td = bridge->dev, *dev = NULL;
u8 mcapndx;
u32 isoch, arqsz;
u32 tstatus, mstatus, ncapid;
u32 mmajor;
u16 mpstat;
struct agp_3_5_dev *dev_list, *cur;
struct list_head *head, *pos;
unsigned int ndevs = 0;
int ret = 0;
/* Extract some power-on defaults from the target */
pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
isoch = (tstatus >> 17) & 0x1;
if (isoch == 0) /* isoch xfers not available, bail out. */
return -ENODEV;
arqsz = (tstatus >> 13) & 0x7;
/*
* Allocate a head for our AGP 3.5 device list
* (multiple AGP v3 devices are allowed behind a single bridge).
*/
if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto get_out;
}
head = &dev_list->list;
INIT_LIST_HEAD(head);
/* Find all AGP devices, and add them to dev_list. */
for_each_pci_dev(dev) {
mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
if (mcapndx == 0)
continue;
switch ((dev->class >>8) & 0xff00) {
case 0x0600: /* Bridge */
/* Skip bridges. We should call this function for each one. */
continue;
case 0x0001: /* Unclassified device */
/* Don't know what this is, but log it for investigation. */
if (mcapndx != 0) {
dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
pci_name(dev),
dev->vendor, dev->device);
}
continue;
case 0x0300: /* Display controller */
case 0x0400: /* Multimedia controller */
if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto free_and_exit;
}
cur->dev = dev;
pos = &cur->list;
list_add(pos, head);
ndevs++;
continue;
default:
continue;
}
}
/*
* Take an initial pass through the devices lying behind our host
* bridge. Make sure each one is actually an AGP 3.0 device; otherwise
* exit with an error message. Along the way, store the AGP 3.0
* cap_ptr for each device.
*/
list_for_each(pos, head) {
cur = list_entry(pos, struct agp_3_5_dev, list);
dev = cur->dev;
pci_read_config_word(dev, PCI_STATUS, &mpstat);
if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
continue;
pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
if (mcapndx != 0) {
do {
pci_read_config_dword(dev, mcapndx, &ncapid);
if ((ncapid & 0xff) != 2)
mcapndx = (ncapid >> 8) & 0xff;
}
while (((ncapid & 0xff) != 2) && (mcapndx != 0));
}
if (mcapndx == 0) {
dev_err(&td->dev, "woah! Non-AGP device %s on "
"secondary bus of AGP 3.5 bridge!\n",
pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
if (mmajor < 3) {
dev_err(&td->dev, "woah! AGP 2.0 device %s on "
"secondary bus of AGP 3.5 bridge operating "
"with AGP 3.0 electricals!\n", pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
cur->capndx = mcapndx;
pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);
if (((mstatus >> 3) & 0x1) == 0) {
dev_err(&td->dev, "woah! AGP 3.x device %s not "
"operating in AGP 3.x mode on secondary bus "
"of AGP 3.5 bridge operating with AGP 3.0 "
"electricals!\n", pci_name(dev));
ret = -ENODEV;
goto free_and_exit;
}
}
/*
* Call functions to divide target resources amongst the AGP 3.0
* masters. This process is dramatically different depending on
* whether isochronous transfers are supported.
*/
if (isoch) {
ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
if (ret) {
dev_info(&td->dev, "something bad happened setting "
"up isochronous xfers; falling back to "
"non-isochronous xfer mode\n");
} else {
goto free_and_exit;
}
}
agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);
free_and_exit:
/* Be sure to free the dev_list */
for (pos=head->next; pos!=head; ) {
cur = list_entry(pos, struct agp_3_5_dev, list);
pos = pos->next;
kfree(cur);
}
kfree(dev_list);
get_out:
return ret;
}

476
drivers/char/agp/nvidia-agp.c Normal file
View file

@ -0,0 +1,476 @@
/*
* Nvidia AGPGART routines.
* Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
* to work in 2.5 by Dave Jones <davej@redhat.com>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/page-flags.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include "agp.h"
/* NVIDIA registers */
#define NVIDIA_0_APSIZE 0x80
#define NVIDIA_1_WBC 0xf0
#define NVIDIA_2_GARTCTRL 0xd0
#define NVIDIA_2_APBASE 0xd8
#define NVIDIA_2_APLIMIT 0xdc
#define NVIDIA_2_ATTBASE(i) (0xe0 + (i) * 4)
#define NVIDIA_3_APBASE 0x50
#define NVIDIA_3_APLIMIT 0x54
static struct _nvidia_private {
struct pci_dev *dev_1;
struct pci_dev *dev_2;
struct pci_dev *dev_3;
volatile u32 __iomem *aperture;
int num_active_entries;
off_t pg_offset;
u32 wbc_mask;
} nvidia_private;
static int nvidia_fetch_size(void)
{
int i;
u8 size_value;
struct aper_size_info_8 *values;
pci_read_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE, &size_value);
size_value &= 0x0f;
values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (size_value == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
#define SYSCFG 0xC0010010
#define IORR_BASE0 0xC0010016
#define IORR_MASK0 0xC0010017
#define AMD_K7_NUM_IORR 2
static int nvidia_init_iorr(u32 base, u32 size)
{
u32 base_hi, base_lo;
u32 mask_hi, mask_lo;
u32 sys_hi, sys_lo;
u32 iorr_addr, free_iorr_addr;
/* Find the iorr that is already used for the base */
/* If not found, determine the uppermost available iorr */
free_iorr_addr = AMD_K7_NUM_IORR;
for (iorr_addr = 0; iorr_addr < AMD_K7_NUM_IORR; iorr_addr++) {
rdmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
rdmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
if ((base_lo & 0xfffff000) == (base & 0xfffff000))
break;
if ((mask_lo & 0x00000800) == 0)
free_iorr_addr = iorr_addr;
}
if (iorr_addr >= AMD_K7_NUM_IORR) {
iorr_addr = free_iorr_addr;
if (iorr_addr >= AMD_K7_NUM_IORR)
return -EINVAL;
}
base_hi = 0x0;
base_lo = (base & ~0xfff) | 0x18;
mask_hi = 0xf;
mask_lo = ((~(size - 1)) & 0xfffff000) | 0x800;
wrmsr(IORR_BASE0 + 2 * iorr_addr, base_lo, base_hi);
wrmsr(IORR_MASK0 + 2 * iorr_addr, mask_lo, mask_hi);
rdmsr(SYSCFG, sys_lo, sys_hi);
sys_lo |= 0x00100000;
wrmsr(SYSCFG, sys_lo, sys_hi);
return 0;
}
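/*
 * Illustration of the IORR pair programmed above, assuming a 64 MB
 * aperture at bus address 0xd0000000 (example values only):
 *   base_lo = (0xd0000000 & ~0xfff) | 0x18            = 0xd0000018
 *   mask_lo = (~(0x4000000 - 1) & 0xfffff000) | 0x800 = 0xfc000800
 * 0x18 sets the enable bits in the base register and 0x800 the valid
 * bit in the mask, exactly as the code above does; the mask covers
 * the naturally aligned 64 MB range.
 */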
static int nvidia_configure(void)
{
int i, rc, num_dirs;
u32 apbase, aplimit;
phys_addr_t apbase_phys;
struct aper_size_info_8 *current_size;
u32 temp;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
current_size->size_value);
/* address to map to */
apbase = pci_bus_address(agp_bridge->dev, AGP_APERTURE_BAR);
agp_bridge->gart_bus_addr = apbase;
aplimit = apbase + (current_size->size * 1024 * 1024) - 1;
pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APBASE, apbase);
pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_APLIMIT, aplimit);
pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APBASE, apbase);
pci_write_config_dword(nvidia_private.dev_3, NVIDIA_3_APLIMIT, aplimit);
if (0 != (rc = nvidia_init_iorr(apbase, current_size->size * 1024 * 1024)))
return rc;
/* directory size is 64k */
num_dirs = current_size->size / 64;
nvidia_private.num_active_entries = current_size->num_entries;
nvidia_private.pg_offset = 0;
if (num_dirs == 0) {
num_dirs = 1;
nvidia_private.num_active_entries /= (64 / current_size->size);
nvidia_private.pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
~(current_size->size * 1024 * 1024 - 1)) / PAGE_SIZE;
}
/* attbase */
for (i = 0; i < 8; i++) {
pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_ATTBASE(i),
(agp_bridge->gatt_bus_addr + (i % num_dirs) * 64 * 1024) | 1);
}
/* gtlb control */
pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp | 0x11);
/* gart control */
pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp | 0x100);
/* map aperture */
apbase_phys = pci_resource_start(agp_bridge->dev, AGP_APERTURE_BAR);
nvidia_private.aperture =
(volatile u32 __iomem *) ioremap(apbase_phys, 33 * PAGE_SIZE);
if (!nvidia_private.aperture)
return -ENOMEM;
return 0;
}
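/*
 * Worked example of the <64M branch above (illustrative values): for
 * a 32 MB aperture at bus address 0xe2000000, num_dirs computes to 0,
 * so one 64k directory is used, num_active_entries is divided by
 * 64/32 = 2, and
 *   pg_offset = (0xe2000000 & (64M - 1) & ~(32M - 1)) / PAGE_SIZE
 *             = 0x02000000 / 0x1000 = 8192 entries on x86.
 */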
static void nvidia_cleanup(void)
{
struct aper_size_info_8 *previous_size;
u32 temp;
/* gart control */
pci_read_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, &temp);
pci_write_config_dword(agp_bridge->dev, NVIDIA_0_APSIZE, temp & ~(0x100));
/* gtlb control */
pci_read_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, &temp);
pci_write_config_dword(nvidia_private.dev_2, NVIDIA_2_GARTCTRL, temp & ~(0x11));
/* unmap aperture */
iounmap((void __iomem *) nvidia_private.aperture);
/* restore previous aperture size */
previous_size = A_SIZE_8(agp_bridge->previous_size);
pci_write_config_byte(agp_bridge->dev, NVIDIA_0_APSIZE,
previous_size->size_value);
/* restore iorr for previous aperture size */
nvidia_init_iorr(agp_bridge->gart_bus_addr,
previous_size->size * 1024 * 1024);
}
/*
* Note we can't use the generic routines, even though they are 99% the same.
* Aperture sizes <64M still require a full 64k GART directory, but
* only use the portion of the TLB entries that corresponds to the
* aperture's alignment inside the surrounding 64M block.
*/
extern int agp_memory_reserved;
static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
int i, j;
int mask_type;
mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
if (mask_type != 0 || type != mem->type)
return -EINVAL;
if (mem->page_count == 0)
return 0;
if ((pg_start + mem->page_count) >
(nvidia_private.num_active_entries - agp_memory_reserved/PAGE_SIZE))
return -EINVAL;
for (j = pg_start; j < (pg_start + mem->page_count); j++) {
if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j)))
return -EBUSY;
}
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]), mask_type),
agp_bridge->gatt_table+nvidia_private.pg_offset+j);
}
/* PCI Posting. */
readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j - 1);
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
int i;
int mask_type;
mask_type = agp_generic_type_to_mask_type(mem->bridge, type);
if (mask_type != 0 || type != mem->type)
return -EINVAL;
if (mem->page_count == 0)
return 0;
for (i = pg_start; i < (mem->page_count + pg_start); i++)
writel(agp_bridge->scratch_page, agp_bridge->gatt_table+nvidia_private.pg_offset+i);
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static void nvidia_tlbflush(struct agp_memory *mem)
{
unsigned long end;
u32 wbc_reg, temp;
int i;
/* flush chipset */
if (nvidia_private.wbc_mask) {
pci_read_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, &wbc_reg);
wbc_reg |= nvidia_private.wbc_mask;
pci_write_config_dword(nvidia_private.dev_1, NVIDIA_1_WBC, wbc_reg);
end = jiffies + 3*HZ;
do {
pci_read_config_dword(nvidia_private.dev_1,
NVIDIA_1_WBC, &wbc_reg);
if (time_before_eq(end, jiffies)) {
printk(KERN_ERR PFX
"TLB flush took more than 3 seconds.\n");
}
} while (wbc_reg & nvidia_private.wbc_mask);
}
/* flush TLB entries */
for (i = 0; i < 32 + 1; i++)
temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
for (i = 0; i < 32 + 1; i++)
temp = readl(nvidia_private.aperture+(i * PAGE_SIZE / sizeof(u32)));
}
static const struct aper_size_info_8 nvidia_generic_sizes[5] =
{
{512, 131072, 7, 0},
{256, 65536, 6, 8},
{128, 32768, 5, 12},
{64, 16384, 4, 14},
/* The 32M mode still requires a 64k gatt */
{32, 16384, 4, 15}
};
static const struct gatt_mask nvidia_generic_masks[] =
{
{ .mask = 1, .type = 0}
};
static const struct agp_bridge_driver nvidia_driver = {
.owner = THIS_MODULE,
.aperture_sizes = nvidia_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 5,
.needs_scratch_page = true,
.configure = nvidia_configure,
.fetch_size = nvidia_fetch_size,
.cleanup = nvidia_cleanup,
.tlb_flush = nvidia_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = nvidia_generic_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = nvidia_insert_memory,
.remove_memory = nvidia_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int agp_nvidia_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
nvidia_private.dev_1 =
pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 1));
nvidia_private.dev_2 =
pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(0, 2));
nvidia_private.dev_3 =
pci_get_bus_and_slot((unsigned int)pdev->bus->number, PCI_DEVFN(30, 0));
if (!nvidia_private.dev_1 || !nvidia_private.dev_2 || !nvidia_private.dev_3) {
printk(KERN_INFO PFX "Detected an NVIDIA nForce/nForce2 "
"chipset, but could not find the secondary devices.\n");
return -ENODEV;
}
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
switch (pdev->device) {
case PCI_DEVICE_ID_NVIDIA_NFORCE:
printk(KERN_INFO PFX "Detected NVIDIA nForce chipset\n");
nvidia_private.wbc_mask = 0x00010000;
break;
case PCI_DEVICE_ID_NVIDIA_NFORCE2:
printk(KERN_INFO PFX "Detected NVIDIA nForce2 chipset\n");
nvidia_private.wbc_mask = 0x80000000;
break;
default:
printk(KERN_ERR PFX "Unsupported NVIDIA chipset (device id: %04x)\n",
pdev->device);
return -ENODEV;
}
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &nvidia_driver;
bridge->dev_private_data = &nvidia_private,
bridge->dev = pdev;
bridge->capndx = cap_ptr;
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
&bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_nvidia_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
#ifdef CONFIG_PM
static int agp_nvidia_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
static int agp_nvidia_resume(struct pci_dev *pdev)
{
/* set power state 0 and restore PCI space */
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/* reconfigure AGP hardware again */
nvidia_configure();
return 0;
}
#endif
static struct pci_device_id agp_nvidia_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_DEVICE_ID_NVIDIA_NFORCE,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_DEVICE_ID_NVIDIA_NFORCE2,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
static struct pci_driver agp_nvidia_pci_driver = {
.name = "agpgart-nvidia",
.id_table = agp_nvidia_pci_table,
.probe = agp_nvidia_probe,
.remove = agp_nvidia_remove,
#ifdef CONFIG_PM
.suspend = agp_nvidia_suspend,
.resume = agp_nvidia_resume,
#endif
};
static int __init agp_nvidia_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_nvidia_pci_driver);
}
static void __exit agp_nvidia_cleanup(void)
{
pci_unregister_driver(&agp_nvidia_pci_driver);
pci_dev_put(nvidia_private.dev_1);
pci_dev_put(nvidia_private.dev_2);
pci_dev_put(nvidia_private.dev_3);
}
module_init(agp_nvidia_init);
module_exit(agp_nvidia_cleanup);
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("NVIDIA Corporation");

426
drivers/char/agp/parisc-agp.c Normal file
View file

@ -0,0 +1,426 @@
/*
* HP Quicksilver AGP GART routines
*
* Copyright (c) 2006, Kyle McMartin <kyle@parisc-linux.org>
*
* Based on drivers/char/agpgart/hp-agp.c which is
* (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/klist.h>
#include <linux/agp_backend.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <asm/parisc-device.h>
#include <asm/ropes.h>
#include "agp.h"
#define DRVNAME "quicksilver"
#define DRVPFX DRVNAME ": "
#define AGP8X_MODE_BIT 3
#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
static unsigned long
parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
int type);
static struct _parisc_agp_info {
void __iomem *ioc_regs;
void __iomem *lba_regs;
int lba_cap_offset;
u64 *gatt;
u64 gatt_entries;
u64 gart_base;
u64 gart_size;
int io_page_size;
int io_pages_per_kpage;
} parisc_agp_info;
static struct gatt_mask parisc_agp_masks[] =
{
{
.mask = SBA_PDIR_VALID_BIT,
.type = 0
}
};
static struct aper_size_info_fixed parisc_agp_sizes[] =
{
{0, 0, 0}, /* filled in by parisc_agp_fetch_size() */
};
static int
parisc_agp_fetch_size(void)
{
int size;
size = parisc_agp_info.gart_size / MB(1);
parisc_agp_sizes[0].size = size;
agp_bridge->current_size = (void *) &parisc_agp_sizes[0];
return size;
}
static int
parisc_agp_configure(void)
{
struct _parisc_agp_info *info = &parisc_agp_info;
agp_bridge->gart_bus_addr = info->gart_base;
agp_bridge->capndx = info->lba_cap_offset;
agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);
return 0;
}
static void
parisc_agp_tlbflush(struct agp_memory *mem)
{
struct _parisc_agp_info *info = &parisc_agp_info;
writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
readq(info->ioc_regs+IOC_PCOM); /* flush */
}
static int
parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int i;
for (i = 0; i < info->gatt_entries; i++) {
info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
}
return 0;
}
static int
parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
struct _parisc_agp_info *info = &parisc_agp_info;
info->gatt[0] = SBA_AGPGART_COOKIE;
return 0;
}
static int
parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int i, k;
off_t j, io_pg_start;
int io_pg_count;
if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
io_pg_start = info->io_pages_per_kpage * pg_start;
io_pg_count = info->io_pages_per_kpage * mem->page_count;
if ((io_pg_start + io_pg_count) > info->gatt_entries) {
return -EINVAL;
}
j = io_pg_start;
while (j < (io_pg_start + io_pg_count)) {
if (info->gatt[j])
return -EBUSY;
j++;
}
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
unsigned long paddr;
paddr = page_to_phys(mem->pages[i]);
for (k = 0;
k < info->io_pages_per_kpage;
k++, j++, paddr += info->io_page_size) {
info->gatt[j] =
parisc_agp_mask_memory(agp_bridge,
paddr, type);
}
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
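/*
 * A sketch of the expansion performed above (assumed example sizes):
 * with a 16 KB kernel PAGE_SIZE and a 4 KB IO page,
 * io_pages_per_kpage = 4, so inserting kernel page pg_start = 3 fills
 * GATT entries 12..15, each advancing paddr by io_page_size.
 */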
static int
parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int i, io_pg_start, io_pg_count;
if (type != mem->type ||
agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
io_pg_start = info->io_pages_per_kpage * pg_start;
io_pg_count = info->io_pages_per_kpage * mem->page_count;
for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
info->gatt[i] = agp_bridge->scratch_page;
}
agp_bridge->driver->tlb_flush(mem);
return 0;
}
static unsigned long
parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
int type)
{
return SBA_PDIR_VALID_BIT | addr;
}
static void
parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u32 command;
command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS);
command = agp_collect_device_status(bridge, mode, command);
command |= 0x00000100;
writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND);
agp_device_command(command, (mode & AGP8X_MODE) != 0);
}
static const struct agp_bridge_driver parisc_agp_driver = {
.owner = THIS_MODULE,
.size_type = FIXED_APER_SIZE,
.configure = parisc_agp_configure,
.fetch_size = parisc_agp_fetch_size,
.tlb_flush = parisc_agp_tlbflush,
.mask_memory = parisc_agp_mask_memory,
.masks = parisc_agp_masks,
.agp_enable = parisc_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = parisc_agp_create_gatt_table,
.free_gatt_table = parisc_agp_free_gatt_table,
.insert_memory = parisc_agp_insert_memory,
.remove_memory = parisc_agp_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
};
static int __init
agp_ioc_init(void __iomem *ioc_regs)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u64 iova_base, *io_pdir, io_tlb_ps;
int io_tlb_shift;
printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
info->ioc_regs = ioc_regs;
io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG);
switch (io_tlb_ps) {
case 0: io_tlb_shift = 12; break;
case 1: io_tlb_shift = 13; break;
case 2: io_tlb_shift = 14; break;
case 3: io_tlb_shift = 16; break;
default:
printk(KERN_ERR DRVPFX "Invalid IOTLB page size "
"configuration 0x%llx\n", io_tlb_ps);
info->gatt = NULL;
info->gatt_entries = 0;
return -ENODEV;
}
info->io_page_size = 1 << io_tlb_shift;
info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;
iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1;
info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE;
info->gart_size = PLUTO_GART_SIZE;
info->gatt_entries = info->gart_size / info->io_page_size;
io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE));
info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT];
if (info->gatt[0] != SBA_AGPGART_COOKIE) {
info->gatt = NULL;
info->gatt_entries = 0;
printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; "
"GART disabled\n");
return -ENODEV;
}
return 0;
}
static int
lba_find_capability(int cap)
{
struct _parisc_agp_info *info = &parisc_agp_info;
u16 status;
u8 pos, id;
int ttl = 48;
status = readw(info->lba_regs + PCI_STATUS);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
pos = readb(info->lba_regs + PCI_CAPABILITY_LIST);
while (ttl-- && pos >= 0x40) {
pos &= ~3;
id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT);
}
return 0;
}
static int __init
agp_lba_init(void __iomem *lba_hpa)
{
struct _parisc_agp_info *info = &parisc_agp_info;
int cap;
info->lba_regs = lba_hpa;
info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);
cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
if (cap != PCI_CAP_ID_AGP) {
printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
cap, info->lba_cap_offset);
return -ENODEV;
}
return 0;
}
static int __init
parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
{
struct pci_dev *fake_bridge_dev = NULL;
struct agp_bridge_data *bridge;
int error = 0;
fake_bridge_dev = pci_alloc_dev(NULL);
if (!fake_bridge_dev) {
error = -ENOMEM;
goto fail;
}
error = agp_ioc_init(ioc_hpa);
if (error)
goto fail;
error = agp_lba_init(lba_hpa);
if (error)
goto fail;
bridge = agp_alloc_bridge();
if (!bridge) {
error = -ENOMEM;
goto fail;
}
bridge->driver = &parisc_agp_driver;
fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
bridge->dev = fake_bridge_dev;
error = agp_add_bridge(bridge);
if (error)
goto fail;
return 0;
fail:
kfree(fake_bridge_dev);
return error;
}
static int
find_quicksilver(struct device *dev, void *data)
{
struct parisc_device **lba = data;
struct parisc_device *padev = to_parisc_device(dev);
if (IS_QUICKSILVER(padev))
*lba = padev;
return 0;
}
static int
parisc_agp_init(void)
{
extern struct sba_device *sba_list;
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;
if (!sba_list)
goto out;
/* Find our parent Pluto */
sba = sba_list->dev;
if (!IS_PLUTO(sba)) {
printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
goto out;
}
/* Now search our Pluto for our precious AGP device... */
device_for_each_child(&sba->dev, &lba, find_quicksilver);
if (!lba) {
printk(KERN_INFO DRVPFX "No AGP devices found.\n");
goto out;
}
lbadev = parisc_get_drvdata(lba);
/* w00t, let's go find our cookies... */
parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
return 0;
out:
return err;
}
module_init(parisc_agp_init);
MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
MODULE_LICENSE("GPL");

338
drivers/char/agp/sgi-agp.c Normal file
View file

@ -0,0 +1,338 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* SGI TIOCA AGPGART routines.
*
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/agp_backend.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
#include "agp.h"
extern int agp_memory_reserved;
extern uint32_t tioca_gart_found;
extern struct list_head tioca_list;
static struct agp_bridge_data **sgi_tioca_agp_bridges;
/*
* The aperture size and related information are set up at TIOCA init time.
* Values for this table will be extracted and filled in at
* sgi_tioca_fetch_size() time.
*/
static struct aper_size_info_fixed sgi_tioca_sizes[] = {
{0, 0, 0},
};
static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
{
struct page *page;
int nid;
struct tioca_kernel *info =
(struct tioca_kernel *)bridge->dev_private_data;
nid = info->ca_closest_node;
page = alloc_pages_node(nid, GFP_KERNEL, 0);
if (!page)
return NULL;
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
return page;
}
/*
* Flush GART TLBs. We cannot flush selectively based on memory, so the
* mem arg is ignored.
*/
static void sgi_tioca_tlbflush(struct agp_memory *mem)
{
tioca_tlbflush(mem->bridge->dev_private_data);
}
/*
* Given an address of a host physical page, turn it into a valid gart
* entry.
*/
static unsigned long
sgi_tioca_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
int type)
{
return tioca_physpage_to_gart(addr);
}
static void sgi_tioca_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
tioca_fastwrite_enable(bridge->dev_private_data);
}
/*
* sgi_tioca_configure() doesn't have anything to do since the base CA driver
* has already set up the GART.
*/
static int sgi_tioca_configure(void)
{
return 0;
}
/*
* Determine the gfx aperture size. This has already been established by
* the CA driver init, so we just need to set the agp_bridge values
* accordingly.
*/
static int sgi_tioca_fetch_size(void)
{
struct tioca_kernel *info =
(struct tioca_kernel *)agp_bridge->dev_private_data;
sgi_tioca_sizes[0].size = info->ca_gfxap_size / MB(1);
sgi_tioca_sizes[0].num_entries = info->ca_gfxgart_entries;
return sgi_tioca_sizes[0].size;
}
static int sgi_tioca_create_gatt_table(struct agp_bridge_data *bridge)
{
struct tioca_kernel *info =
(struct tioca_kernel *)bridge->dev_private_data;
bridge->gatt_table_real = (u32 *) info->ca_gfxgart;
bridge->gatt_table = bridge->gatt_table_real;
bridge->gatt_bus_addr = info->ca_gfxgart_base;
return 0;
}
static int sgi_tioca_free_gatt_table(struct agp_bridge_data *bridge)
{
return 0;
}
static int sgi_tioca_insert_memory(struct agp_memory *mem, off_t pg_start,
int type)
{
int num_entries;
size_t i;
off_t j;
void *temp;
struct agp_bridge_data *bridge;
u64 *table;
bridge = mem->bridge;
if (!bridge)
return -EINVAL;
table = (u64 *)bridge->gatt_table;
temp = bridge->current_size;
switch (bridge->driver->size_type) {
case U8_APER_SIZE:
num_entries = A_SIZE_8(temp)->num_entries;
break;
case U16_APER_SIZE:
num_entries = A_SIZE_16(temp)->num_entries;
break;
case U32_APER_SIZE:
num_entries = A_SIZE_32(temp)->num_entries;
break;
case FIXED_APER_SIZE:
num_entries = A_SIZE_FIX(temp)->num_entries;
break;
case LVL2_APER_SIZE:
return -EINVAL;
default:
num_entries = 0;
break;
}
num_entries -= agp_memory_reserved / PAGE_SIZE;
if (num_entries < 0)
num_entries = 0;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
j = pg_start;
while (j < (pg_start + mem->page_count)) {
if (table[j])
return -EBUSY;
j++;
}
if (!mem->is_flushed) {
bridge->driver->cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
table[j] =
bridge->driver->mask_memory(bridge,
page_to_phys(mem->pages[i]),
mem->type);
}
bridge->driver->tlb_flush(mem);
return 0;
}
static int sgi_tioca_remove_memory(struct agp_memory *mem, off_t pg_start,
int type)
{
size_t i;
struct agp_bridge_data *bridge;
u64 *table;
bridge = mem->bridge;
if (!bridge)
return -EINVAL;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
table = (u64 *)bridge->gatt_table;
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
table[i] = 0;
}
bridge->driver->tlb_flush(mem);
return 0;
}
static void sgi_tioca_cache_flush(void)
{
}
/*
* Cleanup. Nothing to do as the CA driver owns the GART.
*/
static void sgi_tioca_cleanup(void)
{
}
static struct agp_bridge_data *sgi_tioca_find_bridge(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge;
list_for_each_entry(bridge, &agp_bridges, list) {
if (bridge->dev->bus == pdev->bus)
break;
}
return bridge;
}
const struct agp_bridge_driver sgi_tioca_driver = {
.owner = THIS_MODULE,
.size_type = U16_APER_SIZE,
.configure = sgi_tioca_configure,
.fetch_size = sgi_tioca_fetch_size,
.cleanup = sgi_tioca_cleanup,
.tlb_flush = sgi_tioca_tlbflush,
.mask_memory = sgi_tioca_mask_memory,
.agp_enable = sgi_tioca_agp_enable,
.cache_flush = sgi_tioca_cache_flush,
.create_gatt_table = sgi_tioca_create_gatt_table,
.free_gatt_table = sgi_tioca_free_gatt_table,
.insert_memory = sgi_tioca_insert_memory,
.remove_memory = sgi_tioca_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = sgi_tioca_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
.needs_scratch_page = false,
.num_aperture_sizes = 1,
};
static int agp_sgi_init(void)
{
unsigned int j;
struct tioca_kernel *info;
struct pci_dev *pdev = NULL;
if (tioca_gart_found)
printk(KERN_INFO PFX "SGI TIO CA GART driver initialized.\n");
else
return 0;
sgi_tioca_agp_bridges = kmalloc(tioca_gart_found *
sizeof(struct agp_bridge_data *),
GFP_KERNEL);
if (!sgi_tioca_agp_bridges)
return -ENOMEM;
j = 0;
list_for_each_entry(info, &tioca_list, ca_list) {
if (list_empty(info->ca_devices))
continue;
list_for_each_entry(pdev, info->ca_devices, bus_list) {
u8 cap_ptr;
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
continue;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
continue;
}
sgi_tioca_agp_bridges[j] = agp_alloc_bridge();
printk(KERN_INFO PFX "bridge %d = 0x%p\n", j,
sgi_tioca_agp_bridges[j]);
if (sgi_tioca_agp_bridges[j]) {
sgi_tioca_agp_bridges[j]->dev = pdev;
sgi_tioca_agp_bridges[j]->dev_private_data = info;
sgi_tioca_agp_bridges[j]->driver = &sgi_tioca_driver;
sgi_tioca_agp_bridges[j]->gart_bus_addr =
info->ca_gfxap_base;
sgi_tioca_agp_bridges[j]->mode = (0x7D << 24) | /* 126 requests */
(0x1 << 9) | /* SBA supported */
(0x1 << 5) | /* 64-bit addresses supported */
(0x1 << 4) | /* FW supported */
(0x1 << 3) | /* AGP 3.0 mode */
0x2; /* 8x transfer only */
sgi_tioca_agp_bridges[j]->current_size =
sgi_tioca_agp_bridges[j]->previous_size =
(void *)&sgi_tioca_sizes[0];
agp_add_bridge(sgi_tioca_agp_bridges[j]);
}
j++;
}
agp_find_bridge = &sgi_tioca_find_bridge;
return 0;
}
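/*
 * Decoding the mode word composed above, following its inline
 * comments: 0x7d in bits 31:24 is the request-depth field (read as
 * 126 requests, i.e. field value plus one), bit 9 advertises sideband
 * addressing, bit 5 64-bit addressing, bit 4 fast writes, bit 3
 * AGP 3.0 signalling, and the rate value 0x2 selects 8x transfers in
 * AGP 3.0 encoding.
 */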
static void agp_sgi_cleanup(void)
{
kfree(sgi_tioca_agp_bridges);
sgi_tioca_agp_bridges = NULL;
}
module_init(agp_sgi_init);
module_exit(agp_sgi_cleanup);
MODULE_LICENSE("GPL and additional rights");

452
drivers/char/agp/sis-agp.c Normal file
View file

@ -0,0 +1,452 @@
/*
* SiS AGPGART routines.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include "agp.h"
#define SIS_ATTBASE 0x90
#define SIS_APSIZE 0x94
#define SIS_TLBCNTRL 0x97
#define SIS_TLBFLUSH 0x98
#define PCI_DEVICE_ID_SI_662 0x0662
#define PCI_DEVICE_ID_SI_671 0x0671
static bool agp_sis_force_delay = false;
static int agp_sis_agp_spec = -1;
static int sis_fetch_size(void)
{
u8 temp_size;
int i;
struct aper_size_info_8 *values;
pci_read_config_byte(agp_bridge->dev, SIS_APSIZE, &temp_size);
values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if ((temp_size == values[i].size_value) ||
((temp_size & ~(0x07)) ==
(values[i].size_value & ~(0x07)))) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
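/*
 * Why the ~0x07 masking above matters (illustrative): APSIZE
 * read-backs may differ from the table value in the low three bits,
 * which this driver treats as don't-care when matching. For the
 * 256 MB entry (size_value 99 == 0x63) a read-back of 0x67 still
 * matches, since (0x67 & ~0x07) == (0x63 & ~0x07) == 0x60.
 */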
static void sis_tlbflush(struct agp_memory *mem)
{
pci_write_config_byte(agp_bridge->dev, SIS_TLBFLUSH, 0x02);
}
static int sis_configure(void)
{
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
pci_write_config_byte(agp_bridge->dev, SIS_TLBCNTRL, 0x05);
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
pci_write_config_dword(agp_bridge->dev, SIS_ATTBASE,
agp_bridge->gatt_bus_addr);
pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
current_size->size_value);
return 0;
}
static void sis_cleanup(void)
{
struct aper_size_info_8 *previous_size;
previous_size = A_SIZE_8(agp_bridge->previous_size);
pci_write_config_byte(agp_bridge->dev, SIS_APSIZE,
(previous_size->size_value & ~(0x03)));
}
static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
{
struct pci_dev *device = NULL;
u32 command;
int rate;
dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
agp_bridge->major_version, agp_bridge->minor_version);
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
command = agp_collect_device_status(bridge, mode, command);
command |= AGPSTAT_AGP_ENABLE;
rate = (command & 0x7) << 2;
for_each_pci_dev(device) {
u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
if (!agp)
continue;
dev_info(&agp_bridge->dev->dev, "putting AGP V3 device at %s into %dx mode\n",
pci_name(device), rate);
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
/*
* Weird: on some SiS chipsets any rate change in the target's
* command register triggers a 5 ms screwup during which the master
* cannot be configured
*/
if (device->device == bridge->dev->device) {
dev_info(&agp_bridge->dev->dev, "SiS delay workaround: giving bridge time to recover\n");
msleep(10);
}
}
}
static const struct aper_size_info_8 sis_generic_sizes[7] =
{
{256, 65536, 6, 99},
{128, 32768, 5, 83},
{64, 16384, 4, 67},
{32, 8192, 3, 51},
{16, 4096, 2, 35},
{8, 2048, 1, 19},
{4, 1024, 0, 3}
};
static struct agp_bridge_driver sis_driver = {
.owner = THIS_MODULE,
.aperture_sizes = sis_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.needs_scratch_page = true,
.configure = sis_configure,
.fetch_size = sis_fetch_size,
.cleanup = sis_cleanup,
.tlb_flush = sis_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
// chipsets that require the 'delay hack'
static int sis_broken_chipsets[] = {
PCI_DEVICE_ID_SI_648,
PCI_DEVICE_ID_SI_746,
0 // terminator
};
static void sis_get_driver(struct agp_bridge_data *bridge)
{
int i;
for (i=0; sis_broken_chipsets[i]!=0; ++i)
if (bridge->dev->device==sis_broken_chipsets[i])
break;
if (sis_broken_chipsets[i] || agp_sis_force_delay)
sis_driver.agp_enable=sis_delayed_enable;
// SiS chipsets that report less than AGP 3.5
// are not actually fully AGP 3.0 compliant
if ((agp_bridge->major_version == 3 && agp_bridge->minor_version >= 5
&& agp_sis_agp_spec!=0) || agp_sis_agp_spec==1) {
sis_driver.aperture_sizes = agp3_generic_sizes;
sis_driver.size_type = U16_APER_SIZE;
sis_driver.num_aperture_sizes = AGP_GENERIC_SIZES_ENTRIES;
sis_driver.configure = agp3_generic_configure;
sis_driver.fetch_size = agp3_generic_fetch_size;
sis_driver.cleanup = agp3_generic_cleanup;
sis_driver.tlb_flush = agp3_generic_tlbflush;
}
}
static int agp_sis_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
u8 cap_ptr;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
dev_info(&pdev->dev, "SiS chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &sis_driver;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
get_agp_version(bridge);
/* Fill in the mode register */
pci_read_config_dword(pdev, bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
sis_get_driver(bridge);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_sis_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
#ifdef CONFIG_PM
static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int agp_sis_resume(struct pci_dev *pdev)
{
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
return sis_driver.configure();
}
#endif /* CONFIG_PM */
static struct pci_device_id agp_sis_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_5591,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_530,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_540,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_550,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_620,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_630,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_635,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_645,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_646,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_648,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_650,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_651,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_655,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_661,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_662,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_671,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_730,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_735,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_740,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_741,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_745,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SI,
.device = PCI_DEVICE_ID_SI_746,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
static struct pci_driver agp_sis_pci_driver = {
.name = "agpgart-sis",
.id_table = agp_sis_pci_table,
.probe = agp_sis_probe,
.remove = agp_sis_remove,
#ifdef CONFIG_PM
.suspend = agp_sis_suspend,
.resume = agp_sis_resume,
#endif
};
static int __init agp_sis_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_sis_pci_driver);
}
static void __exit agp_sis_cleanup(void)
{
pci_unregister_driver(&agp_sis_pci_driver);
}
module_init(agp_sis_init);
module_exit(agp_sis_cleanup);
module_param(agp_sis_force_delay, bool, 0);
MODULE_PARM_DESC(agp_sis_force_delay,"forces sis delay hack");
module_param(agp_sis_agp_spec, int, 0);
MODULE_PARM_DESC(agp_sis_agp_spec,"0=force sis init, 1=force generic agp3 init, default: autodetect");
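/*
* Example usage (a sketch, assuming the driver is built as the sis-agp
* module as in the mainline kernel tree):
*
*   modprobe sis-agp agp_sis_agp_spec=1   (force the generic AGP3 init path)
*/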
MODULE_LICENSE("GPL and additional rights");

569
drivers/char/agp/sworks-agp.c Normal file

@ -0,0 +1,569 @@
/*
* Serverworks AGPGART routines.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/agp_backend.h>
#include "agp.h"
#define SVWRKS_COMMAND 0x04
#define SVWRKS_APSIZE 0x10
#define SVWRKS_MMBASE 0x14
#define SVWRKS_CACHING 0x4b
#define SVWRKS_AGP_ENABLE 0x60
#define SVWRKS_FEATURE 0x68
#define SVWRKS_SIZE_MASK 0xfe000000
/* Memory mapped registers */
#define SVWRKS_GART_CACHE 0x02
#define SVWRKS_GATTBASE 0x04
#define SVWRKS_TLBFLUSH 0x10
#define SVWRKS_POSTFLUSH 0x14
#define SVWRKS_DIRFLUSH 0x0c
struct serverworks_page_map {
unsigned long *real;
unsigned long __iomem *remapped;
};
static struct _serverworks_private {
struct pci_dev *svrwrks_dev; /* device one */
volatile u8 __iomem *registers;
struct serverworks_page_map **gatt_pages;
int num_tables;
struct serverworks_page_map scratch_dir;
int gart_addr_ofs;
int mm_addr_ofs;
} serverworks_private;
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
int i;
page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
if (page_map->real == NULL) {
return -ENOMEM;
}
set_memory_uc((unsigned long)page_map->real, 1);
page_map->remapped = page_map->real;
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
writel(agp_bridge->scratch_page, page_map->remapped+i);
/* Red Pen: Everyone else does a PCI posting flush here */
return 0;
}
static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
set_memory_wb((unsigned long)page_map->real, 1);
free_page((unsigned long) page_map->real);
}
static void serverworks_free_gatt_pages(void)
{
int i;
struct serverworks_page_map **tables;
struct serverworks_page_map *entry;
tables = serverworks_private.gatt_pages;
for (i = 0; i < serverworks_private.num_tables; i++) {
entry = tables[i];
if (entry != NULL) {
if (entry->real != NULL) {
serverworks_free_page_map(entry);
}
kfree(entry);
}
}
kfree(tables);
}
static int serverworks_create_gatt_pages(int nr_tables)
{
struct serverworks_page_map **tables;
struct serverworks_page_map *entry;
int retval = 0;
int i;
tables = kzalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
GFP_KERNEL);
if (tables == NULL)
return -ENOMEM;
for (i = 0; i < nr_tables; i++) {
entry = kzalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
if (entry == NULL) {
retval = -ENOMEM;
break;
}
tables[i] = entry;
retval = serverworks_create_page_map(entry);
if (retval != 0) break;
}
serverworks_private.num_tables = nr_tables;
serverworks_private.gatt_pages = tables;
if (retval != 0) serverworks_free_gatt_pages();
return retval;
}
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif
#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif
#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
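/*
* Illustrative sketch (not part of the original driver): with 4 KB pages,
* bits 22 and up of a GART bus address select the page-directory slot and
* bits 12-21 select the entry within that GATT page, e.g.:
*
*   unsigned long addr = agp_bridge->gart_bus_addr + 5 * PAGE_SIZE;
*   int dir_idx = GET_PAGE_DIR_IDX(addr);   => 0 (first GATT page)
*   int gatt_off = GET_GATT_OFF(addr);      => 5 (sixth entry within it)
*/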
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct serverworks_page_map page_dir;
int retval;
u32 temp;
int i;
value = A_SIZE_LVL2(agp_bridge->current_size);
retval = serverworks_create_page_map(&page_dir);
if (retval != 0) {
return retval;
}
retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
if (retval != 0) {
serverworks_free_page_map(&page_dir);
return retval;
}
/* Create a fake scratch directory */
for (i = 0; i < 1024; i++) {
writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
}
retval = serverworks_create_gatt_pages(value->num_entries / 1024);
if (retval != 0) {
serverworks_free_page_map(&page_dir);
serverworks_free_page_map(&serverworks_private.scratch_dir);
return retval;
}
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the GART region.
* This is a bus address even on the Alpha, because it's
* used to program the AGP master, not the CPU.
*/
pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++)
writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
return 0;
}
static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
struct serverworks_page_map page_dir;
page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;
serverworks_free_gatt_pages();
serverworks_free_page_map(&page_dir);
serverworks_free_page_map(&serverworks_private.scratch_dir);
return 0;
}
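/*
* BAR-style size probe: write the all-ones size mask to the aperture
* base register, read back which bits stick, restore the original
* value, then match the result against the aperture size table.
*/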
static int serverworks_fetch_size(void)
{
int i;
u32 temp;
u32 temp2;
struct aper_size_info_lvl2 *values;
values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp);
pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,
SVWRKS_SIZE_MASK);
pci_read_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,&temp2);
pci_write_config_dword(agp_bridge->dev,serverworks_private.gart_addr_ofs,temp);
temp2 &= SVWRKS_SIZE_MASK;
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp2 == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
/*
* This routine could be implemented by taking the addresses
* written to the GATT and flushing them individually. However,
* it currently just flushes the whole table, which is probably
* more efficient, since agp_memory blocks can span a large number
* of entries.
*/
static void serverworks_tlbflush(struct agp_memory *temp)
{
unsigned long timeout;
writeb(1, serverworks_private.registers+SVWRKS_POSTFLUSH);
timeout = jiffies + 3*HZ;
while (readb(serverworks_private.registers+SVWRKS_POSTFLUSH) == 1) {
cpu_relax();
if (time_after(jiffies, timeout)) {
dev_err(&serverworks_private.svrwrks_dev->dev,
"TLB post flush took more than 3 seconds\n");
break;
}
}
writel(1, serverworks_private.registers+SVWRKS_DIRFLUSH);
timeout = jiffies + 3*HZ;
while (readl(serverworks_private.registers+SVWRKS_DIRFLUSH) == 1) {
cpu_relax();
if (time_after(jiffies, timeout)) {
dev_err(&serverworks_private.svrwrks_dev->dev,
"TLB Dir flush took more than 3 seconds\n");
break;
}
}
}
static int serverworks_configure(void)
{
struct aper_size_info_lvl2 *current_size;
u32 temp;
u8 enable_reg;
u16 cap_reg;
current_size = A_SIZE_LVL2(agp_bridge->current_size);
/* Get the memory mapped registers */
pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
if (!serverworks_private.registers) {
dev_err(&agp_bridge->dev->dev, "can't ioremap(%#x)\n", temp);
return -ENOMEM;
}
writeb(0xA, serverworks_private.registers+SVWRKS_GART_CACHE);
readb(serverworks_private.registers+SVWRKS_GART_CACHE); /* PCI Posting. */
writel(agp_bridge->gatt_bus_addr, serverworks_private.registers+SVWRKS_GATTBASE);
readl(serverworks_private.registers+SVWRKS_GATTBASE); /* PCI Posting. */
cap_reg = readw(serverworks_private.registers+SVWRKS_COMMAND);
cap_reg &= ~0x0007;
cap_reg |= 0x4;
writew(cap_reg, serverworks_private.registers+SVWRKS_COMMAND);
readw(serverworks_private.registers+SVWRKS_COMMAND);
pci_read_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, &enable_reg);
enable_reg |= 0x1; /* Agp Enable bit */
pci_write_config_byte(serverworks_private.svrwrks_dev,SVWRKS_AGP_ENABLE, enable_reg);
serverworks_tlbflush(NULL);
agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);
/* Fill in the mode register */
pci_read_config_dword(serverworks_private.svrwrks_dev,
agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);
pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
enable_reg &= ~0x3;
pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);
pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
enable_reg |= (1<<6);
pci_write_config_byte(agp_bridge->dev,SVWRKS_FEATURE, enable_reg);
return 0;
}
static void serverworks_cleanup(void)
{
iounmap((void __iomem *) serverworks_private.registers);
}
static int serverworks_insert_memory(struct agp_memory *mem,
off_t pg_start, int type)
{
int i, j, num_entries;
unsigned long __iomem *cur_gatt;
unsigned long addr;
num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
if ((pg_start + mem->page_count) > num_entries) {
return -EINVAL;
}
j = pg_start;
while (j < (pg_start + mem->page_count)) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
return -EBUSY;
j++;
}
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = true;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]), mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
serverworks_tlbflush(mem);
return 0;
}
static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
int type)
{
int i;
unsigned long __iomem *cur_gatt;
unsigned long addr;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
global_cache_flush();
serverworks_tlbflush(mem);
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
}
serverworks_tlbflush(mem);
return 0;
}
static const struct gatt_mask serverworks_masks[] =
{
{.mask = 1, .type = 0}
};
static const struct aper_size_info_lvl2 serverworks_sizes[7] =
{
{2048, 524288, 0x80000000},
{1024, 262144, 0xc0000000},
{512, 131072, 0xe0000000},
{256, 65536, 0xf0000000},
{128, 32768, 0xf8000000},
{64, 16384, 0xfc000000},
{32, 8192, 0xfe000000}
};
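/*
* Note on the table above (field order per struct aper_size_info_lvl2):
* each row is { aperture size in MB, number of 4 KB GATT entries,
* register value }, e.g. a 2048 MB aperture needs 2048 * 256 = 524288
* page entries.
*/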
static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
u32 command;
pci_read_config_dword(serverworks_private.svrwrks_dev,
bridge->capndx + PCI_AGP_STATUS,
&command);
command = agp_collect_device_status(bridge, mode, command);
command &= ~0x10; /* disable FW */
command &= ~0x08;
command |= 0x100;
pci_write_config_dword(serverworks_private.svrwrks_dev,
bridge->capndx + PCI_AGP_COMMAND,
command);
agp_device_command(command, false);
}
static const struct agp_bridge_driver sworks_driver = {
.owner = THIS_MODULE,
.aperture_sizes = serverworks_sizes,
.size_type = LVL2_APER_SIZE,
.num_aperture_sizes = 7,
.configure = serverworks_configure,
.fetch_size = serverworks_fetch_size,
.cleanup = serverworks_cleanup,
.tlb_flush = serverworks_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = serverworks_masks,
.agp_enable = serverworks_agp_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = serverworks_create_gatt_table,
.free_gatt_table = serverworks_free_gatt_table,
.insert_memory = serverworks_insert_memory,
.remove_memory = serverworks_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int agp_serverworks_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_bridge_data *bridge;
struct pci_dev *bridge_dev;
u32 temp, temp2;
u8 cap_ptr = 0;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
switch (pdev->device) {
case 0x0006:
dev_err(&pdev->dev, "ServerWorks CNB20HE is unsupported due to lack of documentation\n");
return -ENODEV;
case PCI_DEVICE_ID_SERVERWORKS_HE:
case PCI_DEVICE_ID_SERVERWORKS_LE:
case 0x0007:
break;
default:
if (cap_ptr)
dev_err(&pdev->dev, "unsupported Serverworks chipset "
"[%04x/%04x]\n", pdev->vendor, pdev->device);
return -ENODEV;
}
/* Everything is on function 1 here, so we hardcode it */
bridge_dev = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
PCI_DEVFN(0, 1));
if (!bridge_dev) {
dev_info(&pdev->dev, "can't find secondary device\n");
return -ENODEV;
}
serverworks_private.svrwrks_dev = bridge_dev;
serverworks_private.gart_addr_ofs = 0x10;
pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
if (temp2 != 0) {
dev_info(&pdev->dev, "64 bit aperture address, "
"but top bits are not zero; disabling AGP\n");
return -ENODEV;
}
serverworks_private.mm_addr_ofs = 0x18;
} else
serverworks_private.mm_addr_ofs = 0x14;
pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
pci_read_config_dword(pdev,
serverworks_private.mm_addr_ofs + 4, &temp2);
if (temp2 != 0) {
dev_info(&pdev->dev, "64 bit MMIO address, but top "
"bits are not zero; disabling AGP\n");
return -ENODEV;
}
}
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->driver = &sworks_driver;
bridge->dev_private_data = &serverworks_private;
bridge->dev = pci_dev_get(pdev);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_serverworks_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
pci_dev_put(bridge->dev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
pci_dev_put(serverworks_private.svrwrks_dev);
serverworks_private.svrwrks_dev = NULL;
}
static struct pci_device_id agp_serverworks_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_SERVERWORKS,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
static struct pci_driver agp_serverworks_pci_driver = {
.name = "agpgart-serverworks",
.id_table = agp_serverworks_pci_table,
.probe = agp_serverworks_probe,
.remove = agp_serverworks_remove,
};
static int __init agp_serverworks_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_serverworks_pci_driver);
}
static void __exit agp_serverworks_cleanup(void)
{
pci_unregister_driver(&agp_serverworks_pci_driver);
}
module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);
MODULE_LICENSE("GPL and additional rights");

722
drivers/char/agp/uninorth-agp.c Normal file

@ -0,0 +1,722 @@
/*
* UniNorth AGPGART routines.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <asm/uninorth.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/pmac_feature.h>
#include "agp.h"
/*
* NOTES on uninorth3 (G5 AGP) support:
*
* There may also be a possibility of using a bigger cache line size for
* AGP (see pmac_pci.c and look for cache line). This needs to be
* investigated by someone.
*
* PAGE sizes are hardcoded, but this may change; see asm/page.h.
*
* Jerome Glisse <j.glisse@gmail.com>
*/
static int uninorth_rev;
static int is_u3;
static u32 scratch_value;
#define DEFAULT_APERTURE_SIZE 256
#define DEFAULT_APERTURE_STRING "256"
static char *aperture = NULL;
static int uninorth_fetch_size(void)
{
int i, size = 0;
struct aper_size_info_32 *values =
A_SIZE_32(agp_bridge->driver->aperture_sizes);
if (aperture) {
char *save = aperture;
size = memparse(aperture, &aperture) >> 20;
aperture = save;
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
if (size == values[i].size)
break;
if (i == agp_bridge->driver->num_aperture_sizes) {
dev_err(&agp_bridge->dev->dev, "invalid aperture size, "
"using default\n");
size = 0;
aperture = NULL;
}
}
if (!size) {
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++)
if (values[i].size == DEFAULT_APERTURE_SIZE)
break;
}
agp_bridge->previous_size =
agp_bridge->current_size = (void *)(values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
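/*
* Invalidate the GART TLB by pulsing the INVAL bit; on controller
* revisions <= 0x30 a full flush (mem == NULL) additionally pulses
* the 2xRESET bit.
*/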
static void uninorth_tlbflush(struct agp_memory *mem)
{
u32 ctrl = UNI_N_CFG_GART_ENABLE;
if (is_u3)
ctrl |= U3_N_CFG_GART_PERFRD;
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
ctrl | UNI_N_CFG_GART_INVAL);
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, ctrl);
if (!mem && uninorth_rev <= 0x30) {
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
ctrl | UNI_N_CFG_GART_2xRESET);
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
ctrl);
}
}
static void uninorth_cleanup(void)
{
u32 tmp;
pci_read_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, &tmp);
if (!(tmp & UNI_N_CFG_GART_ENABLE))
return;
tmp |= UNI_N_CFG_GART_INVAL;
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, tmp);
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL, 0);
if (uninorth_rev <= 0x30) {
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
UNI_N_CFG_GART_2xRESET);
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_GART_CTRL,
0);
}
}
static int uninorth_configure(void)
{
struct aper_size_info_32 *current_size;
current_size = A_SIZE_32(agp_bridge->current_size);
dev_info(&agp_bridge->dev->dev, "configuring for size idx: %d\n",
current_size->size_value);
/* aperture size and gatt addr */
pci_write_config_dword(agp_bridge->dev,
UNI_N_CFG_GART_BASE,
(agp_bridge->gatt_bus_addr & 0xfffff000)
| current_size->size_value);
/* HACK ALERT
* UniNorth seems to be buggy enough not to behave properly when
* the AGP aperture isn't mapped at bus physical address 0.
*/
agp_bridge->gart_bus_addr = 0;
#ifdef CONFIG_PPC64
/* Assume U3 or later on PPC64 systems */
/* high 4 bits of GART physical address go in UNI_N_CFG_AGP_BASE */
pci_write_config_dword(agp_bridge->dev, UNI_N_CFG_AGP_BASE,
(agp_bridge->gatt_bus_addr >> 32) & 0xf);
#else
pci_write_config_dword(agp_bridge->dev,
UNI_N_CFG_AGP_BASE, agp_bridge->gart_bus_addr);
#endif
if (is_u3) {
pci_write_config_dword(agp_bridge->dev,
UNI_N_CFG_GART_DUMMY_PAGE,
page_to_phys(agp_bridge->scratch_page_page) >> 12);
}
return 0;
}
static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
int i, num_entries;
void *temp;
u32 *gp;
int mask_type;
if (type != mem->type)
return -EINVAL;
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
if (mask_type != 0) {
/* We know nothing of memory types */
return -EINVAL;
}
if (mem->page_count == 0)
return 0;
temp = agp_bridge->current_size;
num_entries = A_SIZE_32(temp)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
if (gp[i] != scratch_value) {
dev_info(&agp_bridge->dev->dev,
"uninorth_insert_memory: entry 0x%x occupied (%x)\n",
i, gp[i]);
return -EBUSY;
}
}
for (i = 0; i < mem->page_count; i++) {
if (is_u3)
gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
else
gp[i] = cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) |
0x1UL);
flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
(unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
}
mb();
uninorth_tlbflush(mem);
return 0;
}
static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
size_t i;
u32 *gp;
int mask_type;
if (type != mem->type)
return -EINVAL;
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
if (mask_type != 0) {
/* We know nothing of memory types */
return -EINVAL;
}
if (mem->page_count == 0)
return 0;
gp = (u32 *) &agp_bridge->gatt_table[pg_start];
for (i = 0; i < mem->page_count; ++i) {
gp[i] = scratch_value;
}
mb();
uninorth_tlbflush(mem);
return 0;
}
static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
u32 command, scratch, status;
int timeout;
pci_read_config_dword(bridge->dev,
bridge->capndx + PCI_AGP_STATUS,
&status);
command = agp_collect_device_status(bridge, mode, status);
command |= PCI_AGP_COMMAND_AGP;
if (uninorth_rev == 0x21) {
/*
* Darwin disables AGP 4x on this revision, so we
* may assume it's broken. This is an AGP2 controller.
*/
command &= ~AGPSTAT2_4X;
}
if ((uninorth_rev >= 0x30) && (uninorth_rev <= 0x33)) {
/*
* We need to set REQ_DEPTH to 7 for U3 versions 1.0, 2.1,
* 2.2 and 2.3; Darwin does so.
*/
if ((command >> AGPSTAT_RQ_DEPTH_SHIFT) > 7)
command = (command & ~AGPSTAT_RQ_DEPTH)
| (7 << AGPSTAT_RQ_DEPTH_SHIFT);
}
uninorth_tlbflush(NULL);
timeout = 0;
do {
pci_write_config_dword(bridge->dev,
bridge->capndx + PCI_AGP_COMMAND,
command);
pci_read_config_dword(bridge->dev,
bridge->capndx + PCI_AGP_COMMAND,
&scratch);
} while ((scratch & PCI_AGP_COMMAND_AGP) == 0 && ++timeout < 1000);
if ((scratch & PCI_AGP_COMMAND_AGP) == 0)
dev_err(&bridge->dev->dev, "can't write UniNorth AGP "
"command register\n");
if (uninorth_rev >= 0x30) {
/* This is an AGP V3 */
agp_device_command(command, (status & AGPSTAT_MODE_3_0) != 0);
} else {
/* AGP V2 */
agp_device_command(command, false);
}
uninorth_tlbflush(NULL);
}
#ifdef CONFIG_PM
/*
* These Power Management routines are _not_ called by the normal PCI PM layer,
* but directly by the video driver through function pointers in the device
* tree.
*/
static int agp_uninorth_suspend(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge;
u32 cmd;
u8 agp;
struct pci_dev *device = NULL;
bridge = agp_find_bridge(pdev);
if (bridge == NULL)
return -ENODEV;
/* Only one suspend supported */
if (bridge->dev_private_data)
return 0;
/* turn off AGP on the video chip, if it was enabled */
for_each_pci_dev(device) {
/* Don't touch the bridge yet, device first */
if (device == pdev)
continue;
/* Only deal with devices on the same bus here; no Mac has a P2P
* bridge on the AGP port, and mucking around with the entire PCI
* tree is a source of problems on some machines because of a bug
* in some versions of pci_find_capability() when hitting a dead
* device
*/
if (device->bus != pdev->bus)
continue;
agp = pci_find_capability(device, PCI_CAP_ID_AGP);
if (!agp)
continue;
pci_read_config_dword(device, agp + PCI_AGP_COMMAND, &cmd);
if (!(cmd & PCI_AGP_COMMAND_AGP))
continue;
dev_info(&pdev->dev, "disabling AGP on device %s\n",
pci_name(device));
cmd &= ~PCI_AGP_COMMAND_AGP;
pci_write_config_dword(device, agp + PCI_AGP_COMMAND, cmd);
}
/* turn off AGP on the bridge */
agp = pci_find_capability(pdev, PCI_CAP_ID_AGP);
pci_read_config_dword(pdev, agp + PCI_AGP_COMMAND, &cmd);
bridge->dev_private_data = (void *)(long)cmd;
if (cmd & PCI_AGP_COMMAND_AGP) {
dev_info(&pdev->dev, "disabling AGP on bridge\n");
cmd &= ~PCI_AGP_COMMAND_AGP;
pci_write_config_dword(pdev, agp + PCI_AGP_COMMAND, cmd);
}
/* turn off the GART */
uninorth_cleanup();
return 0;
}
static int agp_uninorth_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge;
u32 command;
bridge = agp_find_bridge(pdev);
if (bridge == NULL)
return -ENODEV;
command = (long)bridge->dev_private_data;
bridge->dev_private_data = NULL;
if (!(command & PCI_AGP_COMMAND_AGP))
return 0;
uninorth_agp_enable(bridge, command);
return 0;
}
#endif /* CONFIG_PM */
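/*
* The GATT is allocated as ordinary pages and then mapped a second
* time through vmap() with a non-cached, guarded pgprot so that the
* AGP master always sees up-to-date entries.
*/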
static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
{
char *table;
char *table_end;
int size;
int page_order;
int num_entries;
int i;
void *temp;
struct page *page;
struct page **pages;
/* We can't handle 2 level gatt's */
if (bridge->driver->size_type == LVL2_APER_SIZE)
return -EINVAL;
table = NULL;
i = bridge->aperture_size_idx;
temp = bridge->current_size;
size = page_order = num_entries = 0;
do {
size = A_SIZE_32(temp)->size;
page_order = A_SIZE_32(temp)->page_order;
num_entries = A_SIZE_32(temp)->num_entries;
table = (char *) __get_free_pages(GFP_KERNEL, page_order);
if (table == NULL) {
i++;
bridge->current_size = A_IDX32(bridge);
} else {
bridge->aperture_size_idx = i;
}
} while (!table && (i < bridge->driver->num_aperture_sizes));
if (table == NULL)
return -ENOMEM;
pages = kmalloc((1 << page_order) * sizeof(struct page*), GFP_KERNEL);
if (pages == NULL)
goto enomem;
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
for (page = virt_to_page(table), i = 0; page <= virt_to_page(table_end);
page++, i++) {
SetPageReserved(page);
pages[i] = page;
}
bridge->gatt_table_real = (u32 *) table;
/* Need to clear out any dirty data still sitting in caches */
flush_dcache_range((unsigned long)table,
(unsigned long)table_end + 1);
bridge->gatt_table = vmap(pages, (1 << page_order), 0, PAGE_KERNEL_NCG);
if (bridge->gatt_table == NULL)
goto enomem;
bridge->gatt_bus_addr = virt_to_phys(table);
if (is_u3)
scratch_value = (page_to_phys(agp_bridge->scratch_page_page) >> PAGE_SHIFT) | 0x80000000UL;
else
scratch_value = cpu_to_le32((page_to_phys(agp_bridge->scratch_page_page) & 0xFFFFF000UL) |
0x1UL);
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = scratch_value;
return 0;
enomem:
kfree(pages);
if (table)
free_pages((unsigned long)table, page_order);
return -ENOMEM;
}
static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
{
int page_order;
char *table, *table_end;
void *temp;
struct page *page;
temp = bridge->current_size;
page_order = A_SIZE_32(temp)->page_order;
/* Do not worry about freeing memory, because if this is
* called, then all agp memory is deallocated and removed
* from the table.
*/
vunmap(bridge->gatt_table);
table = (char *) bridge->gatt_table_real;
table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page);
free_pages((unsigned long) bridge->gatt_table_real, page_order);
return 0;
}
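/*
* The GATT mapping is non-cacheable here, so a memory barrier is all
* the "cache flush" that is needed.
*/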
static void null_cache_flush(void)
{
mb();
}
/* Setup function */
static const struct aper_size_info_32 uninorth_sizes[] =
{
{256, 65536, 6, 64},
{128, 32768, 5, 32},
{64, 16384, 4, 16},
{32, 8192, 3, 8},
{16, 4096, 2, 4},
{8, 2048, 1, 2},
{4, 1024, 0, 1}
};
/*
* Not sure that u3 supports such high aperture sizes, but it
* would be strange if it did not :)
*/
static const struct aper_size_info_32 u3_sizes[] =
{
{512, 131072, 7, 128},
{256, 65536, 6, 64},
{128, 32768, 5, 32},
{64, 16384, 4, 16},
{32, 8192, 3, 8},
{16, 4096, 2, 4},
{8, 2048, 1, 2},
{4, 1024, 0, 1}
};
const struct agp_bridge_driver uninorth_agp_driver = {
.owner = THIS_MODULE,
.aperture_sizes = (void *)uninorth_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = ARRAY_SIZE(uninorth_sizes),
.configure = uninorth_configure,
.fetch_size = uninorth_fetch_size,
.cleanup = uninorth_cleanup,
.tlb_flush = uninorth_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.cache_flush = null_cache_flush,
.agp_enable = uninorth_agp_enable,
.create_gatt_table = uninorth_create_gatt_table,
.free_gatt_table = uninorth_free_gatt_table,
.insert_memory = uninorth_insert_memory,
.remove_memory = uninorth_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
.needs_scratch_page = true,
};
const struct agp_bridge_driver u3_agp_driver = {
.owner = THIS_MODULE,
.aperture_sizes = (void *)u3_sizes,
.size_type = U32_APER_SIZE,
.num_aperture_sizes = ARRAY_SIZE(u3_sizes),
.configure = uninorth_configure,
.fetch_size = uninorth_fetch_size,
.cleanup = uninorth_cleanup,
.tlb_flush = uninorth_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.cache_flush = null_cache_flush,
.agp_enable = uninorth_agp_enable,
.create_gatt_table = uninorth_create_gatt_table,
.free_gatt_table = uninorth_free_gatt_table,
.insert_memory = uninorth_insert_memory,
.remove_memory = uninorth_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = true,
.needs_scratch_page = true,
};
static struct agp_device_ids uninorth_agp_device_ids[] = {
{
.device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP,
.chipset_name = "UniNorth",
},
{
.device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP_P,
.chipset_name = "UniNorth/Pangea",
},
{
.device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP15,
.chipset_name = "UniNorth 1.5",
},
{
.device_id = PCI_DEVICE_ID_APPLE_UNI_N_AGP2,
.chipset_name = "UniNorth 2",
},
{
.device_id = PCI_DEVICE_ID_APPLE_U3_AGP,
.chipset_name = "U3",
},
{
.device_id = PCI_DEVICE_ID_APPLE_U3L_AGP,
.chipset_name = "U3L",
},
{
.device_id = PCI_DEVICE_ID_APPLE_U3H_AGP,
.chipset_name = "U3H",
},
{
.device_id = PCI_DEVICE_ID_APPLE_IPID2_AGP,
.chipset_name = "UniNorth/Intrepid2",
},
{ }, /* dummy terminator; the probe loop stops at chipset_name == NULL */
};
static int agp_uninorth_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct agp_device_ids *devs = uninorth_agp_device_ids;
struct agp_bridge_data *bridge;
struct device_node *uninorth_node;
u8 cap_ptr;
int j;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (cap_ptr == 0)
return -ENODEV;
/* probe for known chipsets */
for (j = 0; devs[j].chipset_name != NULL; ++j) {
if (pdev->device == devs[j].device_id) {
dev_info(&pdev->dev, "Apple %s chipset\n",
devs[j].chipset_name);
goto found;
}
}
dev_err(&pdev->dev, "unsupported Apple chipset [%04x/%04x]\n",
pdev->vendor, pdev->device);
return -ENODEV;
found:
/* Set revision to 0 if we could not read it. */
uninorth_rev = 0;
is_u3 = 0;
/* Locate core99 Uni-N */
uninorth_node = of_find_node_by_name(NULL, "uni-n");
/* Locate G5 u3 */
if (uninorth_node == NULL) {
is_u3 = 1;
uninorth_node = of_find_node_by_name(NULL, "u3");
}
if (uninorth_node) {
const int *revprop = of_get_property(uninorth_node,
"device-rev", NULL);
if (revprop != NULL)
uninorth_rev = *revprop & 0x3f;
of_node_put(uninorth_node);
}
#ifdef CONFIG_PM
/* Inform platform of our suspend/resume caps */
pmac_register_agp_pm(pdev, agp_uninorth_suspend, agp_uninorth_resume);
#endif
/* Allocate & setup our driver */
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
if (is_u3)
bridge->driver = &u3_agp_driver;
else
bridge->driver = &uninorth_agp_driver;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
bridge->flags = AGP_ERRATA_FASTWRITES;
/* Fill in the mode register */
pci_read_config_dword(pdev, cap_ptr+PCI_AGP_STATUS, &bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_uninorth_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
#ifdef CONFIG_PM
/* Inform platform of our suspend/resume caps */
pmac_register_agp_pm(pdev, NULL, NULL);
#endif
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
static struct pci_device_id agp_uninorth_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_APPLE,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
static struct pci_driver agp_uninorth_pci_driver = {
.name = "agpgart-uninorth",
.id_table = agp_uninorth_pci_table,
.probe = agp_uninorth_probe,
.remove = agp_uninorth_remove,
};
static int __init agp_uninorth_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_uninorth_pci_driver);
}
static void __exit agp_uninorth_cleanup(void)
{
pci_unregister_driver(&agp_uninorth_pci_driver);
}
module_init(agp_uninorth_init);
module_exit(agp_uninorth_cleanup);
module_param(aperture, charp, 0);
MODULE_PARM_DESC(aperture,
"Aperture size, must be a power of two between 4MB and an\n"
"\t\tupper limit specific to the UniNorth revision.\n"
"\t\tDefault: " DEFAULT_APERTURE_STRING "M");
MODULE_AUTHOR("Ben Herrenschmidt & Paul Mackerras");
MODULE_LICENSE("GPL");

598
drivers/char/agp/via-agp.c Normal file

@ -0,0 +1,598 @@
/*
* VIA AGPGART routines.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include "agp.h"
static const struct pci_device_id agp_via_pci_table[];
#define VIA_GARTCTRL 0x80
#define VIA_APSIZE 0x84
#define VIA_ATTBASE 0x88
#define VIA_AGP3_GARTCTRL 0x90
#define VIA_AGP3_APSIZE 0x94
#define VIA_AGP3_ATTBASE 0x98
#define VIA_AGPSEL 0xfd
static int via_fetch_size(void)
{
int i;
u8 temp;
struct aper_size_info_8 *values;
values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
pci_read_config_byte(agp_bridge->dev, VIA_APSIZE, &temp);
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
return 0;
}
static int via_configure(void)
{
struct aper_size_info_8 *current_size;
current_size = A_SIZE_8(agp_bridge->current_size);
/* aperture size */
pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
current_size->size_value);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* GART control register */
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
/* attbase - aperture GATT base */
pci_write_config_dword(agp_bridge->dev, VIA_ATTBASE,
(agp_bridge->gatt_bus_addr & 0xfffff000) | 3);
return 0;
}
static void via_cleanup(void)
{
struct aper_size_info_8 *previous_size;
previous_size = A_SIZE_8(agp_bridge->previous_size);
pci_write_config_byte(agp_bridge->dev, VIA_APSIZE,
previous_size->size_value);
/* Do not disable by writing 0 to VIA_ATTBASE, it screws things up
* during reinitialization.
*/
}
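/* Flush the GART TLB by pulsing the flush bit (bit 7) in VIA_GARTCTRL. */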
static void via_tlbflush(struct agp_memory *mem)
{
u32 temp;
pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
temp |= (1<<7);
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
temp &= ~(1<<7);
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
}
static const struct aper_size_info_8 via_generic_sizes[9] =
{
{256, 65536, 6, 0},
{128, 32768, 5, 128},
{64, 16384, 4, 192},
{32, 8192, 3, 224},
{16, 4096, 2, 240},
{8, 2048, 1, 248},
{4, 1024, 0, 252},
{2, 512, 0, 254},
{1, 256, 0, 255}
};
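/*
* Note on the table above (field order per struct aper_size_info_8):
* each row is { size in MB, number of GATT entries, page order,
* VIA_APSIZE register value }, e.g. the 64 MB aperture has 16384
* entries and is selected by writing 192.
*/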
static int via_fetch_size_agp3(void)
{
int i;
u16 temp;
struct aper_size_info_16 *values;
values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
pci_read_config_word(agp_bridge->dev, VIA_AGP3_APSIZE, &temp);
temp &= 0xfff;
for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge->previous_size =
agp_bridge->current_size = (void *) (values + i);
agp_bridge->aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static int via_configure_agp3(void)
{
u32 temp;
struct aper_size_info_16 *current_size;
current_size = A_SIZE_16(agp_bridge->current_size);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
AGP_APERTURE_BAR);
/* attbase - aperture GATT base */
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
agp_bridge->gatt_bus_addr & 0xfffff000);
/* 1. Enable GTLB in RX90<7>; all AGP aperture accesses need to fetch the
* translation table first.
* 2. Enable the AGP aperture in RX91<0>. This bit controls the enabling of the
* graphics AGP aperture for the AGP3.0 port.
*/
pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
return 0;
}
static void via_cleanup_agp3(void)
{
struct aper_size_info_16 *previous_size;
previous_size = A_SIZE_16(agp_bridge->previous_size);
pci_write_config_byte(agp_bridge->dev, VIA_APSIZE, previous_size->size_value);
}
static void via_tlbflush_agp3(struct agp_memory *mem)
{
u32 temp;
pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp);
}
static const struct agp_bridge_driver via_agp3_driver = {
.owner = THIS_MODULE,
.aperture_sizes = agp3_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 10,
.needs_scratch_page = true,
.configure = via_configure_agp3,
.fetch_size = via_fetch_size_agp3,
.cleanup = via_cleanup_agp3,
.tlb_flush = via_tlbflush_agp3,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver via_driver = {
.owner = THIS_MODULE,
.aperture_sizes = via_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 9,
.needs_scratch_page = true,
.configure = via_configure,
.fetch_size = via_fetch_size,
.cleanup = via_cleanup,
.tlb_flush = via_tlbflush,
.mask_memory = agp_generic_mask_memory,
.masks = NULL,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
.create_gatt_table = agp_generic_create_gatt_table,
.free_gatt_table = agp_generic_free_gatt_table,
.insert_memory = agp_generic_insert_memory,
.remove_memory = agp_generic_remove_memory,
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_alloc_pages = agp_generic_alloc_pages,
.agp_destroy_page = agp_generic_destroy_page,
.agp_destroy_pages = agp_generic_destroy_pages,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids via_agp_device_ids[] =
{
{
.device_id = PCI_DEVICE_ID_VIA_82C597_0,
.chipset_name = "Apollo VP3",
},
{
.device_id = PCI_DEVICE_ID_VIA_82C598_0,
.chipset_name = "Apollo MVP3",
},
{
.device_id = PCI_DEVICE_ID_VIA_8501_0,
.chipset_name = "Apollo MVP4",
},
/* VT8601 */
{
.device_id = PCI_DEVICE_ID_VIA_8601_0,
.chipset_name = "Apollo ProMedia/PLE133Ta",
},
/* VT82C693A / VT28C694T */
{
.device_id = PCI_DEVICE_ID_VIA_82C691_0,
.chipset_name = "Apollo Pro 133",
},
{
.device_id = PCI_DEVICE_ID_VIA_8371_0,
.chipset_name = "KX133",
},
/* VT8633 */
{
.device_id = PCI_DEVICE_ID_VIA_8633_0,
.chipset_name = "Pro 266",
},
{
.device_id = PCI_DEVICE_ID_VIA_XN266,
.chipset_name = "Apollo Pro266",
},
/* VT8361 */
{
.device_id = PCI_DEVICE_ID_VIA_8361,
.chipset_name = "KLE133",
},
/* VT8365 / VT8362 */
{
.device_id = PCI_DEVICE_ID_VIA_8363_0,
.chipset_name = "Twister-K/KT133x/KM133",
},
/* VT8753A */
{
.device_id = PCI_DEVICE_ID_VIA_8753_0,
.chipset_name = "P4X266",
},
/* VT8366 */
{
.device_id = PCI_DEVICE_ID_VIA_8367_0,
.chipset_name = "KT266/KY266x/KT333",
},
/* VT8633 (for CuMine/ Celeron) */
{
.device_id = PCI_DEVICE_ID_VIA_8653_0,
.chipset_name = "Pro266T",
},
/* KM266 / PM266 */
{
.device_id = PCI_DEVICE_ID_VIA_XM266,
.chipset_name = "PM266/KM266",
},
/* CLE266 */
{
.device_id = PCI_DEVICE_ID_VIA_862X_0,
.chipset_name = "CLE266",
},
{
.device_id = PCI_DEVICE_ID_VIA_8377_0,
.chipset_name = "KT400/KT400A/KT600",
},
/* VT8604 / VT8605 / VT8603
* (Apollo Pro133A chipset with S3 Savage4) */
{
.device_id = PCI_DEVICE_ID_VIA_8605_0,
.chipset_name = "ProSavage PM133/PL133/PN133"
},
/* P4M266x/P4N266 */
{
.device_id = PCI_DEVICE_ID_VIA_8703_51_0,
.chipset_name = "P4M266x/P4N266",
},
/* VT8754 */
{
.device_id = PCI_DEVICE_ID_VIA_8754C_0,
.chipset_name = "PT800",
},
/* P4X600 */
{
.device_id = PCI_DEVICE_ID_VIA_8763_0,
.chipset_name = "P4X600"
},
/* KM400 */
{
.device_id = PCI_DEVICE_ID_VIA_8378_0,
.chipset_name = "KM400/KM400A",
},
/* PT880 */
{
.device_id = PCI_DEVICE_ID_VIA_PT880,
.chipset_name = "PT880",
},
/* PT880 Ultra */
{
.device_id = PCI_DEVICE_ID_VIA_PT880ULTRA,
.chipset_name = "PT880 Ultra",
},
/* PT890 */
{
.device_id = PCI_DEVICE_ID_VIA_8783_0,
.chipset_name = "PT890",
},
/* PM800/PN800/PM880/PN880 */
{
.device_id = PCI_DEVICE_ID_VIA_PX8X0_0,
.chipset_name = "PM800/PN800/PM880/PN880",
},
/* KT880 */
{
.device_id = PCI_DEVICE_ID_VIA_3269_0,
.chipset_name = "KT880",
},
/* KTxxx/Px8xx */
{
.device_id = PCI_DEVICE_ID_VIA_83_87XX_1,
.chipset_name = "VT83xx/VT87xx/KTxxx/Px8xx",
},
/* P4M800 */
{
.device_id = PCI_DEVICE_ID_VIA_3296_0,
.chipset_name = "P4M800",
},
/* P4M800CE */
{
.device_id = PCI_DEVICE_ID_VIA_P4M800CE,
.chipset_name = "VT3314",
},
/* VT3324 / CX700 */
{
.device_id = PCI_DEVICE_ID_VIA_VT3324,
.chipset_name = "CX700",
},
/* VT3336 - this is a chipset for AMD Athlon/K8 CPUs. Due to K8's unique
* architecture, the AGP resource and behavior are different from
* traditional AGP, which resides only in the chipset. AGP is used
* by the 3D driver, which wasn't available for the VT3336 and VT3364
* generation until now. Unfortunately, by testing, VT3364 works
* but VT3336 doesn't. - explanation from VIA; just leave this as
* a placeholder to avoid future patches adding it back in.
*/
#if 0
{
.device_id = PCI_DEVICE_ID_VIA_VT3336,
.chipset_name = "VT3336",
},
#endif
/* P4M890 */
{
.device_id = PCI_DEVICE_ID_VIA_P4M890,
.chipset_name = "P4M890",
},
/* P4M900 */
{
.device_id = PCI_DEVICE_ID_VIA_VT3364,
.chipset_name = "P4M900",
},
{ }, /* dummy final entry, always present */
};
/*
* VIA's AGP3 chipsets do magic to make the AGP bridge compliant
* with the same standards version as the graphics card.
*/
static void check_via_agp3 (struct agp_bridge_data *bridge)
{
u8 reg;
pci_read_config_byte(bridge->dev, VIA_AGPSEL, &reg);
/* Check AGP 2.0 compatibility mode. */
if ((reg & (1<<1))==0)
bridge->driver = &via_agp3_driver;
}
static int agp_via_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct agp_device_ids *devs = via_agp_device_ids;
struct agp_bridge_data *bridge;
int j = 0;
u8 cap_ptr;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return -ENODEV;
j = ent - agp_via_pci_table;
printk (KERN_INFO PFX "Detected VIA %s chipset\n", devs[j].chipset_name);
bridge = agp_alloc_bridge();
if (!bridge)
return -ENOMEM;
bridge->dev = pdev;
bridge->capndx = cap_ptr;
bridge->driver = &via_driver;
/*
* Argh, there are KT400s with KT266 IDs.
*/
if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
/* Is there a KT400 subsystem? */
if (pdev->subsystem_device == PCI_DEVICE_ID_VIA_8377_0) {
printk(KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
check_via_agp3(bridge);
}
}
/* If this is an AGP3 bridge, check which mode it's in and adjust. */
get_agp_version(bridge);
if (bridge->major_version >= 3)
check_via_agp3(bridge);
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS, &bridge->mode);
pci_set_drvdata(pdev, bridge);
return agp_add_bridge(bridge);
}
static void agp_via_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
}
#ifdef CONFIG_PM
static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
{
pci_save_state (pdev);
pci_set_power_state (pdev, PCI_D3hot);
return 0;
}
static int agp_via_resume(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
pci_set_power_state (pdev, PCI_D0);
pci_restore_state(pdev);
if (bridge->driver == &via_agp3_driver)
return via_configure_agp3();
else if (bridge->driver == &via_driver)
return via_configure();
return 0;
}
#endif /* CONFIG_PM */
/* must be the same order as name table above */
static const struct pci_device_id agp_via_pci_table[] = {
#define ID(x) \
{ \
.class = (PCI_CLASS_BRIDGE_HOST << 8), \
.class_mask = ~0, \
.vendor = PCI_VENDOR_ID_VIA, \
.device = x, \
.subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, \
}
ID(PCI_DEVICE_ID_VIA_82C597_0),
ID(PCI_DEVICE_ID_VIA_82C598_0),
ID(PCI_DEVICE_ID_VIA_8501_0),
ID(PCI_DEVICE_ID_VIA_8601_0),
ID(PCI_DEVICE_ID_VIA_82C691_0),
ID(PCI_DEVICE_ID_VIA_8371_0),
ID(PCI_DEVICE_ID_VIA_8633_0),
ID(PCI_DEVICE_ID_VIA_XN266),
ID(PCI_DEVICE_ID_VIA_8361),
ID(PCI_DEVICE_ID_VIA_8363_0),
ID(PCI_DEVICE_ID_VIA_8753_0),
ID(PCI_DEVICE_ID_VIA_8367_0),
ID(PCI_DEVICE_ID_VIA_8653_0),
ID(PCI_DEVICE_ID_VIA_XM266),
ID(PCI_DEVICE_ID_VIA_862X_0),
ID(PCI_DEVICE_ID_VIA_8377_0),
ID(PCI_DEVICE_ID_VIA_8605_0),
ID(PCI_DEVICE_ID_VIA_8703_51_0),
ID(PCI_DEVICE_ID_VIA_8754C_0),
ID(PCI_DEVICE_ID_VIA_8763_0),
ID(PCI_DEVICE_ID_VIA_8378_0),
ID(PCI_DEVICE_ID_VIA_PT880),
ID(PCI_DEVICE_ID_VIA_PT880ULTRA),
ID(PCI_DEVICE_ID_VIA_8783_0),
ID(PCI_DEVICE_ID_VIA_PX8X0_0),
ID(PCI_DEVICE_ID_VIA_3269_0),
ID(PCI_DEVICE_ID_VIA_83_87XX_1),
ID(PCI_DEVICE_ID_VIA_3296_0),
ID(PCI_DEVICE_ID_VIA_P4M800CE),
ID(PCI_DEVICE_ID_VIA_VT3324),
ID(PCI_DEVICE_ID_VIA_P4M890),
ID(PCI_DEVICE_ID_VIA_VT3364),
{ }
};
MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
static struct pci_driver agp_via_pci_driver = {
.name = "agpgart-via",
.id_table = agp_via_pci_table,
.probe = agp_via_probe,
.remove = agp_via_remove,
#ifdef CONFIG_PM
.suspend = agp_via_suspend,
.resume = agp_via_resume,
#endif
};
static int __init agp_via_init(void)
{
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_via_pci_driver);
}
static void __exit agp_via_cleanup(void)
{
pci_unregister_driver(&agp_via_pci_driver);
}
module_init(agp_via_init);
module_exit(agp_via_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");