mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-07 08:48:05 -04:00

commit f6dfaef42e: Fixed MTP to work with TWRP
50820 changed files with 20846062 additions and 0 deletions
drivers/iommu/Kconfig (new file, 320 lines)
@@ -0,0 +1,320 @@
# IOMMU_API always gets selected by whoever wants it.
config IOMMU_API
	bool

menuconfig IOMMU_SUPPORT
	bool "IOMMU Hardware Support"
	default y
	---help---
	  Say Y here if you want to compile device drivers for IO Memory
	  Management Units into the kernel. These devices usually allow
	  remapping of DMA requests and/or interrupts from other devices
	  on the system.

if IOMMU_SUPPORT

config OF_IOMMU
	def_bool y
	depends on OF

config FSL_PAMU
	bool "Freescale IOMMU support"
	depends on PPC_E500MC
	select IOMMU_API
	select GENERIC_ALLOCATOR
	help
	  Freescale PAMU support. PAMU is the IOMMU present on Freescale QorIQ platforms.
	  PAMU can authorize memory access, remap the memory address, and remap I/O
	  transaction types.

# MSM IOMMU support
config MSM_IOMMU
	bool "MSM IOMMU Support"
	depends on ARCH_MSM8X60 || ARCH_MSM8960
	select IOMMU_API
	help
	  Support for the IOMMUs found on certain Qualcomm SoCs.
	  These IOMMUs allow virtualization of the address space used by most
	  cores within the multimedia subsystem.

	  If unsure, say N here.

config IOMMU_PGTABLES_L2
	def_bool y
	depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n

# AMD IOMMU support
config AMD_IOMMU
	bool "AMD IOMMU support"
	select SWIOTLB
	select PCI_MSI
	select PCI_ATS
	select PCI_PRI
	select PCI_PASID
	select IOMMU_API
	depends on X86_64 && PCI && ACPI
	---help---
	  With this option you can enable support for AMD IOMMU hardware in
	  your system. An IOMMU is a hardware component which provides
	  remapping of DMA memory accesses from devices. With an AMD IOMMU you
	  can isolate the DMA memory of different devices and protect the
	  system from misbehaving device drivers or hardware.

	  You can find out if your system has an AMD IOMMU if you look into
	  your BIOS for an option to enable it or if you have an IVRS ACPI
	  table.

config AMD_IOMMU_STATS
	bool "Export AMD IOMMU statistics to debugfs"
	depends on AMD_IOMMU
	select DEBUG_FS
	---help---
	  This option enables code in the AMD IOMMU driver to collect various
	  statistics about what's happening in the driver and exports that
	  information to userspace via debugfs.

	  If unsure, say N.

config AMD_IOMMU_V2
	tristate "AMD IOMMU Version 2 driver"
	depends on AMD_IOMMU
	select MMU_NOTIFIER
	---help---
	  This option enables support for the AMD IOMMUv2 features of the IOMMU
	  hardware. Select this option if you want to use devices that support
	  the PCI PRI and PASID interface.

# Intel IOMMU support
config DMAR_TABLE
	bool

config INTEL_IOMMU
	bool "Support for Intel IOMMU using DMA Remapping Devices"
	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
	select IOMMU_API
	select DMAR_TABLE
	help
	  DMA remapping (DMAR) devices support enables independent address
	  translations for Direct Memory Access (DMA) from devices.
	  These DMA remapping devices are reported via ACPI tables
	  and include PCI device scope covered by these DMA
	  remapping devices.

config INTEL_IOMMU_DEFAULT_ON
	def_bool y
	prompt "Enable Intel DMA Remapping Devices by default"
	depends on INTEL_IOMMU
	help
	  Selecting this option will enable a DMAR device at boot time if
	  one is found. If this option is not selected, DMAR support can
	  be enabled by passing intel_iommu=on to the kernel.

config INTEL_IOMMU_BROKEN_GFX_WA
	bool "Workaround broken graphics drivers (going away soon)"
	depends on INTEL_IOMMU && BROKEN && X86
	---help---
	  Current graphics drivers tend to use physical addresses
	  for DMA and avoid using DMA APIs. Setting this config
	  option permits the IOMMU driver to set a unity map for
	  all the OS-visible memory. Hence the driver can continue
	  to use physical addresses for DMA, at least until this
	  option is removed in the 2.6.32 kernel.

config INTEL_IOMMU_FLOPPY_WA
	def_bool y
	depends on INTEL_IOMMU && X86
	---help---
	  Floppy disk drivers are known to bypass DMA API calls,
	  thereby failing to work when the IOMMU is enabled. This
	  workaround will set up a 1:1 mapping for the first
	  16 MiB to make floppy (an ISA device) work.

config IRQ_REMAP
	bool "Support for Interrupt Remapping"
	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
	select DMAR_TABLE
	---help---
	  Supports interrupt remapping for IO-APIC and MSI devices.
	  To use x2apic mode in CPUs which support x2APIC enhancements, or
	  to support platforms with CPUs having > 8 bit APIC IDs, say Y.

# OMAP IOMMU support
config OMAP_IOMMU
	bool "OMAP IOMMU Support"
	depends on ARCH_OMAP2PLUS
	select IOMMU_API

config OMAP_IOMMU_DEBUG
	tristate "Export OMAP IOMMU internals in DebugFS"
	depends on OMAP_IOMMU && DEBUG_FS
	help
	  Select this to see extensive information about
	  the internal state of OMAP IOMMU in debugfs.

	  Say N unless you know you need this.

config TEGRA_IOMMU_GART
	bool "Tegra GART IOMMU Support"
	depends on ARCH_TEGRA_2x_SOC
	select IOMMU_API
	help
	  Enables support for remapping discontiguous physical memory
	  shared with the operating system into contiguous I/O virtual
	  space through the GART (Graphics Address Relocation Table)
	  hardware included on Tegra SoCs.

config TEGRA_IOMMU_SMMU
	bool "Tegra SMMU IOMMU Support"
	depends on ARCH_TEGRA && TEGRA_AHB
	select IOMMU_API
	help
	  Enables support for remapping discontiguous physical memory
	  shared with the operating system into contiguous I/O virtual
	  space through the SMMU (System Memory Management Unit)
	  hardware included on Tegra SoCs.

# EXYNOS IOMMU support
config EXYNOS_IOMMU
	bool "IOMMU for Exynos"
	default y
	depends on ARCH_EXYNOS
	select IOMMU_API
	select ARM_DMA_USE_IOMMU
	help
	  Support for the IOMMU (System MMU) of the Samsung Exynos application
	  processor family. This enables H/W multimedia accelerators to see
	  non-linear physical memory chunks as linear memory in their
	  address space.

config EXYNOS_IOVMM
	bool "IO Virtual Memory Manager for Exynos IOMMU"
	depends on EXYNOS_IOMMU
	default y
	help
	  Supports the users of the Exynos IOMMU by allocating and mapping
	  I/O virtual memory regions to physical memory regions
	  and managing the allocated virtual memory regions.
	  This config supports the SYSMMU v6 version.

config EXYNOS_IOMMU_EVENT_LOG
	bool "Logging System MMU events in private uncached buffer"
	depends on EXYNOS_IOMMU
	default y

config EXYNOS_IOMMU_DEBUG
	bool "Debugging log for Exynos IOMMU"
	depends on EXYNOS_IOMMU
	help
	  Select this to see the detailed log messages that show what
	  happens in the IOMMU driver.

	  Say N unless you need kernel log messages for IOMMU debugging.

config SHMOBILE_IPMMU
	bool

config SHMOBILE_IPMMU_TLB
	bool

config SHMOBILE_IOMMU
	bool "IOMMU for Renesas IPMMU/IPMMUI"
	default n
	depends on ARM
	depends on ARCH_SHMOBILE || COMPILE_TEST
	select IOMMU_API
	select ARM_DMA_USE_IOMMU
	select SHMOBILE_IPMMU
	select SHMOBILE_IPMMU_TLB
	help
	  Support for Renesas IPMMU/IPMMUI. This option enables
	  remapping of DMA memory accesses from all of the IP blocks
	  on the ICB.

	  Warning: Drivers (including userspace drivers of UIO
	  devices) of the IP blocks on the ICB *must* use addresses
	  allocated from the IPMMU (iova) for DMA with this option
	  enabled.

	  If unsure, say N.

choice
	prompt "IPMMU/IPMMUI address space size"
	default SHMOBILE_IOMMU_ADDRSIZE_2048MB
	depends on SHMOBILE_IOMMU
	help
	  This option sets the IPMMU/IPMMUI address space size by
	  adjusting the 1st level page table size. The page table size
	  is calculated as follows:

	      page table size = number of page table entries * 4 bytes
	      number of page table entries = address space size / 1 MiB

	  For example, when the address space size is 2048 MiB, the
	  1st level page table size is 8192 bytes.

config SHMOBILE_IOMMU_ADDRSIZE_2048MB
	bool "2 GiB"

config SHMOBILE_IOMMU_ADDRSIZE_1024MB
	bool "1 GiB"

config SHMOBILE_IOMMU_ADDRSIZE_512MB
	bool "512 MiB"

config SHMOBILE_IOMMU_ADDRSIZE_256MB
	bool "256 MiB"

config SHMOBILE_IOMMU_ADDRSIZE_128MB
	bool "128 MiB"

config SHMOBILE_IOMMU_ADDRSIZE_64MB
	bool "64 MiB"

config SHMOBILE_IOMMU_ADDRSIZE_32MB
	bool "32 MiB"

endchoice

config SHMOBILE_IOMMU_L1SIZE
	int
	default 8192 if SHMOBILE_IOMMU_ADDRSIZE_2048MB
	default 4096 if SHMOBILE_IOMMU_ADDRSIZE_1024MB
	default 2048 if SHMOBILE_IOMMU_ADDRSIZE_512MB
	default 1024 if SHMOBILE_IOMMU_ADDRSIZE_256MB
	default 512 if SHMOBILE_IOMMU_ADDRSIZE_128MB
	default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
	default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB

config IPMMU_VMSA
	bool "Renesas VMSA-compatible IPMMU"
	depends on ARM_LPAE
	depends on ARCH_SHMOBILE || COMPILE_TEST
	select IOMMU_API
	select ARM_DMA_USE_IOMMU
	help
	  Support for the Renesas VMSA-compatible IPMMU found in the
	  R-Mobile APE6 and R-Car H2/M2 SoCs.

	  If unsure, say N.

config SPAPR_TCE_IOMMU
	bool "sPAPR TCE IOMMU Support"
	depends on PPC_POWERNV || PPC_PSERIES
	select IOMMU_API
	help
	  Enables bits of the IOMMU API required by VFIO. The iommu_ops
	  is not implemented as it is not necessary for VFIO.

config ARM_SMMU
	bool "ARM Ltd. System MMU (SMMU) Support"
	depends on ARM64 || (ARM_LPAE && OF)
	select IOMMU_API
	select ARM_DMA_USE_IOMMU if ARM
	help
	  Support for implementations of the ARM System MMU architecture
	  versions 1 and 2. The driver supports both v7l and v8l table
	  formats with 4k and 64k page sizes.

	  Say Y here if your SoC includes an IOMMU device implementing
	  the ARM SMMU architecture.

endif # IOMMU_SUPPORT
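The SHMOBILE_IOMMU choice above sizes the 1st-level page table from the selected address space: one 4-byte entry per 1 MiB section, which is exactly how the SHMOBILE_IOMMU_L1SIZE defaults are derived. A minimal standalone C sketch of that arithmetic; the helper name is hypothetical, not part of the driver:

#include <stdio.h>

/* Hypothetical helper mirroring the Kconfig help text above:
 * entries = address space / 1 MiB, 4 bytes per entry. */
static unsigned int ipmmu_l1_table_size(unsigned int addrspace_mib)
{
	return addrspace_mib * 4;	/* bytes */
}

int main(void)
{
	static const unsigned int sizes_mib[] =
		{ 2048, 1024, 512, 256, 128, 64, 32 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes_mib) / sizeof(sizes_mib[0]); i++)
		printf("%4u MiB -> %4u-byte L1 table\n",
		       sizes_mib[i], ipmmu_l1_table_size(sizes_mib[i]));
	return 0;	/* prints 8192 down to 128, matching the L1SIZE defaults */
}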
drivers/iommu/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOVMM) += exynos-iovmm.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
obj-$(CONFIG_EXYNOS_IOMMU_EVENT_LOG) += exynos-iommu-log.o
obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o
obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
drivers/iommu/amd_iommu.c (new file, 4298 lines)
File diff suppressed because it is too large.

drivers/iommu/amd_iommu_init.c (new file, 2404 lines)
File diff suppressed because it is too large.
drivers/iommu/amd_iommu_proto.h (new file, 93 lines)
@@ -0,0 +1,93 @@
/*
 * Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
#define _ASM_X86_AMD_IOMMU_PROTO_H

#include "amd_iommu_types.h"

extern int amd_iommu_init_dma_ops(void);
extern int amd_iommu_init_passthrough(void);
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);

/* Needed for interrupt remapping */
extern int amd_iommu_supported(void);
extern int amd_iommu_prepare(void);
extern int amd_iommu_enable(void);
extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);

/* IOMMUv2 specific functions */
struct iommu_domain;

extern bool amd_iommu_v2_supported(void);
extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
				u64 address);
extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
				     unsigned long cr3);
extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);

/* IOMMU Performance Counter functions */
extern bool amd_iommu_pc_supported(void);
extern u8 amd_iommu_pc_get_max_banks(u16 devid);
extern u8 amd_iommu_pc_get_max_counters(u16 devid);
extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
					u64 *value, bool is_write);

#define PPR_SUCCESS	0x0
#define PPR_INVALID	0x1
#define PPR_FAILURE	0xf

extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
				  int status, int tag);

#ifndef CONFIG_AMD_IOMMU_STATS

static inline void amd_iommu_stats_init(void) { }

#endif /* !CONFIG_AMD_IOMMU_STATS */

static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
	       (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}

static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
{
	if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
		return false;

	return !!(iommu->features & f);
}

#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
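iommu_feature() in the header above only consults the extended-feature word after checking the IOMMU_CAP_EFR capability bit, since those bits are undefined without EFR. The same guard-then-test pattern in a self-contained userspace sketch; the struct and constants here are stand-ins for the kernel types, not the real driver API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_EFR_BIT	27		/* stand-in for IOMMU_CAP_EFR */
#define FEAT_PPR	(1ULL << 1)	/* stand-in for FEATURE_PPR */

struct fake_iommu {			/* stand-in for struct amd_iommu */
	uint32_t cap;
	uint64_t features;
};

static bool feature_supported(const struct fake_iommu *iommu, uint64_t f)
{
	/* Without the EFR capability the features word carries no meaning. */
	if (!(iommu->cap & (1u << CAP_EFR_BIT)))
		return false;
	return !!(iommu->features & f);
}

int main(void)
{
	struct fake_iommu iommu = {
		.cap = 1u << CAP_EFR_BIT,
		.features = FEAT_PPR,
	};
	printf("PPR supported: %d\n",
	       feature_supported(&iommu, FEAT_PPR));	/* prints 1 */
	return 0;
}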
drivers/iommu/amd_iommu_types.h (new file, 746 lines)
@@ -0,0 +1,746 @@
/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS	32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)	(((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)	(((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x)	(((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000

/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Currently, hardware implements only up to 16-bit PASIDs
 * even though the spec says it could have up to 20 bits.
 */
#define PASID_MASK		0x0000ffff

/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK	0xffff
#define EVENT_DOMID_SHIFT	0
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPFLOG_EN	0x0dULL
#define CONTROL_PPFINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED 1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02
#define PAGE_MODE_3_LEVEL	0x03
#define PAGE_MODE_4_LEVEL	0x04
#define PAGE_MODE_5_LEVEL	0x05
#define PAGE_MODE_6_LEVEL	0x06

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
				   (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~(pagesize >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

#define IOMMU_PTE_P	(1ULL << 0)
#define IOMMU_PTE_TV	(1ULL << 1)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(0x01UL << 32)
#define DTE_FLAG_GV	(0x01ULL << 55)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL

#define IOMMU_PAGE_MASK		(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte)	(phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK		0x03
#define IOMMU_PROT_IR		0x01
#define IOMMU_PROT_IW		0x02

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB		24
#define IOMMU_CAP_NPCACHE	26
#define IOMMU_CAP_EFR		27

#define MAX_DOMAIN_ID		65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)					\
	do {								\
		if (amd_iommu_dump)					\
			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

#define MAX_IRQS_PER_TABLE	256
#define IRQ_TABLE_ALIGNMENT	128

struct irq_remap_table {
	spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;	/* IO virtual address of the fault */
	u32 pasid;	/* Address space identifier */
	u16 device_id;	/* Originating PCI device id */
	u16 tag;	/* PPR tag */
	u16 flags;	/* Fault flags */
};

struct iommu_domain;

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head list;		/* for list of all protection domains */
	struct list_head dev_list;	/* List of all devices in this domain */
	spinlock_t lock;		/* mostly used to lock the page table */
	struct mutex api_lock;		/* protect page tables in the iommu-api path */
	u16 id;				/* the domain id written to the device table */
	int mode;			/* paging mode (0-6 levels) */
	u64 *pt_root;			/* page table root pointer */
	int glx;			/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;			/* Guest CR3 table */
	unsigned long flags;		/* flags to find out type of domain */
	bool updated;			/* complete domain flush required */
	unsigned dev_cnt;		/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS];	/* per-IOMMU reference count */
	void *priv;			/* private data */
	struct iommu_domain *iommu_domain; /* Pointer to generic
					      domain structure */
};

/*
 * For dynamic growth the aperture size is split into ranges of 128MB of
 * DMA address space each. This struct represents one such range.
 */
struct aperture_range {

	/* address allocation bitmap */
	unsigned long *bitmap;

	/*
	 * Array of PTE pages for the aperture. In this array we save all the
	 * leaf pages of the domain page table used for the aperture. This way
	 * we don't need to walk the page table to find a specific PTE. We can
	 * just calculate its address in constant time.
	 */
	u64 *pte_pages[64];

	unsigned long offset;
};

/*
 * Data container for a dma_ops specific protection domain
 */
struct dma_ops_domain {
	struct list_head list;

	/* generic protection domain information */
	struct protection_domain domain;

	/* size of the aperture for the mappings */
	unsigned long aperture_size;

	/* address we start to search for free addresses */
	unsigned long next_address;

	/* address space relevant data */
	struct aperture_range *aperture[APERTURE_MAX_RANGES];

	/* This will be set to true when TLB needs to be flushed */
	bool need_flush;

	/*
	 * if this is a preallocated domain, keep the device for which it was
	 * preallocated in this variable
	 */
	u16 target_dev;
};

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there are more than one AMD IOMMU capability
	 * pointers.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* first device this IOMMU handles. read from PCI */
	u16 first_device;
	/* last device this IOMMU handles. read from PCI */
	u16 last_device;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	/* size of command buffer */
	u32 cmd_buf_size;

	/* size of event buffer */
	u32 evt_buf_size;
	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;

	/* IOMMU sysfs device */
	struct device *iommu_dev;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
extern int amd_iommus_present;

/*
 * Declarations for the global list of all protection domains
 */
extern spinlock_t amd_iommu_pd_lock;
extern struct list_head amd_iommu_pd_list;

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (including) */
	u16 devid_start;
	/* end device id this entry is used for (including) */
	u16 devid_end;

	/* start address to unity map (including) */
	u64 address_start;
	/* end address to unity map (including) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern u32 amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

#ifdef CONFIG_AMD_IOMMU_STATS

struct __iommu_counter {
	char *name;
	struct dentry *dent;
	u64 value;
};

#define DECLARE_STATS_COUNTER(nm) \
	static struct __iommu_counter nm = {	\
		.name = #nm,			\
	}

#define INC_STATS_COUNTER(name)		name.value += 1
#define ADD_STATS_COUNTER(name, x)	name.value += (x)
#define SUB_STATS_COUNTER(name, x)	name.value -= (x)

#else /* CONFIG_AMD_IOMMU_STATS */

#define DECLARE_STATS_COUNTER(name)
#define INC_STATS_COUNTER(name)
#define ADD_STATS_COUNTER(name, x)
#define SUB_STATS_COUNTER(name, x)

#endif /* CONFIG_AMD_IOMMU_STATS */

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
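The page-table macros in amd_iommu_types.h encode the AMD IOMMU's 9-bits-per-level layout: PAGE_SIZE_LEVEL() maps a power-of-two page size to the level that maps it, and PAGE_SIZE_PTE_COUNT() gives how many PTEs a non-native size consumes at that level. A standalone sketch of just that arithmetic, with __builtin_ctzll() standing in for the kernel's __ffs() (sizes assumed to be powers of two >= 4 KiB, per the macros' stated expectations):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's __ffs(): lowest set bit index. */
static unsigned long my_ffs(uint64_t x) { return __builtin_ctzll(x); }

#define PAGE_SIZE_LEVEL(ps)	((my_ffs(ps) - 12) / 9)
#define PAGE_SIZE_PTE_COUNT(ps)	(1ULL << ((my_ffs(ps) - 12) % 9))

int main(void)
{
	static const uint64_t sizes[] =
		{ 4096ULL, 2ULL << 20, 32ULL << 20, 1ULL << 30 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("%10llu bytes -> level %lu, %llu PTE(s)\n",
		       (unsigned long long)sizes[i],
		       PAGE_SIZE_LEVEL(sizes[i]),
		       (unsigned long long)PAGE_SIZE_PTE_COUNT(sizes[i]));
	/* 4 KiB -> level 0, 1 PTE;  2 MiB -> level 1, 1 PTE;
	 * 32 MiB -> level 1, 16 PTEs;  1 GiB -> level 2, 1 PTE. */
	return 0;
}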
drivers/iommu/amd_iommu_v2.c (new file, 1007 lines)
File diff suppressed because it is too large.

drivers/iommu/arm-smmu.c (new file, 2108 lines)
File diff suppressed because it is too large.

drivers/iommu/dmar.c (new file, 1686 lines)
File diff suppressed because it is too large.
drivers/iommu/exynos-iommu-log.c (new file, 232 lines)
@@ -0,0 +1,232 @@
/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Data structure definition for Exynos IOMMU driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>

#include "exynos-iommu-log.h"

int exynos_iommu_init_event_log(struct exynos_iommu_event_log *log,
				unsigned int log_len)
{
	struct page *page;
	int i, order;
	size_t fit_size = PAGE_ALIGN(sizeof(*(log->log)) * log_len);
	int fit_pages = fit_size / PAGE_SIZE;

	/* log_len must be power of 2 */
	BUG_ON((log_len - 1) & log_len);

	atomic_set(&log->index, 0);
	order = get_order(fit_size);
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	if ((1 << order) > fit_pages) {
		int extra = (1 << order) - fit_pages;

		for (i = 0; i < extra; i++)
			__free_pages(page + fit_pages + i, 0);
	}

	log->log = page_address(page);
	log->log_len = log_len;

	return 0;
}

static const char * const sysmmu_event_name[] = {
	"n/a", /* not an event */
	"ENABLE",
	"DISABLE",
	"TLB_INV_RANGE",
	"TLB_INV_VPN",
	"TLB_INV_ALL",
	"FLPD_FLUSH",
	"DF",
	"DF_UNLOCK",
	"DF_UNLOCK_ALL",
	"PBLMM",
	"PBSET",
	"BLOCK",
	"UNBLOCK",
#ifdef CONFIG_PM_RUNTIME
	"POWERON",
	"POWEROFF",
#endif
	"IOMMU_ATTACH",
	"IOMMU_DETACH",
	"IOMMU_MAP",
	"IOMMU_UNMAP",
	"IOMMU_ALLOCSLPD",
	"IOMMU_FREESLPD",
	"IOVMM_MAP",
	"IOVMM_UNMAP"
};

static void exynos_iommu_debug_log_show(struct seq_file *s,
					struct sysmmu_event_log *log)
{
	struct timeval tv = ktime_to_timeval(log->timestamp);

	if (log->event == EVENT_SYSMMU_NONE)
		return;

	seq_printf(s, "%06ld.%06ld: %15s", tv.tv_sec, tv.tv_usec,
		   sysmmu_event_name[log->event]);

	switch (log->event) {
	case EVENT_SYSMMU_ENABLE:
	case EVENT_SYSMMU_DISABLE:
	case EVENT_SYSMMU_TLB_INV_ALL:
	case EVENT_SYSMMU_DF_UNLOCK_ALL:
	case EVENT_SYSMMU_BLOCK:
	case EVENT_SYSMMU_UNBLOCK:
#ifdef CONFIG_PM_RUNTIME
	case EVENT_SYSMMU_POWERON:
	case EVENT_SYSMMU_POWEROFF:
#endif
		seq_puts(s, "\n");
		break;
	case EVENT_SYSMMU_TLB_INV_VPN:
	case EVENT_SYSMMU_FLPD_FLUSH:
	case EVENT_SYSMMU_DF:
	case EVENT_SYSMMU_DF_UNLOCK:
	case EVENT_SYSMMU_IOMMU_ALLOCSLPD:
	case EVENT_SYSMMU_IOMMU_FREESLPD:
		seq_printf(s, " @ %#010x\n", log->eventdata.addr);
		break;
	case EVENT_SYSMMU_TLB_INV_RANGE:
	case EVENT_SYSMMU_IOMMU_UNMAP:
	case EVENT_SYSMMU_IOVMM_UNMAP:
		seq_printf(s, " @ [%#010x, %#010x)\n",
			   log->eventdata.range.start,
			   log->eventdata.range.end);
		break;
	case EVENT_SYSMMU_PBLMM:
		seq_printf(s, " -> LMM %u, BUF_NUM %u\n",
			   log->eventdata.pblmm.lmm,
			   log->eventdata.pblmm.buf_num);
		break;
	case EVENT_SYSMMU_PBSET:
		seq_printf(s, " with %#010x, [%#010x, %#010x]\n",
			   log->eventdata.pbset.config,
			   log->eventdata.pbset.start,
			   log->eventdata.pbset.end);
		break;
	case EVENT_SYSMMU_IOVMM_MAP:
		seq_printf(s, " [%#010x, %#010x(+%#x))\n",
			   log->eventdata.iovmm.start,
			   log->eventdata.iovmm.end,
			   log->eventdata.iovmm.dummy);
		break;
	case EVENT_SYSMMU_IOMMU_MAP:
		seq_printf(s, " [%#010x, %#010x) for PFN %#x\n",
			   log->eventdata.iommu.start,
			   log->eventdata.iommu.end,
			   log->eventdata.iommu.pfn);
		break;
	case EVENT_SYSMMU_IOMMU_ATTACH:
	case EVENT_SYSMMU_IOMMU_DETACH:
		seq_printf(s, " of %s\n", dev_name(log->eventdata.dev));
		break;
	default:
		BUG();
	}
}

static int exynos_iommu_debugfs_log_show(struct seq_file *s, void *unused)
{
	struct exynos_iommu_event_log *plog = s->private;
	unsigned int index = atomic_read(&plog->index) % plog->log_len;
	unsigned int begin = index;

	do {
		exynos_iommu_debug_log_show(s, &plog->log[index++]);
		if (index == plog->log_len)
			index = 0;
	} while (index != begin);

	return 0;
}

static int exynos_iommu_debugfs_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, exynos_iommu_debugfs_log_show,
			   inode->i_private);
}

#define SYSMMU_DENTRY_LOG_ROOT_NAME "eventlog"
static struct dentry *sysmmu_debugfs_log_root;
static struct dentry *iommu_debugfs_log_root;

static const struct file_operations exynos_iommu_debugfs_fops = {
	.open = exynos_iommu_debugfs_log_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void __sysmmu_add_log_to_debugfs(struct dentry *debugfs_root,
		struct dentry **debugfs_eventlog_root,
		struct exynos_iommu_event_log *log, const char *name)
{
	if (!debugfs_root)
		return;

	if (!(*debugfs_eventlog_root)) {
		*debugfs_eventlog_root = debugfs_create_dir(
				SYSMMU_DENTRY_LOG_ROOT_NAME, debugfs_root);
		if (!(*debugfs_eventlog_root)) {
			pr_err("%s: Failed to create 'eventlog' entry\n",
				__func__);
			return;
		}
	}

	log->debugfs_root = debugfs_create_file(name, 0400,
			*debugfs_eventlog_root, log,
			&exynos_iommu_debugfs_fops);
	if (!log->debugfs_root)
		pr_err("%s: Failed to create '%s' entry of 'eventlog'\n",
			__func__, name);
}

void sysmmu_add_log_to_debugfs(struct dentry *debugfs_root,
		struct exynos_iommu_event_log *log, const char *name)
{
	__sysmmu_add_log_to_debugfs(debugfs_root, &sysmmu_debugfs_log_root,
			log, name);
}

void iommu_add_log_to_debugfs(struct dentry *debugfs_root,
		struct exynos_iommu_event_log *log, const char *name)
{
	__sysmmu_add_log_to_debugfs(debugfs_root, &iommu_debugfs_log_root,
			log, name);
}

#if defined(CONFIG_EXYNOS_IOVMM)
static struct dentry *iovmm_debugfs_log_root;

void iovmm_add_log_to_debugfs(struct dentry *debugfs_root,
		struct exynos_iommu_event_log *log, const char *name)
{
	__sysmmu_add_log_to_debugfs(debugfs_root, &iovmm_debugfs_log_root,
			log, name);
}
#endif
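exynos_iommu_init_event_log() above rounds the log buffer up to the next power-of-two page order, then uses split_page() so the surplus tail pages can be handed back one by one with __free_pages(). A userspace sketch of just the sizing arithmetic, with get_order() and the constants reimplemented here for illustration:

#include <stdio.h>

#define PAGE_SZ		4096UL
#define LOG_ENTRY_SZ	24UL	/* sizeof(struct sysmmu_event_log), per the header */

/* Stand-in for the kernel's get_order(): smallest order with
 * (PAGE_SZ << order) >= size. */
static int order_for(unsigned long size)
{
	int order = 0;

	while ((PAGE_SZ << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long log_len = 4096;	/* IOMMU_LOG_LEN */
	unsigned long fit_size = ((LOG_ENTRY_SZ * log_len + PAGE_SZ - 1)
				  / PAGE_SZ) * PAGE_SZ;	/* PAGE_ALIGN() */
	unsigned long fit_pages = fit_size / PAGE_SZ;
	int order = order_for(fit_size);
	unsigned long extra = (1UL << order) - fit_pages;

	/* 4096 entries * 24 B = 24 pages; order 5 allocates 32 pages, so 8
	 * tail pages go back to the allocator via __free_pages(). */
	printf("fit_pages=%lu order=%d extra=%lu\n", fit_pages, order, extra);
	return 0;
}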
328
drivers/iommu/exynos-iommu-log.h
Normal file
328
drivers/iommu/exynos-iommu-log.h
Normal file
|
@ -0,0 +1,328 @@
|
|||
/*
|
||||
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
|
||||
* http://www.samsung.com
|
||||
*
|
||||
* Data structure definition for Exynos IOMMU driver
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef _EXYNOS_IOMMU_LOG_H_
|
||||
#define _EXYNOS_IOMMU_LOG_H_
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/device.h>
|
||||
|
||||
enum sysmmu_event_log_event {
|
||||
EVENT_SYSMMU_NONE, /* initialized value */
|
||||
EVENT_SYSMMU_ENABLE,
|
||||
EVENT_SYSMMU_DISABLE,
|
||||
EVENT_SYSMMU_TLB_INV_RANGE,
|
||||
EVENT_SYSMMU_TLB_INV_VPN,
|
||||
EVENT_SYSMMU_TLB_INV_ALL,
|
||||
EVENT_SYSMMU_FLPD_FLUSH,
|
||||
EVENT_SYSMMU_DF,
|
||||
EVENT_SYSMMU_DF_UNLOCK,
|
||||
EVENT_SYSMMU_DF_UNLOCK_ALL,
|
||||
EVENT_SYSMMU_PBLMM,
|
||||
EVENT_SYSMMU_PBSET,
|
||||
EVENT_SYSMMU_BLOCK, /* TODO: consider later */
|
||||
EVENT_SYSMMU_UNBLOCK, /* TODO: consider later */
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
EVENT_SYSMMU_POWERON,
|
||||
EVENT_SYSMMU_POWEROFF,
|
||||
#endif
|
||||
EVENT_SYSMMU_IOMMU_ATTACH,
|
||||
EVENT_SYSMMU_IOMMU_DETACH,
|
||||
EVENT_SYSMMU_IOMMU_MAP,
|
||||
EVENT_SYSMMU_IOMMU_UNMAP,
|
||||
EVENT_SYSMMU_IOMMU_ALLOCSLPD,
|
||||
EVENT_SYSMMU_IOMMU_FREESLPD,
|
||||
EVENT_SYSMMU_IOVMM_MAP,
|
||||
EVENT_SYSMMU_IOVMM_UNMAP
|
||||
};
|
||||
|
||||
struct sysmmu_event_range {
|
||||
u32 start;
|
||||
u32 end;
|
||||
};
|
||||
|
||||
struct sysmmu_event_PBLMM {
|
||||
u32 lmm;
|
||||
u32 buf_num;
|
||||
};
|
||||
struct sysmmu_event_PBSET {
|
||||
u32 config;
|
||||
u32 start;
|
||||
u32 end;
|
||||
};
|
||||
|
||||
struct sysmmu_event_IOMMU_MAP {
|
||||
u32 start;
|
||||
u32 end;
|
||||
unsigned int pfn;
|
||||
};
|
||||
|
||||
struct sysmmu_event_IOVMM_MAP {
|
||||
u32 start;
|
||||
u32 end;
|
||||
unsigned int dummy;
|
||||
};
|
||||
|
||||
/**
|
||||
* event must be updated before eventdata because of eventdata.dev
|
||||
* sysmmu_event_log is not protected by any locks. That means it permits
|
||||
* some data inconsistency by race condition between updating and reading.
|
||||
* However the problem arises when event is either IOMMU_ATTACH or
|
||||
* IOMMU_DETACH because they stores a pointer to device descriptor to
|
||||
* eventdata.dev and reading the sysmmu_event_log of those events refers
|
||||
* to values pointed by eventdata.dev.
|
||||
* Therefore, eventdata must be updated before event not to access invalid
|
||||
* pointer by reading debugfs entries.
|
||||
*/
|
||||
struct sysmmu_event_log {
|
||||
ktime_t timestamp;
|
||||
union {
|
||||
struct sysmmu_event_range range;
|
||||
struct sysmmu_event_PBLMM pblmm;
|
||||
struct sysmmu_event_PBSET pbset;
|
||||
struct sysmmu_event_IOMMU_MAP iommu;
|
||||
struct sysmmu_event_IOVMM_MAP iovmm;
|
||||
u32 addr;
|
||||
struct device *dev;
|
||||
} eventdata;
|
||||
enum sysmmu_event_log_event event;
|
||||
};
|
||||
|
||||
struct exynos_iommu_event_log {
|
||||
atomic_t index;
|
||||
unsigned int log_len;
|
||||
struct sysmmu_event_log *log;
|
||||
struct dentry *debugfs_root;
|
||||
};
|
||||
|
||||
/* sizeof(struct sysmmu_event_log) = 8 + 4 * 3 + 4 = 24 bytes */
|
||||
#define SYSMMU_LOG_LEN 1024
|
||||
#define IOMMU_LOG_LEN 4096
|
||||
#define IOVMM_LOG_LEN 512
|
||||
|
||||
#ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
|
||||
|
||||
#define SYSMMU_DRVDATA_TO_LOG(data) (&(data)->log)
|
||||
#define IOMMU_PRIV_TO_LOG(data) (&(data)->log)
|
||||
#define IOMMU_TO_LOG(data) (&((struct exynos_iommu_domain *)(data)->priv)->log)
|
||||
#define IOVMM_TO_LOG(data) (&(data)->log)
|
||||
|
||||
static inline struct sysmmu_event_log *sysmmu_event_log_get(
|
||||
struct exynos_iommu_event_log *plog)
|
||||
{
|
||||
struct sysmmu_event_log *log;
|
||||
unsigned int index =
|
||||
(unsigned int)atomic_inc_return(&plog->index) - 1;
|
||||
log = &plog->log[index % plog->log_len];
|
||||
log->timestamp = ktime_get();
|
||||
return log;
|
||||
}
|
||||
|
||||
#define DEFINE_SYSMMU_EVENT_LOG(evt) \
|
||||
static inline void SYSMMU_EVENT_LOG_##evt(struct exynos_iommu_event_log *plog) \
|
||||
{ \
|
||||
struct sysmmu_event_log *log = sysmmu_event_log_get(plog); \
|
||||
log->event = EVENT_SYSMMU_##evt; \
|
||||
}
|
||||
|
||||
#define DEFINE_SYSMMU_EVENT_LOG_1ADDR(evt) \
|
||||
static inline void SYSMMU_EVENT_LOG_##evt( \
|
||||
struct exynos_iommu_event_log *plog, u32 addr) \
|
||||
{ \
|
||||
struct sysmmu_event_log *log = sysmmu_event_log_get(plog); \
|
||||
log->eventdata.addr = addr; \
|
||||
log->event = EVENT_SYSMMU_##evt; \
|
||||
}
|
||||
|
||||
#define DEFINE_SYSMMU_EVENT_LOG_2ADDR(evt) \
|
||||
static inline void SYSMMU_EVENT_LOG_##evt(struct exynos_iommu_event_log *plog, \
|
||||
u32 start, u32 end) \
|
||||
{ \
|
||||
struct sysmmu_event_log *log = sysmmu_event_log_get(plog); \
|
||||
log->eventdata.range.start = start; \
|
||||
log->eventdata.range.end = end; \
|
||||
log->event = EVENT_SYSMMU_##evt; \
|
||||
}
|
||||

/* MMU_CFG is stored at pblmm.lmm for System MMU 3.1 and 3.2 */
static inline void SYSMMU_EVENT_LOG_PBLMM(struct exynos_iommu_event_log *plog,
					  u32 lmm, u32 buf_num)
{
	struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
	log->eventdata.pblmm.lmm = lmm;
	log->eventdata.pblmm.buf_num = buf_num;
	log->event = EVENT_SYSMMU_PBLMM;
}

/* PB index is stored at pbset.config for System MMU 3.1 and 3.2 */
static inline void SYSMMU_EVENT_LOG_PBSET(struct exynos_iommu_event_log *plog,
					  u32 config, u32 start, u32 end)
{
	struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
	log->eventdata.pbset.config = config;
	log->eventdata.pbset.start = start;
	log->eventdata.pbset.end = end;
	log->event = EVENT_SYSMMU_PBSET;
}

static inline void SYSMMU_EVENT_LOG_IOVMM_MAP(
			struct exynos_iommu_event_log *plog,
			u32 start, u32 end, unsigned int dummy)
{
	struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
	log->eventdata.iovmm.start = start;
	log->eventdata.iovmm.end = end;
	log->eventdata.iovmm.dummy = dummy;
	log->event = EVENT_SYSMMU_IOVMM_MAP;
}

static inline void SYSMMU_EVENT_LOG_IOMMU_ATTACH(
		struct exynos_iommu_event_log *plog, struct device *dev)
{
	struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
	log->eventdata.dev = dev;
	log->event = EVENT_SYSMMU_IOMMU_ATTACH;
}

static inline void SYSMMU_EVENT_LOG_IOMMU_DETACH(
		struct exynos_iommu_event_log *plog, struct device *dev)
{
	struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
	log->eventdata.dev = dev;
	log->event = EVENT_SYSMMU_IOMMU_DETACH;
}

static inline void SYSMMU_EVENT_LOG_IOMMU_MAP(
			struct exynos_iommu_event_log *plog,
			u32 start, u32 end, unsigned int pfn)
{
	struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
	log->event = EVENT_SYSMMU_IOMMU_MAP;
	log->eventdata.iommu.start = start;
	log->eventdata.iommu.end = end;
	log->eventdata.iommu.pfn = pfn;
}

int exynos_iommu_init_event_log(struct exynos_iommu_event_log *log,
				unsigned int log_len);

void sysmmu_add_log_to_debugfs(struct dentry *debugfs_root,
		struct exynos_iommu_event_log *log, const char *name);

void iommu_add_log_to_debugfs(struct dentry *debugfs_root,
		struct exynos_iommu_event_log *log, const char *name);

#if defined(CONFIG_EXYNOS_IOVMM)
void iovmm_add_log_to_debugfs(struct dentry *debugfs_root,
		struct exynos_iommu_event_log *log, const char *name);
#else
#define iovmm_add_log_to_debugfs(debugfs_root, log, name) do { } while (0)
#endif

#else /* !CONFIG_EXYNOS_IOMMU_EVENT_LOG */

#define SYSMMU_DRVDATA_TO_LOG(data) NULL
#define IOMMU_PRIV_TO_LOG(data) NULL
#define IOMMU_TO_LOG(data) NULL
#define IOVMM_TO_LOG(data) NULL

static inline int exynos_iommu_init_event_log(
		struct exynos_iommu_event_log *log, unsigned int log_len)
{
	return 0;
}

#define iovmm_add_log_to_debugfs(debugfs_root, log, name) do { } while (0)

#define DEFINE_SYSMMU_EVENT_LOG(event) \
static inline void SYSMMU_EVENT_LOG_##event( \
			struct exynos_iommu_event_log *plog) \
{ \
}

#define DEFINE_SYSMMU_EVENT_LOG_1ADDR(event) \
static inline void SYSMMU_EVENT_LOG_##event( \
			struct exynos_iommu_event_log *plog, u32 start) \
{ \
}

#define DEFINE_SYSMMU_EVENT_LOG_2ADDR(event) \
static inline void SYSMMU_EVENT_LOG_##event( \
			struct exynos_iommu_event_log *plog, u32 start, u32 end) \
{ \
}

static inline void SYSMMU_EVENT_LOG_PBLMM(struct exynos_iommu_event_log *plog,
					  u32 lmm, u32 buf_num)
{
}

static inline void SYSMMU_EVENT_LOG_PBSET(struct exynos_iommu_event_log *plog,
					  u32 config, u32 start, u32 end)
{
}

static inline void SYSMMU_EVENT_LOG_IOMMU_MAP(
			struct exynos_iommu_event_log *plog,
			u32 start, u32 end, unsigned int pfn)
{
}

/* note: 'dummy' is unsigned int here to match the enabled variant above */
static inline void SYSMMU_EVENT_LOG_IOVMM_MAP(
			struct exynos_iommu_event_log *plog,
			u32 start, u32 end, unsigned int dummy)
{
}

static inline void SYSMMU_EVENT_LOG_IOMMU_ATTACH(
		struct exynos_iommu_event_log *plog, struct device *dev)
{
}

static inline void SYSMMU_EVENT_LOG_IOMMU_DETACH(
		struct exynos_iommu_event_log *plog, struct device *dev)
{
}

#define sysmmu_add_log_to_debugfs(debugfs_root, log, name) do { } while (0)
#define iommu_add_log_to_debugfs(debugfs_root, log, name) do { } while (0)
#define iovmm_add_log_to_debugfs(debugfs_root, log, name) do { } while (0)

#endif /* CONFIG_EXYNOS_IOMMU_EVENT_LOG */

DEFINE_SYSMMU_EVENT_LOG(ENABLE)
DEFINE_SYSMMU_EVENT_LOG(DISABLE)
DEFINE_SYSMMU_EVENT_LOG(TLB_INV_ALL)
DEFINE_SYSMMU_EVENT_LOG(DF_UNLOCK_ALL)
DEFINE_SYSMMU_EVENT_LOG(BLOCK)
DEFINE_SYSMMU_EVENT_LOG(UNBLOCK)
#ifdef CONFIG_PM_RUNTIME
DEFINE_SYSMMU_EVENT_LOG(POWERON)
DEFINE_SYSMMU_EVENT_LOG(POWEROFF)
#endif

DEFINE_SYSMMU_EVENT_LOG_1ADDR(TLB_INV_VPN)
DEFINE_SYSMMU_EVENT_LOG_1ADDR(FLPD_FLUSH)
DEFINE_SYSMMU_EVENT_LOG_1ADDR(DF)
DEFINE_SYSMMU_EVENT_LOG_1ADDR(DF_UNLOCK)
DEFINE_SYSMMU_EVENT_LOG_1ADDR(IOMMU_ALLOCSLPD)
DEFINE_SYSMMU_EVENT_LOG_1ADDR(IOMMU_FREESLPD)

DEFINE_SYSMMU_EVENT_LOG_2ADDR(TLB_INV_RANGE)
DEFINE_SYSMMU_EVENT_LOG_2ADDR(IOMMU_UNMAP)
DEFINE_SYSMMU_EVENT_LOG_2ADDR(IOVMM_UNMAP)

#endif /*_EXYNOS_IOMMU_LOG_H_*/
3594
drivers/iommu/exynos-iommu.c
Normal file
File diff suppressed because it is too large
480
drivers/iommu/exynos-iommu.h
Normal file
@ -0,0 +1,480 @@
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Data structure definition for Exynos IOMMU driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _EXYNOS_IOMMU_H_
#define _EXYNOS_IOMMU_H_

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/clk.h>

#include <linux/exynos_iovmm.h>

#include "exynos-iommu-log.h"

#define TRACE_LOG(...) do { } while (0) /* trace_printk */
#define TRACE_LOG_DEV(dev, fmt, args...) \
		TRACE_LOG("%s: " fmt, dev_name(dev), ##args)

#define MODULE_NAME "exynos-sysmmu"

#define IOVA_START 0x10000000
#define IOVM_SIZE (SZ_2G + SZ_1G + SZ_256M) /* last 4K is for error values */

#define IOVM_NUM_PAGES(vmsize) (vmsize / PAGE_SIZE)
#define IOVM_BITMAP_SIZE(vmsize) \
		((IOVM_NUM_PAGES(vmsize) + BITS_PER_BYTE) / BITS_PER_BYTE)

#define SPSECT_ORDER 24
#define DSECT_ORDER 21
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SPSECT_SIZE (1 << SPSECT_ORDER)
#define DSECT_SIZE (1 << DSECT_ORDER)
#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)
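
/*
 * Editor's note: with the orders above these sizes work out to
 * SPSECT 16 MiB, DSECT 2 MiB, SECT 1 MiB, LPAGE 64 KiB and SPAGE 4 KiB,
 * i.e. the five mapping granularities supported by the System MMU page table.
 */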

#define SPSECT_MASK ~(SPSECT_SIZE - 1)
#define DSECT_MASK ~(DSECT_SIZE - 1)
#define SECT_MASK ~(SECT_SIZE - 1)
#define LPAGE_MASK ~(LPAGE_SIZE - 1)
#define SPAGE_MASK ~(SPAGE_SIZE - 1)

#define SPSECT_ENT_MASK ~((SPSECT_SIZE >> PG_ENT_SHIFT) - 1)
#define DSECT_ENT_MASK ~((DSECT_SIZE >> PG_ENT_SHIFT) - 1)
#define SECT_ENT_MASK ~((SECT_SIZE >> PG_ENT_SHIFT) - 1)
#define LPAGE_ENT_MASK ~((LPAGE_SIZE >> PG_ENT_SHIFT) - 1)
#define SPAGE_ENT_MASK ~((SPAGE_SIZE >> PG_ENT_SHIFT) - 1)

#define SECT_PER_SPSECT (SPSECT_SIZE / SECT_SIZE)
#define SECT_PER_DSECT (DSECT_SIZE / SECT_SIZE)
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define PGBASE_TO_PHYS(pgent) ((phys_addr_t)(pgent) << PG_ENT_SHIFT)

#define MAX_NUM_PBUF 6
#define MAX_NUM_PLANE 6

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
#define lv2ent_offset(iova) ((iova & ~SECT_MASK) >> SPAGE_ORDER)
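
/*
 * Editor's note, a worked example of the two-level index split: for
 * iova = 0x12345678, lv1ent_offset() yields 0x123 (the 1 MiB section) and
 * lv2ent_offset() yields 0x45 (the 4 KiB page within that section); the
 * remaining 0x678 is the in-page offset.
 */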

typedef u32 sysmmu_pte_t;

#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define ENT_TO_PHYS(ent) (phys_addr_t)(*(ent))
#define spsection_phys(sent) PGBASE_TO_PHYS(ENT_TO_PHYS(sent) & SPSECT_ENT_MASK)
#define spsection_offs(iova) ((iova) & (SPSECT_SIZE - 1))
#define section_phys(sent) PGBASE_TO_PHYS(ENT_TO_PHYS(sent) & SECT_ENT_MASK)
#define section_offs(iova) ((iova) & (SECT_SIZE - 1))
#define lpage_phys(pent) PGBASE_TO_PHYS(ENT_TO_PHYS(pent) & LPAGE_ENT_MASK)
#define lpage_offs(iova) ((iova) & (LPAGE_SIZE - 1))
#define spage_phys(pent) PGBASE_TO_PHYS(ENT_TO_PHYS(pent) & SPAGE_ENT_MASK)
#define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))

#define lv2table_base(sent) ((phys_addr_t)(*(sent) & ~0x3F) << PG_ENT_SHIFT)

#define SYSMMU_BLOCK_POLLING_COUNT 4096

#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034

#define CTRL_ENABLE 0x5
#define CTRL_BLOCK 0x7
#define CTRL_DISABLE 0x0
#define CTRL_BLOCK_DISABLE 0x3

#define CFG_ACGEN (1 << 24) /* System MMU 3.3+ */
#define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ */
#define CFG_SHAREABLE (1 << 12) /* System MMU 3.0+ */
#define CFG_QOS_OVRRIDE (1 << 11) /* System MMU 3.3+ */
#define CFG_QOS(n) (((n) & 0xF) << 7)

/*
 * Metadata attached to the owner device of a group of System MMUs.
 */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct exynos_iommu_owner *next; /* linked list of Owners */
	void *vmm_data; /* IO virtual memory manager's data */
	spinlock_t lock; /* Lock to preserve consistency of System MMU */
	struct list_head mmu_list; /* head of sysmmu_list_data.node */
	struct notifier_block nb;
	iommu_fault_handler_t fault_handler;
	void *token;
};

struct exynos_vm_region {
	struct list_head node;
	u32 start;
	u32 size;
	u32 section_off;
	u32 dummy_size;
};

struct exynos_iovmm {
	struct iommu_domain *domain; /* iommu domain for this iovmm */
	size_t iovm_size[MAX_NUM_PLANE]; /* iovm bitmap size per plane */
	u32 iova_start[MAX_NUM_PLANE]; /* iovm start address per plane */
	unsigned long *vm_map[MAX_NUM_PLANE]; /* iovm bitmap per plane */
	struct list_head regions_list; /* list of exynos_vm_region */
	spinlock_t vmlist_lock; /* lock for updating regions_list */
	spinlock_t bitmap_lock; /* lock for manipulating bitmaps */
	struct device *dev; /* peripheral device that has this iovmm */
	size_t allocated_size[MAX_NUM_PLANE];
	int num_areas[MAX_NUM_PLANE];
	int inplanes;
	int onplanes;
	unsigned int num_map;
	unsigned int num_unmap;
	const char *domain_name;
#ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
	struct exynos_iommu_event_log log;
#endif
};

void exynos_sysmmu_tlb_invalidate(struct iommu_domain *domain, dma_addr_t start,
				  size_t size);

#define SYSMMU_FAULT_WRITE (1 << SYSMMU_FAULTS_NUM)

enum sysmmu_property {
	SYSMMU_PROP_RESERVED,
	SYSMMU_PROP_READ,
	SYSMMU_PROP_WRITE,
	SYSMMU_PROP_READWRITE = SYSMMU_PROP_READ | SYSMMU_PROP_WRITE,
	SYSMMU_PROP_RW_MASK = SYSMMU_PROP_READWRITE,
	SYSMMU_PROP_NONBLOCK_TLBINV = 0x10,
	SYSMMU_PROP_STOP_BLOCK = 0x20,
	SYSMMU_PROP_DISABLE_ACG = 0x40,
	SYSMMU_PROP_WINDOW_SHIFT = 16,
	SYSMMU_PROP_WINDOW_MASK = 0x1F << SYSMMU_PROP_WINDOW_SHIFT,
};

enum sysmmu_clock_ids {
	SYSMMU_ACLK,
	SYSMMU_PCLK,
	SYSMMU_CLK_NUM,
};

/*
 * Metadata attached to each System MMU device.
 */
struct sysmmu_drvdata {
	struct list_head node;	/* entry of exynos_iommu_owner.mmu_list */
	struct list_head pb_grp_list;	/* list of pb groups */
	struct sysmmu_drvdata *next;	/* linked list of System MMU */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Client device that needs System MMU */
	void __iomem *sfrbase;
	struct clk *clocks[SYSMMU_CLK_NUM];
	int activations;
	struct iommu_domain *domain; /* domain given to iommu_attach_device() */
	phys_addr_t pgtable;
	spinlock_t lock;
	struct sysmmu_prefbuf pbufs[MAX_NUM_PBUF];
	short qos;
	int runtime_active;
	enum sysmmu_property prop; /* mach/sysmmu.h */
#ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
	struct exynos_iommu_event_log log;
#endif
	struct atomic_notifier_head fault_notifiers;
	unsigned char event_cnt;
	struct _tlbprops {
		u32 axid;
		u32 attr;
	} *tlbprops;
	u32 props_num;
	u32 hw_ver;
	u32 securebase;
	bool is_suspended;
};

struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
	atomic_t *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
#ifdef CONFIG_EXYNOS_IOMMU_EVENT_LOG
	struct exynos_iommu_event_log log;
#endif
};

struct pb_info {
	struct list_head node;
	int ar_id_num;
	int aw_id_num;
	int grp_num;
	int ar_axi_id[MAX_NUM_PBUF];
	int aw_axi_id[MAX_NUM_PBUF];
	struct device *master;
	enum sysmmu_property prop;
};

int sysmmu_set_ppc_event(struct sysmmu_drvdata *drvdata, int event);
void dump_sysmmu_ppc_cnt(struct sysmmu_drvdata *drvdata);
extern const char *ppc_event_name[];

#define REG_PPC_EVENT_SEL(x) (0x600 + 0x4 * (x))
#define REG_PPC_PMNC 0x620
#define REG_PPC_CNTENS 0x624
#define REG_PPC_CNTENC 0x628
#define REG_PPC_INTENS 0x62C
#define REG_PPC_INTENC 0x630
#define REG_PPC_FLAG 0x634
#define REG_PPC_CCNT 0x640
#define REG_PPC_PMCNT(x) (0x644 + 0x4 * (x))

#define SYSMMU_OF_COMPAT_STRING "samsung,exynos5430-sysmmu"
#define DEFAULT_QOS_VALUE -1 /* Inherited from master */
#define PG_ENT_SHIFT 4 /* 36bit PA, 32bit VA */
#define lv1ent_fault(sent) ((*(sent) & 7) == 0)
#define lv1ent_page(sent) ((*(sent) & 7) == 1)

#define FLPD_FLAG_MASK 7
#define SLPD_FLAG_MASK 3

#define SPSECT_FLAG 6
#define DSECT_FLAG 4
#define SECT_FLAG 2
#define SLPD_FLAG 1

#define LPAGE_FLAG 1
#define SPAGE_FLAG 2

#define lv1ent_section(sent) ((*(sent) & FLPD_FLAG_MASK) == SECT_FLAG)
#define lv1ent_dsection(sent) ((*(sent) & FLPD_FLAG_MASK) == DSECT_FLAG)
#define lv1ent_spsection(sent) ((*(sent) & FLPD_FLAG_MASK) == SPSECT_FLAG)
#define lv2ent_fault(pent) ((*(pent) & SLPD_FLAG_MASK) == 0 || \
			(PGBASE_TO_PHYS(*(pent) & SPAGE_ENT_MASK) == fault_page))
#define lv2ent_small(pent) ((*(pent) & SLPD_FLAG_MASK) == SPAGE_FLAG)
#define lv2ent_large(pent) ((*(pent) & SLPD_FLAG_MASK) == LPAGE_FLAG)
#define dsection_phys(sent) PGBASE_TO_PHYS(*(sent) & DSECT_ENT_MASK)
#define dsection_offs(iova) ((iova) & (DSECT_SIZE - 1))
#define mk_lv1ent_spsect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 6)
#define mk_lv1ent_dsect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 4)
#define mk_lv1ent_sect(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
#define mk_lv1ent_page(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_spage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
#define set_lv1ent_shareable(sent) (*(sent) |= (1 << 6))
#define set_lv2ent_shareable(pent) (*(pent) |= (1 << 4))

#define mk_lv2ent_pfnmap(pent) (*(pent) |= (1 << 5)) /* unused field */
#define lv2ent_pfnmap(pent) ((*(pent) & (1 << 5)) == (1 << 5))

#define PGSIZE_BITMAP (DSECT_SIZE | SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE)

void __sysmmu_show_status(struct sysmmu_drvdata *drvdata);

static inline void __sysmmu_clk_enable(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clocks[SYSMMU_ACLK]))
		clk_enable(data->clocks[SYSMMU_ACLK]);

	if (!IS_ERR(data->clocks[SYSMMU_PCLK]))
		clk_enable(data->clocks[SYSMMU_PCLK]);
}

static inline void __sysmmu_clk_disable(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clocks[SYSMMU_ACLK]))
		clk_disable(data->clocks[SYSMMU_ACLK]);

	if (!IS_ERR(data->clocks[SYSMMU_PCLK]))
		clk_disable(data->clocks[SYSMMU_PCLK]);
}

static inline bool get_sysmmu_runtime_active(struct sysmmu_drvdata *data)
{
	return ++data->runtime_active == 1;
}

static inline bool put_sysmmu_runtime_active(struct sysmmu_drvdata *data)
{
	BUG_ON(data->runtime_active < 1);
	return --data->runtime_active == 0;
}

static inline bool is_sysmmu_runtime_active(struct sysmmu_drvdata *data)
{
	return data->runtime_active > 0;
}

static inline bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static inline bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static inline bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return !data->is_suspended && data->activations > 0;
}

static inline bool is_sysmmu_really_enabled(struct sysmmu_drvdata *data)
{
	return is_sysmmu_active(data) && data->runtime_active;
}

#define MMU_MAJ_VER(val) ((val) >> 11)
#define MMU_MIN_VER(val) ((val >> 4) & 0x7F)
#define MMU_REV_VER(val) ((val) & 0xF)
#define MMU_RAW_VER(reg) (((reg) >> 17) & 0x7FFF) /* upper 15 bits */

#define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 11) | \
				(((min) & 0x7F) << 4))
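
/*
 * Editor's note, a worked example of the version packing: MAKE_MMU_VER(3, 3)
 * yields (3 << 11) | (3 << 4) = 0x1830, from which MMU_MAJ_VER() and
 * MMU_MIN_VER() both recover 3. MMU_RAW_VER() extracts the same packed value
 * from the upper 15 bits of REG_MMU_VERSION.
 */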

static inline unsigned int __raw_sysmmu_version(void __iomem *sfrbase)
{
	return MMU_RAW_VER(__raw_readl(sfrbase + REG_MMU_VERSION));
}

static inline void __raw_sysmmu_disable(void __iomem *sfrbase, int disable)
{
	__raw_writel(0, sfrbase + REG_MMU_CFG);
	__raw_writel(disable, sfrbase + REG_MMU_CTRL);
	BUG_ON(__raw_readl(sfrbase + REG_MMU_CTRL) != disable);
}

static inline void __raw_sysmmu_enable(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

#define sysmmu_unblock __raw_sysmmu_enable

void dump_sysmmu_tlb_pb(void __iomem *sfrbase);

static inline bool sysmmu_block(void __iomem *sfrbase)
{
	int i = SYSMMU_BLOCK_POLLING_COUNT;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		dump_sysmmu_tlb_pb(sfrbase);
		panic("Failed to block System MMU!");
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
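
/*
 * Editor's note (usage sketch, not part of the original header): SFR updates
 * that must not race with ongoing translation are typically bracketed with
 * sysmmu_block()/sysmmu_unblock(), e.g.:
 *
 *	if (sysmmu_block(sfrbase)) {
 *		__sysmmu_set_ptbase(sfrbase, pgtable_pfn);
 *		sysmmu_unblock(sfrbase);
 *	}
 *
 * __sysmmu_set_ptbase() is declared below; the exact page table base encoding
 * passed to it is driver-internal, so 'pgtable_pfn' here is only a
 * placeholder.
 */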

void __sysmmu_init_config(struct sysmmu_drvdata *drvdata);
void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pfn_pgtable);

extern sysmmu_pte_t *zero_lv2_table;

static inline sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, unsigned long iova)
{
	return (sysmmu_pte_t *)(phys_to_virt(lv2table_base(sent))) +
				lv2ent_offset(iova);
}

static inline sysmmu_pte_t *section_entry(
				sysmmu_pte_t *pgtable, unsigned long iova)
{
	return (sysmmu_pte_t *)(pgtable + lv1ent_offset(iova));
}

irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id);
void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase, dma_addr_t iova);
void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *drvdata,
			     dma_addr_t iova, size_t size);

int exynos_iommu_map_userptr(struct iommu_domain *dom, unsigned long addr,
			     dma_addr_t iova, size_t size, int prot);
void exynos_iommu_unmap_userptr(struct iommu_domain *dom,
				dma_addr_t iova, size_t size);

void dump_sysmmu_tlb_pb(void __iomem *sfrbase);

#if defined(CONFIG_EXYNOS_IOVMM)
static inline struct exynos_iovmm *exynos_get_iovmm(struct device *dev)
{
	if (!dev->archdata.iommu) {
		dev_err(dev, "%s: System MMU is not configured\n", __func__);
		return NULL;
	}

	return ((struct exynos_iommu_owner *)dev->archdata.iommu)->vmm_data;
}

struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
					  dma_addr_t iova);

static inline int find_iovmm_plane(struct exynos_iovmm *vmm, dma_addr_t iova)
{
	int i;

	for (i = 0; i < (vmm->inplanes + vmm->onplanes); i++)
		if ((iova >= vmm->iova_start[i]) &&
			(iova < (vmm->iova_start[i] + vmm->iovm_size[i])))
			return i;
	return -1;
}

struct exynos_iovmm *exynos_create_single_iovmm(const char *name);
int exynos_sysmmu_add_fault_notifier(struct device *dev,
				     iommu_fault_handler_t handler, void *token);
#else
static inline struct exynos_iovmm *exynos_get_iovmm(struct device *dev)
{
	return NULL;
}

/* static inline added: a plain definition here would break this header */
static inline struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
							dma_addr_t iova)
{
	return NULL;
}

static inline int find_iovmm_plane(struct exynos_iovmm *vmm, dma_addr_t iova)
{
	return -1;
}

static inline struct exynos_iovmm *exynos_create_single_iovmm(const char *name)
{
	return NULL;
}
#endif /* CONFIG_EXYNOS_IOVMM */

#endif /* _EXYNOS_IOMMU_H_ */
819
drivers/iommu/exynos-iovmm.c
Normal file
@ -0,0 +1,819 @@
/* linux/drivers/iommu/exynos_iovmm.c
 *
 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/delay.h>

#include <linux/exynos_iovmm.h>

#include "exynos-iommu.h"

/* IOVM region: [0x10000000, 0xD0000000) */
#define IOVA_START_V6 0x10000000
#define IOVM_SIZE_V6 (0xD0000000 - IOVA_START_V6)
#define sg_physically_continuous(sg) (sg_next(sg) == NULL)

/* alloc_iovm_region - Allocate IO virtual memory region
 * vmm: virtual memory allocator
 * size: total size to allocate vm region from @vmm.
 * section_offset: page size-aligned offset of iova start address within an 1MB
 *		boundary. The caller of alloc_iovm_region will obtain the
 *		allocated iova + section_offset. This is provided just for the
 *		physically contiguous memory.
 * page_offset: must be smaller than PAGE_SIZE. Just a value to be added to the
 *		allocated virtual address. This does not affect the allocated
 *		size and address.
 *
 * This function returns an allocated IO virtual address that satisfies the
 * given constraints: the caller will get the allocated virtual address plus
 * (section_offset + page_offset). Returns 0 if this function is not able
 * to allocate IO virtual memory.
 */
static dma_addr_t alloc_iovm_region(struct exynos_iovmm *vmm, size_t size,
				    size_t section_offset,
				    off_t page_offset)
{
	u32 index = 0;
	u32 vstart;
	u32 vsize;
	unsigned long end, i;
	struct exynos_vm_region *region;
	size_t align = SZ_1M;

	BUG_ON(page_offset >= PAGE_SIZE);

	/* To avoid allocating prefetched iovm region */
	vsize = (ALIGN(size + SZ_128K, SZ_128K) + section_offset) >> PAGE_SHIFT;
	align >>= PAGE_SHIFT;
	section_offset >>= PAGE_SHIFT;

	spin_lock(&vmm->bitmap_lock);
again:
	index = find_next_zero_bit(vmm->vm_map[0],
			IOVM_NUM_PAGES(vmm->iovm_size[0]), index);

	if (align) {
		index = ALIGN(index, align);
		if (index >= IOVM_NUM_PAGES(vmm->iovm_size[0])) {
			spin_unlock(&vmm->bitmap_lock);
			return 0;
		}

		if (test_bit(index, vmm->vm_map[0]))
			goto again;
	}

	end = index + vsize;

	if (end >= IOVM_NUM_PAGES(vmm->iovm_size[0])) {
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	i = find_next_bit(vmm->vm_map[0], end, index);
	if (i < end) {
		index = i + 1;
		goto again;
	}

	bitmap_set(vmm->vm_map[0], index, vsize);

	spin_unlock(&vmm->bitmap_lock);

	vstart = (index << PAGE_SHIFT) + vmm->iova_start[0] + page_offset;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (unlikely(!region)) {
		spin_lock(&vmm->bitmap_lock);
		bitmap_clear(vmm->vm_map[0], index, vsize);
		spin_unlock(&vmm->bitmap_lock);
		return 0;
	}

	INIT_LIST_HEAD(&region->node);
	region->start = vstart;
	region->size = vsize << PAGE_SHIFT;
	region->dummy_size = region->size - size;
	region->section_off = section_offset << PAGE_SHIFT;

	spin_lock(&vmm->vmlist_lock);
	list_add_tail(&region->node, &vmm->regions_list);
	vmm->allocated_size[0] += region->size;
	vmm->num_areas[0]++;
	vmm->num_map++;
	spin_unlock(&vmm->vmlist_lock);

	return region->start + region->section_off;
}
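
/*
 * Editor's note, a worked example of the reservation size above: a 1 MiB
 * request with section_offset = 0 reserves ALIGN(1M + 128K, 128K) = 0x120000
 * bytes, i.e. 288 pages, so a guard gap always separates neighbouring regions
 * (the "avoid allocating prefetched iovm region" padding).
 */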

struct exynos_vm_region *find_iovm_region(struct exynos_iovmm *vmm,
					  dma_addr_t iova)
{
	struct exynos_vm_region *region;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start <= iova &&
			(region->start + region->size) > iova) {
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static struct exynos_vm_region *remove_iovm_region(struct exynos_iovmm *vmm,
						   dma_addr_t iova)
{
	struct exynos_vm_region *region;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(region, &vmm->regions_list, node) {
		if (region->start + region->section_off == iova) {
			list_del(&region->node);
			vmm->allocated_size[0] -= region->size;
			vmm->num_areas[0]--;
			vmm->num_unmap++;
			spin_unlock(&vmm->vmlist_lock);
			return region;
		}
	}

	spin_unlock(&vmm->vmlist_lock);

	return NULL;
}

static void free_iovm_region(struct exynos_iovmm *vmm,
			     struct exynos_vm_region *region)
{
	if (!region)
		return;

	spin_lock(&vmm->bitmap_lock);
	bitmap_clear(vmm->vm_map[0],
		     (region->start - vmm->iova_start[0]) >> PAGE_SHIFT,
		     region->size >> PAGE_SHIFT);
	spin_unlock(&vmm->bitmap_lock);

	SYSMMU_EVENT_LOG_IOVMM_UNMAP(IOVMM_TO_LOG(vmm),
			region->start, region->start + region->size);

	kfree(region);
}

static dma_addr_t add_iovm_region(struct exynos_iovmm *vmm,
				  dma_addr_t start, size_t size)
{
	struct exynos_vm_region *region, *pos;

	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return 0;

	INIT_LIST_HEAD(&region->node);
	region->start = start;
	region->size = size;

	spin_lock(&vmm->vmlist_lock);

	list_for_each_entry(pos, &vmm->regions_list, node) {
		if ((start < (pos->start + pos->size)) &&
			((start + size) > pos->start)) {
			spin_unlock(&vmm->vmlist_lock);
			kfree(region);
			return 0;
		}
	}

	list_add(&region->node, &vmm->regions_list);

	spin_unlock(&vmm->vmlist_lock);

	return start;
}

static void show_iovm_regions(struct exynos_iovmm *vmm)
{
	struct exynos_vm_region *pos;

	pr_err("LISTING IOVMM REGIONS...\n");
	spin_lock(&vmm->vmlist_lock);
	list_for_each_entry(pos, &vmm->regions_list, node) {
		pr_err("REGION: %#x (SIZE: %#x, +[%#x, %#x])\n",
			pos->start, pos->size,
			pos->section_off, pos->dummy_size);
	}
	spin_unlock(&vmm->vmlist_lock);
	pr_err("END OF LISTING IOVMM REGIONS...\n");
}

int iovmm_activate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	if (!vmm) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return -EINVAL;
	}

	return iommu_attach_device(vmm->domain, dev);
}

void iovmm_deactivate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	if (!vmm) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	iommu_detach_device(vmm->domain, dev);
}

struct iommu_domain *get_domain_from_dev(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	if (!vmm) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return NULL;
	}

	return vmm->domain;
}

/* iovmm_map - allocate and map IO virtual memory for the given device
 * dev: device that has IO virtual address space managed by IOVMM
 * sg: list of physically contiguous memory chunks. The preceding chunk needs
 *	to be larger than the following chunks in sg for efficient mapping and
 *	performance. If sg has more than one element, the physical address of
 *	each chunk needs to be aligned to its size for efficient mapping and
 *	TLB utilization.
 * offset: offset in bytes to be mapped and accessed by dev.
 * size: size in bytes to be mapped and accessed by dev.
 *
 * This function allocates IO virtual memory for the given device and maps the
 * given physical memory conveyed by sg into the allocated IO memory region.
 * Returns the allocated IO virtual address if allocation and mapping succeed.
 * Otherwise, returns a negative error number. The caller must check the
 * return value of this function with IS_ERR_VALUE().
 */
dma_addr_t iovmm_map(struct device *dev, struct scatterlist *sg, off_t offset,
		     size_t size, enum dma_data_direction direction, int prot)
{
	off_t start_off;
	dma_addr_t addr, start = 0;
	size_t mapped_size = 0;
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	size_t section_offset = 0; /* section offset of contig. mem */
	int ret = 0;
	int idx;
	struct scatterlist *tsg;
	struct exynos_vm_region *region;

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return -EINVAL;
	}

	for (; (sg != NULL) && (sg->length < offset); sg = sg_next(sg))
		offset -= sg->length;

	if (sg == NULL) {
		dev_err(dev, "IOVMM: invalid offset to %s.\n", __func__);
		return -EINVAL;
	}

	tsg = sg;

	start_off = offset_in_page(sg_phys(sg) + offset);
	size = PAGE_ALIGN(size + start_off);

	if (sg_physically_continuous(sg)) {
		size_t aligned_pad_size;
		phys_addr_t phys = page_to_phys(sg_page(sg));
		section_offset = phys & (~SECT_MASK);
		aligned_pad_size = ALIGN(phys, SECT_SIZE) - phys;
		if ((sg->length - aligned_pad_size) < SECT_SIZE) {
			aligned_pad_size = ALIGN(phys, LPAGE_SIZE) - phys;
			if ((sg->length - aligned_pad_size) >= LPAGE_SIZE)
				section_offset = phys & (~LPAGE_MASK);
			else
				section_offset = 0;
		}
	}
	start = alloc_iovm_region(vmm, size, section_offset, start_off);
	if (!start) {
		spin_lock(&vmm->vmlist_lock);
		dev_err(dev, "%s: Not enough IOVM space to allocate %#zx\n",
			__func__, size);
		dev_err(dev, "%s: Total %#zx, Allocated %#zx , Chunks %d\n",
			__func__, vmm->iovm_size[0],
			vmm->allocated_size[0], vmm->num_areas[0]);
		spin_unlock(&vmm->vmlist_lock);
		ret = -ENOMEM;
		goto err_map_nomem;
	}

	addr = start - start_off;

	do {
		phys_addr_t phys;
		size_t len;

		phys = sg_phys(sg);
		len = sg->length;

		/* if back to back sg entries are contiguous consolidate them */
		while (sg_next(sg) &&
		       sg_phys(sg) + sg->length == sg_phys(sg_next(sg))) {
			len += sg_next(sg)->length;
			sg = sg_next(sg);
		}

		if (offset > 0) {
			len -= offset;
			phys += offset;
			offset = 0;
		}

		if (offset_in_page(phys)) {
			len += offset_in_page(phys);
			phys = round_down(phys, PAGE_SIZE);
		}

		len = PAGE_ALIGN(len);

		if (len > (size - mapped_size))
			len = size - mapped_size;

		ret = iommu_map(vmm->domain, addr, phys, len, prot);
		if (ret) {
			dev_err(dev, "iommu_map failed w/ err: %d\n", ret);
			break;
		}

		addr += len;
		mapped_size += len;
	} while ((sg = sg_next(sg)) && (mapped_size < size));

	BUG_ON(mapped_size > size);

	if (mapped_size < size) {
		dev_err(dev, "mapped_size(%#zx) is smaller than size(%#zx)\n",
			mapped_size, size);
		if (!ret) {
			dev_err(dev, "ret: %d\n", ret);
			ret = -EINVAL;
		}
		goto err_map_map;
	}

	region = find_iovm_region(vmm, start);
	BUG_ON(!region);

	/*
	 * If a prefetched SLPD is the fault SLPD of zero_l2_table, the FLPD
	 * cache or prefetch buffer caches the address of zero_l2_table.
	 * The mapping above replaces zero_l2_table with a new L2 page table
	 * to write valid mappings.
	 * Accessing the valid area may still cause a page fault since the
	 * FLPD cache may cache zero_l2_table for the valid area instead of
	 * the new L2 page table that holds the mapping information of the
	 * valid area.
	 * Thus any replacement of zero_l2_table with another valid L2 page
	 * table must involve FLPD cache invalidation if the System MMU has
	 * the prefetch feature and an FLPD cache (version 3.3).
	 * FLPD cache invalidation is performed with TLB invalidation by VPN
	 * without blocking. It is safe to invalidate the TLB without blocking
	 * because the target address of the TLB invalidation is not currently
	 * mapped.
	 */

	exynos_sysmmu_tlb_invalidate(vmm->domain, region->start, region->size);

	TRACE_LOG_DEV(dev, "IOVMM: Allocated VM region @ %#x/%#x bytes.\n",
		      start, size);

	SYSMMU_EVENT_LOG_IOVMM_MAP(IOVMM_TO_LOG(vmm), start, start + size,
				   region->size - size);

	return start;

err_map_map:
	iommu_unmap(vmm->domain, start - start_off, mapped_size);
	free_iovm_region(vmm, remove_iovm_region(vmm, start));

	dev_err(dev,
		"Failed(%d) to map IOVMM REGION %pa (SIZE: %#zx, mapped: %#zx)\n",
		ret, &start, size, mapped_size);
	idx = 0;
	do {
		pr_err("SGLIST[%d].size = %#x\n", idx++, tsg->length);
	} while ((tsg = sg_next(tsg)));

	show_iovm_regions(vmm);

err_map_nomem:
	TRACE_LOG_DEV(dev,
		"IOVMM: Failed to allocate VM region for %#zx bytes.\n", size);
	return (dma_addr_t)ret;
}
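
/*
 * Editor's note (usage sketch, not part of the original file): a typical
 * driver maps a scatter-gather table and checks the result as below;
 * 'sgt' and 'buf_size' are placeholder names for this example.
 *
 *	dma_addr_t iova;
 *
 *	iova = iovmm_map(dev, sgt->sgl, 0, buf_size, DMA_TO_DEVICE,
 *			 IOMMU_READ | IOMMU_WRITE);
 *	if (IS_ERR_VALUE(iova))
 *		return (int)iova;
 *	...
 *	iovmm_unmap(dev, iova);
 */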

void iovmm_unmap(struct device *dev, dma_addr_t iova)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	region = remove_iovm_region(vmm, iova);
	if (region) {
		u32 start = region->start + region->section_off;
		u32 size = region->size - region->dummy_size;

		/* clear page offset */
		if (WARN_ON(start != iova)) {
			dev_err(dev, "IOVMM: "
				"iova %pa and region %#x(+%#x)@%#x(-%#x) mismatch\n",
				&iova, region->size, region->dummy_size,
				region->start, region->section_off);
			show_iovm_regions(vmm);
			/* reinsert iovm region */
			add_iovm_region(vmm, region->start, region->size);
			kfree(region);
			return;
		}
		unmap_size = iommu_unmap(vmm->domain, start & SPAGE_MASK, size);
		if (unlikely(unmap_size != size)) {
			dev_err(dev,
				"Failed to unmap REGION of %#x:\n", start);
			dev_err(dev, "(SIZE: %#x, iova: %pa, unmapped: %#zx)\n",
				size, &iova, unmap_size);
			show_iovm_regions(vmm);
			kfree(region);
			BUG();
			return;
		}

		exynos_sysmmu_tlb_invalidate(vmm->domain, region->start, region->size);

		/* 60us is required to guarantee that PTW ends itself */
		udelay(60);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
			      unmap_size, iova);
	} else {
		dev_err(dev, "IOVMM: No IOVM region %pa to free.\n", &iova);
	}
}

/*
 * NOTE:
 * exynos_iovmm_map_userptr() should be called with current->mm.mmap_sem held.
 */
dma_addr_t exynos_iovmm_map_userptr(struct device *dev, unsigned long vaddr,
				    size_t size, int prot)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	unsigned long eaddr = vaddr + size;
	off_t offset = offset_in_page(vaddr);
	int ret = -EINVAL;
	struct vm_area_struct *vma;
	dma_addr_t start;
	struct exynos_vm_region *region;

	vma = find_vma(current->mm, vaddr);
	/* NULL check added: find_vma() returns NULL when no VMA covers vaddr */
	if (!vma || vaddr < vma->vm_start) {
		dev_err(dev, "%s: invalid address %#lx\n", __func__, vaddr);
		goto err;
	}

	if (!!(vma->vm_flags & VM_PFNMAP))
		prot |= IOMMU_PFNMAP;

	while (eaddr > vma->vm_end) {
		if (!!(vma->vm_flags & VM_PFNMAP)) {
			dev_err(dev, "%s: non-linear pfnmap is not supported\n",
				__func__);
			goto err;
		}

		if ((vma->vm_next == NULL) ||
			(vma->vm_end != vma->vm_next->vm_start)) {
			dev_err(dev, "%s: invalid size %zu\n", __func__, size);
			goto err;
		}

		vma = vma->vm_next;
	}

	size = PAGE_ALIGN(size + offset);
	start = alloc_iovm_region(vmm, size, 0, offset);
	if (!start) {
		spin_lock(&vmm->vmlist_lock);
		dev_err(dev, "%s: Not enough IOVM space to allocate %#zx\n",
			__func__, size);
		dev_err(dev, "%s: Total %#zx, Allocated %#zx , Chunks %d\n",
			__func__, vmm->iovm_size[0],
			vmm->allocated_size[0], vmm->num_areas[0]);
		spin_unlock(&vmm->vmlist_lock);
		ret = -ENOMEM;
		goto err;
	}

	ret = exynos_iommu_map_userptr(vmm->domain, vaddr - offset,
				       start - offset, size, prot);
	if (ret < 0)
		goto err_map;

	region = find_iovm_region(vmm, start);
	BUG_ON(!region);

	SYSMMU_EVENT_LOG_IOVMM_MAP(IOVMM_TO_LOG(vmm), start, start + size,
				   region->size - size);
	return start;
err_map:
	free_iovm_region(vmm, remove_iovm_region(vmm, start));
err:
	return (dma_addr_t)ret;
}

void exynos_iovmm_unmap_userptr(struct device *dev, dma_addr_t iova)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;

	region = remove_iovm_region(vmm, iova);
	if (region) {
		u32 start = region->start + region->section_off;
		u32 size = region->size - region->dummy_size;

		/* clear page offset */
		if (WARN_ON(start != iova)) {
			dev_err(dev, "IOVMM: "
				"iova %pa and region %#x(+%#x)@%#x(-%#x) mismatch\n",
				&iova, region->size, region->dummy_size,
				region->start, region->section_off);
			show_iovm_regions(vmm);
			/* reinsert iovm region */
			add_iovm_region(vmm, region->start, region->size);
			kfree(region);
			return;
		}

		exynos_iommu_unmap_userptr(vmm->domain,
					   start & SPAGE_MASK, size);

		free_iovm_region(vmm, region);
	} else {
		dev_err(dev, "IOVMM: No IOVM region %pa to free.\n", &iova);
	}
}

int iovmm_map_oto(struct device *dev, phys_addr_t phys, size_t size)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	int ret;

	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return -EINVAL;
	}

	if (WARN_ON((phys + size) >= IOVA_START_V6)) {
		dev_err(dev,
			"Unable to create one to one mapping for %#zx @ %pa\n",
			size, &phys);
		return -EINVAL;
	}

	if (!add_iovm_region(vmm, (dma_addr_t)phys, size))
		return -EADDRINUSE;

	ret = iommu_map(vmm->domain, (dma_addr_t)phys, phys, size, 0);
	if (ret < 0)
		free_iovm_region(vmm,
				 remove_iovm_region(vmm, (dma_addr_t)phys));

	return ret;
}

void iovmm_unmap_oto(struct device *dev, phys_addr_t phys)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	struct exynos_vm_region *region;
	size_t unmap_size;

	/* This function must not be called in IRQ handlers */
	BUG_ON(in_irq());
	BUG_ON(!IS_ALIGNED(phys, PAGE_SIZE));

	if (vmm == NULL) {
		dev_err(dev, "%s: IOVMM not found\n", __func__);
		return;
	}

	region = remove_iovm_region(vmm, (dma_addr_t)phys);
	if (region) {
		unmap_size = iommu_unmap(vmm->domain, (dma_addr_t)phys,
					 region->size);
		WARN_ON(unmap_size != region->size);

		exynos_sysmmu_tlb_invalidate(vmm->domain, (dma_addr_t)phys,
					     region->size);

		free_iovm_region(vmm, region);

		TRACE_LOG_DEV(dev, "IOVMM: Unmapped %#x bytes from %#x.\n",
			      unmap_size, phys);
	}
}

static struct dentry *exynos_iovmm_debugfs_root;
static struct dentry *exynos_iommu_debugfs_root;

static int exynos_iovmm_create_debugfs(void)
{
	exynos_iovmm_debugfs_root = debugfs_create_dir("iovmm", NULL);
	if (!exynos_iovmm_debugfs_root)
		pr_err("IOVMM: Failed to create debugfs entry\n");
	else
		pr_info("IOVMM: Created debugfs entry at debugfs/iovmm\n");

	exynos_iommu_debugfs_root = debugfs_create_dir("iommu", NULL);
	if (!exynos_iommu_debugfs_root)
		pr_err("IOMMU: Failed to create debugfs entry\n");
	else
		pr_info("IOMMU: Created debugfs entry at debugfs/iommu\n");

	return 0;
}
arch_initcall(exynos_iovmm_create_debugfs);

static int iovmm_debug_show(struct seq_file *s, void *unused)
{
	struct exynos_iovmm *vmm = s->private;
	int i = 0;

	seq_printf(s, "%6s %10s %10s %10s %6s\n",
		   "REGION", "VASTART", "SIZE", "FREE", "CHUNKS");
	seq_puts(s, "---------------------------------------------\n");

	spin_lock(&vmm->vmlist_lock);
	while (i < vmm->inplanes) {
		seq_printf(s, "%3s[%d] %#x %#10zx %#10zx %d\n",
			   "in", i, vmm->iova_start[i], vmm->iovm_size[i],
			   vmm->iovm_size[i] - vmm->allocated_size[i],
			   vmm->num_areas[i]);
		i++;
	}
	while (i < (vmm->inplanes + vmm->onplanes)) {
		seq_printf(s, "%3s[%d] %#x %#10zx %#10zx %d\n",
			   "out", i - vmm->inplanes, vmm->iova_start[i],
			   vmm->iovm_size[i],
			   vmm->iovm_size[i] - vmm->allocated_size[i],
			   vmm->num_areas[i]);
		i++;
	}
	seq_puts(s, "---------------------------------------------\n");
	seq_printf(s, "Total number of mappings  : %d\n", vmm->num_map);
	seq_printf(s, "Total number of unmappings: %d\n", vmm->num_unmap);
	spin_unlock(&vmm->vmlist_lock);

	return 0;
}

static int iovmm_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, iovmm_debug_show, inode->i_private);
}

static ssize_t iovmm_debug_write(struct file *filp, const char __user *p,
				 size_t len, loff_t *off)
{
	struct seq_file *s = filp->private_data;
	struct exynos_iovmm *vmm = s->private;
	/* clears the map count in IOVMM */
	spin_lock(&vmm->vmlist_lock);
	vmm->num_map = 0;
	vmm->num_unmap = 0;
	spin_unlock(&vmm->vmlist_lock);
	return len;
}

static const struct file_operations iovmm_debug_fops = {
	.open = iovmm_debug_open,
	.read = seq_read,
	.write = iovmm_debug_write,
	.llseek = seq_lseek,
	.release = single_release,
};
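
/*
 * Editor's note (usage sketch, not part of the original file): each IOVMM
 * instance appears as <debugfs>/iovmm/<domain_name>. Reading the file dumps
 * the per-plane usage table produced by iovmm_debug_show(); writing anything
 * to it clears the num_map/num_unmap counters (see iovmm_debug_write()):
 *
 *	cat /sys/kernel/debug/iovmm/<domain_name>
 *	echo 1 > /sys/kernel/debug/iovmm/<domain_name>
 */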

static void iovmm_register_debugfs(struct exynos_iovmm *vmm)
{
	if (!exynos_iovmm_debugfs_root)
		return;

	debugfs_create_file(vmm->domain_name, 0664,
			exynos_iovmm_debugfs_root, vmm, &iovmm_debug_fops);
}

struct exynos_iovmm *exynos_create_single_iovmm(const char *name)
{
	struct exynos_iovmm *vmm;
	int ret = 0;

	vmm = kzalloc(sizeof(*vmm), GFP_KERNEL);
	if (!vmm) {
		ret = -ENOMEM;
		goto err_alloc_vmm;
	}

	vmm->iovm_size[0] = IOVM_SIZE_V6;
	vmm->iova_start[0] = IOVA_START_V6;
	vmm->vm_map[0] = kzalloc(IOVM_BITMAP_SIZE(IOVM_SIZE_V6), GFP_KERNEL);
	if (!vmm->vm_map[0]) {
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	vmm->inplanes = 1;
	vmm->onplanes = 0;
	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	ret = exynos_iommu_init_event_log(IOVMM_TO_LOG(vmm), IOVMM_LOG_LEN);
	if (!ret) {
		iovmm_add_log_to_debugfs(exynos_iovmm_debugfs_root,
					 IOVMM_TO_LOG(vmm), name);

		iommu_add_log_to_debugfs(exynos_iommu_debugfs_root,
					 IOMMU_TO_LOG(vmm->domain), name);
	} else {
		goto err_init_event_log;
	}

	spin_lock_init(&vmm->vmlist_lock);
	spin_lock_init(&vmm->bitmap_lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	vmm->domain_name = name;

	iovmm_register_debugfs(vmm);

	pr_debug("%s IOVMM: Created %#x B IOVMM from %#x.\n",
		 name, IOVM_SIZE_V6, IOVA_START_V6);
	return vmm;

err_init_event_log:
	iommu_domain_free(vmm->domain);
err_setup_domain:
	kfree(vmm->vm_map[0]);	/* leak fix: was not freed on error paths */
	kfree(vmm);
err_alloc_vmm:
	pr_err("%s IOVMM: Failed to create IOVMM (%d)\n", name, ret);

	return ERR_PTR(ret);
}
1308
drivers/iommu/fsl_pamu.c
Normal file
File diff suppressed because it is too large
410
drivers/iommu/fsl_pamu.h
Normal file
@ -0,0 +1,410 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 *
 */

#ifndef __FSL_PAMU_H
#define __FSL_PAMU_H

#include <asm/fsl_pamu_stash.h>

/* Bit Field macros
 * v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
 */
#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
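
/*
 * Editor's note, a worked example of the accessors above: with
 * PPAACE_AF_WSE = 0x00000fc0 and PPAACE_AF_WSE_SHIFT = 6 (defined below),
 * set_bf(v, PPAACE_AF_WSE, PAACE_WSE_4K) clears bits 6-11 of v and loads
 * 0xB into them, while get_bf(v, PPAACE_AF_WSE) reads the field back.
 */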

/* PAMU CCSR space */
#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
#define PAMU_PE 0x40000000 /* enable PAMU */

/* PAMU_OFFSET to the next pamu space in ccsr */
#define PAMU_OFFSET 0x1000

#define PAMU_MMAP_REGS_BASE 0

struct pamu_mmap_regs {
	u32 ppbah;
	u32 ppbal;
	u32 pplah;
	u32 pplal;
	u32 spbah;
	u32 spbal;
	u32 splah;
	u32 splal;
	u32 obah;
	u32 obal;
	u32 olah;
	u32 olal;
};

/* PAMU Error Registers */
#define PAMU_POES1 0x0040
#define PAMU_POES2 0x0044
#define PAMU_POEAH 0x0048
#define PAMU_POEAL 0x004C
#define PAMU_AVS1 0x0050
#define PAMU_AVS1_AV 0x1
#define PAMU_AVS1_OTV 0x6
#define PAMU_AVS1_APV 0x78
#define PAMU_AVS1_WAV 0x380
#define PAMU_AVS1_LAV 0x1c00
#define PAMU_AVS1_GCV 0x2000
#define PAMU_AVS1_PDV 0x4000
#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
		| PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
#define PAMU_AVS1_LIODN_SHIFT 16
#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400

#define PAMU_AVS2 0x0054
#define PAMU_AVAH 0x0058
#define PAMU_AVAL 0x005C
#define PAMU_EECTL 0x0060
#define PAMU_EEDIS 0x0064
#define PAMU_EEINTEN 0x0068
#define PAMU_EEDET 0x006C
#define PAMU_EEATTR 0x0070
#define PAMU_EEAHI 0x0074
#define PAMU_EEALO 0x0078
#define PAMU_EEDHI 0x007C
#define PAMU_EEDLO 0x0080
#define PAMU_EECC 0x0084
#define PAMU_UDAD 0x0090

/* PAMU Revision Registers */
#define PAMU_PR1 0x0BF8
#define PAMU_PR2 0x0BFC

/* PAMU version mask */
#define PAMU_PR1_MASK 0xffff

/* PAMU Capabilities Registers */
#define PAMU_PC1 0x0C00
#define PAMU_PC2 0x0C04
#define PAMU_PC3 0x0C08
#define PAMU_PC4 0x0C0C

/* PAMU Control Register */
#define PAMU_PC 0x0C10

/* PAMU control defs */
#define PAMU_CONTROL 0x0C10
#define PAMU_PC_PGC 0x80000000 /* PAMU gate closed bit */
#define PAMU_PC_PE 0x40000000 /* PAMU enable bit */
#define PAMU_PC_SPCC 0x00000010 /* sPAACE cache enable */
#define PAMU_PC_PPCC 0x00000001 /* pPAACE cache enable */
#define PAMU_PC_OCE 0x00001000 /* OMT cache enable */

#define PAMU_PFA1 0x0C14
#define PAMU_PFA2 0x0C18

#define PAMU_PC2_MLIODN(X) ((X) >> 16)
#define PAMU_PC3_MWCE(X) (((X) >> 21) & 0xf)

/* PAMU Interrupt control and Status Register */
#define PAMU_PICS 0x0C1C
#define PAMU_ACCESS_VIOLATION_STAT 0x8
#define PAMU_ACCESS_VIOLATION_ENABLE 0x4

/* PAMU Debug Registers */
#define PAMU_PD1 0x0F00
#define PAMU_PD2 0x0F04
#define PAMU_PD3 0x0F08
#define PAMU_PD4 0x0F0C

#define PAACE_AP_PERMS_DENIED 0x0
#define PAACE_AP_PERMS_QUERY 0x1
#define PAACE_AP_PERMS_UPDATE 0x2
#define PAACE_AP_PERMS_ALL 0x3

#define PAACE_DD_TO_HOST 0x0
#define PAACE_DD_TO_IO 0x1
#define PAACE_PT_PRIMARY 0x0
#define PAACE_PT_SECONDARY 0x1
#define PAACE_V_INVALID 0x0
#define PAACE_V_VALID 0x1
#define PAACE_MW_SUBWINDOWS 0x1

#define PAACE_WSE_4K 0xB
#define PAACE_WSE_8K 0xC
#define PAACE_WSE_16K 0xD
#define PAACE_WSE_32K 0xE
#define PAACE_WSE_64K 0xF
#define PAACE_WSE_128K 0x10
#define PAACE_WSE_256K 0x11
#define PAACE_WSE_512K 0x12
#define PAACE_WSE_1M 0x13
#define PAACE_WSE_2M 0x14
#define PAACE_WSE_4M 0x15
#define PAACE_WSE_8M 0x16
#define PAACE_WSE_16M 0x17
#define PAACE_WSE_32M 0x18
#define PAACE_WSE_64M 0x19
#define PAACE_WSE_128M 0x1A
#define PAACE_WSE_256M 0x1B
#define PAACE_WSE_512M 0x1C
#define PAACE_WSE_1G 0x1D
#define PAACE_WSE_2G 0x1E
#define PAACE_WSE_4G 0x1F
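
/*
 * Editor's note on the WSE encoding above: the window size encode is
 * log2(window size) - 1, so PAACE_WSE_4K = 0xB (2^12 bytes) up through
 * PAACE_WSE_4G = 0x1F (2^32 bytes).
 */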

#define PAACE_DID_PCI_EXPRESS_1 0x00
#define PAACE_DID_PCI_EXPRESS_2 0x01
#define PAACE_DID_PCI_EXPRESS_3 0x02
#define PAACE_DID_PCI_EXPRESS_4 0x03
#define PAACE_DID_LOCAL_BUS 0x04
#define PAACE_DID_SRIO 0x0C
#define PAACE_DID_MEM_1 0x10
#define PAACE_DID_MEM_2 0x11
#define PAACE_DID_MEM_3 0x12
#define PAACE_DID_MEM_4 0x13
#define PAACE_DID_MEM_1_2 0x14
#define PAACE_DID_MEM_3_4 0x15
#define PAACE_DID_MEM_1_4 0x16
#define PAACE_DID_BM_SW_PORTAL 0x18
#define PAACE_DID_PAMU 0x1C
#define PAACE_DID_CAAM 0x21
#define PAACE_DID_QM_SW_PORTAL 0x3C
#define PAACE_DID_CORE0_INST 0x80
#define PAACE_DID_CORE0_DATA 0x81
#define PAACE_DID_CORE1_INST 0x82
#define PAACE_DID_CORE1_DATA 0x83
#define PAACE_DID_CORE2_INST 0x84
#define PAACE_DID_CORE2_DATA 0x85
#define PAACE_DID_CORE3_INST 0x86
#define PAACE_DID_CORE3_DATA 0x87
#define PAACE_DID_CORE4_INST 0x88
#define PAACE_DID_CORE4_DATA 0x89
#define PAACE_DID_CORE5_INST 0x8A
#define PAACE_DID_CORE5_DATA 0x8B
#define PAACE_DID_CORE6_INST 0x8C
#define PAACE_DID_CORE6_DATA 0x8D
#define PAACE_DID_CORE7_INST 0x8E
#define PAACE_DID_CORE7_DATA 0x8F
#define PAACE_DID_BROADCAST 0xFF

#define PAACE_ATM_NO_XLATE 0x00
#define PAACE_ATM_WINDOW_XLATE 0x01
#define PAACE_ATM_PAGE_XLATE 0x02
#define PAACE_ATM_WIN_PG_XLATE \
	(PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
#define PAACE_OTM_NO_XLATE 0x00
#define PAACE_OTM_IMMEDIATE 0x01
#define PAACE_OTM_INDEXED 0x02
#define PAACE_OTM_RESERVED 0x03

#define PAACE_M_COHERENCE_REQ 0x01

#define PAACE_PID_0 0x0
#define PAACE_PID_1 0x1
#define PAACE_PID_2 0x2
#define PAACE_PID_3 0x3
#define PAACE_PID_4 0x4
#define PAACE_PID_5 0x5
#define PAACE_PID_6 0x6
#define PAACE_PID_7 0x7

#define PAACE_TCEF_FORMAT0_8B 0x00
#define PAACE_TCEF_FORMAT1_RSVD 0x01
/*
 * Hard coded value for the PAACT size to accommodate
 * the maximum LIODN value generated by u-boot.
 */
#define PAACE_NUMBER_ENTRIES 0x500
/* Hard coded value for the SPAACT size */
#define SPAACE_NUMBER_ENTRIES 0x800

#define OME_NUMBER_ENTRIES 16

/* PAACE Bit Field Defines */
#define PPAACE_AF_WBAL 0xfffff000
#define PPAACE_AF_WBAL_SHIFT 12
#define PPAACE_AF_WSE 0x00000fc0
#define PPAACE_AF_WSE_SHIFT 6
#define PPAACE_AF_MW 0x00000020
#define PPAACE_AF_MW_SHIFT 5

#define SPAACE_AF_LIODN 0xffff0000
#define SPAACE_AF_LIODN_SHIFT 16

#define PAACE_AF_AP 0x00000018
#define PAACE_AF_AP_SHIFT 3
#define PAACE_AF_DD 0x00000004
#define PAACE_AF_DD_SHIFT 2
#define PAACE_AF_PT 0x00000002
#define PAACE_AF_PT_SHIFT 1
#define PAACE_AF_V 0x00000001
#define PAACE_AF_V_SHIFT 0

#define PAACE_DA_HOST_CR 0x80
#define PAACE_DA_HOST_CR_SHIFT 7

#define PAACE_IA_CID 0x00FF0000
#define PAACE_IA_CID_SHIFT 16
#define PAACE_IA_WCE 0x000000F0
#define PAACE_IA_WCE_SHIFT 4
#define PAACE_IA_ATM 0x0000000C
#define PAACE_IA_ATM_SHIFT 2
#define PAACE_IA_OTM 0x00000003
#define PAACE_IA_OTM_SHIFT 0

#define PAACE_WIN_TWBAL 0xfffff000
#define PAACE_WIN_TWBAL_SHIFT 12
#define PAACE_WIN_SWSE 0x00000fc0
#define PAACE_WIN_SWSE_SHIFT 6

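Each mask/shift pair above is consumed together: a field is read by masking the 32-bit word and shifting down, and written by the inverse. A minimal illustration (hypothetical helpers, not driver code):

static inline u32 paace_get_field(u32 word, u32 mask, u32 shift)
{
	return (word & mask) >> shift;
}

static inline u32 paace_set_field(u32 word, u32 mask, u32 shift, u32 val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

For example, the access-permission field of a PAACE would be read as paace_get_field(addr_bitfields, PAACE_AF_AP, PAACE_AF_AP_SHIFT).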
/* PAMU Data Structures */
/* primary / secondary paact structure */
struct paace {
	/* PAACE Offset 0x00 */
	u32 wbah; /* only valid for Primary PAACE */
	u32 addr_bitfields; /* See P/S PAACE_AF_* */

	/* PAACE Offset 0x08 */
	/* Interpretation of first 32 bits dependent on DD above */
	union {
		struct {
			/* Destination ID, see PAACE_DID_* defines */
			u8 did;
			/* Partition ID */
			u8 pid;
			/* Snoop ID */
			u8 snpid;
			/* coherency_required : 1 reserved : 7 */
			u8 coherency_required; /* See PAACE_DA_* */
		} to_host;
		struct {
			/* Destination ID, see PAACE_DID_* defines */
			u8 did;
			u8 reserved1;
			u16 reserved2;
		} to_io;
	} domain_attr;

	/* Implementation attributes + window count + address & operation translation modes */
	u32 impl_attr; /* See PAACE_IA_* */

	/* PAACE Offset 0x10 */
	/* Translated window base address */
	u32 twbah;
	u32 win_bitfields; /* See PAACE_WIN_* */

	/* PAACE Offset 0x18 */
	/* first secondary paace entry */
	u32 fspi; /* only valid for Primary PAACE */
	union {
		struct {
			u8 ioea;
			u8 moea;
			u8 ioeb;
			u8 moeb;
		} immed_ot;
		struct {
			u16 reserved;
			u16 omi;
		} index_ot;
	} op_encode;

	/* PAACE Offsets 0x20-0x38 */
	u32 reserved[8]; /* not currently implemented */
};

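Which view of domain_attr applies is selected by the DD bit in addr_bitfields. A minimal sketch of that selection (hypothetical helper, not driver code):

static inline bool paace_targets_host(const struct paace *p)
{
	u32 dd = (p->addr_bitfields & PAACE_AF_DD) >> PAACE_AF_DD_SHIFT;

	return dd == PAACE_DD_TO_HOST;
}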
/* OME : Operation mapping entry
 * MOE : Mapped Operation Encodings
 * The operation mapping table is a table containing operation mapping
 * entries (OMEs). The index of a particular OME is programmed into the
 * PAACE entry used to translate inbound I/O operations for an LIODN.
 * The OMT is used specifically for the indexed translation mode. Each
 * OME contains 128 mapped operation encodings (MOEs), one byte per MOE.
 */
#define NUM_MOE 128
struct ome {
	u8 moe[NUM_MOE];
} __attribute__((packed));

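A minimal sketch of the indexed lookup the comment describes (illustration only; omi is the operation-mapping index programmed into the PAACE, and ioe_idx is one of the IOE_*_IDX values defined below):

static inline u8 omt_translate(const struct ome *omt, u32 omi, u8 ioe_idx)
{
	return omt[omi].moe[ioe_idx];
}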
#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)
#define OMT_SIZE (sizeof(struct ome) * OME_NUMBER_ENTRIES)

#define PAMU_PAGE_SHIFT 12
#define PAMU_PAGE_SIZE 4096ULL

#define IOE_READ 0x00
#define IOE_READ_IDX 0x00
#define IOE_WRITE 0x81
#define IOE_WRITE_IDX 0x01
#define IOE_EREAD0 0x82 /* Enhanced read type 0 */
#define IOE_EREAD0_IDX 0x02 /* Enhanced read type 0 */
#define IOE_EWRITE0 0x83 /* Enhanced write type 0 */
#define IOE_EWRITE0_IDX 0x03 /* Enhanced write type 0 */
#define IOE_DIRECT0 0x84 /* Directive type 0 */
#define IOE_DIRECT0_IDX 0x04 /* Directive type 0 */
#define IOE_EREAD1 0x85 /* Enhanced read type 1 */
#define IOE_EREAD1_IDX 0x05 /* Enhanced read type 1 */
#define IOE_EWRITE1 0x86 /* Enhanced write type 1 */
#define IOE_EWRITE1_IDX 0x06 /* Enhanced write type 1 */
#define IOE_DIRECT1 0x87 /* Directive type 1 */
#define IOE_DIRECT1_IDX 0x07 /* Directive type 1 */
#define IOE_RAC 0x8c /* Read with Atomic clear */
#define IOE_RAC_IDX 0x0c /* Read with Atomic clear */
#define IOE_RAS 0x8d /* Read with Atomic set */
#define IOE_RAS_IDX 0x0d /* Read with Atomic set */
#define IOE_RAD 0x8e /* Read with Atomic decrement */
#define IOE_RAD_IDX 0x0e /* Read with Atomic decrement */
#define IOE_RAI 0x8f /* Read with Atomic increment */
#define IOE_RAI_IDX 0x0f /* Read with Atomic increment */

#define EOE_READ 0x00
#define EOE_WRITE 0x01
#define EOE_RAC 0x0c /* Read with Atomic clear */
#define EOE_RAS 0x0d /* Read with Atomic set */
#define EOE_RAD 0x0e /* Read with Atomic decrement */
#define EOE_RAI 0x0f /* Read with Atomic increment */
#define EOE_LDEC 0x10 /* Load external cache */
#define EOE_LDECL 0x11 /* Load external cache with stash lock */
#define EOE_LDECPE 0x12 /* Load external cache with preferred exclusive */
#define EOE_LDECPEL 0x13 /* Load external cache with preferred exclusive and lock */
#define EOE_LDECFE 0x14 /* Load external cache with forced exclusive */
#define EOE_LDECFEL 0x15 /* Load external cache with forced exclusive and lock */
#define EOE_RSA 0x16 /* Read with stash allocate */
#define EOE_RSAU 0x17 /* Read with stash allocate and unlock */
#define EOE_READI 0x18 /* Read with invalidate */
#define EOE_RWNITC 0x19 /* Read with no intention to cache */
#define EOE_WCI 0x1a /* Write cache inhibited */
#define EOE_WWSA 0x1b /* Write with stash allocate */
#define EOE_WWSAL 0x1c /* Write with stash allocate and lock */
#define EOE_WWSAO 0x1d /* Write with stash allocate only */
#define EOE_WWSAOL 0x1e /* Write with stash allocate only and lock */
#define EOE_VALID 0x80

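Apart from IOE_READ, each IOE_* value is its IOE_*_IDX counterpart with the EOE_VALID bit (0x80) set, so the index form can be recovered by masking that bit off. A hypothetical one-liner, not part of the header:

#define IOE_TO_IDX(ioe) ((ioe) & ~EOE_VALID) /* IOE_WRITE -> IOE_WRITE_IDX */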
/* Function prototypes */
int pamu_domain_init(void);
int pamu_enable_liodn(int liodn);
int pamu_disable_liodn(int liodn);
void pamu_free_subwins(int liodn);
int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
		       u32 omi, unsigned long rpn, u32 snoopid, uint32_t stashid,
		       u32 subwin_cnt, int prot);
int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin_addr,
		       phys_addr_t subwin_size, u32 omi, unsigned long rpn,
		       uint32_t snoopid, u32 stashid, int enable, int prot);

u32 get_stash_id(u32 stash_dest_hint, u32 vcpu);
void get_ome_index(u32 *omi_index, struct device *dev);
int pamu_update_paace_stash(int liodn, u32 subwin, u32 value);
int pamu_disable_spaace(int liodn, u32 subwin);
u32 pamu_get_max_subwin_cnt(void);

#endif /* __FSL_PAMU_H */
1110
drivers/iommu/fsl_pamu_domain.c
Normal file
File diff suppressed because it is too large
85
drivers/iommu/fsl_pamu_domain.h
Normal file
@ -0,0 +1,85 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 *
 */

#ifndef __FSL_PAMU_DOMAIN_H
#define __FSL_PAMU_DOMAIN_H

#include "fsl_pamu.h"

struct dma_window {
	phys_addr_t paddr;
	u64 size;
	int valid;
	int prot;
};

struct fsl_dma_domain {
	/*
	 * Indicates the geometry size for the domain.
	 * This would be set when the geometry is
	 * configured for the domain.
	 */
	dma_addr_t geom_size;
	/*
	 * Number of windows associated with this domain.
	 * During domain initialization, it is set to the
	 * maximum number of subwindows allowed for a LIODN.
	 * The minimum value is 1, indicating a single PAMU
	 * window without any subwindows. The value can be set/
	 * queried by the set_attr/get_attr API for DOMAIN_ATTR_WINDOWS,
	 * but only once the geometry has been configured.
	 */
	u32 win_cnt;
	/*
	 * win_arr contains information on the windows
	 * configured for the domain. It is allocated only
	 * when the number of windows for the domain is
	 * set.
	 */
	struct dma_window *win_arr;
	/* list of devices associated with the domain */
	struct list_head devices;
	/* dma_domain states:
	 * mapped - A particular mapping has been created
	 * within the configured geometry.
	 * enabled - DMA has been enabled for the given
	 * domain. This translates to setting of the
	 * valid bit for the primary PAACE in the PAMU
	 * PAACT table. Domain geometry should be set and
	 * it must have a valid mapping before DMA can be
	 * enabled for it.
	 */
	int mapped;
	int enabled;
	/* stash_id obtained from the stash attribute details */
	u32 stash_id;
	struct pamu_stash_attribute dma_stash;
	u32 snoop_id;
	struct iommu_domain *iommu_domain;
	spinlock_t domain_lock;
};

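As the win_cnt comment notes, the window count flows through the generic IOMMU attribute interface. A minimal usage sketch from a hypothetical caller (assumes the iommu_domain_set_attr()/iommu_domain_window_enable() API of this kernel generation; geometry configuration and most error handling elided):

static int pamu_window_demo(struct iommu_domain *dom)
{
	u32 windows = 8;
	int ret;

	/* only valid once the domain geometry has been configured */
	ret = iommu_domain_set_attr(dom, DOMAIN_ATTR_WINDOWS, &windows);
	if (ret)
		return ret;

	/* window 0: map 1 MiB of the geometry onto physical 0x80000000 */
	return iommu_domain_window_enable(dom, 0, 0x80000000, SZ_1M,
					  IOMMU_READ | IOMMU_WRITE);
}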
/* domain-device relationship */
struct device_domain_info {
	struct list_head link; /* link to domain siblings */
	struct device *dev;
	u32 liodn;
	struct fsl_dma_domain *domain; /* pointer to domain */
};
#endif /* __FSL_PAMU_DOMAIN_H */
4598
drivers/iommu/intel-iommu.c
Normal file
File diff suppressed because it is too large
1173
drivers/iommu/intel_irq_remapping.c
Normal file
File diff suppressed because it is too large
134
drivers/iommu/iommu-sysfs.c
Normal file
@ -0,0 +1,134 @@
/*
 * IOMMU sysfs class support
 *
 * Copyright (C) 2014 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/slab.h>

/*
 * We provide a common class "devices" group which initially has no attributes.
 * As devices are added to the IOMMU, we'll add links to the group.
 */
static struct attribute *devices_attr[] = {
	NULL,
};

static const struct attribute_group iommu_devices_attr_group = {
	.name = "devices",
	.attrs = devices_attr,
};

static const struct attribute_group *iommu_dev_groups[] = {
	&iommu_devices_attr_group,
	NULL,
};

static void iommu_release_device(struct device *dev)
{
	kfree(dev);
}

static struct class iommu_class = {
	.name = "iommu",
	.dev_release = iommu_release_device,
	.dev_groups = iommu_dev_groups,
};

static int __init iommu_dev_init(void)
{
	return class_register(&iommu_class);
}
postcore_initcall(iommu_dev_init);

/*
 * Create an IOMMU device and return a pointer to it. IOMMU specific
 * attributes can be provided as an attribute group, allowing a unique
 * namespace per IOMMU type.
 */
struct device *iommu_device_create(struct device *parent, void *drvdata,
				   const struct attribute_group **groups,
				   const char *fmt, ...)
{
	struct device *dev;
	va_list vargs;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	device_initialize(dev);

	dev->class = &iommu_class;
	dev->parent = parent;
	dev->groups = groups;
	dev_set_drvdata(dev, drvdata);

	va_start(vargs, fmt);
	ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	if (ret)
		goto error;

	ret = device_add(dev);
	if (ret)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(ret);
}

void iommu_device_destroy(struct device *dev)
{
	if (!dev || IS_ERR(dev))
		return;

	device_unregister(dev);
}

/*
 * IOMMU drivers can indicate a device is managed by a given IOMMU using
 * this interface. A link to the device will be created in the "devices"
 * directory of the IOMMU device in sysfs and an "iommu" link will be
 * created under the linked device, pointing back at the IOMMU device.
 */
int iommu_device_link(struct device *dev, struct device *link)
{
	int ret;

	if (!dev || IS_ERR(dev))
		return -ENODEV;

	ret = sysfs_add_link_to_group(&dev->kobj, "devices",
				      &link->kobj, dev_name(link));
	if (ret)
		return ret;

	ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
	if (ret)
		sysfs_remove_link_from_group(&dev->kobj, "devices",
					     dev_name(link));

	return ret;
}

void iommu_device_unlink(struct device *dev, struct device *link)
{
	if (!dev || IS_ERR(dev))
		return;

	sysfs_remove_link(&link->kobj, "iommu");
	sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
}
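A minimal usage sketch for a hypothetical IOMMU driver (all names here are illustrative): create the class device once at probe time, then link each client device as it is added:

static struct device *demo_iommu_dev;

static int demo_iommu_probe(struct device *parent)
{
	demo_iommu_dev = iommu_device_create(parent, NULL, NULL,
					     "demo-iommu.%d", 0);
	if (IS_ERR(demo_iommu_dev))
		return PTR_ERR(demo_iommu_dev);
	return 0;
}

static int demo_iommu_add_device(struct device *dev)
{
	/* creates <iommu>/devices/<dev> and a back link named "iommu" */
	return iommu_device_link(demo_iommu_dev, dev);
}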
27
drivers/iommu/iommu-traces.c
Normal file
@ -0,0 +1,27 @@
/*
 * iommu trace points
 *
 * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
 *
 */

#include <linux/string.h>
#include <linux/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/iommu.h>

/* iommu_group_event */
EXPORT_TRACEPOINT_SYMBOL_GPL(add_device_to_group);
EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);

/* iommu_device_event */
EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);

/* iommu_map_unmap */
EXPORT_TRACEPOINT_SYMBOL_GPL(map);
EXPORT_TRACEPOINT_SYMBOL_GPL(unmap);

/* iommu_error */
EXPORT_TRACEPOINT_SYMBOL_GPL(io_page_fault);
1224
drivers/iommu/iommu.c
Normal file
File diff suppressed because it is too large
487
drivers/iommu/iova.c
Normal file
@ -0,0 +1,487 @@
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
	    (iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}

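A worked example of the padding computation (illustrative numbers): for size = 8 pfns (order 3) and limit_pfn = 0x1005, pad_size = (0x1005 + 1) % 8 = 6; the allocator below then chooses pfn_lo = 0x1005 - (8 + 6) + 1 = 0xff8, which is aligned on the 8-pfn size.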
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	/* Add new node and rebalance tree. */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							 struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downward from limit_pfn rather than upward from IOVA_START_PFN.
 * If the size_aligned flag is set then the allocated address iova->pfn_lo
 * will be naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}

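A minimal usage sketch from a hypothetical caller (assumes the kmem_cache behind alloc_iova_mem() has been set up, as the DMA-remapping code in this kernel does):

static void iova_demo(struct iova_domain *iovad)
{
	unsigned long limit = DMA_BIT_MASK(32) >> PAGE_SHIFT;
	struct iova *iova;

	init_iova_domain(iovad, limit);
	/* 16 page frames, naturally aligned, below the 32-bit boundary */
	iova = alloc_iova(iovad, 16, limit, true);
	if (iova)
		__free_iova(iovad, iova);
}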
/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);
	if (iova)
		__free_iova(iovad, iova);

}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);
		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
			    (pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}

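For illustration, a hypothetical caller would carve a hole out of the allocatable space like this, after which alloc_iova() can never hand out the reserved pfns:

reserve_iova(iovad, 0x100, 0x1ff); /* illustrative bounds */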
/**
 * copy_reserved_iova - copies the reserved ranges between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;
		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}
1255
drivers/iommu/ipmmu-vmsa.c
Normal file
File diff suppressed because it is too large
398
drivers/iommu/irq_remapping.c
Normal file
@ -0,0 +1,398 @@
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/pci.h>

#include <asm/hw_irq.h>
#include <asm/irq_remapping.h>
#include <asm/processor.h>
#include <asm/x86_init.h>
#include <asm/apic.h>
#include <asm/hpet.h>

#include "irq_remapping.h"

int irq_remapping_enabled;

int disable_irq_remap;
int irq_remap_broken;
int disable_sourceid_checking;
int no_x2apic_optout;

static struct irq_remap_ops *remap_ops;

static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
				  int index, int sub_handle);
static int set_remapped_irq_affinity(struct irq_data *data,
				     const struct cpumask *mask,
				     bool force);

static bool irq_remapped(struct irq_cfg *cfg)
{
	return (cfg->remapped == 1);
}

static void irq_remapping_disable_io_apic(void)
{
	/*
	 * With interrupt-remapping, for now we will use virtual wire A
	 * mode, as virtual wire B is a little more complex (we would need
	 * to configure both the IOAPIC RTE and the interrupt-remapping
	 * table entry). As this gets called during crash dump, keep this
	 * simple for now.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(0);
}

static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
{
	int ret, sub_handle, nvec_pow2, index = 0;
	unsigned int irq;
	struct msi_desc *msidesc;

	WARN_ON(!list_is_singular(&dev->msi_list));
	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
	WARN_ON(msidesc->irq);
	WARN_ON(msidesc->msi_attrib.multiple);
	WARN_ON(msidesc->nvec_used);

	irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev));
	if (irq == 0)
		return -ENOSPC;

	nvec_pow2 = __roundup_pow_of_two(nvec);
	msidesc->nvec_used = nvec;
	msidesc->msi_attrib.multiple = ilog2(nvec_pow2);
	for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
		if (!sub_handle) {
			index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			ret = msi_setup_remapped_irq(dev, irq + sub_handle,
						     index, sub_handle);
			if (ret < 0)
				goto error;
		}
		ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
		if (ret < 0)
			goto error;
	}
	return 0;

error:
	irq_free_hwirqs(irq, nvec);

	/*
	 * Restore altered MSI descriptor fields and prevent just destroyed
	 * IRQs from tearing down again in default_teardown_msi_irqs()
	 */
	msidesc->irq = 0;
	msidesc->nvec_used = 0;
	msidesc->msi_attrib.multiple = 0;

	return ret;
}

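Note the power-of-two handling above: PCI multi-MSI can only grant power-of-two vector counts, so for, say, nvec = 3 the code reserves nvec_pow2 = 4 remapping entries and records msi_attrib.multiple = ilog2(4) = 2, while nvec_used keeps the true count of 3.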
static int do_setup_msix_irqs(struct pci_dev *dev, int nvec)
{
	int node, ret, sub_handle, index = 0;
	struct msi_desc *msidesc;
	unsigned int irq;

	node = dev_to_node(&dev->dev);
	sub_handle = 0;

	list_for_each_entry(msidesc, &dev->msi_list, list) {

		irq = irq_alloc_hwirq(node);
		if (irq == 0)
			return -1;

		if (sub_handle == 0)
			ret = index = msi_alloc_remapped_irq(dev, irq, nvec);
		else
			ret = msi_setup_remapped_irq(dev, irq, index, sub_handle);

		if (ret < 0)
			goto error;

		ret = setup_msi_irq(dev, msidesc, irq, 0);
		if (ret < 0)
			goto error;

		sub_handle += 1;
		irq += 1;
	}

	return 0;

error:
	irq_free_hwirq(irq);
	return ret;
}

static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
					int nvec, int type)
{
	if (type == PCI_CAP_ID_MSI)
		return do_setup_msi_irqs(dev, nvec);
	else
		return do_setup_msix_irqs(dev, nvec);
}

static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
{
	/*
	 * Intr-remapping uses pin number as the virtual vector
	 * in the RTE. Actual vector is programmed in
	 * intr-remapping table entry. Hence for the io-apic
	 * EOI we use the pin number.
	 */
	io_apic_eoi(apic, pin);
}

static void __init irq_remapping_modify_x86_ops(void)
{
	x86_io_apic_ops.disable = irq_remapping_disable_io_apic;
	x86_io_apic_ops.set_affinity = set_remapped_irq_affinity;
	x86_io_apic_ops.setup_entry = setup_ioapic_remapped_entry;
	x86_io_apic_ops.eoi_ioapic_pin = eoi_ioapic_pin_remapped;
	x86_msi.setup_msi_irqs = irq_remapping_setup_msi_irqs;
	x86_msi.setup_hpet_msi = setup_hpet_msi_remapped;
	x86_msi.compose_msi_msg = compose_remapped_msi_msg;
}

static __init int setup_nointremap(char *str)
{
	disable_irq_remap = 1;
	return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_irqremap(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "on", 2))
			disable_irq_remap = 0;
		else if (!strncmp(str, "off", 3))
			disable_irq_remap = 1;
		else if (!strncmp(str, "nosid", 5))
			disable_sourceid_checking = 1;
		else if (!strncmp(str, "no_x2apic_optout", 16))
			no_x2apic_optout = 1;

		str += strcspn(str, ",");
		while (*str == ',')
			str++;
	}

	return 0;
}
early_param("intremap", setup_irqremap);

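The parser above accepts a comma-separated list on the kernel command line: for example, intremap=off disables interrupt remapping entirely, while intremap=nosid,no_x2apic_optout keeps it enabled but skips source-id checking and ignores the firmware's x2APIC opt-out.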
void __init setup_irq_remapping_ops(void)
{
	remap_ops = &intel_irq_remap_ops;

#ifdef CONFIG_AMD_IOMMU
	if (amd_iommu_irq_ops.prepare() == 0)
		remap_ops = &amd_iommu_irq_ops;
#endif
}

void set_irq_remapping_broken(void)
{
	irq_remap_broken = 1;
}

int irq_remapping_supported(void)
{
	if (disable_irq_remap)
		return 0;

	if (!remap_ops || !remap_ops->supported)
		return 0;

	return remap_ops->supported();
}

int __init irq_remapping_prepare(void)
{
	if (!remap_ops || !remap_ops->prepare)
		return -ENODEV;

	return remap_ops->prepare();
}

int __init irq_remapping_enable(void)
{
	int ret;

	if (!remap_ops || !remap_ops->enable)
		return -ENODEV;

	ret = remap_ops->enable();

	if (irq_remapping_enabled)
		irq_remapping_modify_x86_ops();

	return ret;
}

void irq_remapping_disable(void)
{
	if (!irq_remapping_enabled ||
	    !remap_ops ||
	    !remap_ops->disable)
		return;

	remap_ops->disable();
}

int irq_remapping_reenable(int mode)
{
	if (!irq_remapping_enabled ||
	    !remap_ops ||
	    !remap_ops->reenable)
		return 0;

	return remap_ops->reenable(mode);
}

int __init irq_remap_enable_fault_handling(void)
{
	if (!irq_remapping_enabled)
		return 0;

	if (!remap_ops || !remap_ops->enable_faulting)
		return -ENODEV;

	return remap_ops->enable_faulting();
}

int setup_ioapic_remapped_entry(int irq,
				struct IO_APIC_route_entry *entry,
				unsigned int destination, int vector,
				struct io_apic_irq_attr *attr)
{
	if (!remap_ops || !remap_ops->setup_ioapic_entry)
		return -ENODEV;

	return remap_ops->setup_ioapic_entry(irq, entry, destination,
					     vector, attr);
}

static int set_remapped_irq_affinity(struct irq_data *data,
				     const struct cpumask *mask, bool force)
{
	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
	    !remap_ops->set_affinity)
		return 0;

	return remap_ops->set_affinity(data, mask, force);
}

void free_remapped_irq(int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	if (!remap_ops || !remap_ops->free_irq)
		return;

	if (irq_remapped(cfg))
		remap_ops->free_irq(irq);
}

void compose_remapped_msi_msg(struct pci_dev *pdev,
			      unsigned int irq, unsigned int dest,
			      struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);

	if (!irq_remapped(cfg))
		native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
	else if (remap_ops && remap_ops->compose_msi_msg)
		remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
}

static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
{
	if (!remap_ops || !remap_ops->msi_alloc_irq)
		return -ENODEV;

	return remap_ops->msi_alloc_irq(pdev, irq, nvec);
}

static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
				  int index, int sub_handle)
{
	if (!remap_ops || !remap_ops->msi_setup_irq)
		return -ENODEV;

	return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
}

int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
{
	int ret;

	if (!remap_ops || !remap_ops->alloc_hpet_msi)
		return -ENODEV;

	ret = remap_ops->alloc_hpet_msi(irq, id);
	if (ret)
		return -EINVAL;

	return default_setup_hpet_msi(irq, id);
}

void panic_if_irq_remap(const char *msg)
{
	if (irq_remapping_enabled)
		panic(msg);
}

static void ir_ack_apic_edge(struct irq_data *data)
{
	ack_APIC_irq();
}

static void ir_ack_apic_level(struct irq_data *data)
{
	ack_APIC_irq();
	eoi_ioapic_irq(data->irq, data->chip_data);
}

static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
{
	seq_printf(p, " IR-%s", data->chip->name);
}

void irq_remap_modify_chip_defaults(struct irq_chip *chip)
{
	chip->irq_print_chip = ir_print_prefix;
	chip->irq_ack = ir_ack_apic_edge;
	chip->irq_eoi = ir_ack_apic_level;
	chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
}

bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
{
	if (!irq_remapped(cfg))
		return false;
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
	irq_remap_modify_chip_defaults(chip);
	return true;
}
97
drivers/iommu/irq_remapping.h
Normal file
@ -0,0 +1,97 @@
/*
 * Copyright (C) 2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This header file contains stuff that is shared between different interrupt
 * remapping drivers but with no need to be visible outside of the IOMMU layer.
 */

#ifndef __IRQ_REMAPPING_H
#define __IRQ_REMAPPING_H

#ifdef CONFIG_IRQ_REMAP

struct IO_APIC_route_entry;
struct io_apic_irq_attr;
struct irq_data;
struct cpumask;
struct pci_dev;
struct msi_msg;

extern int disable_irq_remap;
extern int irq_remap_broken;
extern int disable_sourceid_checking;
extern int no_x2apic_optout;
extern int irq_remapping_enabled;

struct irq_remap_ops {
	/* Check whether Interrupt Remapping is supported */
	int (*supported)(void);

	/* Initializes hardware and makes it ready for remapping interrupts */
	int (*prepare)(void);

	/* Enables the remapping hardware */
	int (*enable)(void);

	/* Disables the remapping hardware */
	void (*disable)(void);

	/* Reenables the remapping hardware */
	int (*reenable)(int);

	/* Enable fault handling */
	int (*enable_faulting)(void);

	/* IO-APIC setup routine */
	int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *,
				  unsigned int, int,
				  struct io_apic_irq_attr *);

	/* Set the CPU affinity of a remapped interrupt */
	int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
			    bool force);

	/* Free an IRQ */
	int (*free_irq)(int);

	/* Create MSI msg to use for interrupt remapping */
	void (*compose_msi_msg)(struct pci_dev *,
				unsigned int, unsigned int,
				struct msi_msg *, u8);

	/* Allocate remapping resources for MSI */
	int (*msi_alloc_irq)(struct pci_dev *, int, int);

	/* Setup the remapped MSI irq */
	int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int);

	/* Setup interrupt remapping for an HPET MSI */
	int (*alloc_hpet_msi)(unsigned int, unsigned int);
};

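For orientation, a hypothetical skeleton of a provider of this vtable (the real instances are intel_irq_remap_ops and amd_iommu_irq_ops, declared just below; the names here are illustrative only):

static int demo_remap_supported(void)
{
	return 0; /* report "not supported" */
}

static int demo_remap_prepare(void)
{
	return -ENODEV;
}

static struct irq_remap_ops demo_irq_remap_ops = {
	.supported = demo_remap_supported,
	.prepare = demo_remap_prepare,
	/* remaining callbacks may stay NULL; the callers check for that */
};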
extern struct irq_remap_ops intel_irq_remap_ops;
extern struct irq_remap_ops amd_iommu_irq_ops;

#else /* CONFIG_IRQ_REMAP */

#define irq_remapping_enabled 0
#define disable_irq_remap 1
#define irq_remap_broken 0

#endif /* CONFIG_IRQ_REMAP */

#endif /* __IRQ_REMAPPING_H */
736
drivers/iommu/msm_iommu.c
Normal file
@ -0,0 +1,736 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>

#include <asm/cacheflush.h>
#include <asm/sizes.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

#define MRC(reg, processor, op1, crn, crm, op2) \
__asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))

#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)

/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)

static int msm_iommu_tex_class[4];

DEFINE_SPINLOCK(msm_iommu_lock);

struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
};

static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
	int ret;

	ret = clk_enable(drvdata->pclk);
	if (ret)
		goto fail;

	if (drvdata->clk) {
		ret = clk_enable(drvdata->clk);
		if (ret)
			clk_disable(drvdata->pclk);
	}
fail:
	return ret;
}

static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
	if (drvdata->clk)
		clk_disable(drvdata->clk);
	clk_disable(drvdata->pclk);
}

static int __flush_iotlb(struct iommu_domain *domain)
{
	struct msm_priv *priv = domain->priv;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
#ifndef CONFIG_IOMMU_PGTABLES_L2
	unsigned long *fl_table = priv->pgtable;
	int i;

	if (!list_empty(&priv->list_attached)) {
		dmac_flush_range(fl_table, fl_table + SZ_16K);

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) {
				void *sl_table = __va(fl_table[i] &
						      FL_BASE_MASK);
				dmac_flush_range(sl_table, sl_table + SZ_4K);
			}
	}
#endif

	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
		if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
			BUG();

		iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
		BUG_ON(!iommu_drvdata);

		ret = __enable_clocks(iommu_drvdata);
		if (ret)
			goto fail;

		SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
		__disable_clocks(iommu_drvdata);
	}
fail:
	return ret;
}

static void __reset_context(void __iomem *base, int ctx)
{
	SET_BPRCOSH(base, ctx, 0);
	SET_BPRCISH(base, ctx, 0);
	SET_BPRCNSH(base, ctx, 0);
	SET_BPSHCFG(base, ctx, 0);
	SET_BPMTCFG(base, ctx, 0);
	SET_ACTLR(base, ctx, 0);
	SET_SCTLR(base, ctx, 0);
	SET_FSRRESTORE(base, ctx, 0);
	SET_TTBR0(base, ctx, 0);
	SET_TTBR1(base, ctx, 0);
	SET_TTBCR(base, ctx, 0);
	SET_BFBCR(base, ctx, 0);
	SET_PAR(base, ctx, 0);
	SET_FAR(base, ctx, 0);
	SET_CTX_TLBIALL(base, ctx, 0);
	SET_TLBFLPTER(base, ctx, 0);
	SET_TLBSLPTER(base, ctx, 0);
	SET_TLBLKCR(base, ctx, 0);
	SET_PRRR(base, ctx, 0);
	SET_NMRR(base, ctx, 0);
}

static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
	unsigned int prrr, nmrr;
	__reset_context(base, ctx);

	/* Set up HTW mode */
	/* TLB miss configuration: perform HTW on miss */
	SET_TLBMCFG(base, ctx, 0x3);

	/* V2P configuration: HTW for access */
	SET_V2PCFG(base, ctx, 0x3);

	SET_TTBCR(base, ctx, 0);
	SET_TTBR0_PA(base, ctx, (pgtable >> 14));

	/* Invalidate the TLB for this context */
	SET_CTX_TLBIALL(base, ctx, 0);

	/* Set interrupt number to "secure" interrupt */
	SET_IRPTNDX(base, ctx, 0);

	/* Enable context fault interrupt */
	SET_CFEIE(base, ctx, 1);

	/* Stall access on a context fault and let the handler deal with it */
	SET_CFCFG(base, ctx, 1);

	/* Redirect all cacheable requests to L2 slave port. */
	SET_RCISH(base, ctx, 1);
	SET_RCOSH(base, ctx, 1);
	SET_RCNSH(base, ctx, 1);

	/* Turn on TEX Remap */
	SET_TRE(base, ctx, 1);

	/* Set TEX remap attributes */
	RCP15_PRRR(prrr);
	RCP15_NMRR(nmrr);
	SET_PRRR(base, ctx, prrr);
	SET_NMRR(base, ctx, nmrr);

	/* Turn on BFB prefetch */
	SET_BFBDFE(base, ctx, 1);

#ifdef CONFIG_IOMMU_PGTABLES_L2
	/* Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_TTBR0_SH(base, ctx, 1);
	SET_TTBR1_SH(base, ctx, 1);

	SET_TTBR0_NOS(base, ctx, 1);
	SET_TTBR1_NOS(base, ctx, 1);

	SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR0_IRGNL(base, ctx, 1);

	SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */
	SET_TTBR1_IRGNL(base, ctx, 1);

	SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */
	SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */
#endif

	/* Enable the MMU */
	SET_M(base, ctx, 1);
}

static int msm_iommu_domain_init(struct iommu_domain *domain)
{
	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		goto fail_nomem;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
							  get_order(SZ_16K));

	if (!priv->pgtable)
		goto fail_nomem;

	memset(priv->pgtable, 0, SZ_16K);
	domain->priv = priv;

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end = (1ULL << 32) - 1;
	domain->geometry.force_aperture = true;

	return 0;

fail_nomem:
	kfree(priv);
	return -ENOMEM;
}

static void msm_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct msm_priv *priv;
	unsigned long flags;
	unsigned long *fl_table;
	int i;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;
	domain->priv = NULL;

	if (priv) {
		fl_table = priv->pgtable;

		for (i = 0; i < NUM_FL_PTE; i++)
			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
				free_page((unsigned long) __va(((fl_table[i]) &
								FL_BASE_MASK)));

		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
		priv->pgtable = NULL;
	}

	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	struct msm_iommu_ctx_drvdata *tmp_drvdata;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&msm_iommu_lock, flags);

	priv = domain->priv;

	if (!priv || !dev) {
		ret = -EINVAL;
		goto fail;
	}

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
		ret = -EINVAL;
		goto fail;
	}

	if (!list_empty(&ctx_drvdata->attached_elm)) {
		ret = -EBUSY;
		goto fail;
	}

	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
		if (tmp_drvdata == ctx_drvdata) {
			ret = -EBUSY;
			goto fail;
		}

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__program_context(iommu_drvdata->base, ctx_dev->num,
			  __pa(priv->pgtable));

	__disable_clocks(iommu_drvdata);
	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
	ret = __flush_iotlb(domain);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
	return ret;
}

static void msm_iommu_detach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct msm_priv *priv;
	struct msm_iommu_ctx_dev *ctx_dev;
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_iommu_lock, flags);
	priv = domain->priv;

	if (!priv || !dev)
		goto fail;

	iommu_drvdata = dev_get_drvdata(dev->parent);
	ctx_drvdata = dev_get_drvdata(dev);
	ctx_dev = dev->platform_data;

	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
		goto fail;

	ret = __flush_iotlb(domain);
	if (ret)
		goto fail;

	ret = __enable_clocks(iommu_drvdata);
	if (ret)
		goto fail;

	__reset_context(iommu_drvdata->base, ctx_dev->num);
	__disable_clocks(iommu_drvdata);
	list_del_init(&ctx_drvdata->attached_elm);

fail:
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
}

static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
|
||||
phys_addr_t pa, size_t len, int prot)
|
||||
{
|
||||
struct msm_priv *priv;
|
||||
unsigned long flags;
|
||||
unsigned long *fl_table;
|
||||
unsigned long *fl_pte;
|
||||
unsigned long fl_offset;
|
||||
unsigned long *sl_table;
|
||||
unsigned long *sl_pte;
|
||||
unsigned long sl_offset;
|
||||
unsigned int pgprot;
|
||||
int ret = 0, tex, sh;
|
||||
|
||||
spin_lock_irqsave(&msm_iommu_lock, flags);
|
||||
|
||||
sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
|
||||
tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
|
||||
|
||||
if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
priv = domain->priv;
|
||||
if (!priv) {
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
fl_table = priv->pgtable;
|
||||
|
||||
if (len != SZ_16M && len != SZ_1M &&
|
||||
len != SZ_64K && len != SZ_4K) {
|
||||
pr_debug("Bad size: %d\n", len);
|
||||
        ret = -EINVAL;
        goto fail;
    }

    if (!fl_table) {
        pr_debug("Null page table\n");
        ret = -EINVAL;
        goto fail;
    }

    if (len == SZ_16M || len == SZ_1M) {
        pgprot = sh ? FL_SHARED : 0;
        pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
        pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
        pgprot |= tex & 0x04 ? FL_TEX0 : 0;
    } else {
        pgprot = sh ? SL_SHARED : 0;
        pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
        pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
        pgprot |= tex & 0x04 ? SL_TEX0 : 0;
    }

    fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
    fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

    if (len == SZ_16M) {
        int i = 0;
        for (i = 0; i < 16; i++)
            *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
                          FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
                          FL_SHARED | FL_NG | pgprot;
    }

    if (len == SZ_1M)
        *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
                  FL_TYPE_SECT | FL_SHARED | pgprot;

    /* Need a 2nd level table */
    if ((len == SZ_4K || len == SZ_64K) && (*fl_pte) == 0) {
        unsigned long *sl;
        sl = (unsigned long *) __get_free_pages(GFP_ATOMIC,
                                                get_order(SZ_4K));

        if (!sl) {
            pr_debug("Could not allocate second level table\n");
            ret = -ENOMEM;
            goto fail;
        }

        memset(sl, 0, SZ_4K);
        *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
    }

    sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
    sl_offset = SL_OFFSET(va);
    sl_pte = sl_table + sl_offset;


    if (len == SZ_4K)
        *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_AP0 | SL_AP1 | SL_NG |
                  SL_SHARED | SL_TYPE_SMALL | pgprot;

    if (len == SZ_64K) {
        int i;

        for (i = 0; i < 16; i++)
            *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_AP0 |
                SL_NG | SL_AP1 | SL_SHARED | SL_TYPE_LARGE | pgprot;
    }

    ret = __flush_iotlb(domain);
fail:
    spin_unlock_irqrestore(&msm_iommu_lock, flags);
    return ret;
}
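/*
 * Illustration (added for clarity, not part of the original file): how the
 * two-level lookup in msm_iommu_map() decomposes a VA in the ARM
 * short-descriptor format this driver uses. FL_OFFSET() takes the top 12
 * bits and SL_OFFSET() the next 8; the sample VA below is invented.
 */
#if 0   /* worked example only */
static void example_offsets(void)
{
    unsigned long va = 0x12345000;

    /* first-level index: va[31:20] -> 0x123 */
    unsigned long fl = (va & 0xFFF00000) >> 20;
    /* second-level index: va[19:12] -> 0x45 */
    unsigned long sl = (va & 0x000FF000) >> 12;
}
#endif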

static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
                              size_t len)
{
    struct msm_priv *priv;
    unsigned long flags;
    unsigned long *fl_table;
    unsigned long *fl_pte;
    unsigned long fl_offset;
    unsigned long *sl_table;
    unsigned long *sl_pte;
    unsigned long sl_offset;
    int i, ret = 0;

    spin_lock_irqsave(&msm_iommu_lock, flags);

    priv = domain->priv;

    if (!priv)
        goto fail;

    fl_table = priv->pgtable;

    if (len != SZ_16M && len != SZ_1M &&
        len != SZ_64K && len != SZ_4K) {
pr_debug("Bad length: %d\n", len);
|
||||
        goto fail;
    }

    if (!fl_table) {
        pr_debug("Null page table\n");
        goto fail;
    }

    fl_offset = FL_OFFSET(va);      /* Upper 12 bits */
    fl_pte = fl_table + fl_offset;  /* int pointers, 4 bytes */

    if (*fl_pte == 0) {
        pr_debug("First level PTE is 0\n");
        goto fail;
    }

    /* Unmap supersection */
    if (len == SZ_16M)
        for (i = 0; i < 16; i++)
            *(fl_pte+i) = 0;

    if (len == SZ_1M)
        *fl_pte = 0;

    sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
    sl_offset = SL_OFFSET(va);
    sl_pte = sl_table + sl_offset;

    if (len == SZ_64K) {
        for (i = 0; i < 16; i++)
            *(sl_pte+i) = 0;
    }

    if (len == SZ_4K)
        *sl_pte = 0;

    if (len == SZ_4K || len == SZ_64K) {
        int used = 0;

        for (i = 0; i < NUM_SL_PTE; i++)
            if (sl_table[i])
                used = 1;
        if (!used) {
            free_page((unsigned long)sl_table);
            *fl_pte = 0;
        }
    }

    ret = __flush_iotlb(domain);

fail:
    spin_unlock_irqrestore(&msm_iommu_lock, flags);

    /* the IOMMU API requires us to return how many bytes were unmapped */
    len = ret ? 0 : len;
    return len;
}

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t va)
{
    struct msm_priv *priv;
    struct msm_iommu_drvdata *iommu_drvdata;
    struct msm_iommu_ctx_drvdata *ctx_drvdata;
    unsigned int par;
    unsigned long flags;
    void __iomem *base;
    phys_addr_t ret = 0;
    int ctx;

    spin_lock_irqsave(&msm_iommu_lock, flags);

    priv = domain->priv;
    if (list_empty(&priv->list_attached))
        goto fail;

    ctx_drvdata = list_entry(priv->list_attached.next,
                             struct msm_iommu_ctx_drvdata, attached_elm);
    iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);

    base = iommu_drvdata->base;
    ctx = ctx_drvdata->num;

    ret = __enable_clocks(iommu_drvdata);
    if (ret)
        goto fail;

    /* Invalidate context TLB */
    SET_CTX_TLBIALL(base, ctx, 0);
    SET_V2PPR(base, ctx, va & V2Pxx_VA);

    par = GET_PAR(base, ctx);

    /* We are dealing with a supersection */
    if (GET_NOFAULT_SS(base, ctx))
        ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
    else    /* Upper 20 bits from PAR, lower 12 from VA */
        ret = (par & 0xFFFFF000) | (va & 0x00000FFF);

    if (GET_FAULT(base, ctx))
        ret = 0;

    __disable_clocks(iommu_drvdata);
fail:
    spin_unlock_irqrestore(&msm_iommu_lock, flags);
    return ret;
}
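/*
 * Illustration (added for clarity, not part of the original driver): how the
 * hardware V2P probe result above is combined with the low VA bits. The
 * PAR and VA values are invented for the example.
 */
#if 0   /* worked example only */
static phys_addr_t example_compose_pa(void)
{
    unsigned int par = 0xABCDE000;  /* hypothetical PAR readback */
    dma_addr_t va = 0x00001234;

    /* 4K/64K page: upper 20 bits from PAR, lower 12 from VA */
    return (par & 0xFFFFF000) | (va & 0x00000FFF);  /* 0xABCDE234 */
}
#endif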

static bool msm_iommu_capable(enum iommu_cap cap)
{
    return false;
}

static void print_ctx_regs(void __iomem *base, int ctx)
{
    unsigned int fsr = GET_FSR(base, ctx);
    pr_err("FAR = %08x PAR = %08x\n",
           GET_FAR(base, ctx), GET_PAR(base, ctx));
    pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", fsr,
           (fsr & 0x02) ? "TF " : "",
           (fsr & 0x04) ? "AFF " : "",
           (fsr & 0x08) ? "APF " : "",
           (fsr & 0x10) ? "TLBMF " : "",
           (fsr & 0x20) ? "HTWDEEF " : "",
           (fsr & 0x40) ? "HTWSEEF " : "",
           (fsr & 0x80) ? "MHF " : "",
           (fsr & 0x10000) ? "SL " : "",
           (fsr & 0x40000000) ? "SS " : "",
           (fsr & 0x80000000) ? "MULTI " : "");

    pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
           GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx));
    pr_err("TTBR0 = %08x TTBR1 = %08x\n",
           GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
    pr_err("SCTLR = %08x ACTLR = %08x\n",
           GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
    pr_err("PRRR = %08x NMRR = %08x\n",
           GET_PRRR(base, ctx), GET_NMRR(base, ctx));
}

irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
    struct msm_iommu_drvdata *drvdata = dev_id;
    void __iomem *base;
    unsigned int fsr;
    int i, ret;

    spin_lock(&msm_iommu_lock);

    if (!drvdata) {
        pr_err("Invalid device ID in context interrupt handler\n");
        goto fail;
    }

    base = drvdata->base;

    pr_err("Unexpected IOMMU page fault!\n");
    pr_err("base = %08x\n", (unsigned int) base);

    ret = __enable_clocks(drvdata);
    if (ret)
        goto fail;

    for (i = 0; i < drvdata->ncb; i++) {
        fsr = GET_FSR(base, i);
        if (fsr) {
            pr_err("Fault occurred in context %d.\n", i);
            pr_err("Interesting registers:\n");
            print_ctx_regs(base, i);
            SET_FSR(base, i, 0x4000000F);
        }
    }
    __disable_clocks(drvdata);
fail:
    spin_unlock(&msm_iommu_lock);
    return 0;
}

static const struct iommu_ops msm_iommu_ops = {
    .capable = msm_iommu_capable,
    .domain_init = msm_iommu_domain_init,
    .domain_destroy = msm_iommu_domain_destroy,
    .attach_dev = msm_iommu_attach_dev,
    .detach_dev = msm_iommu_detach_dev,
    .map = msm_iommu_map,
    .unmap = msm_iommu_unmap,
    .iova_to_phys = msm_iommu_iova_to_phys,
    .pgsize_bitmap = MSM_IOMMU_PGSIZES,
};

static int __init get_tex_class(int icp, int ocp, int mt, int nos)
{
    int i = 0;
    unsigned int prrr = 0;
    unsigned int nmrr = 0;
    int c_icp, c_ocp, c_mt, c_nos;

    RCP15_PRRR(prrr);
    RCP15_NMRR(nmrr);

    for (i = 0; i < NUM_TEX_CLASS; i++) {
        c_nos = PRRR_NOS(prrr, i);
        c_mt = PRRR_MT(prrr, i);
        c_icp = NMRR_ICP(nmrr, i);
        c_ocp = NMRR_OCP(nmrr, i);

        if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
            return i;
    }

    return -ENODEV;
}

static void __init setup_iommu_tex_classes(void)
{
    msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
            get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);

    msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
            get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);

    msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
            get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);

    msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
            get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
}

static int __init msm_iommu_init(void)
{
    setup_iommu_tex_classes();
    bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
    return 0;
}

subsys_initcall(msm_iommu_init);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
120
drivers/iommu/msm_iommu.h
Normal file
@@ -0,0 +1,120 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef MSM_IOMMU_H
#define MSM_IOMMU_H

#include <linux/interrupt.h>
#include <linux/clk.h>

/* Sharability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NON_SH           0x0
#define MSM_IOMMU_ATTR_SH               0x4

/* Cacheability attributes of MSM IOMMU mappings */
#define MSM_IOMMU_ATTR_NONCACHED        0x0
#define MSM_IOMMU_ATTR_CACHED_WB_WA     0x1
#define MSM_IOMMU_ATTR_CACHED_WB_NWA    0x2
#define MSM_IOMMU_ATTR_CACHED_WT        0x3

/* Mask for the cache policy attribute */
#define MSM_IOMMU_CP_MASK               0x03

/* Maximum number of Machine IDs that we are allowing to be mapped to the same
 * context bank. The number of MIDs mapped to the same CB does not affect
 * performance, but there is a practical limit on how many distinct MIDs may
 * be present. These mappings are typically determined at design time and are
 * not expected to change at run time.
 */
#define MAX_NUM_MIDS    32

/**
 * struct msm_iommu_dev - a single IOMMU hardware instance
 * name         Human-readable name given to this IOMMU HW instance
 * ncb          Number of context banks present on this IOMMU HW instance
 */
struct msm_iommu_dev {
    const char *name;
    int ncb;
};

/**
 * struct msm_iommu_ctx_dev - an IOMMU context bank instance
 * name         Human-readable name given to this context bank
 * num          Index of this context bank within the hardware
 * mids         List of Machine IDs that are to be mapped into this context
 *              bank, terminated by -1. The MID is a set of signals on the
 *              AXI bus that identifies the function associated with a specific
 *              memory request. (See ARM spec).
 */
struct msm_iommu_ctx_dev {
    const char *name;
    int num;
    int mids[MAX_NUM_MIDS];
};
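/*
 * Illustration (not from the original header): what a board file might pass
 * as platform data for one context bank. The name and MID values below are
 * hypothetical; the only fixed convention is the -1 terminator in mids[].
 */
#if 0   /* hypothetical board data, for illustration only */
static struct msm_iommu_ctx_dev example_ctx = {
    .name = "example_gfx3d_user",   /* invented name */
    .num  = 0,                      /* context bank 0 */
    .mids = { 0, 1, -1 },           /* two MIDs, then the terminator */
};
#endif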

/**
 * struct msm_iommu_drvdata - A single IOMMU hardware instance
 * @base:       IOMMU config port base address (VA)
 * @ncb         The number of contexts on this IOMMU
 * @irq:        Interrupt number
 * @clk:        The bus clock for this IOMMU hardware instance
 * @pclk:       The clock for the IOMMU bus interconnect
 *
 * A msm_iommu_drvdata holds the global driver data about a single piece
 * of an IOMMU hardware instance.
 */
struct msm_iommu_drvdata {
    void __iomem *base;
    int irq;
    int ncb;
    struct clk *clk;
    struct clk *pclk;
};

/**
 * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
 * @num:                Hardware context number of this context
 * @pdev:               Platform device associated with this HW instance
 * @attached_elm:       List element for domains to track which devices are
 *                      attached to them
 *
 * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
 * within each IOMMU hardware instance
 */
struct msm_iommu_ctx_drvdata {
    int num;
    struct platform_device *pdev;
    struct list_head attached_elm;
};

/*
 * Look up an IOMMU context device by its context name. NULL if none found.
 * Useful for testing and drivers that do not yet fully have IOMMU stuff in
 * their platform devices.
 */
struct device *msm_iommu_get_ctx(const char *ctx_name);
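/*
 * Illustration (not in the original header): how a client driver might use
 * the lookup above before attaching to a domain. The context name and the
 * error handling are invented for the sketch.
 */
#if 0   /* usage sketch only */
static int example_attach(struct iommu_domain *domain)
{
    struct device *ctx = msm_iommu_get_ctx("example_gfx3d_user");

    if (!ctx)
        return -ENODEV;     /* no such context bank registered */

    return iommu_attach_device(domain, ctx);
}
#endif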

/*
 * Interrupt handler for the IOMMU context fault interrupt. Hooking the
 * interrupt is not supported in the API yet, but this will print an error
 * message and dump useful IOMMU registers.
 */
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);

#endif
394
drivers/iommu/msm_iommu_dev.c
Normal file
@@ -0,0 +1,394 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/slab.h>

#include "msm_iommu_hw-8xxx.h"
#include "msm_iommu.h"

struct iommu_ctx_iter_data {
    /* input */
    const char *name;

    /* output */
    struct device *dev;
};

static struct platform_device *msm_iommu_root_dev;

static int each_iommu_ctx(struct device *dev, void *data)
{
    struct iommu_ctx_iter_data *res = data;
    struct msm_iommu_ctx_dev *c = dev->platform_data;

    if (!res || !c || !c->name || !res->name)
        return -EINVAL;

    if (!strcmp(res->name, c->name)) {
        res->dev = dev;
        return 1;
    }
    return 0;
}

static int each_iommu(struct device *dev, void *data)
{
    return device_for_each_child(dev, data, each_iommu_ctx);
}

struct device *msm_iommu_get_ctx(const char *ctx_name)
{
    struct iommu_ctx_iter_data r;
    int found;

    if (!msm_iommu_root_dev) {
        pr_err("No root IOMMU device.\n");
        goto fail;
    }

    r.name = ctx_name;
    found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);

    if (!found) {
        pr_err("Could not find context <%s>\n", ctx_name);
        goto fail;
    }

    return r.dev;
fail:
    return NULL;
}
EXPORT_SYMBOL(msm_iommu_get_ctx);

static void msm_iommu_reset(void __iomem *base, int ncb)
{
    int ctx;

    SET_RPUE(base, 0);
    SET_RPUEIE(base, 0);
    SET_ESRRESTORE(base, 0);
    SET_TBE(base, 0);
    SET_CR(base, 0);
    SET_SPDMBE(base, 0);
    SET_TESTBUSCR(base, 0);
    SET_TLBRSW(base, 0);
    SET_GLOBAL_TLBIALL(base, 0);
    SET_RPU_ACR(base, 0);
    SET_TLBLKCRWE(base, 1);

    for (ctx = 0; ctx < ncb; ctx++) {
        SET_BPRCOSH(base, ctx, 0);
        SET_BPRCISH(base, ctx, 0);
        SET_BPRCNSH(base, ctx, 0);
        SET_BPSHCFG(base, ctx, 0);
        SET_BPMTCFG(base, ctx, 0);
        SET_ACTLR(base, ctx, 0);
        SET_SCTLR(base, ctx, 0);
        SET_FSRRESTORE(base, ctx, 0);
        SET_TTBR0(base, ctx, 0);
        SET_TTBR1(base, ctx, 0);
        SET_TTBCR(base, ctx, 0);
        SET_BFBCR(base, ctx, 0);
        SET_PAR(base, ctx, 0);
        SET_FAR(base, ctx, 0);
        SET_CTX_TLBIALL(base, ctx, 0);
        SET_TLBFLPTER(base, ctx, 0);
        SET_TLBSLPTER(base, ctx, 0);
        SET_TLBLKCR(base, ctx, 0);
        SET_PRRR(base, ctx, 0);
        SET_NMRR(base, ctx, 0);
        SET_CONTEXTIDR(base, ctx, 0);
    }
}

static int msm_iommu_probe(struct platform_device *pdev)
{
    struct resource *r;
    struct clk *iommu_clk;
    struct clk *iommu_pclk;
    struct msm_iommu_drvdata *drvdata;
    struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
    void __iomem *regs_base;
    int ret, irq, par;

    if (pdev->id == -1) {
        msm_iommu_root_dev = pdev;
        return 0;
    }

    drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);

    if (!drvdata) {
        ret = -ENOMEM;
        goto fail;
    }

    if (!iommu_dev) {
        ret = -ENODEV;
        goto fail;
    }

    iommu_pclk = clk_get(NULL, "smmu_pclk");
    if (IS_ERR(iommu_pclk)) {
        ret = -ENODEV;
        goto fail;
    }

    ret = clk_prepare_enable(iommu_pclk);
    if (ret)
        goto fail_enable;

    iommu_clk = clk_get(&pdev->dev, "iommu_clk");

    if (!IS_ERR(iommu_clk)) {
        if (clk_get_rate(iommu_clk) == 0)
            clk_set_rate(iommu_clk, 1);

        ret = clk_prepare_enable(iommu_clk);
        if (ret) {
            clk_put(iommu_clk);
            goto fail_pclk;
        }
    } else
        iommu_clk = NULL;

    r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
    regs_base = devm_ioremap_resource(&pdev->dev, r);
    if (IS_ERR(regs_base)) {
        ret = PTR_ERR(regs_base);
        goto fail_clk;
    }

    irq = platform_get_irq_byname(pdev, "secure_irq");
    if (irq < 0) {
        ret = -ENODEV;
        goto fail_clk;
    }

    msm_iommu_reset(regs_base, iommu_dev->ncb);

    SET_M(regs_base, 0, 1);
    SET_PAR(regs_base, 0, 0);
    SET_V2PCFG(regs_base, 0, 1);
    SET_V2PPR(regs_base, 0, 0);
    par = GET_PAR(regs_base, 0);
    SET_V2PCFG(regs_base, 0, 0);
    SET_M(regs_base, 0, 0);

    if (!par) {
        pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
        ret = -ENODEV;
        goto fail_clk;
    }

    ret = request_irq(irq, msm_iommu_fault_handler, 0,
                      "msm_iommu_secure_irpt_handler", drvdata);
    if (ret) {
        pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
        goto fail_clk;
    }


    drvdata->pclk = iommu_pclk;
    drvdata->clk = iommu_clk;
    drvdata->base = regs_base;
    drvdata->irq = irq;
    drvdata->ncb = iommu_dev->ncb;

    pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
            iommu_dev->name, regs_base, irq, iommu_dev->ncb);

    platform_set_drvdata(pdev, drvdata);

    if (iommu_clk)
        clk_disable(iommu_clk);

    clk_disable(iommu_pclk);

    return 0;
fail_clk:
    if (iommu_clk) {
        clk_disable(iommu_clk);
        clk_put(iommu_clk);
    }
fail_pclk:
    clk_disable_unprepare(iommu_pclk);
fail_enable:
    clk_put(iommu_pclk);
fail:
    kfree(drvdata);
    return ret;
}

static int msm_iommu_remove(struct platform_device *pdev)
{
    struct msm_iommu_drvdata *drv = NULL;

    drv = platform_get_drvdata(pdev);
    if (drv) {
        if (drv->clk) {
            clk_unprepare(drv->clk);
            clk_put(drv->clk);
        }
        clk_unprepare(drv->pclk);
        clk_put(drv->pclk);
        memset(drv, 0, sizeof(*drv));
        kfree(drv);
    }
    return 0;
}

static int msm_iommu_ctx_probe(struct platform_device *pdev)
{
    struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
    struct msm_iommu_drvdata *drvdata;
    struct msm_iommu_ctx_drvdata *ctx_drvdata;
    int i, ret;

    if (!c || !pdev->dev.parent)
        return -EINVAL;

    drvdata = dev_get_drvdata(pdev->dev.parent);
    if (!drvdata)
        return -ENODEV;

    ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
    if (!ctx_drvdata)
        return -ENOMEM;

    ctx_drvdata->num = c->num;
    ctx_drvdata->pdev = pdev;

    INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
    platform_set_drvdata(pdev, ctx_drvdata);

    ret = clk_prepare_enable(drvdata->pclk);
    if (ret)
        goto fail;

    if (drvdata->clk) {
        ret = clk_prepare_enable(drvdata->clk);
        if (ret) {
            clk_disable_unprepare(drvdata->pclk);
            goto fail;
        }
    }

    /* Program the M2V tables for this context */
    for (i = 0; i < MAX_NUM_MIDS; i++) {
        int mid = c->mids[i];
        if (mid == -1)
            break;

        SET_M2VCBR_N(drvdata->base, mid, 0);
        SET_CBACR_N(drvdata->base, c->num, 0);

        /* Set VMID = 0 */
        SET_VMID(drvdata->base, mid, 0);

        /* Set the context number for that MID to this context */
        SET_CBNDX(drvdata->base, mid, c->num);

        /* Set MID associated with this context bank to 0 */
        SET_CBVMID(drvdata->base, c->num, 0);

        /* Set the ASID for TLB tagging for this context */
        SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);

        /* Set security bit override to be Non-secure */
        SET_NSCFG(drvdata->base, mid, 3);
    }

    if (drvdata->clk)
        clk_disable(drvdata->clk);
    clk_disable(drvdata->pclk);

    dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
    return 0;
fail:
    kfree(ctx_drvdata);
    return ret;
}
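/*
 * Illustration (not part of the original file): the effect of the M2V loop
 * above for the hypothetical platform data sketched in msm_iommu.h. Each
 * listed MID is steered to context bank c->num; the values are invented.
 */
#if 0   /* worked example only */
    /* With .num = 0 and .mids = { 0, 1, -1 }:
     *   SET_CBNDX(base, 0, 0);   MID 0 -> context bank 0
     *   SET_CBNDX(base, 1, 0);   MID 1 -> context bank 0
     * The -1 entry terminates the loop before MAX_NUM_MIDS.
     */
#endif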

static int msm_iommu_ctx_remove(struct platform_device *pdev)
{
    struct msm_iommu_ctx_drvdata *drv = NULL;
    drv = platform_get_drvdata(pdev);
    if (drv) {
        memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
        kfree(drv);
    }
    return 0;
}

static struct platform_driver msm_iommu_driver = {
    .driver = {
        .name   = "msm_iommu",
    },
    .probe  = msm_iommu_probe,
    .remove = msm_iommu_remove,
};

static struct platform_driver msm_iommu_ctx_driver = {
    .driver = {
        .name   = "msm_iommu_ctx",
    },
    .probe  = msm_iommu_ctx_probe,
    .remove = msm_iommu_ctx_remove,
};

static int __init msm_iommu_driver_init(void)
{
    int ret;
    ret = platform_driver_register(&msm_iommu_driver);
    if (ret != 0) {
        pr_err("Failed to register IOMMU driver\n");
        goto error;
    }

    ret = platform_driver_register(&msm_iommu_ctx_driver);
    if (ret != 0) {
        platform_driver_unregister(&msm_iommu_driver);
        pr_err("Failed to register IOMMU context driver\n");
        goto error;
    }

error:
    return ret;
}

static void __exit msm_iommu_driver_exit(void)
{
    platform_driver_unregister(&msm_iommu_ctx_driver);
    platform_driver_unregister(&msm_iommu_driver);
}

subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
1865
drivers/iommu/msm_iommu_hw-8xxx.h
Normal file
File diff suppressed because it is too large
91
drivers/iommu/of_iommu.c
Normal file
@@ -0,0 +1,91 @@
/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

/**
 * of_get_dma_window - Parse *dma-window property and returns 0 if found.
 *
 * @dn: device node
 * @prefix: prefix for property name if any
 * @index: index to start to parse
 * @busno: Returns busno if supported. Otherwise pass NULL
 * @addr: Returns address that DMA starts
 * @size: Returns the range that DMA can handle
 *
 * This supports different formats flexibly. "prefix" can be
 * configured if any. "busno" and "index" are optionally
 * specified. Set 0(or NULL) if not used.
 */
int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
                      unsigned long *busno, dma_addr_t *addr, size_t *size)
{
    const __be32 *dma_window, *end;
    int bytes, cur_index = 0;
    char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];

    if (!dn || !addr || !size)
        return -EINVAL;

    if (!prefix)
        prefix = "";

    snprintf(propname, sizeof(propname), "%sdma-window", prefix);
    snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
    snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);

    dma_window = of_get_property(dn, propname, &bytes);
    if (!dma_window)
        return -ENODEV;
    end = dma_window + bytes / sizeof(*dma_window);

    while (dma_window < end) {
        u32 cells;
        const void *prop;

        /* busno is one cell if supported */
        if (busno)
            *busno = be32_to_cpup(dma_window++);

        prop = of_get_property(dn, addrname, NULL);
        if (!prop)
            prop = of_get_property(dn, "#address-cells", NULL);

        cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
        if (!cells)
            return -EINVAL;
        *addr = of_read_number(dma_window, cells);
        dma_window += cells;

        prop = of_get_property(dn, sizename, NULL);
        cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
        if (!cells)
            return -EINVAL;
        *size = of_read_number(dma_window, cells);
        dma_window += cells;

        if (cur_index++ == index)
            break;
    }
    return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
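/*
 * Illustration (not part of the original file): a device-tree fragment and a
 * call that the parser above could consume. The node name, property values
 * and the "ib," prefix are all invented for this sketch.
 *
 *      example-iommu {
 *              ib,#dma-address-cells = <1>;
 *              ib,#dma-size-cells = <1>;
 *              ib,dma-window = <0x40000000 0x10000000>;
 *      };
 */
#if 0   /* usage sketch only; "dn" is the device node from the caller */
    dma_addr_t addr;
    size_t size;
    int err;

    /* first window: addr = 0x40000000, size = 0x10000000 */
    err = of_get_dma_window(dn, "ib,", 0, NULL, &addr, &size);
#endif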
330
drivers/iommu/omap-iommu-debug.c
Normal file
@@ -0,0 +1,330 @@
/*
 * omap iommu: debugfs interface
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

#define MAXCOLUMN 100   /* for short messages */

static DEFINE_MUTEX(iommu_debug_lock);

static struct dentry *iommu_debug_root;

static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
                              size_t count, loff_t *ppos)
{
    u32 ver = omap_iommu_arch_version();
    char buf[MAXCOLUMN], *p = buf;

    p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf, ver & 0xf);

    return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}

static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
                               size_t count, loff_t *ppos)
{
    struct device *dev = file->private_data;
    struct omap_iommu *obj = dev_to_omap_iommu(dev);
    char *p, *buf;
    ssize_t bytes;

    buf = kmalloc(count, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    p = buf;

    mutex_lock(&iommu_debug_lock);

    bytes = omap_iommu_dump_ctx(obj, p, count);
    bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);

    mutex_unlock(&iommu_debug_lock);
    kfree(buf);

    return bytes;
}

static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
                              size_t count, loff_t *ppos)
{
    struct device *dev = file->private_data;
    struct omap_iommu *obj = dev_to_omap_iommu(dev);
    char *p, *buf;
    ssize_t bytes, rest;

    buf = kmalloc(count, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    p = buf;

    mutex_lock(&iommu_debug_lock);

    p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
    p += sprintf(p, "-----------------------------------------\n");
    rest = count - (p - buf);
    p += omap_dump_tlb_entries(obj, p, rest);

    bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);

    mutex_unlock(&iommu_debug_lock);
    kfree(buf);

    return bytes;
}

static ssize_t debug_write_pagetable(struct file *file,
                     const char __user *userbuf, size_t count, loff_t *ppos)
{
    struct iotlb_entry e;
    struct cr_regs cr;
    int err;
    struct device *dev = file->private_data;
    struct omap_iommu *obj = dev_to_omap_iommu(dev);
    char buf[MAXCOLUMN], *p = buf;

    count = min(count, sizeof(buf));

    mutex_lock(&iommu_debug_lock);
    if (copy_from_user(p, userbuf, count)) {
        mutex_unlock(&iommu_debug_lock);
        return -EFAULT;
    }

    sscanf(p, "%x %x", &cr.cam, &cr.ram);
    if (!cr.cam || !cr.ram) {
        mutex_unlock(&iommu_debug_lock);
        return -EINVAL;
    }

    omap_iotlb_cr_to_e(&cr, &e);
    err = omap_iopgtable_store_entry(obj, &e);
    if (err)
        dev_err(obj->dev, "%s: fail to store cr\n", __func__);

    mutex_unlock(&iommu_debug_lock);
    return count;
}
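/*
 * Illustration (not in the original file): feeding the writer above from
 * userspace. The debugfs path and register values are hypothetical; the
 * handler expects two hex words, "cam ram", as parsed by the sscanf() above.
 *
 *      # echo "0x12000005 0x34000791" > \
 *              /sys/kernel/debug/iommu/<name>/pagetable
 */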

#define dump_ioptable_entry_one(lv, da, val)                    \
    ({                                                          \
        int __err = 0;                                          \
        ssize_t bytes;                                          \
        const int maxcol = 22;                                  \
        const char *str = "%d: %08x %08x\n";                    \
        bytes = snprintf(p, maxcol, str, lv, da, val);          \
        p += bytes;                                             \
        len -= bytes;                                           \
        if (len < maxcol)                                       \
            __err = -ENOMEM;                                    \
        __err;                                                  \
    })

static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
{
    int i;
    u32 *iopgd;
    char *p = buf;

    spin_lock(&obj->page_table_lock);

    iopgd = iopgd_offset(obj, 0);
    for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
        int j, err;
        u32 *iopte;
        u32 da;

        if (!*iopgd)
            continue;

        if (!(*iopgd & IOPGD_TABLE)) {
            da = i << IOPGD_SHIFT;

            err = dump_ioptable_entry_one(1, da, *iopgd);
            if (err)
                goto out;
            continue;
        }

        iopte = iopte_offset(iopgd, 0);

        for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
            if (!*iopte)
                continue;

            da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
            err = dump_ioptable_entry_one(2, da, *iopte);
            if (err)
                goto out;
        }
    }
out:
    spin_unlock(&obj->page_table_lock);

    return p - buf;
}

static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
                                    size_t count, loff_t *ppos)
{
    struct device *dev = file->private_data;
    struct omap_iommu *obj = dev_to_omap_iommu(dev);
    char *p, *buf;
    size_t bytes;

    buf = (char *)__get_free_page(GFP_KERNEL);
    if (!buf)
        return -ENOMEM;
    p = buf;

    p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
    p += sprintf(p, "-----------------------------------------\n");

    mutex_lock(&iommu_debug_lock);

    bytes = PAGE_SIZE - (p - buf);
    p += dump_ioptable(obj, p, bytes);

    bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);

    mutex_unlock(&iommu_debug_lock);
    free_page((unsigned long)buf);

    return bytes;
}

#define DEBUG_FOPS(name)                                                \
    static const struct file_operations debug_##name##_fops = {        \
        .open = simple_open,                                            \
        .read = debug_read_##name,                                      \
        .write = debug_write_##name,                                    \
        .llseek = generic_file_llseek,                                  \
    };

#define DEBUG_FOPS_RO(name)                                             \
    static const struct file_operations debug_##name##_fops = {        \
        .open = simple_open,                                            \
        .read = debug_read_##name,                                      \
        .llseek = generic_file_llseek,                                  \
    };

DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb);
DEBUG_FOPS(pagetable);

#define __DEBUG_ADD_FILE(attr, mode)                                    \
    {                                                                   \
        struct dentry *dent;                                            \
        dent = debugfs_create_file(#attr, mode, parent,                 \
                                   dev, &debug_##attr##_fops);          \
        if (!dent)                                                      \
            return -ENOMEM;                                             \
    }

#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)

static int iommu_debug_register(struct device *dev, void *data)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct omap_iommu *obj = platform_get_drvdata(pdev);
    struct omap_iommu_arch_data *arch_data;
    struct dentry *d, *parent;

    if (!obj || !obj->dev)
        return -EINVAL;

    arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
    if (!arch_data)
        return -ENOMEM;

    arch_data->iommu_dev = obj;

    dev->archdata.iommu = arch_data;

    d = debugfs_create_dir(obj->name, iommu_debug_root);
    if (!d)
        goto nomem;
    parent = d;

    d = debugfs_create_u8("nr_tlb_entries", 0400, parent,
                          (u8 *)&obj->nr_tlb_entries);
    if (!d)
        goto nomem;

    DEBUG_ADD_FILE_RO(ver);
    DEBUG_ADD_FILE_RO(regs);
    DEBUG_ADD_FILE_RO(tlb);
    DEBUG_ADD_FILE(pagetable);

    return 0;

nomem:
    kfree(arch_data);
    return -ENOMEM;
}

static int iommu_debug_unregister(struct device *dev, void *data)
{
    if (!dev->archdata.iommu)
        return 0;

    kfree(dev->archdata.iommu);

    dev->archdata.iommu = NULL;

    return 0;
}

static int __init iommu_debug_init(void)
{
    struct dentry *d;
    int err;

    d = debugfs_create_dir("iommu", NULL);
    if (!d)
        return -ENOMEM;
    iommu_debug_root = d;

    err = omap_foreach_iommu_device(d, iommu_debug_register);
    if (err)
        goto err_out;
    return 0;

err_out:
    debugfs_remove_recursive(iommu_debug_root);
    return err;
}
module_init(iommu_debug_init)

static void __exit iommu_debugfs_exit(void)
{
    debugfs_remove_recursive(iommu_debug_root);
    omap_foreach_iommu_device(NULL, iommu_debug_unregister);
}
module_exit(iommu_debugfs_exit)

MODULE_DESCRIPTION("omap iommu: debugfs interface");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");
1327
drivers/iommu/omap-iommu.c
Normal file
File diff suppressed because it is too large
227
drivers/iommu/omap-iommu.h
Normal file
@@ -0,0 +1,227 @@
/*
 * omap iommu: main structures
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#if defined(CONFIG_ARCH_OMAP1)
#error "iommu for this processor not implemented yet"
#endif

struct iotlb_entry {
    u32 da;
    u32 pa;
    u32 pgsz, prsvd, valid;
    union {
        u16 ap;
        struct {
            u32 endian, elsz, mixed;
        };
    };
};

struct omap_iommu {
    const char      *name;
    void __iomem    *regbase;
    struct device   *dev;
    void            *isr_priv;
    struct iommu_domain *domain;

    unsigned int    refcount;
    spinlock_t      iommu_lock;     /* global for this whole object */

    /*
     * We don't change iopgd for a situation like pgd for a task,
     * but share it globally for each iommu.
     */
    u32             *iopgd;
    spinlock_t      page_table_lock; /* protect iopgd */

    int             nr_tlb_entries;

    void *ctx; /* iommu context: registers saved area */

    int has_bus_err_back;
};

struct cr_regs {
    union {
        struct {
            u16 cam_l;
            u16 cam_h;
        };
        u32 cam;
    };
    union {
        struct {
            u16 ram_l;
            u16 ram_h;
        };
        u32 ram;
    };
};

/* architecture specific functions */
struct iommu_functions {
    unsigned long   version;

    int (*enable)(struct omap_iommu *obj);
    void (*disable)(struct omap_iommu *obj);
    void (*set_twl)(struct omap_iommu *obj, bool on);
    u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);

    void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
    void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);

    struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
                                struct iotlb_entry *e);
    int (*cr_valid)(struct cr_regs *cr);
    u32 (*cr_to_virt)(struct cr_regs *cr);
    void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
    ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
                       char *buf);

    u32 (*get_pte_attr)(struct iotlb_entry *e);

    void (*save_ctx)(struct omap_iommu *obj);
    void (*restore_ctx)(struct omap_iommu *obj);
    ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
};

#ifdef CONFIG_IOMMU_API
/**
 * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
 * @dev: iommu client device
 */
static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
{
    struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

    return arch_data->iommu_dev;
}
#endif

/*
 * MMU Register offsets
 */
#define MMU_REVISION            0x00
#define MMU_IRQSTATUS           0x18
#define MMU_IRQENABLE           0x1c
#define MMU_WALKING_ST          0x40
#define MMU_CNTL                0x44
#define MMU_FAULT_AD            0x48
#define MMU_TTB                 0x4c
#define MMU_LOCK                0x50
#define MMU_LD_TLB              0x54
#define MMU_CAM                 0x58
#define MMU_RAM                 0x5c
#define MMU_GFLUSH              0x60
#define MMU_FLUSH_ENTRY         0x64
#define MMU_READ_CAM            0x68
#define MMU_READ_RAM            0x6c
#define MMU_EMU_FAULT_AD        0x70
#define MMU_GP_REG              0x88

#define MMU_REG_SIZE            256

/*
 * MMU Register bit definitions
 */
#define MMU_CAM_VATAG_SHIFT     12
#define MMU_CAM_VATAG_MASK \
    ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
#define MMU_CAM_P               (1 << 3)
#define MMU_CAM_V               (1 << 2)
#define MMU_CAM_PGSZ_MASK       3
#define MMU_CAM_PGSZ_1M         (0 << 0)
#define MMU_CAM_PGSZ_64K        (1 << 0)
#define MMU_CAM_PGSZ_4K         (2 << 0)
#define MMU_CAM_PGSZ_16M        (3 << 0)

#define MMU_RAM_PADDR_SHIFT     12
#define MMU_RAM_PADDR_MASK \
    ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)

#define MMU_RAM_ENDIAN_SHIFT    9
#define MMU_RAM_ENDIAN_MASK     (1 << MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_LITTLE   (0 << MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_BIG      (1 << MMU_RAM_ENDIAN_SHIFT)

#define MMU_RAM_ELSZ_SHIFT      7
#define MMU_RAM_ELSZ_MASK       (3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_8          (0 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_16         (1 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_32         (2 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_NONE       (3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_MIXED_SHIFT     6
#define MMU_RAM_MIXED_MASK      (1 << MMU_RAM_MIXED_SHIFT)
#define MMU_RAM_MIXED           MMU_RAM_MIXED_MASK

#define MMU_GP_REG_BUS_ERR_BACK_EN      0x1

/*
 * utilities for super page(16MB, 1MB, 64KB and 4KB)
 */

#define iopgsz_max(bytes)                       \
    (((bytes) >= SZ_16M) ? SZ_16M :             \
     ((bytes) >= SZ_1M) ? SZ_1M :               \
     ((bytes) >= SZ_64K) ? SZ_64K :             \
     ((bytes) >= SZ_4K) ? SZ_4K : 0)

#define bytes_to_iopgsz(bytes)                          \
    (((bytes) == SZ_16M) ? MMU_CAM_PGSZ_16M :           \
     ((bytes) == SZ_1M) ? MMU_CAM_PGSZ_1M :             \
     ((bytes) == SZ_64K) ? MMU_CAM_PGSZ_64K :           \
     ((bytes) == SZ_4K) ? MMU_CAM_PGSZ_4K : -1)

#define iopgsz_to_bytes(iopgsz)                         \
    (((iopgsz) == MMU_CAM_PGSZ_16M) ? SZ_16M :          \
     ((iopgsz) == MMU_CAM_PGSZ_1M) ? SZ_1M :            \
     ((iopgsz) == MMU_CAM_PGSZ_64K) ? SZ_64K :          \
     ((iopgsz) == MMU_CAM_PGSZ_4K) ? SZ_4K : 0)

#define iopgsz_ok(bytes) (bytes_to_iopgsz(bytes) >= 0)
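/*
 * Illustration (not in the original header): how the page-size helpers above
 * behave for a few inputs. The results follow directly from the macros.
 */
#if 0   /* worked example only */
    iopgsz_max(SZ_2M);          /* -> SZ_1M: largest unit that fits */
    bytes_to_iopgsz(SZ_64K);    /* -> MMU_CAM_PGSZ_64K */
    iopgsz_ok(SZ_8K);           /* -> 0: 8KB is not a supported size */
#endif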

/*
 * global functions
 */
extern u32 omap_iommu_arch_version(void);

extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);

extern int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);

extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);

extern int omap_foreach_iommu_device(void *data,
                                     int (*fn)(struct device *, void *));

extern int omap_install_iommu_arch(const struct iommu_functions *ops);
extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);

extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);

/*
 * register accessors
 */
static inline u32 iommu_read_reg(struct omap_iommu *obj, size_t offs)
{
    return __raw_readl(obj->regbase + offs);
}

static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{
    __raw_writel(val, obj->regbase + offs);
}
337
drivers/iommu/omap-iommu2.c
Normal file
@@ -0,0 +1,337 @@
/*
 * omap iommu: omap2/3 architecture specific functions
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *              Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/omap-iommu.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/platform_data/iommu-omap.h>

#include "omap-iommu.h"

/*
 * omap2 architecture specific register bit definitions
 */
#define IOMMU_ARCH_VERSION      0x00000011

/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT   (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT  (1 << 3)
#define MMU_IRQ_EMUMISS         (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT        (1 << 1)
#define MMU_IRQ_TLBMISS         (1 << 0)

#define __MMU_IRQ_FAULT         \
    (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK            \
    (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK        (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK   (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)

/* MMU_CNTL */
#define MMU_CNTL_SHIFT          1
#define MMU_CNTL_MASK           (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB        (1 << 3)
#define MMU_CNTL_TWL_EN         (1 << 2)
#define MMU_CNTL_MMU_EN         (1 << 1)

#define get_cam_va_mask(pgsz)                           \
    (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 :        \
     ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 :         \
     ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 :        \
     ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
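/*
 * Illustration (not part of the original file): the VA masks above simply
 * keep the bits that select a page of the given size. The sample results
 * follow from the macro.
 */
#if 0   /* worked example only */
    get_cam_va_mask(MMU_CAM_PGSZ_1M);   /* 0xfff00000: VA bits 31..20 */
    get_cam_va_mask(MMU_CAM_PGSZ_4K);   /* 0xfffff000: VA bits 31..12 */
#endif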
|
||||
/* IOMMU errors */
|
||||
#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
|
||||
#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
|
||||
#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
|
||||
#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
|
||||
#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
|
||||
|
||||
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
|
||||
{
|
||||
u32 l = iommu_read_reg(obj, MMU_CNTL);
|
||||
|
||||
if (on)
|
||||
iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
|
||||
else
|
||||
iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
|
||||
|
||||
l &= ~MMU_CNTL_MASK;
|
||||
if (on)
|
||||
l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
|
||||
else
|
||||
l |= (MMU_CNTL_MMU_EN);
|
||||
|
||||
iommu_write_reg(obj, l, MMU_CNTL);
|
||||
}
|
||||
|
||||
|
||||
static int omap2_iommu_enable(struct omap_iommu *obj)
|
||||
{
|
||||
u32 l, pa;
|
||||
|
||||
if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
|
||||
return -EINVAL;
|
||||
|
||||
pa = virt_to_phys(obj->iopgd);
|
||||
if (!IS_ALIGNED(pa, SZ_16K))
|
||||
return -EINVAL;
|
||||
|
||||
l = iommu_read_reg(obj, MMU_REVISION);
|
||||
dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
|
||||
(l >> 4) & 0xf, l & 0xf);
|
||||
|
||||
iommu_write_reg(obj, pa, MMU_TTB);
|
||||
|
||||
if (obj->has_bus_err_back)
|
||||
iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
|
||||
|
||||
__iommu_set_twl(obj, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap2_iommu_disable(struct omap_iommu *obj)
|
||||
{
|
||||
u32 l = iommu_read_reg(obj, MMU_CNTL);
|
||||
|
||||
l &= ~MMU_CNTL_MASK;
|
||||
iommu_write_reg(obj, l, MMU_CNTL);
|
||||
|
||||
dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
|
||||
}
|
||||
|
||||
static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
|
||||
{
|
||||
__iommu_set_twl(obj, false);
|
||||
}
|
||||
|
||||
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
|
||||
{
|
||||
u32 stat, da;
|
||||
u32 errs = 0;
|
||||
|
||||
stat = iommu_read_reg(obj, MMU_IRQSTATUS);
|
||||
stat &= MMU_IRQ_MASK;
|
||||
if (!stat) {
|
||||
*ra = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
da = iommu_read_reg(obj, MMU_FAULT_AD);
|
||||
*ra = da;
|
||||
|
||||
if (stat & MMU_IRQ_TLBMISS)
|
||||
errs |= OMAP_IOMMU_ERR_TLB_MISS;
|
||||
if (stat & MMU_IRQ_TRANSLATIONFAULT)
|
||||
errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
|
||||
if (stat & MMU_IRQ_EMUMISS)
|
||||
errs |= OMAP_IOMMU_ERR_EMU_MISS;
|
||||
if (stat & MMU_IRQ_TABLEWALKFAULT)
|
||||
errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
|
||||
if (stat & MMU_IRQ_MULTIHITFAULT)
|
||||
errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
|
||||
iommu_write_reg(obj, stat, MMU_IRQSTATUS);
|
||||
|
||||
return errs;
|
||||
}
|
||||
|
||||
static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
|
||||
{
|
||||
cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
|
||||
cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
|
||||
}
|
||||
|
||||
static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
|
||||
{
|
||||
iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
|
||||
iommu_write_reg(obj, cr->ram, MMU_RAM);
|
||||
}
|
||||
|
||||
static u32 omap2_cr_to_virt(struct cr_regs *cr)
|
||||
{
|
||||
u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
|
||||
u32 mask = get_cam_va_mask(cr->cam & page_size);
|
||||
|
||||
return cr->cam & mask;
|
||||
}
|
||||
|
||||
static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
|
||||
struct iotlb_entry *e)
|
||||
{
|
||||
struct cr_regs *cr;
|
||||
|
||||
if (e->da & ~(get_cam_va_mask(e->pgsz))) {
|
||||
dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
|
||||
e->da);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
cr = kmalloc(sizeof(*cr), GFP_KERNEL);
|
||||
if (!cr)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
|
||||
cr->ram = e->pa | e->endian | e->elsz | e->mixed;
|
||||
|
||||
return cr;
|
||||
}
|
||||
|
||||
static inline int omap2_cr_valid(struct cr_regs *cr)
|
||||
{
|
||||
return cr->cam & MMU_CAM_V;
|
||||
}
|
||||
|
||||
static u32 omap2_get_pte_attr(struct iotlb_entry *e)
|
||||
{
|
||||
u32 attr;
|
||||
|
||||
attr = e->mixed << 5;
|
||||
attr |= e->endian;
|
||||
attr |= e->elsz >> 3;
|
||||
attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
|
||||
(e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
|
||||
return attr;
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
|
||||
{
|
||||
char *p = buf;
|
||||
|
||||
/* FIXME: Need more detail analysis of cam/ram */
|
||||
p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
|
||||
(cr->cam & MMU_CAM_P) ? 1 : 0);
|
||||
|
||||
return p - buf;
|
||||
}
|
||||
|
||||
#define pr_reg(name) \
|
||||
do { \
|
||||
ssize_t bytes; \
|
||||
const char *str = "%20s: %08x\n"; \
|
||||
const int maxcol = 32; \
|
||||
bytes = snprintf(p, maxcol, str, __stringify(name), \
|
||||
iommu_read_reg(obj, MMU_##name)); \
|
||||
p += bytes; \
|
||||
len -= bytes; \
|
||||
if (len < maxcol) \
|
||||
goto out; \
|
||||
} while (0)

static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
	char *p = buf;

	pr_reg(REVISION);
	pr_reg(IRQSTATUS);
	pr_reg(IRQENABLE);
	pr_reg(WALKING_ST);
	pr_reg(CNTL);
	pr_reg(FAULT_AD);
	pr_reg(TTB);
	pr_reg(LOCK);
	pr_reg(LD_TLB);
	pr_reg(CAM);
	pr_reg(RAM);
	pr_reg(GFLUSH);
	pr_reg(FLUSH_ENTRY);
	pr_reg(READ_CAM);
	pr_reg(READ_RAM);
	pr_reg(EMU_FAULT_AD);
out:
	return p - buf;
}

static void omap2_iommu_save_ctx(struct omap_iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}

static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
{
	int i;
	u32 *p = obj->ctx;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}

	BUG_ON(p[0] != IOMMU_ARCH_VERSION);
}

static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	e->da = cr->cam & MMU_CAM_VATAG_MASK;
	e->pa = cr->ram & MMU_RAM_PADDR_MASK;
	e->valid = cr->cam & MMU_CAM_V;
	e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
	e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
	e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
	e->mixed = cr->ram & MMU_RAM_MIXED;
}

static const struct iommu_functions omap2_iommu_ops = {
	.version	= IOMMU_ARCH_VERSION,

	.enable		= omap2_iommu_enable,
	.disable	= omap2_iommu_disable,
	.set_twl	= omap2_iommu_set_twl,
	.fault_isr	= omap2_iommu_fault_isr,

	.tlb_read_cr	= omap2_tlb_read_cr,
	.tlb_load_cr	= omap2_tlb_load_cr,

	.cr_to_e	= omap2_cr_to_e,
	.cr_to_virt	= omap2_cr_to_virt,
	.alloc_cr	= omap2_alloc_cr,
	.cr_valid	= omap2_cr_valid,
	.dump_cr	= omap2_dump_cr,

	.get_pte_attr	= omap2_get_pte_attr,

	.save_ctx	= omap2_iommu_save_ctx,
	.restore_ctx	= omap2_iommu_restore_ctx,
	.dump_ctx	= omap2_iommu_dump_ctx,
};

static int __init omap2_iommu_init(void)
{
	return omap_install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);

static void __exit omap2_iommu_exit(void)
{
	omap_uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);

MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
MODULE_LICENSE("GPL v2");
95
drivers/iommu/omap-iopgtable.h
Normal file
@ -0,0 +1,95 @@
/*
 * omap iommu: pagetable definitions
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * "L2 table" address mask and size definitions.
 */
#define IOPGD_SHIFT		20
#define IOPGD_SIZE		(1UL << IOPGD_SHIFT)
#define IOPGD_MASK		(~(IOPGD_SIZE - 1))

/*
 * "section" address mask and size definitions.
 */
#define IOSECTION_SHIFT		20
#define IOSECTION_SIZE		(1UL << IOSECTION_SHIFT)
#define IOSECTION_MASK		(~(IOSECTION_SIZE - 1))

/*
 * "supersection" address mask and size definitions.
 */
#define IOSUPER_SHIFT		24
#define IOSUPER_SIZE		(1UL << IOSUPER_SHIFT)
#define IOSUPER_MASK		(~(IOSUPER_SIZE - 1))

#define PTRS_PER_IOPGD		(1UL << (32 - IOPGD_SHIFT))
#define IOPGD_TABLE_SIZE	(PTRS_PER_IOPGD * sizeof(u32))

/*
 * "small page" address mask and size definitions.
 */
#define IOPTE_SHIFT		12
#define IOPTE_SIZE		(1UL << IOPTE_SHIFT)
#define IOPTE_MASK		(~(IOPTE_SIZE - 1))

/*
 * "large page" address mask and size definitions.
 */
#define IOLARGE_SHIFT		16
#define IOLARGE_SIZE		(1UL << IOLARGE_SHIFT)
#define IOLARGE_MASK		(~(IOLARGE_SIZE - 1))

#define PTRS_PER_IOPTE		(1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
#define IOPTE_TABLE_SIZE	(PTRS_PER_IOPTE * sizeof(u32))

#define IOPAGE_MASK		IOPTE_MASK

/**
 * omap_iommu_translate() - va to pa translation
 * @d:		omap iommu descriptor
 * @va:		virtual address
 * @mask:	omap iommu descriptor mask
 *
 * va to pa translation
 */
static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
{
	return (d & mask) | (va & (~mask));
}
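/*
 * Editorial example (added comment, not in the original header): for a
 * section mapping, mask == IOSECTION_MASK (0xfff00000), so the descriptor
 * contributes the 1MB-aligned physical base while the low 20 bits come
 * straight from 'va'; the '& mask' also strips any attribute bits stored
 * in the descriptor's low bits. For instance, with d = 0x80100000 plus
 * attribute bits and va = 0x40012345:
 *
 *	omap_iommu_translate(d, 0x40012345, IOSECTION_MASK) == 0x80112345
 */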

/*
 * some descriptor attributes.
 */
#define IOPGD_TABLE		(1 << 0)
#define IOPGD_SECTION		(2 << 0)
#define IOPGD_SUPER		(1 << 18 | 2 << 0)

#define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
#define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
#define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)

#define IOPTE_SMALL		(2 << 0)
#define IOPTE_LARGE		(1 << 0)

#define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
#define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)

/* to find an entry in a page-table-directory */
#define iopgd_index(da)		(((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1))
#define iopgd_offset(obj, da)	((obj)->iopgd + iopgd_index(da))

#define iopgd_page_paddr(iopgd)	(*iopgd & ~((1 << 10) - 1))
#define iopgd_page_vaddr(iopgd)	((u32 *)phys_to_virt(iopgd_page_paddr(iopgd)))

/* to find an entry in the second-level page table. */
#define iopte_index(da)		(((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
#define iopte_offset(iopgd, da)	(iopgd_page_vaddr(iopgd) + iopte_index(da))
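/*
 * Editorial sketch of the two-level walk these macros implement (added
 * comment, not from the original file). Bits 31..20 of 'da' index the
 * first-level table, bits 19..12 the second level:
 *
 *	u32 *iopgd = iopgd_offset(obj, da);
 *	if (iopgd_is_table(*iopgd)) {
 *		u32 *iopte = iopte_offset(iopgd, da);
 *		... small/large page lookup via *iopte ...
 *	}
 *
 * iopgd_page_paddr() strips the low 10 bits because a second-level table
 * holds PTRS_PER_IOPTE == 256 four-byte entries, i.e. it is 1KB-aligned.
 */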
394
drivers/iommu/shmobile-iommu.c
Normal file
@ -0,0 +1,394 @@
/*
 * IOMMU for IPMMU/IPMMUI
 * Copyright (C) 2012 Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <asm/dma-iommu.h>
#include "shmobile-ipmmu.h"

#define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE
#define L1_LEN (L1_SIZE / 4)
#define L1_ALIGN L1_SIZE
#define L2_SIZE SZ_1K
#define L2_LEN (L2_SIZE / 4)
#define L2_ALIGN L2_SIZE

struct shmobile_iommu_domain_pgtable {
	uint32_t *pgtable;
	dma_addr_t handle;
};

struct shmobile_iommu_archdata {
	struct list_head attached_list;
	struct dma_iommu_mapping *iommu_mapping;
	spinlock_t attach_lock;
	struct shmobile_iommu_domain *attached;
	int num_attached_devices;
	struct shmobile_ipmmu *ipmmu;
};

struct shmobile_iommu_domain {
	struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN];
	spinlock_t map_lock;
	spinlock_t attached_list_lock;
	struct list_head attached_list;
};

static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache;

static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
			 struct kmem_cache *cache, size_t size)
{
	pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC);
	if (!pgtable->pgtable)
		return -ENOMEM;
	pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size,
					 DMA_TO_DEVICE);
	return 0;
}

static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable,
			 struct kmem_cache *cache, size_t size)
{
	dma_unmap_single(NULL, pgtable->handle, size, DMA_TO_DEVICE);
	kmem_cache_free(cache, pgtable->pgtable);
}

static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable,
			     unsigned int index)
{
	return pgtable->pgtable[index];
}

static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
			  unsigned int index, unsigned int count, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		pgtable->pgtable[index + i] = val;
	dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val),
				   sizeof(val) * count, DMA_TO_DEVICE);
}
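/*
 * Editorial note (added comment): the page tables live in normal cacheable
 * memory, so every CPU-side update must be pushed out with
 * dma_sync_single_for_device() before the IPMMU can safely walk it;
 * pgtable_write() batches 'count' identical entries into a single sync of
 * just the touched range.
 */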

static int shmobile_iommu_domain_init(struct iommu_domain *domain)
{
	struct shmobile_iommu_domain *sh_domain;
	int i, ret;

	sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
	if (!sh_domain)
		return -ENOMEM;
	ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
	if (ret < 0) {
		kfree(sh_domain);
		return ret;
	}
	for (i = 0; i < L1_LEN; i++)
		sh_domain->l2[i].pgtable = NULL;
	spin_lock_init(&sh_domain->map_lock);
	spin_lock_init(&sh_domain->attached_list_lock);
	INIT_LIST_HEAD(&sh_domain->attached_list);
	domain->priv = sh_domain;
	return 0;
}

static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	int i;

	for (i = 0; i < L1_LEN; i++) {
		if (sh_domain->l2[i].pgtable)
			pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE);
	}
	pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
	kfree(sh_domain);
	domain->priv = NULL;
}

static int shmobile_iommu_attach_device(struct iommu_domain *domain,
					struct device *dev)
{
	struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	int ret = -EBUSY;

	if (!archdata)
		return -ENODEV;
	spin_lock(&sh_domain->attached_list_lock);
	spin_lock(&archdata->attach_lock);
	if (archdata->attached != sh_domain) {
		if (archdata->attached)
			goto err;
		ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE,
			      0);
		ipmmu_tlb_flush(archdata->ipmmu);
		archdata->attached = sh_domain;
		archdata->num_attached_devices = 0;
		list_add(&archdata->attached_list, &sh_domain->attached_list);
	}
	archdata->num_attached_devices++;
	ret = 0;
err:
	spin_unlock(&archdata->attach_lock);
	spin_unlock(&sh_domain->attached_list_lock);
	return ret;
}

static void shmobile_iommu_detach_device(struct iommu_domain *domain,
					 struct device *dev)
{
	struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
	struct shmobile_iommu_domain *sh_domain = domain->priv;

	if (!archdata)
		return;
	spin_lock(&sh_domain->attached_list_lock);
	spin_lock(&archdata->attach_lock);
	archdata->num_attached_devices--;
	if (!archdata->num_attached_devices) {
		ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0);
		ipmmu_tlb_flush(archdata->ipmmu);
		archdata->attached = NULL;
		list_del(&archdata->attached_list);
	}
	spin_unlock(&archdata->attach_lock);
	spin_unlock(&sh_domain->attached_list_lock);
}

static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain)
{
	struct shmobile_iommu_archdata *archdata;

	spin_lock(&sh_domain->attached_list_lock);
	list_for_each_entry(archdata, &sh_domain->attached_list, attached_list)
		ipmmu_tlb_flush(archdata->ipmmu);
	spin_unlock(&sh_domain->attached_list_lock);
}

static int l2alloc(struct shmobile_iommu_domain *sh_domain,
		   unsigned int l1index)
{
	int ret;

	if (!sh_domain->l2[l1index].pgtable) {
		ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE);
		if (ret < 0)
			return ret;
	}
	pgtable_write(&sh_domain->l1, l1index, 1,
		      sh_domain->l2[l1index].handle | 0x1);
	return 0;
}

static void l2realfree(struct shmobile_iommu_domain_pgtable *l2)
{
	if (l2->pgtable)
		pgtable_free(l2, l2cache, L2_SIZE);
}

static void l2free(struct shmobile_iommu_domain *sh_domain,
		   unsigned int l1index,
		   struct shmobile_iommu_domain_pgtable *l2)
{
	pgtable_write(&sh_domain->l1, l1index, 1, 0);
	if (sh_domain->l2[l1index].pgtable) {
		*l2 = sh_domain->l2[l1index];
		sh_domain->l2[l1index].pgtable = NULL;
	}
}

static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size, int prot)
{
	struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	unsigned int l1index, l2index;
	int ret;

	l1index = iova >> 20;
	switch (size) {
	case SZ_4K:
		l2index = (iova >> 12) & 0xff;
		spin_lock(&sh_domain->map_lock);
		ret = l2alloc(sh_domain, l1index);
		if (!ret)
			pgtable_write(&sh_domain->l2[l1index], l2index, 1,
				      paddr | 0xff2);
		spin_unlock(&sh_domain->map_lock);
		break;
	case SZ_64K:
		l2index = (iova >> 12) & 0xf0;
		spin_lock(&sh_domain->map_lock);
		ret = l2alloc(sh_domain, l1index);
		if (!ret)
			pgtable_write(&sh_domain->l2[l1index], l2index, 0x10,
				      paddr | 0xff1);
		spin_unlock(&sh_domain->map_lock);
		break;
	case SZ_1M:
		spin_lock(&sh_domain->map_lock);
		l2free(sh_domain, l1index, &l2);
		pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02);
		spin_unlock(&sh_domain->map_lock);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}
	if (!ret)
		domain_tlb_flush(sh_domain);
	l2realfree(&l2);
	return ret;
}
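/*
 * Editorial example (added comment): a 64K mapping writes the same entry
 * sixteen times at a 16-entry-aligned index (l2index & 0xf0, count 0x10).
 * That matches the ARM short-descriptor "large page" rule, where each of
 * the sixteen consecutive 4K slots covering the 64K region must carry a
 * copy of the descriptor. A 1M mapping bypasses the second level entirely
 * and frees any L2 table that previously backed that slot.
 */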

static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
				   unsigned long iova, size_t size)
{
	struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	unsigned int l1index, l2index;
	uint32_t l2entry = 0;
	size_t ret = 0;

	l1index = iova >> 20;
	if (!(iova & 0xfffff) && size >= SZ_1M) {
		spin_lock(&sh_domain->map_lock);
		l2free(sh_domain, l1index, &l2);
		spin_unlock(&sh_domain->map_lock);
		ret = SZ_1M;
		goto done;
	}
	l2index = (iova >> 12) & 0xff;
	spin_lock(&sh_domain->map_lock);
	if (sh_domain->l2[l1index].pgtable)
		l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
	switch (l2entry & 3) {
	case 1:
		if (l2index & 0xf)
			break;
		pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0);
		ret = SZ_64K;
		break;
	case 2:
		pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0);
		ret = SZ_4K;
		break;
	}
	spin_unlock(&sh_domain->map_lock);
done:
	if (ret)
		domain_tlb_flush(sh_domain);
	l2realfree(&l2);
	return ret;
}

static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
					       dma_addr_t iova)
{
	struct shmobile_iommu_domain *sh_domain = domain->priv;
	uint32_t l1entry = 0, l2entry = 0;
	unsigned int l1index, l2index;

	l1index = iova >> 20;
	l2index = (iova >> 12) & 0xff;
	spin_lock(&sh_domain->map_lock);
	if (sh_domain->l2[l1index].pgtable)
		l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
	else
		l1entry = pgtable_read(&sh_domain->l1, l1index);
	spin_unlock(&sh_domain->map_lock);
	switch (l2entry & 3) {
	case 1:
		return (l2entry & ~0xffff) | (iova & 0xffff);
	case 2:
		return (l2entry & ~0xfff) | (iova & 0xfff);
	default:
		if ((l1entry & 3) == 2)
			return (l1entry & ~0xfffff) | (iova & 0xfffff);
		return 0;
	}
}
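/*
 * Editorial note (added comment): the low two descriptor bits encode the
 * mapping type, matching the map/unmap paths above: at the second level,
 * 01 is a 64K large page and 10 a 4K small page; at the first level, 10 is
 * a 1M section. Anything else translates to 0 (no mapping).
 */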

static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name)
{
	unsigned int i, n = ipmmu->num_dev_names;

	for (i = 0; i < n; i++) {
		if (strcmp(ipmmu->dev_names[i], dev_name) == 0)
			return 1;
	}
	return 0;
}

static int shmobile_iommu_add_device(struct device *dev)
{
	struct shmobile_iommu_archdata *archdata = ipmmu_archdata;
	struct dma_iommu_mapping *mapping;

	if (!find_dev_name(archdata->ipmmu, dev_name(dev)))
		return 0;
	mapping = archdata->iommu_mapping;
	if (!mapping) {
		mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
						   L1_LEN << 20);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);
		archdata->iommu_mapping = mapping;
	}
	dev->archdata.iommu = archdata;
	if (arm_iommu_attach_device(dev, mapping))
		pr_err("arm_iommu_attach_device failed\n");
	return 0;
}

static const struct iommu_ops shmobile_iommu_ops = {
	.domain_init	= shmobile_iommu_domain_init,
	.domain_destroy	= shmobile_iommu_domain_destroy,
	.attach_dev	= shmobile_iommu_attach_device,
	.detach_dev	= shmobile_iommu_detach_device,
	.map		= shmobile_iommu_map,
	.unmap		= shmobile_iommu_unmap,
	.iova_to_phys	= shmobile_iommu_iova_to_phys,
	.add_device	= shmobile_iommu_add_device,
	.pgsize_bitmap	= SZ_1M | SZ_64K | SZ_4K,
};

int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
{
	static struct shmobile_iommu_archdata *archdata;

	l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE,
				    L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
	if (!l1cache)
		return -ENOMEM;
	l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE,
				    L2_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
	if (!l2cache) {
		kmem_cache_destroy(l1cache);
		return -ENOMEM;
	}
	archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
	if (!archdata) {
		kmem_cache_destroy(l1cache);
		kmem_cache_destroy(l2cache);
		return -ENOMEM;
	}
	spin_lock_init(&archdata->attach_lock);
	archdata->ipmmu = ipmmu;
	ipmmu_archdata = archdata;
	bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
	return 0;
}
130
drivers/iommu/shmobile-ipmmu.c
Normal file
@ -0,0 +1,130 @@
/*
 * IPMMU/IPMMUI
 * Copyright (C) 2012 Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/platform_data/sh_ipmmu.h>
#include "shmobile-ipmmu.h"

#define IMCTR1 0x000
#define IMCTR2 0x004
#define IMASID 0x010
#define IMTTBR 0x014
#define IMTTBCR 0x018

#define IMCTR1_TLBEN (1 << 0)
#define IMCTR1_FLUSH (1 << 1)

static void ipmmu_reg_write(struct shmobile_ipmmu *ipmmu, unsigned long reg_off,
			    unsigned long data)
{
	iowrite32(data, ipmmu->ipmmu_base + reg_off);
}

void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
{
	if (!ipmmu)
		return;

	spin_lock(&ipmmu->flush_lock);
	if (ipmmu->tlb_enabled)
		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
	else
		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
	spin_unlock(&ipmmu->flush_lock);
}

void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
		   int asid)
{
	if (!ipmmu)
		return;

	spin_lock(&ipmmu->flush_lock);
	switch (size) {
	default:
		ipmmu->tlb_enabled = 0;
		break;
	case 0x2000:
		ipmmu_reg_write(ipmmu, IMTTBCR, 1);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x1000:
		ipmmu_reg_write(ipmmu, IMTTBCR, 2);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x800:
		ipmmu_reg_write(ipmmu, IMTTBCR, 3);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x400:
		ipmmu_reg_write(ipmmu, IMTTBCR, 4);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x200:
		ipmmu_reg_write(ipmmu, IMTTBCR, 5);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x100:
		ipmmu_reg_write(ipmmu, IMTTBCR, 6);
		ipmmu->tlb_enabled = 1;
		break;
	case 0x80:
		ipmmu_reg_write(ipmmu, IMTTBCR, 7);
		ipmmu->tlb_enabled = 1;
		break;
	}
	ipmmu_reg_write(ipmmu, IMTTBR, phys);
	ipmmu_reg_write(ipmmu, IMASID, asid);
	spin_unlock(&ipmmu->flush_lock);
}
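/*
 * Editorial summary of the switch above (added comment): IMTTBCR selects
 * the first-level table size, from 0x2000 bytes (IMTTBCR = 1) halving at
 * each step down to 0x80 bytes (IMTTBCR = 7); any other size leaves the
 * TLB disabled. IMTTBR then points at the table and IMASID tags it with
 * the address space ID.
 */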

static int ipmmu_probe(struct platform_device *pdev)
{
	struct shmobile_ipmmu *ipmmu;
	struct resource *res;
	struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;

	ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
	if (!ipmmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}
	spin_lock_init(&ipmmu->flush_lock);
	ipmmu->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ipmmu->ipmmu_base))
		return PTR_ERR(ipmmu->ipmmu_base);

	ipmmu->dev_names = pdata->dev_names;
	ipmmu->num_dev_names = pdata->num_dev_names;
	platform_set_drvdata(pdev, ipmmu);
	ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
	ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
	return ipmmu_iommu_init(ipmmu);
}

static struct platform_driver ipmmu_driver = {
	.probe = ipmmu_probe,
	.driver = {
		.owner = THIS_MODULE,
		.name = "ipmmu",
	},
};

static int __init ipmmu_init(void)
{
	return platform_driver_register(&ipmmu_driver);
}
subsys_initcall(ipmmu_init);
34
drivers/iommu/shmobile-ipmmu.h
Normal file
@ -0,0 +1,34 @@
/* shmobile-ipmmu.h
 *
 * Copyright (C) 2012 Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#ifndef __SHMOBILE_IPMMU_H__
#define __SHMOBILE_IPMMU_H__

struct shmobile_ipmmu {
	struct device *dev;
	void __iomem *ipmmu_base;
	int tlb_enabled;
	spinlock_t flush_lock;
	const char * const *dev_names;
	unsigned int num_dev_names;
};

#ifdef CONFIG_SHMOBILE_IPMMU_TLB
void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu);
void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
		   int asid);
int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu);
#else
static inline int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
{
	return -EINVAL;
}
#endif

#endif /* __SHMOBILE_IPMMU_H__ */
451
drivers/iommu/tegra-gart.c
Normal file
@ -0,0 +1,451 @@
/*
 * IOMMU API for GART in Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define pr_fmt(fmt)	"%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of.h>

#include <asm/cacheflush.h>

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(SZ_4K)

#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID	(1 << 31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK						\
	(~(GART_PAGE_SIZE - 1) & ~GART_ENTRY_PHYS_ADDR_VALID)

struct gart_client {
	struct device		*dev;
	struct list_head	list;
};

struct gart_device {
	void __iomem		*regs;
	u32			*savedata;
	u32			page_count;	/* total remappable size */
	dma_addr_t		iovmm_base;	/* offset to vmm_area */
	spinlock_t		pte_lock;	/* for pagetable */
	struct list_head	client;
	spinlock_t		client_lock;	/* for client list */
	struct device		*dev;
};

static struct gart_device *gart_handle; /* unique for a system */

#define GART_PTE(_pfn)						\
	(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))

/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	((void)readl((gart)->regs + GART_CONFIG))

#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
	     iova += GART_PAGE_SIZE)

static inline void gart_set_pte(struct gart_device *gart,
				unsigned long offs, u32 pte)
{
	writel(offs, gart->regs + GART_ENTRY_ADDR);
	writel(pte, gart->regs + GART_ENTRY_DATA);

	dev_dbg(gart->dev, "%s %08lx:%08x\n",
		pte ? "map" : "unmap", offs, pte & GART_PAGE_MASK);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long offs)
{
	unsigned long pte;

	writel(offs, gart->regs + GART_ENTRY_ADDR);
	pte = readl(gart->regs + GART_ENTRY_DATA);

	return pte;
}
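/*
 * Editorial note (added comment): the GART page table is not memory-mapped
 * directly; both accessors go through an indirect register pair, writing
 * the IOVA to GART_ENTRY_ADDR and then moving the PTE through
 * GART_ENTRY_DATA. Since that two-register sequence is not atomic, every
 * caller that reads or modifies the table does so under gart->pte_lock.
 */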

static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}

#ifdef DEBUG
static void gart_dump_table(struct gart_device *gart)
{
	unsigned long iova;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova) {
		unsigned long pte;

		pte = gart_read_pte(gart, iova);

		dev_dbg(gart->dev, "%s %08lx:%08lx\n",
			(GART_ENTRY_PHYS_ADDR_VALID & pte) ? "v" : " ",
			iova, pte & GART_PAGE_MASK);
	}
	spin_unlock_irqrestore(&gart->pte_lock, flags);
}
#else
static inline void gart_dump_table(struct gart_device *gart)
{
}
#endif

static inline bool gart_iova_range_valid(struct gart_device *gart,
					 unsigned long iova, size_t bytes)
{
	unsigned long iova_start, iova_end, gart_start, gart_end;

	iova_start = iova;
	iova_end = iova_start + bytes - 1;
	gart_start = gart->iovmm_base;
	gart_end = gart_start + gart->page_count * GART_PAGE_SIZE - 1;

	if (iova_start < gart_start)
		return false;
	if (iova_end > gart_end)
		return false;
	return true;
}

static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart;
	struct gart_client *client, *c;
	int err = 0;

	gart = gart_handle;
	if (!gart)
		return -EINVAL;
	domain->priv = gart;

	domain->geometry.aperture_start = gart->iovmm_base;
	domain->geometry.aperture_end = gart->iovmm_base +
					gart->page_count * GART_PAGE_SIZE - 1;
	domain->geometry.force_aperture = true;

	client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
	if (!client)
		return -ENOMEM;
	client->dev = dev;

	spin_lock(&gart->client_lock);
	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			dev_err(gart->dev,
				"%s is already attached\n", dev_name(dev));
			err = -EINVAL;
			goto fail;
		}
	}
	list_add(&client->list, &gart->client);
	spin_unlock(&gart->client_lock);
	dev_dbg(gart->dev, "Attached %s\n", dev_name(dev));
	return 0;

fail:
	devm_kfree(gart->dev, client);
	spin_unlock(&gart->client_lock);
	return err;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = domain->priv;
	struct gart_client *c;

	spin_lock(&gart->client_lock);

	list_for_each_entry(c, &gart->client, list) {
		if (c->dev == dev) {
			list_del(&c->list);
			devm_kfree(gart->dev, c);
			dev_dbg(gart->dev, "Detached %s\n", dev_name(dev));
			goto out;
		}
	}
	dev_err(gart->dev, "Couldn't find\n");
out:
	spin_unlock(&gart->client_lock);
}

static int gart_iommu_domain_init(struct iommu_domain *domain)
{
	return 0;
}

static void gart_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct gart_device *gart = domain->priv;

	if (!gart)
		return;

	spin_lock(&gart->client_lock);
	if (!list_empty(&gart->client)) {
		struct gart_client *c;

		list_for_each_entry(c, &gart->client, list)
			gart_iommu_detach_dev(domain, c->dev);
	}
	spin_unlock(&gart->client_lock);
	domain->priv = NULL;
}

static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;
	unsigned long pfn;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pfn = __phys_to_pfn(pa);
	if (!pfn_valid(pfn)) {
		dev_err(gart->dev, "Invalid page: %pa\n", &pa);
		spin_unlock_irqrestore(&gart->pte_lock, flags);
		return -EINVAL;
	}
	gart_set_pte(gart, iova, GART_PTE(pfn));
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes)
{
	struct gart_device *gart = domain->priv;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, bytes))
		return 0;

	spin_lock_irqsave(&gart->pte_lock, flags);
	gart_set_pte(gart, iova, 0);
	FLUSH_GART_REGS(gart);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = domain->priv;
	unsigned long pte;
	phys_addr_t pa;
	unsigned long flags;

	if (!gart_iova_range_valid(gart, iova, 0))
		return -EINVAL;

	spin_lock_irqsave(&gart->pte_lock, flags);
	pte = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);

	pa = (pte & GART_PAGE_MASK);
	if (!pfn_valid(__phys_to_pfn(pa))) {
		dev_err(gart->dev, "No entry for %08llx:%pa\n",
			(unsigned long long)iova, &pa);
		gart_dump_table(gart);
		return -EINVAL;
	}
	return pa;
}

static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}

static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_init	= gart_iommu_domain_init,
	.domain_destroy	= gart_iommu_domain_destroy,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
};

static int tegra_gart_suspend(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long iova;
	u32 *data = gart->savedata;
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_resume(struct device *dev)
{
	struct gart_device *gart = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&gart->pte_lock, flags);
	do_gart_setup(gart, gart->savedata);
	spin_unlock_irqrestore(&gart->pte_lock, flags);
	return 0;
}

static int tegra_gart_probe(struct platform_device *pdev)
{
	struct gart_device *gart;
	struct resource *res, *res_remap;
	void __iomem *gart_regs;
	struct device *dev = &pdev->dev;

	if (gart_handle)
		return -EIO;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_remap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res || !res_remap) {
		dev_err(dev, "GART memory aperture expected\n");
		return -ENXIO;
	}

	gart = devm_kzalloc(dev, sizeof(*gart), GFP_KERNEL);
	if (!gart) {
		dev_err(dev, "failed to allocate gart_device\n");
		return -ENOMEM;
	}

	gart_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!gart_regs) {
		dev_err(dev, "failed to remap GART registers\n");
		return -ENXIO;
	}

	gart->dev = &pdev->dev;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->client_lock);
	INIT_LIST_HEAD(&gart->client);
	gart->regs = gart_regs;
	gart->iovmm_base = (dma_addr_t)res_remap->start;
	gart->page_count = (resource_size(res_remap) >> GART_PAGE_SHIFT);

	gart->savedata = vmalloc(sizeof(u32) * gart->page_count);
	if (!gart->savedata) {
		dev_err(dev, "failed to allocate context save area\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, gart);
	do_gart_setup(gart, NULL);

	gart_handle = gart;
	bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
	return 0;
}

static int tegra_gart_remove(struct platform_device *pdev)
{
	struct gart_device *gart = platform_get_drvdata(pdev);

	writel(0, gart->regs + GART_CONFIG);
	if (gart->savedata)
		vfree(gart->savedata);
	gart_handle = NULL;
	return 0;
}

static const struct dev_pm_ops tegra_gart_pm_ops = {
	.suspend	= tegra_gart_suspend,
	.resume		= tegra_gart_resume,
};

static const struct of_device_id tegra_gart_of_match[] = {
	{ .compatible = "nvidia,tegra20-gart", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_gart_of_match);

static struct platform_driver tegra_gart_driver = {
	.probe		= tegra_gart_probe,
	.remove		= tegra_gart_remove,
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "tegra-gart",
		.pm	= &tegra_gart_pm_ops,
		.of_match_table = tegra_gart_of_match,
	},
};

static int tegra_gart_init(void)
{
	return platform_driver_register(&tegra_gart_driver);
}

static void __exit tegra_gart_exit(void)
{
	platform_driver_unregister(&tegra_gart_driver);
}

subsys_initcall(tegra_gart_init);
module_exit(tegra_gart_exit);

MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");
1295
drivers/iommu/tegra-smmu.c
Normal file
File diff suppressed because it is too large