/****************************************************************************
 *
 * Copyright (c) 2014 - 2016 Samsung Electronics Co., Ltd. All rights reserved
 *
 ****************************************************************************/

/* Implements */
#include "pcie_mif.h"

/* Uses */
/* Note: the <...> header names were lost in extraction; the six below are
 * reconstructed from what this file actually uses. */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include "pcie_mif_module.h"
#include "peterson_mutex.h"
#include "pcie_proc.h"

static bool enable_pcie_mif_arm_reset = true;
module_param(enable_pcie_mif_arm_reset, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_pcie_mif_arm_reset, "Enables ARM cores reset");

struct pcie_mif {
	struct scsc_mif_abs   interface;
	struct scsc_mbox_s    *mbox;
	struct peterson_mutex *p_mutex_r4;	/* AP will READ - CR4 will WRITE */
	struct peterson_mutex *p_mutex_ap;	/* AP will WRITE - CR4 will READ */
	struct pci_dev        *pdev;
	int                   dma_using_dac;	/* =1 if 64-bit DMA is used, =0 otherwise. */
	__iomem void          *registers;
	struct device         *dev;
	void                  *mem;
	size_t                mem_allocated;
	dma_addr_t            dma_addr;
	/* Callback function and dev pointer for the mif_intr manager handler */
	void                  (*r4_handler)(int irq, void *data);
	void                  *irq_dev;
#ifdef SUPPORTED_M4
	void                  (*m4_handler)(int irq, void *data);
#endif
};

#define pcie_mif_from_mif_abs(MIF_ABS_PTR) container_of(MIF_ABS_PTR, struct pcie_mif, interface)

static void pcie_mif_irq_default_handler(int irq, void *data)
{
	/* Avoid unused parameter error */
	(void)irq;
	(void)data;
}

irqreturn_t pcie_mif_isr(int irq, void *data)
{
	struct pcie_mif *pcie = (struct pcie_mif *)data;

#ifdef SUPPORTED_M4
	/* TODO */
#endif
	if (pcie->r4_handler != pcie_mif_irq_default_handler)
		pcie->r4_handler(irq, pcie->irq_dev);
	else
		SCSC_TAG_INFO(PCIE_MIF, "No handler registered\n");
	return IRQ_HANDLED;
}

static void pcie_mif_destroy(struct scsc_mif_abs *interface)
{
	/* Avoid unused parameter error */
	(void)interface;
}

static char *pcie_mif_get_uid(struct scsc_mif_abs *interface)
{
	/* Avoid unused parameter error */
	(void)interface;
	/* TODO */
	/* return "0" for the time being */
	return "0";
}
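/*
 * The two peterson_mutex pointers in struct pcie_mif live in the
 * DMA-coherent region shared with the CR4, one per transfer direction.
 * The real type is defined in peterson_mutex.h (not shown here); the
 * sketch below only illustrates the classic two-process Peterson lock
 * such a header would be expected to provide, assuming a flag[2]/turn
 * layout and small integer process indices like AP_PROCESS.
 */
#if 0	/* Illustrative sketch only -- not part of this driver */
struct peterson_mutex_sketch {
	u32 flag[2];	/* flag[i] != 0: process i wants the lock */
	u32 turn;	/* whose turn it is to back off */
};

static void peterson_lock_sketch(volatile struct peterson_mutex_sketch *m, int me)
{
	int other = 1 - me;

	m->flag[me] = 1;
	m->turn = other;
	smp_mb();	/* publish flag/turn before polling the peer */
	while (m->flag[other] && m->turn == other)
		cpu_relax();	/* spin until the peer yields */
}

static void peterson_unlock_sketch(volatile struct peterson_mutex_sketch *m, int me)
{
	smp_mb();	/* drain the critical section before release */
	m->flag[me] = 0;
}
#endif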
static int pcie_mif_reset(struct scsc_mif_abs *interface, bool reset)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
	int ret;

	if (enable_pcie_mif_arm_reset || !reset) {
		/* Sanity check: write a magic number over BAR0 and read it back */
		iowrite32(0xdeadbeef, pcie->registers + SCSC_PCIE_SIGNATURE);
		mmiowb();
		ret = ioread32(pcie->registers + SCSC_PCIE_SIGNATURE);
		if (ret != 0xdeadbeef) {
			SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
					 "Can't access BAR0 magic number. Read: 0x%x Expected: 0x%x\n",
					 ret, 0xdeadbeef);
			return -ENODEV;
		}
		iowrite32(reset ? 1 : 0, pcie->registers + SCSC_PCIE_GRST_OFFSET);
		mmiowb();
	} else {
		SCSC_TAG_INFO(PCIE_MIF, "Not resetting ARM Cores enable_pcie_mif_arm_reset: %d\n",
			      enable_pcie_mif_arm_reset);
	}
	return 0;
}

static void *pcie_mif_map(struct scsc_mif_abs *interface, size_t *allocated)
{
	int ret;
	size_t map_len = PCIE_MIF_ALLOC_MEM;
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	if (allocated)
		*allocated = 0;

	if (map_len > (PCIE_MIF_PREALLOC_MEM - 1)) {
		SCSC_TAG_ERR(PCIE_MIF,
			     "Error allocating DMA memory, requested %zu, maximum %d, consider different size\n",
			     map_len, PCIE_MIF_PREALLOC_MEM);
		return NULL;
	}

	/* dma_alloc_coherent() returns page-aligned memory */
	pcie->mem = dma_alloc_coherent(pcie->dev, PCIE_MIF_PREALLOC_MEM, &pcie->dma_addr, GFP_KERNEL);
	if (pcie->mem == NULL) {
		SCSC_TAG_ERR(PCIE_MIF, "Error allocating %d DMA memory\n", PCIE_MIF_PREALLOC_MEM);
		return NULL;
	}

	pcie->mem_allocated = map_len;

	SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "Allocated dma coherent mem: %p addr %p\n",
			  pcie->mem, (void *)pcie->dma_addr);

	iowrite32((unsigned int)pcie->dma_addr, pcie->registers + SCSC_PCIE_OFFSET);
	mmiowb();
	ret = ioread32(pcie->registers + SCSC_PCIE_OFFSET);
	SCSC_TAG_INFO(PCIE_MIF, "Read SHARED_BA 0x%0x\n", ret);
	if (ret != (unsigned int)pcie->dma_addr) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev,
				 "Can't access BAR0 Shared BA. Read: 0x%x Expected: 0x%x\n",
				 ret, (unsigned int)pcie->dma_addr);
		/* Don't leak the coherent buffer on the failure path */
		dma_free_coherent(pcie->dev, PCIE_MIF_PREALLOC_MEM, pcie->mem, pcie->dma_addr);
		pcie->mem = NULL;
		return NULL;
	}

#ifdef OLD_REG
	/* Allocate mbox struct at the end of the PCIE_MIF_PREALLOC_MEM */
	pcie->mbox = (void *)pcie->mem + PCIE_MIF_PREALLOC_MEM - sizeof(struct scsc_mbox_s);
	/* Allocate Peterson algo shared variables before mbox */
	pcie->p_mutex_r4 = (void *)pcie->mem + PCIE_MIF_PREALLOC_MEM - sizeof(struct scsc_mbox_s)
			   - sizeof(struct peterson_mutex);
	pcie->p_mutex_ap = (void *)pcie->mem + PCIE_MIF_PREALLOC_MEM - sizeof(struct scsc_mbox_s)
			   - 2 * (sizeof(struct peterson_mutex));
#else
	/* Allocate mbox struct at the end of the PCIE_MIF_PREALLOC_MEM */
	pcie->mbox = (void *)pcie->mem + MBOX_OFFSET;
	memset(pcie->mbox, 0, sizeof(struct scsc_mbox_s));
	/* Allocate Peterson algo shared variables before mbox */
	pcie->p_mutex_r4 = (void *)pcie->mem + P_OFFSET_R4;
	pcie->p_mutex_ap = (void *)pcie->mem + P_OFFSET_AP;
#endif
	SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev,
			  "pcie->mbox is pointing at %p pcie->mem %p map_len %zu sizeof %zu\n",
			  pcie->mbox, pcie->mem, map_len, sizeof(struct scsc_mbox_s));

#ifdef SUPPORTED_M4
	/* TODO */
#endif
	/* The AP only initialises its own mutex; the CR4 side is presumably
	 * responsible for initialising p_mutex_r4 when its firmware boots. */
	peterson_mutex_init(pcie->p_mutex_ap);

	/* Return the max allocatable memory on this abs implementation */
	if (allocated)
		*allocated = map_len;

	return pcie->mem;
}
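/*
 * Resulting shared-memory picture (offsets come from pcie_mif_module.h;
 * the ordering shown is an assumption based on the OLD_REG layout above):
 *
 *   pcie->mem + 0x0                    ... general-purpose shared RAM
 *   pcie->mem + P_OFFSET_AP            ... peterson_mutex, AP writes / CR4 reads
 *   pcie->mem + P_OFFSET_R4            ... peterson_mutex, CR4 writes / AP reads
 *   pcie->mem + MBOX_OFFSET            ... struct scsc_mbox_s (issr[], intsr/intmr)
 *   pcie->mem + PCIE_MIF_PREALLOC_MEM  ... end of the coherent buffer
 */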
/* HERE: Not sure why mem is passed in - it's stored in pcie - as it should be */
static void pcie_mif_unmap(struct scsc_mif_abs *interface, void *mem)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	/* Avoid unused parameter error */
	(void)mem;

	dma_free_coherent(pcie->dev, PCIE_MIF_PREALLOC_MEM, pcie->mem, pcie->dma_addr);
	SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "Freed dma coherent mem: %p addr %p\n",
			  pcie->mem, (void *)pcie->dma_addr);
}

#ifdef MAILBOX_SETGET
static void pcie_mif_mailbox_set(struct scsc_mif_abs *interface, u32 mbox_num, u32 value)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	if (mbox_num >= NUM_MBOX) {
		SCSC_TAG_ERR(PCIE_MIF, "MBOX not mapped\n");
		return;
	}
	pcie->mbox->issr[mbox_num] = value;
}

static u32 pcie_mif_mailbox_get(struct scsc_mif_abs *interface, u32 mbox_num)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
	u32 val;

	if (mbox_num >= NUM_MBOX) {
		SCSC_TAG_ERR(PCIE_MIF, "MBOX not mapped\n");
		return -1;
	}
	val = pcie->mbox->issr[mbox_num];
	return val;
}
#endif

static u32 pcie_mif_irq_bit_mask_status_get(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
	u32 val;

	val = (pcie->mbox->intmr0) >> 16;
	return val;
}

static u32 pcie_mif_irq_get(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
	u32 val;

	val = pcie->mbox->intsr1 >> 16;
	return val;
}

static void pcie_mif_irq_bit_set(struct scsc_mif_abs *interface, int bit_num, enum scsc_mif_abs_target target)
{
	volatile u32 *set_reg;
	volatile u32 *mask_reg;
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	if (bit_num >= 16) {
		SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
		return;
	}

	peterson_mutex_lock(pcie->p_mutex_r4, AP_PROCESS);
	/* Set Status Register */
	if (target == SCSC_MIF_ABS_TARGET_R4) {
		set_reg = &pcie->mbox->intsr0;
		mask_reg = &pcie->mbox->intmr0;
	} else if (target == SCSC_MIF_ABS_TARGET_M4) {
		set_reg = &pcie->mbox->intsr2;
		mask_reg = &pcie->mbox->intmr2;
	} else {
		SCSC_TAG_ERR(PCIE_MIF, "Incorrect Target %d\n", target);
		/* Don't return with the Peterson mutex held */
		peterson_mutex_unlock(pcie->p_mutex_r4, AP_PROCESS);
		return;
	}
	*set_reg |= (1 << bit_num) << 16;

	/* Check whether int is masked */
	if (*mask_reg & ((1 << bit_num) << 16)) {
		SCSC_TAG_ERR(PCIE_MIF, "Interrupt is masked - do not generate interrupt\n");
		peterson_mutex_unlock(pcie->p_mutex_r4, AP_PROCESS);
		return;
	}

	iowrite32(0xffffff, pcie->registers + SCSC_PCIE_NEWMSG);
	mmiowb();
	peterson_mutex_unlock(pcie->p_mutex_r4, AP_PROCESS);
}

static void pcie_mif_irq_bit_clear(struct scsc_mif_abs *interface, int bit_num)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	if (bit_num >= 16) {
		SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
		return;
	}

	peterson_mutex_lock(pcie->p_mutex_ap, AP_PROCESS);
	pcie->mbox->intsr1 &= ~((1 << bit_num) << 16);
	peterson_mutex_unlock(pcie->p_mutex_ap, AP_PROCESS);
}

static void pcie_mif_irq_bit_mask(struct scsc_mif_abs *interface, int bit_num)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	if (bit_num >= 16) {
		SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
		return;
	}

	peterson_mutex_lock(pcie->p_mutex_ap, AP_PROCESS);
	pcie->mbox->intmr1 |= ((1 << bit_num) << 16);
	peterson_mutex_unlock(pcie->p_mutex_ap, AP_PROCESS);
}

static void pcie_mif_irq_bit_unmask(struct scsc_mif_abs *interface, int bit_num)
{
	int irq_unmasked;
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	if (bit_num >= 16) {
		SCSC_TAG_ERR(PCIE_MIF, "Incorrect INT\n");
		return;
	}

	peterson_mutex_lock(pcie->p_mutex_ap, AP_PROCESS);
	pcie->mbox->intmr1 &= ~((1 << bit_num) << 16);
	irq_unmasked = pcie_mif_irq_get(interface) & (1 << bit_num);
	peterson_mutex_unlock(pcie->p_mutex_ap, AP_PROCESS);

	/* Check whether the interrupt was triggered while it was masked */
	if (irq_unmasked)
		if (pcie->r4_handler != pcie_mif_irq_default_handler)
			pcie->r4_handler(bit_num, pcie->irq_dev);
}
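/*
 * Usage sketch (illustrative only): how a scsc_mif_abs consumer elsewhere
 * in the tree would be expected to drive the bit-level interrupt API
 * implemented above. Bit 3 and the names my_isr/my_dev are made up.
 */
#if 0
static void pcie_mif_irq_usage_sketch(struct scsc_mif_abs *mif,
				      void (*my_isr)(int irq, void *data),
				      void *my_dev)
{
	mif->irq_reg_handler(mif, my_isr, my_dev);	  /* route bit events */
	mif->irq_bit_unmask(mif, 3);			  /* allow bit 3 */
	mif->irq_bit_set(mif, 3, SCSC_MIF_ABS_TARGET_R4); /* signal the R4 */
	mif->irq_bit_clear(mif, 3);			  /* ack it in intsr1 */
}
#endif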
static void pcie_mif_irq_reg_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	pcie->r4_handler = handler;
	pcie->irq_dev = dev;
}

static void pcie_mif_irq_unreg_handler(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	pcie->r4_handler = pcie_mif_irq_default_handler;
	pcie->irq_dev = NULL;
}

static void pcie_mif_irq_reg_reset_request_handler(struct scsc_mif_abs *interface, void (*handler)(int irq, void *data), void *dev)
{
	(void)interface;
	(void)handler;
	(void)dev;
}

static void pcie_mif_irq_unreg_reset_request_handler(struct scsc_mif_abs *interface)
{
	(void)interface;
}

static u32 *pcie_mif_get_mbox_ptr(struct scsc_mif_abs *interface, u32 mbox_index)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);
	u32 *addr;

	addr = (u32 *)(&pcie->mbox->issr[mbox_index]);
	return addr;
}

static int pcie_mif_get_mifram_ref(struct scsc_mif_abs *interface, void *ptr, scsc_mifram_ref *ref)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	/* Reject pointers beyond the hard-coded 4 MiB shared window */
	if (ptr > (pcie->mem + 4 * 1024 * 1024)) {
		SCSC_TAG_ERR(PCIE_MIF, "ooops limits reached\n");
		return -ENOMEM;
	}

	/* A mifram ref is simply the byte offset from the shared base */
	*ref = (scsc_mifram_ref)((uintptr_t)ptr - (uintptr_t)pcie->mem);
	return 0;
}

static void *pcie_mif_get_mifram_ptr(struct scsc_mif_abs *interface, scsc_mifram_ref ref)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	return (void *)((uintptr_t)pcie->mem + (uintptr_t)ref);
}

static uintptr_t pcie_mif_get_mif_pfn(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	return virt_to_phys(pcie->mem) >> PAGE_SHIFT;
}

static struct device *pcie_mif_get_mif_device(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	return pcie->dev;
}

static void pcie_mif_irq_clear(void)
{
}

static void pcie_mif_dump_register(struct scsc_mif_abs *interface)
{
}
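/*
 * Round-trip sketch (illustrative only): because a scsc_mifram_ref is just
 * an offset from pcie->mem, converting a ref to a pointer and back must
 * yield the original value. The 0x100 offset is made up.
 */
#if 0
static void pcie_mif_mifram_roundtrip_sketch(struct scsc_mif_abs *interface)
{
	scsc_mifram_ref ref;
	void *ptr = pcie_mif_get_mifram_ptr(interface, 0x100);

	if (!pcie_mif_get_mifram_ref(interface, ptr, &ref))
		WARN_ON(ref != 0x100);
}
#endif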
struct scsc_mif_abs *pcie_mif_create(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = 0;
	struct scsc_mif_abs *pcie_if;
	struct pcie_mif *pcie = (struct pcie_mif *)devm_kzalloc(&pdev->dev, sizeof(struct pcie_mif), GFP_KERNEL);
	u16 cmd;

	/* Avoid unused parameter error */
	(void)id;

	if (!pcie)
		return NULL;

	pcie_if = &pcie->interface;

	/* initialise interface structure */
	pcie_if->destroy = pcie_mif_destroy;
	pcie_if->get_uid = pcie_mif_get_uid;
	pcie_if->reset = pcie_mif_reset;
	pcie_if->map = pcie_mif_map;
	pcie_if->unmap = pcie_mif_unmap;
#ifdef MAILBOX_SETGET
	pcie_if->mailbox_set = pcie_mif_mailbox_set;
	pcie_if->mailbox_get = pcie_mif_mailbox_get;
#endif
	pcie_if->irq_bit_set = pcie_mif_irq_bit_set;
	pcie_if->irq_get = pcie_mif_irq_get;
	pcie_if->irq_bit_mask_status_get = pcie_mif_irq_bit_mask_status_get;
	pcie_if->irq_bit_clear = pcie_mif_irq_bit_clear;
	pcie_if->irq_bit_mask = pcie_mif_irq_bit_mask;
	pcie_if->irq_bit_unmask = pcie_mif_irq_bit_unmask;
	pcie_if->irq_reg_handler = pcie_mif_irq_reg_handler;
	pcie_if->irq_unreg_handler = pcie_mif_irq_unreg_handler;
	pcie_if->irq_reg_reset_request_handler = pcie_mif_irq_reg_reset_request_handler;
	pcie_if->irq_unreg_reset_request_handler = pcie_mif_irq_unreg_reset_request_handler;
	pcie_if->get_mbox_ptr = pcie_mif_get_mbox_ptr;
	pcie_if->get_mifram_ptr = pcie_mif_get_mifram_ptr;
	pcie_if->get_mifram_ref = pcie_mif_get_mifram_ref;
	pcie_if->get_mifram_pfn = pcie_mif_get_mif_pfn;
	pcie_if->get_mif_device = pcie_mif_get_mif_device;
	pcie_if->irq_clear = pcie_mif_irq_clear;
	pcie_if->mif_dump_registers = pcie_mif_dump_register;

	/* Suspend/resume not supported in PCIe MIF */
	pcie_if->suspend_reg_handler = NULL;
	pcie_if->suspend_unreg_handler = NULL;

	/* Update state */
	pcie->pdev = pdev;
	pcie->dev = &pdev->dev;
	pcie->r4_handler = pcie_mif_irq_default_handler;
	pcie->irq_dev = NULL;

	/* Just do what is necessary to meet the pci probe:
	 * - BAR0 setup
	 * - Interrupt (will it be able to handle interrupts?)
	 */
	pci_set_drvdata(pdev, pcie);

	rc = pcim_enable_device(pdev);
	if (rc) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Error enabling device.\n");
		return NULL;
	}

	/* pci_resource_flags() returns the flags associated with this resource.
	 * Resource flags define features of the individual resource; for PCI
	 * I/O regions the information is extracted from the base address
	 * registers. IORESOURCE_MEM: if the associated I/O region exists, one
	 * and only one of these flags is set. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SCSC_TAG_ERR(PCIE_MIF, "Incorrect BAR configuration\n");
		return NULL;
	}

	/* old --- rc = pci_request_regions(pdev, "foo"); */
	/* Request and iomap regions specified by @mask (0x01 ---> BAR0) */
	rc = pcim_iomap_regions(pdev, BIT(0), DRV_NAME);
	if (rc) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "pcim_iomap_regions() failed. Aborting.\n");
		return NULL;
	}

	pci_set_master(pdev);

	/* Access iomap allocation table; returns __iomem * const * */
	pcie->registers = pcim_iomap_table(pdev)[0];

	/* Set up a single MSI interrupt */
	if (pci_enable_msi(pdev)) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Failed to enable MSI interrupts. Aborting.\n");
		return NULL;
	}
	rc = devm_request_irq(&pdev->dev, pdev->irq, pcie_mif_isr, 0, DRV_NAME, pcie);
	if (rc) {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Failed to register MSI handler. Aborting.\n");
		return NULL;
	}

	/*
	 * 64-bit DMA is deliberately not attempted:
	 * if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
	 *	SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "DMA mask 64bits.\n");
	 *	pcie->dma_using_dac = 1;
	 * } else ...
	 */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		SCSC_TAG_INFO_DEV(PCIE_MIF, pcie->dev, "DMA mask 32bits.\n");
		pcie->dma_using_dac = 0;
	} else {
		SCSC_TAG_ERR_DEV(PCIE_MIF, pcie->dev, "Failed to set DMA mask. Aborting.\n");
		return NULL;
	}
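	/*
	 * Note (editorial assumption): the 32-bit mask also matches how
	 * pcie_mif_map() publishes the buffer -- the DMA address is written
	 * to BAR0 with iowrite32((unsigned int)pcie->dma_addr), so a buffer
	 * above 4 GiB would be silently truncated on its way to the CR4.
	 */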
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	/* Make sure Mx is in the reset state */
	pcie_mif_reset(pcie_if, true);

	/* Create debug proc entry */
	pcie_create_proc_dir(pcie);

	return pcie_if;
}

void pcie_mif_destroy_pcie(struct pci_dev *pdev, struct scsc_mif_abs *interface)
{
	/* Remove debug proc entry */
	pcie_remove_proc_dir();

	pci_disable_device(pdev);
}

struct pci_dev *pcie_mif_get_pci_dev(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	BUG_ON(!interface || !pcie);
	return pcie->pdev;
}

struct device *pcie_mif_get_dev(struct scsc_mif_abs *interface)
{
	struct pcie_mif *pcie = pcie_mif_from_mif_abs(interface);

	BUG_ON(!interface || !pcie);
	return pcie->dev;
}

/* Functions for proc entry */
int pcie_mif_set_bar0_register(struct pcie_mif *pcie, unsigned int value, unsigned int offset)
{
	iowrite32(value, pcie->registers + offset);
	mmiowb();
	return 0;
}
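/*
 * Sketch (illustrative only) of how the proc code in pcie_proc.c might use
 * the helper above, e.g. to set the run-enable register; the value written
 * here is made up.
 */
#if 0
	pcie_mif_set_bar0_register(pcie, 0x1, SCSC_PCIE_RUNEN);
#endif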
void pcie_mif_get_bar0(struct pcie_mif *pcie, struct scsc_bar0_reg *bar0)
{
	bar0->NEWMSG = ioread32(pcie->registers + SCSC_PCIE_NEWMSG);
	bar0->SIGNATURE = ioread32(pcie->registers + SCSC_PCIE_SIGNATURE);
	bar0->OFFSET = ioread32(pcie->registers + SCSC_PCIE_OFFSET);
	bar0->RUNEN = ioread32(pcie->registers + SCSC_PCIE_RUNEN);
	bar0->DEBUG = ioread32(pcie->registers + SCSC_PCIE_DEBUG);
	bar0->AXIWCNT = ioread32(pcie->registers + SCSC_PCIE_AXIWCNT);
	bar0->AXIRCNT = ioread32(pcie->registers + SCSC_PCIE_AXIRCNT);
	bar0->AXIWADDR = ioread32(pcie->registers + SCSC_PCIE_AXIWADDR);
	bar0->AXIRADDR = ioread32(pcie->registers + SCSC_PCIE_AXIRADDR);
	bar0->TBD = ioread32(pcie->registers + SCSC_PCIE_TBD);
	bar0->AXICTRL = ioread32(pcie->registers + SCSC_PCIE_AXICTRL);
	bar0->AXIDATA = ioread32(pcie->registers + SCSC_PCIE_AXIDATA);
	bar0->AXIRDBP = ioread32(pcie->registers + SCSC_PCIE_AXIRDBP);
	bar0->IFAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_IFAXIWCNT);
	bar0->IFAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_IFAXIRCNT);
	bar0->IFAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_IFAXIWADDR);
	bar0->IFAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_IFAXIRADDR);
	bar0->IFAXICTRL = ioread32(pcie->registers + SCSC_PCIE_IFAXICTRL);
	bar0->GRST = ioread32(pcie->registers + SCSC_PCIE_GRST);
	bar0->AMBA2TRANSAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIWCNT);
	bar0->AMBA2TRANSAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIRCNT);
	bar0->AMBA2TRANSAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIWADDR);
	bar0->AMBA2TRANSAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXIRADDR);
	bar0->AMBA2TRANSAXICTR = ioread32(pcie->registers + SCSC_PCIE_AMBA2TRANSAXICTR);
	bar0->TRANS2PCIEREADALIGNAXIWCNT = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIWCNT);
	bar0->TRANS2PCIEREADALIGNAXIRCNT = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIRCNT);
	bar0->TRANS2PCIEREADALIGNAXIWADDR = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIWADDR);
	bar0->TRANS2PCIEREADALIGNAXIRADDR = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXIRADDR);
	bar0->TRANS2PCIEREADALIGNAXICTRL = ioread32(pcie->registers + SCSC_PCIE_TRANS2PCIEREADALIGNAXICTRL);
	bar0->READROUNDTRIPMIN = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPMIN);
	bar0->READROUNDTRIPMAX = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPMAX);
	bar0->READROUNDTRIPLAST = ioread32(pcie->registers + SCSC_PCIE_READROUNDTRIPLAST);
	bar0->CPTAW0 = ioread32(pcie->registers + SCSC_PCIE_CPTAW0);
	bar0->CPTAW1 = ioread32(pcie->registers + SCSC_PCIE_CPTAW1);
	bar0->CPTAR0 = ioread32(pcie->registers + SCSC_PCIE_CPTAR0);
	bar0->CPTAR1 = ioread32(pcie->registers + SCSC_PCIE_CPTAR1);
	bar0->CPTB0 = ioread32(pcie->registers + SCSC_PCIE_CPTB0);
	bar0->CPTW0 = ioread32(pcie->registers + SCSC_PCIE_CPTW0);
	bar0->CPTW1 = ioread32(pcie->registers + SCSC_PCIE_CPTW1);
	bar0->CPTW2 = ioread32(pcie->registers + SCSC_PCIE_CPTW2);
	bar0->CPTR0 = ioread32(pcie->registers + SCSC_PCIE_CPTR0);
	bar0->CPTR1 = ioread32(pcie->registers + SCSC_PCIE_CPTR1);
	bar0->CPTR2 = ioread32(pcie->registers + SCSC_PCIE_CPTR2);
	bar0->CPTRES = ioread32(pcie->registers + SCSC_PCIE_CPTRES);
	bar0->CPTAWDELAY = ioread32(pcie->registers + SCSC_PCIE_CPTAWDELAY);
	bar0->CPTARDELAY = ioread32(pcie->registers + SCSC_PCIE_CPTARDELAY);
	bar0->CPTSRTADDR = ioread32(pcie->registers + SCSC_PCIE_CPTSRTADDR);
	bar0->CPTENDADDR = ioread32(pcie->registers + SCSC_PCIE_CPTENDADDR);
	bar0->CPTSZLTHID = ioread32(pcie->registers + SCSC_PCIE_CPTSZLTHID);
	bar0->CPTPHSEL = ioread32(pcie->registers + SCSC_PCIE_CPTPHSEL);
	bar0->CPTRUN = ioread32(pcie->registers + SCSC_PCIE_CPTRUN);
	bar0->FPGAVER = ioread32(pcie->registers + SCSC_PCIE_FPGAVER);
}