Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,274 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2009 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* Typedefs and defines for working with Octeon physical addresses.
*
*/
#ifndef __CVMX_ADDRESS_H__
#define __CVMX_ADDRESS_H__
#if 0
typedef enum {
CVMX_MIPS_SPACE_XKSEG = 3LL,
CVMX_MIPS_SPACE_XKPHYS = 2LL,
CVMX_MIPS_SPACE_XSSEG = 1LL,
CVMX_MIPS_SPACE_XUSEG = 0LL
} cvmx_mips_space_t;
#endif
typedef enum {
CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
} cvmx_mips_xkseg_space_t;
/* decodes <14:13> of a kseg3 window address */
typedef enum {
CVMX_ADD_WIN_SCR = 0L,
/* see cvmx_add_win_dma_dec_t for further decode */
CVMX_ADD_WIN_DMA = 1L,
CVMX_ADD_WIN_UNUSED = 2L,
CVMX_ADD_WIN_UNUSED2 = 3L
} cvmx_add_win_dec_t;
/* decode within DMA space */
typedef enum {
/*
* Add store data to the write buffer entry, allocating it if
* necessary.
*/
CVMX_ADD_WIN_DMA_ADD = 0L,
/* send out the write buffer entry to DRAM */
CVMX_ADD_WIN_DMA_SENDMEM = 1L,
/* store data must be normal DRAM memory space address in this case */
/* send out the write buffer entry as an IOBDMA command */
CVMX_ADD_WIN_DMA_SENDDMA = 2L,
/* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
/* send out the write buffer entry as an IO write */
CVMX_ADD_WIN_DMA_SENDIO = 3L,
/* store data must be normal IO space address in this case */
/* send out a single-tick command on the NCB bus */
CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
/* no write buffer data needed/used */
} cvmx_add_win_dma_dec_t;
/*
* Physical Address Decode
*
* Octeon-I HW never interprets this X (<39:36> reserved
* for future expansion), software should set to 0.
*
* - 0x0 XXX0 0000 0000 to DRAM Cached
* - 0x0 XXX0 0FFF FFFF
*
* - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
* - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
*
* - 0x0 XXX0 2000 0000 to DRAM Cached
* - 0x0 XXXF FFFF FFFF
*
* - 0x1 00X0 0000 0000 to Boot Bus Uncached
* - 0x1 00XF FFFF FFFF
*
* - 0x1 01X0 0000 0000 to Other NCB Uncached
* - 0x1 FFXF FFFF FFFF devices
*
* Decode of all Octeon addresses
*/
typedef union {
uint64_t u64;
/* mapped or unmapped virtual address */
struct {
uint64_t R:2;
uint64_t offset:62;
} sva;
/* mapped USEG virtual addresses (typically) */
struct {
uint64_t zeroes:33;
uint64_t offset:31;
} suseg;
/* mapped or unmapped virtual address */
struct {
uint64_t ones:33;
uint64_t sp:2;
uint64_t offset:29;
} sxkseg;
/*
* physical address accessed through xkphys unmapped virtual
* address.
*/
struct {
uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
uint64_t cca:3; /* ignored by octeon */
uint64_t mbz:10;
uint64_t pa:49; /* physical address */
} sxkphys;
/* physical address */
struct {
uint64_t mbz:15;
/* if set, the address is uncached and resides on MCB bus */
uint64_t is_io:1;
/*
* the hardware ignores this field when is_io==0, else
* device ID.
*/
uint64_t did:8;
/* the hardware ignores <39:36> in Octeon I */
uint64_t unaddr:4;
uint64_t offset:36;
} sphys;
/* physical mem address */
struct {
/* technically, <47:40> are don't-cares */
uint64_t zeroes:24;
/* the hardware ignores <39:36> in Octeon I */
uint64_t unaddr:4;
uint64_t offset:36;
} smem;
/* physical IO address */
struct {
uint64_t mem_region:2;
uint64_t mbz:13;
/* 1 in this case */
uint64_t is_io:1;
/*
* The hardware ignores this field when is_io==0, else
* device ID.
*/
uint64_t did:8;
/* the hardware ignores <39:36> in Octeon I */
uint64_t unaddr:4;
uint64_t offset:36;
} sio;
/*
* Scratchpad virtual address - accessed through a window at
* the end of kseg3
*/
struct {
uint64_t ones:49;
/* CVMX_ADD_WIN_SCR (0) in this case */
cvmx_add_win_dec_t csrdec:2;
uint64_t addr:13;
} sscr;
/* there should only be stores to IOBDMA space, no loads */
/*
* IOBDMA virtual address - accessed through a window at the
* end of kseg3
*/
struct {
uint64_t ones:49;
uint64_t csrdec:2; /* CVMX_ADD_WIN_DMA (1) in this case */
uint64_t unused2:3;
uint64_t type:3;
uint64_t addr:7;
} sdma;
struct {
uint64_t didspace:24;
uint64_t unused:40;
} sfilldidspace;
} cvmx_addr_t;
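/*
 * Illustrative sketch, not part of the original header: decoding a raw
 * Octeon physical address with the union above, assuming the big-endian
 * bitfield layout shown.  The helper name is made up; it simply reports
 * whether bit 48 (sphys.is_io) marks the address as an uncached IO-space
 * access.
 */
static inline int cvmx_example_addr_is_io(uint64_t physical_address)
{
	cvmx_addr_t decode;

	decode.u64 = physical_address;
	return decode.sphys.is_io;
}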
/* These macros are used by 32-bit applications */
#define CVMX_MIPS32_SPACE_KSEG0 1l
#define CVMX_ADD_SEG32(segment, add) \
(((int32_t)segment << 31) | (int32_t)(add))
/*
* Currently all IOs are performed using XKPHYS addressing. Linux uses
* the CvmMemCtl register to enable XKPHYS addressing to IO space from
* user mode. Future OSes may need to change the upper bits of IO
* addresses. The following define controls the upper two bits for all
* IO addresses generated by the simple executive library.
*/
#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
/* These macros simplify the process of creating common IO addresses */
#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
#ifndef CVMX_ADD_IO_SEG
#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
#endif
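/*
 * Illustrative sketch, not part of the original header: CVMX_ADD_SEG()
 * just ORs the two address-space bits into <63:62>.  Using the XKPHYS
 * space (value 2) with a made-up CSR offset:
 */
static inline uint64_t cvmx_example_io_address(void)
{
	/* (2ull << 62) | 0x00011800B0000000ull == 0x80011800B0000000ull */
	return CVMX_ADD_SEG(2ull, 0x00011800B0000000ull);
}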
#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))
/* from include/ncb_rsl_id.v */
#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
#define CVMX_OCT_DID_GMX0 1ULL
#define CVMX_OCT_DID_GMX1 2ULL
#define CVMX_OCT_DID_PCI 3ULL
#define CVMX_OCT_DID_KEY 4ULL
#define CVMX_OCT_DID_FPA 5ULL
#define CVMX_OCT_DID_DFA 6ULL
#define CVMX_OCT_DID_ZIP 7ULL
#define CVMX_OCT_DID_RNG 8ULL
#define CVMX_OCT_DID_IPD 9ULL
#define CVMX_OCT_DID_PKT 10ULL
#define CVMX_OCT_DID_TIM 11ULL
#define CVMX_OCT_DID_TAG 12ULL
/* the rest are not on the IO bus */
#define CVMX_OCT_DID_L2C 16ULL
#define CVMX_OCT_DID_LMC 17ULL
#define CVMX_OCT_DID_SPX0 18ULL
#define CVMX_OCT_DID_SPX1 19ULL
#define CVMX_OCT_DID_PIP 20ULL
#define CVMX_OCT_DID_ASX0 22ULL
#define CVMX_OCT_DID_ASX1 23ULL
#define CVMX_OCT_DID_IOB 30ULL
#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
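/*
 * Illustrative sketch, not part of the original header: a full DID is
 * the device ID shifted left by 3 with the 3-bit sub-DID in the low
 * bits, so CVMX_OCT_DID_TAG_CSR is (12 << 3) | 7 == 103.
 */
static inline uint64_t cvmx_example_tag_csr_did(void)
{
	return CVMX_OCT_DID_TAG_CSR;	/* evaluates to 103 */
}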
#endif /* __CVMX_ADDRESS_H__ */

File diff suppressed because it is too large


@@ -0,0 +1,139 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* This file defines ASM primitives for the executive.
*/
#ifndef __CVMX_ASM_H__
#define __CVMX_ASM_H__
#include <asm/octeon/octeon-model.h>
/* other useful stuff */
#define CVMX_SYNC asm volatile ("sync" : : : "memory")
/* String version of SYNCW macro for use in inline asm constructs */
#define CVMX_SYNCW_STR "syncw\nsyncw\n"
#ifdef __OCTEON__
/* Deprecated, will be removed in future release */
#define CVMX_SYNCIO asm volatile ("nop")
#define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : : "memory")
/* Deprecated, will be removed in future release */
#define CVMX_SYNCIOALL asm volatile ("nop")
/*
* We actually use two syncw instructions in a row when we need a write
* memory barrier. This is because the CN3XXX series of Octeons have
* errata Core-401. This can cause a single syncw to not enforce
* ordering under very rare conditions. Even if it is rare, better safe
* than sorry.
*/
#define CVMX_SYNCW asm volatile ("syncw\n\tsyncw" : : : "memory")
/*
* Define new sync instructions to be normal SYNC instructions for
* operating systems that use threads.
*/
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#else
/*
* Not using a Cavium compiler, always use the slower sync so the
* assembler stays happy.
*/
/* Deprecated, will be removed in future release */
#define CVMX_SYNCIO asm volatile ("nop")
#define CVMX_SYNCIOBDMA asm volatile ("sync" : : : "memory")
/* Deprecated, will be removed in future release */
#define CVMX_SYNCIOALL asm volatile ("nop")
#define CVMX_SYNCW asm volatile ("sync" : : : "memory")
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#endif
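/*
 * Illustrative sketch, not part of the original header: a typical use
 * of CVMX_SYNCW is ordering a data write ahead of the flag that another
 * core polls on.  Both pointer names are made up for the example.
 */
static inline void cvmx_example_publish(volatile uint64_t *data,
					volatile uint64_t *ready_flag,
					uint64_t value)
{
	*data = value;
	CVMX_SYNCW;		/* make the data visible before the flag */
	*ready_flag = 1;
}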
/*
* CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable
* (actually old value or zero) until that byte is stored to (by this or
* another processor). Note that the value of each byte is not only
* unpredictable, but may also change again - up until the point when one
* of the cores stores to the byte.
*/
#define CVMX_PREPARE_FOR_STORE(address, offset) \
asm volatile ("pref 30, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
[rbase] "d" (address))
/*
* This is a command headed to the L2 controller to tell it to clear
* its dirty bit for a block. Basically, SW is telling HW that the
* current version of the block will not be used.
*/
#define CVMX_DONT_WRITE_BACK(address, offset) \
asm volatile ("pref 29, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
[rbase] "d" (address))
/* flush stores, invalidate entire icache */
#define CVMX_ICACHE_INVALIDATE \
{ CVMX_SYNC; asm volatile ("synci 0($0)" : : ); }
/* flush stores, invalidate entire icache */
#define CVMX_ICACHE_INVALIDATE2 \
{ CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); }
/* complete prefetches, invalidate entire dcache */
#define CVMX_DCACHE_INVALIDATE \
{ CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
#define CVMX_CACHE(op, address, offset) \
asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" \
: : [rbase] "d" (address) )
/* fetch and lock the state. */
#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset)
/* unlock the state. */
#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset)
/* invalidate the cache block and clear the USED bits for the block */
#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset)
/* load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register */
#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset)
#define CVMX_POP(result, input) \
asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DPOP(result, input) \
asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
/* some new cop0-like stuff */
#define CVMX_RDHWR(result, regstr) \
asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#define CVMX_RDHWRNV(result, regstr) \
asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#endif /* __CVMX_ASM_H__ */


@@ -0,0 +1,669 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_ASXX_DEFS_H__
#define __CVMX_ASXX_DEFS_H__
#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000180ull))
#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000188ull))
#define CVMX_ASXX_INT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000018ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000010ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_MII_RX_DAT_SET(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000190ull))
#define CVMX_ASXX_PRT_LOOP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000040ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_BYPASS(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000248ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000250ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_COMP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000220ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_DATA_DRV(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000218ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000210ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000230ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000240ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000228ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000238ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RLD_SETTING(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000258ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000020ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_RX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000000ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000100ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000108ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL_POWOK(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000118ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_RX_WOL_SIG(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000110ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_TX_COMP_BYP(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000068ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800B0000080ull) + (((offset) & 3) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_ASXX_TX_PRT_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800B0000008ull) + ((block_id) & 1) * 0x8000000ull)
union cvmx_asxx_gmii_rx_clk_set {
uint64_t u64;
struct cvmx_asxx_gmii_rx_clk_set_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
};
union cvmx_asxx_gmii_rx_dat_set {
uint64_t u64;
struct cvmx_asxx_gmii_rx_dat_set_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
};
union cvmx_asxx_int_en {
uint64_t u64;
struct cvmx_asxx_int_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t txpsh:4;
uint64_t txpop:4;
uint64_t ovrflw:4;
#else
uint64_t ovrflw:4;
uint64_t txpop:4;
uint64_t txpsh:4;
uint64_t reserved_12_63:52;
#endif
} s;
struct cvmx_asxx_int_en_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
uint64_t txpsh:3;
uint64_t reserved_7_7:1;
uint64_t txpop:3;
uint64_t reserved_3_3:1;
uint64_t ovrflw:3;
#else
uint64_t ovrflw:3;
uint64_t reserved_3_3:1;
uint64_t txpop:3;
uint64_t reserved_7_7:1;
uint64_t txpsh:3;
uint64_t reserved_11_63:53;
#endif
} cn30xx;
struct cvmx_asxx_int_en_cn30xx cn31xx;
struct cvmx_asxx_int_en_s cn38xx;
struct cvmx_asxx_int_en_s cn38xxp2;
struct cvmx_asxx_int_en_cn30xx cn50xx;
struct cvmx_asxx_int_en_s cn58xx;
struct cvmx_asxx_int_en_s cn58xxp1;
};
union cvmx_asxx_int_reg {
uint64_t u64;
struct cvmx_asxx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t txpsh:4;
uint64_t txpop:4;
uint64_t ovrflw:4;
#else
uint64_t ovrflw:4;
uint64_t txpop:4;
uint64_t txpsh:4;
uint64_t reserved_12_63:52;
#endif
} s;
struct cvmx_asxx_int_reg_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
uint64_t txpsh:3;
uint64_t reserved_7_7:1;
uint64_t txpop:3;
uint64_t reserved_3_3:1;
uint64_t ovrflw:3;
#else
uint64_t ovrflw:3;
uint64_t reserved_3_3:1;
uint64_t txpop:3;
uint64_t reserved_7_7:1;
uint64_t txpsh:3;
uint64_t reserved_11_63:53;
#endif
} cn30xx;
struct cvmx_asxx_int_reg_cn30xx cn31xx;
struct cvmx_asxx_int_reg_s cn38xx;
struct cvmx_asxx_int_reg_s cn38xxp2;
struct cvmx_asxx_int_reg_cn30xx cn50xx;
struct cvmx_asxx_int_reg_s cn58xx;
struct cvmx_asxx_int_reg_s cn58xxp1;
};
union cvmx_asxx_mii_rx_dat_set {
uint64_t u64;
struct cvmx_asxx_mii_rx_dat_set_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
};
union cvmx_asxx_prt_loop {
uint64_t u64;
struct cvmx_asxx_prt_loop_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t ext_loop:4;
uint64_t int_loop:4;
#else
uint64_t int_loop:4;
uint64_t ext_loop:4;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_asxx_prt_loop_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t ext_loop:3;
uint64_t reserved_3_3:1;
uint64_t int_loop:3;
#else
uint64_t int_loop:3;
uint64_t reserved_3_3:1;
uint64_t ext_loop:3;
uint64_t reserved_7_63:57;
#endif
} cn30xx;
struct cvmx_asxx_prt_loop_cn30xx cn31xx;
struct cvmx_asxx_prt_loop_s cn38xx;
struct cvmx_asxx_prt_loop_s cn38xxp2;
struct cvmx_asxx_prt_loop_cn30xx cn50xx;
struct cvmx_asxx_prt_loop_s cn58xx;
struct cvmx_asxx_prt_loop_s cn58xxp1;
};
union cvmx_asxx_rld_bypass {
uint64_t u64;
struct cvmx_asxx_rld_bypass_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t bypass:1;
#else
uint64_t bypass:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_asxx_rld_bypass_s cn38xx;
struct cvmx_asxx_rld_bypass_s cn38xxp2;
struct cvmx_asxx_rld_bypass_s cn58xx;
struct cvmx_asxx_rld_bypass_s cn58xxp1;
};
union cvmx_asxx_rld_bypass_setting {
uint64_t u64;
struct cvmx_asxx_rld_bypass_setting_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_rld_bypass_setting_s cn38xx;
struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
struct cvmx_asxx_rld_bypass_setting_s cn58xx;
struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
};
union cvmx_asxx_rld_comp {
uint64_t u64;
struct cvmx_asxx_rld_comp_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t pctl:5;
uint64_t nctl:4;
#else
uint64_t nctl:4;
uint64_t pctl:5;
uint64_t reserved_9_63:55;
#endif
} s;
struct cvmx_asxx_rld_comp_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t pctl:4;
uint64_t nctl:4;
#else
uint64_t nctl:4;
uint64_t pctl:4;
uint64_t reserved_8_63:56;
#endif
} cn38xx;
struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
struct cvmx_asxx_rld_comp_s cn58xx;
struct cvmx_asxx_rld_comp_s cn58xxp1;
};
union cvmx_asxx_rld_data_drv {
uint64_t u64;
struct cvmx_asxx_rld_data_drv_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t pctl:4;
uint64_t nctl:4;
#else
uint64_t nctl:4;
uint64_t pctl:4;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_asxx_rld_data_drv_s cn38xx;
struct cvmx_asxx_rld_data_drv_s cn38xxp2;
struct cvmx_asxx_rld_data_drv_s cn58xx;
struct cvmx_asxx_rld_data_drv_s cn58xxp1;
};
union cvmx_asxx_rld_fcram_mode {
uint64_t u64;
struct cvmx_asxx_rld_fcram_mode_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t mode:1;
#else
uint64_t mode:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_asxx_rld_fcram_mode_s cn38xx;
struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
};
union cvmx_asxx_rld_nctl_strong {
uint64_t u64;
struct cvmx_asxx_rld_nctl_strong_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t nctl:5;
#else
uint64_t nctl:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_rld_nctl_strong_s cn38xx;
struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
struct cvmx_asxx_rld_nctl_strong_s cn58xx;
struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
};
union cvmx_asxx_rld_nctl_weak {
uint64_t u64;
struct cvmx_asxx_rld_nctl_weak_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t nctl:5;
#else
uint64_t nctl:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_rld_nctl_weak_s cn38xx;
struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
struct cvmx_asxx_rld_nctl_weak_s cn58xx;
struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
};
union cvmx_asxx_rld_pctl_strong {
uint64_t u64;
struct cvmx_asxx_rld_pctl_strong_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t pctl:5;
#else
uint64_t pctl:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_rld_pctl_strong_s cn38xx;
struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
struct cvmx_asxx_rld_pctl_strong_s cn58xx;
struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
};
union cvmx_asxx_rld_pctl_weak {
uint64_t u64;
struct cvmx_asxx_rld_pctl_weak_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t pctl:5;
#else
uint64_t pctl:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_rld_pctl_weak_s cn38xx;
struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
struct cvmx_asxx_rld_pctl_weak_s cn58xx;
struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
};
union cvmx_asxx_rld_setting {
uint64_t u64;
struct cvmx_asxx_rld_setting_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t dfaset:5;
uint64_t dfalag:1;
uint64_t dfalead:1;
uint64_t dfalock:1;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t dfalock:1;
uint64_t dfalead:1;
uint64_t dfalag:1;
uint64_t dfaset:5;
uint64_t reserved_13_63:51;
#endif
} s;
struct cvmx_asxx_rld_setting_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t reserved_5_63:59;
#endif
} cn38xx;
struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
struct cvmx_asxx_rld_setting_s cn58xx;
struct cvmx_asxx_rld_setting_s cn58xxp1;
};
union cvmx_asxx_rx_clk_setx {
uint64_t u64;
struct cvmx_asxx_rx_clk_setx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_rx_clk_setx_s cn30xx;
struct cvmx_asxx_rx_clk_setx_s cn31xx;
struct cvmx_asxx_rx_clk_setx_s cn38xx;
struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
struct cvmx_asxx_rx_clk_setx_s cn50xx;
struct cvmx_asxx_rx_clk_setx_s cn58xx;
struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
};
union cvmx_asxx_rx_prt_en {
uint64_t u64;
struct cvmx_asxx_rx_prt_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t prt_en:4;
#else
uint64_t prt_en:4;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_asxx_rx_prt_en_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
uint64_t prt_en:3;
#else
uint64_t prt_en:3;
uint64_t reserved_3_63:61;
#endif
} cn30xx;
struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
struct cvmx_asxx_rx_prt_en_s cn38xx;
struct cvmx_asxx_rx_prt_en_s cn38xxp2;
struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
struct cvmx_asxx_rx_prt_en_s cn58xx;
struct cvmx_asxx_rx_prt_en_s cn58xxp1;
};
union cvmx_asxx_rx_wol {
uint64_t u64;
struct cvmx_asxx_rx_wol_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
uint64_t status:1;
uint64_t enable:1;
#else
uint64_t enable:1;
uint64_t status:1;
uint64_t reserved_2_63:62;
#endif
} s;
struct cvmx_asxx_rx_wol_s cn38xx;
struct cvmx_asxx_rx_wol_s cn38xxp2;
};
union cvmx_asxx_rx_wol_msk {
uint64_t u64;
struct cvmx_asxx_rx_wol_msk_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t msk:64;
#else
uint64_t msk:64;
#endif
} s;
struct cvmx_asxx_rx_wol_msk_s cn38xx;
struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
};
union cvmx_asxx_rx_wol_powok {
uint64_t u64;
struct cvmx_asxx_rx_wol_powok_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t powerok:1;
#else
uint64_t powerok:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_asxx_rx_wol_powok_s cn38xx;
struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
};
union cvmx_asxx_rx_wol_sig {
uint64_t u64;
struct cvmx_asxx_rx_wol_sig_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t sig:32;
#else
uint64_t sig:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_asxx_rx_wol_sig_s cn38xx;
struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
};
union cvmx_asxx_tx_clk_setx {
uint64_t u64;
struct cvmx_asxx_tx_clk_setx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t setting:5;
#else
uint64_t setting:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_asxx_tx_clk_setx_s cn30xx;
struct cvmx_asxx_tx_clk_setx_s cn31xx;
struct cvmx_asxx_tx_clk_setx_s cn38xx;
struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
struct cvmx_asxx_tx_clk_setx_s cn50xx;
struct cvmx_asxx_tx_clk_setx_s cn58xx;
struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
};
union cvmx_asxx_tx_comp_byp {
uint64_t u64;
struct cvmx_asxx_tx_comp_byp_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_0_63:64;
#else
uint64_t reserved_0_63:64;
#endif
} s;
struct cvmx_asxx_tx_comp_byp_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t bypass:1;
uint64_t pctl:4;
uint64_t nctl:4;
#else
uint64_t nctl:4;
uint64_t pctl:4;
uint64_t bypass:1;
uint64_t reserved_9_63:55;
#endif
} cn30xx;
struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
struct cvmx_asxx_tx_comp_byp_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t pctl:4;
uint64_t nctl:4;
#else
uint64_t nctl:4;
uint64_t pctl:4;
uint64_t reserved_8_63:56;
#endif
} cn38xx;
struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
struct cvmx_asxx_tx_comp_byp_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t bypass:1;
uint64_t reserved_13_15:3;
uint64_t pctl:5;
uint64_t reserved_5_7:3;
uint64_t nctl:5;
#else
uint64_t nctl:5;
uint64_t reserved_5_7:3;
uint64_t pctl:5;
uint64_t reserved_13_15:3;
uint64_t bypass:1;
uint64_t reserved_17_63:47;
#endif
} cn50xx;
struct cvmx_asxx_tx_comp_byp_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t pctl:5;
uint64_t reserved_5_7:3;
uint64_t nctl:5;
#else
uint64_t nctl:5;
uint64_t reserved_5_7:3;
uint64_t pctl:5;
uint64_t reserved_13_63:51;
#endif
} cn58xx;
struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
};
union cvmx_asxx_tx_hi_waterx {
uint64_t u64;
struct cvmx_asxx_tx_hi_waterx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t mark:4;
#else
uint64_t mark:4;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_asxx_tx_hi_waterx_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
uint64_t mark:3;
#else
uint64_t mark:3;
uint64_t reserved_3_63:61;
#endif
} cn30xx;
struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
struct cvmx_asxx_tx_hi_waterx_s cn38xx;
struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
struct cvmx_asxx_tx_hi_waterx_s cn58xx;
struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
};
union cvmx_asxx_tx_prt_en {
uint64_t u64;
struct cvmx_asxx_tx_prt_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t prt_en:4;
#else
uint64_t prt_en:4;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_asxx_tx_prt_en_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
uint64_t prt_en:3;
#else
uint64_t prt_en:3;
uint64_t reserved_3_63:61;
#endif
} cn30xx;
struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
struct cvmx_asxx_tx_prt_en_s cn38xx;
struct cvmx_asxx_tx_prt_en_s cn38xxp2;
struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
struct cvmx_asxx_tx_prt_en_s cn58xx;
struct cvmx_asxx_tx_prt_en_s cn58xxp1;
};
#endif


@@ -0,0 +1,351 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Header file containing the ABI with the bootloader.
*/
#ifndef __CVMX_BOOTINFO_H__
#define __CVMX_BOOTINFO_H__
/*
* Current major and minor versions of the CVMX bootinfo block that is
* passed from the bootloader to the application. This is versioned
* so that applications can properly handle multiple bootloader
* versions.
*/
#define CVMX_BOOTINFO_MAJ_VER 1
#define CVMX_BOOTINFO_MIN_VER 3
#if (CVMX_BOOTINFO_MAJ_VER == 1)
#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
/*
* This structure is populated by the bootloader. For binary
* compatibility the only changes that should be made are
* adding members to the end of the structure, and the minor
* version should be incremented at that time.
* If an incompatible change is made, the major version
* must be incremented, and the minor version should be reset
* to 0.
*/
struct cvmx_bootinfo {
uint32_t major_version;
uint32_t minor_version;
uint64_t stack_top;
uint64_t heap_base;
uint64_t heap_end;
uint64_t desc_vaddr;
uint32_t exception_base_addr;
uint32_t stack_size;
uint32_t flags;
uint32_t core_mask;
/* DRAM size in megabytes */
uint32_t dram_size;
/* physical address of free memory descriptor block*/
uint32_t phy_mem_desc_addr;
/* used to pass flags from app to debugger */
uint32_t debugger_flags_base_addr;
/* CPU clock speed, in hz */
uint32_t eclock_hz;
/* DRAM clock speed, in hz */
uint32_t dclock_hz;
uint32_t reserved0;
uint16_t board_type;
uint8_t board_rev_major;
uint8_t board_rev_minor;
uint16_t reserved1;
uint8_t reserved2;
uint8_t reserved3;
char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
uint8_t mac_addr_base[6];
uint8_t mac_addr_count;
#if (CVMX_BOOTINFO_MIN_VER >= 1)
/*
* Several boards support compact flash on the Octeon boot
* bus. The CF memory spaces may be mapped to different
* addresses on different boards. These are the physical
* addresses, so care must be taken to use the correct
* XKPHYS/KSEG0 addressing depending on the application's
* ABI. These values will be 0 if CF is not present.
*/
uint64_t compact_flash_common_base_addr;
uint64_t compact_flash_attribute_base_addr;
/*
* Base address of the LED display (as on EBT3000 board)
* This will be 0 if LED display not present.
*/
uint64_t led_display_base_addr;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 2)
/* DFA reference clock in hz (if applicable)*/
uint32_t dfa_ref_clock_hz;
/*
* flags indicating various configuration options. These
* flags supersede the 'flags' variable and should be used
* instead if available.
*/
uint32_t config_flags;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 3)
/*
* Address of the OF Flattened Device Tree structure
* describing the board.
*/
uint64_t fdt_addr;
#endif
};
#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
/* This flag is set if the TLB mappings are not contained in the
* 0x10000000 - 0x20000000 boot bus region. */
#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
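/*
 * Illustrative sketch, not part of the original header: because minor
 * versions only append fields, a consumer checks the version before
 * touching a later field such as fdt_addr (added in minor version 3).
 * The helper name is made up.
 */
static inline uint64_t cvmx_example_bootinfo_fdt_addr(struct cvmx_bootinfo *info)
{
	if (info->major_version == 1 && info->minor_version >= 3)
		return info->fdt_addr;
	return 0;	/* bootloader too old to provide a device tree */
}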
#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
/* Type defines for board and chip types */
enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_NULL = 0,
CVMX_BOARD_TYPE_SIM = 1,
CVMX_BOARD_TYPE_EBT3000 = 2,
CVMX_BOARD_TYPE_KODAMA = 3,
CVMX_BOARD_TYPE_NIAGARA = 4,
CVMX_BOARD_TYPE_NAC38 = 5, /* formerly NAO38 */
CVMX_BOARD_TYPE_THUNDER = 6,
CVMX_BOARD_TYPE_TRANTOR = 7,
CVMX_BOARD_TYPE_EBH3000 = 8,
CVMX_BOARD_TYPE_EBH3100 = 9,
CVMX_BOARD_TYPE_HIKARI = 10,
CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
CVMX_BOARD_TYPE_KBP = 13,
/* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14,
CVMX_BOARD_TYPE_EBT5800 = 15,
CVMX_BOARD_TYPE_NICPRO2 = 16,
CVMX_BOARD_TYPE_EBH5600 = 17,
CVMX_BOARD_TYPE_EBH5601 = 18,
CVMX_BOARD_TYPE_EBH5200 = 19,
CVMX_BOARD_TYPE_BBGW_REF = 20,
CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
CVMX_BOARD_TYPE_EBT5600 = 22,
CVMX_BOARD_TYPE_EBH5201 = 23,
CVMX_BOARD_TYPE_EBT5200 = 24,
CVMX_BOARD_TYPE_CB5600 = 25,
CVMX_BOARD_TYPE_CB5601 = 26,
CVMX_BOARD_TYPE_CB5200 = 27,
/* Special 'generic' board type, supports many boards */
CVMX_BOARD_TYPE_GENERIC = 28,
CVMX_BOARD_TYPE_EBH5610 = 29,
CVMX_BOARD_TYPE_LANAI2_A = 30,
CVMX_BOARD_TYPE_LANAI2_U = 31,
CVMX_BOARD_TYPE_EBB5600 = 32,
CVMX_BOARD_TYPE_EBB6300 = 33,
CVMX_BOARD_TYPE_NIC_XLE_10G = 34,
CVMX_BOARD_TYPE_LANAI2_G = 35,
CVMX_BOARD_TYPE_EBT5810 = 36,
CVMX_BOARD_TYPE_NIC10E = 37,
CVMX_BOARD_TYPE_EP6300C = 38,
CVMX_BOARD_TYPE_EBB6800 = 39,
CVMX_BOARD_TYPE_NIC4E = 40,
CVMX_BOARD_TYPE_NIC2E = 41,
CVMX_BOARD_TYPE_EBB6600 = 42,
CVMX_BOARD_TYPE_REDWING = 43,
CVMX_BOARD_TYPE_NIC68_4 = 44,
CVMX_BOARD_TYPE_NIC10E_66 = 45,
CVMX_BOARD_TYPE_MAX,
/*
* The range from CVMX_BOARD_TYPE_MAX to
* CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved for future
* SDK use.
*/
/*
* Set aside a range for customer boards. These numbers are managed
* by Cavium.
*/
CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
CVMX_BOARD_TYPE_CUST_NB5 = 10003,
CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
CVMX_BOARD_TYPE_CUST_ITB101 = 10005,
CVMX_BOARD_TYPE_CUST_NTE102 = 10006,
CVMX_BOARD_TYPE_CUST_AGS103 = 10007,
CVMX_BOARD_TYPE_CUST_GST104 = 10008,
CVMX_BOARD_TYPE_CUST_GCT105 = 10009,
CVMX_BOARD_TYPE_CUST_AGS106 = 10010,
CVMX_BOARD_TYPE_CUST_SGM107 = 10011,
CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER = 10015,
CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER = 10016,
CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX = 10017,
CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX = 10018,
CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX = 10019,
CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX = 10020,
CVMX_BOARD_TYPE_CUST_L2_ZINWELL = 10021,
CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
/*
* Set aside a range for customer private use. The SDK won't
* use any numbers in this range.
*/
CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
CVMX_BOARD_TYPE_UBNT_E100 = 20002,
CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
/* The remaining range is reserved for future use. */
};
enum cvmx_chip_types_enum {
CVMX_CHIP_TYPE_NULL = 0,
CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,
CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
CVMX_CHIP_TYPE_MAX,
};
/* Compatibility alias for NAC38 name change, planned to be removed
* from SDK 1.7 */
#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
/* Functions to return string based on type */
#define ENUM_BRD_TYPE_CASE(x) \
case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */
static inline const char *cvmx_board_type_to_string(enum
cvmx_board_types_enum type)
{
switch (type) {
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_A)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_U)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB5600)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6300)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_10G)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_G)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5810)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EP6300C)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6800)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC4E)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC2E)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6600)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
/* Customer boards listed here */
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_ITB101)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NTE102)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS103)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GST104)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT105)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS106)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_SGM107)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ZINWELL)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
/* Customer private range */
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
}
return "Unsupported Board";
}
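/*
 * Illustrative note, not part of the original header: the "#x + 16"
 * above skips the 16-character "CVMX_BOARD_TYPE_" prefix, so e.g.
 * cvmx_board_type_to_string(CVMX_BOARD_TYPE_UBNT_E100) returns
 * "UBNT_E100".
 */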
#define ENUM_CHIP_TYPE_CASE(x) \
case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */
static inline const char *cvmx_chip_type_to_string(enum
cvmx_chip_types_enum type)
{
switch (type) {
ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL)
ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED)
ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE)
ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX)
}
return "Unsupported Chip";
}
#endif /* __CVMX_BOOTINFO_H__ */

View file

@@ -0,0 +1,375 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Simple allocate only memory allocator. Used to allocate memory at
* application start time.
*/
#ifndef __CVMX_BOOTMEM_H__
#define __CVMX_BOOTMEM_H__
/* Must be multiple of 8, changing breaks ABI */
#define CVMX_BOOTMEM_NAME_LEN 128
/* Can change without breaking ABI */
#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
/* minimum alignment of bootmem alloced blocks */
#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
/* Flags for cvmx_bootmem_phy_mem* functions */
/* Allocate from end of block instead of beginning */
#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0)
/* Don't do any locking. */
#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1)
/* First bytes of each free physical block of memory contain this structure,
* which is used to maintain the free memory list. Since the bootloader is
* only 32 bits, there is a union providing 64 and 32 bit versions. The
* application init code converts addresses to 64 bit addresses before the
* application starts.
*/
struct cvmx_bootmem_block_header {
/*
* Note: these are referenced from assembly routines in the
* bootloader, so this structure should not be changed
* without changing those routines as well.
*/
uint64_t next_block_addr;
uint64_t size;
};
/*
* Structure for named memory blocks. Number of descriptors available
* can be changed without affecting compatibility, but name length
* changes require a bump in the bootmem descriptor version. Note: This
* structure must be naturally 64 bit aligned, as a single memory
* image will be used by both 32 and 64 bit programs.
*/
struct cvmx_bootmem_named_block_desc {
/* Base address of named block */
uint64_t base_addr;
/*
* Size actually allocated for named block (may differ from
* requested).
*/
uint64_t size;
/* name of named block */
char name[CVMX_BOOTMEM_NAME_LEN];
};
/* Current descriptor versions */
/* CVMX bootmem descriptor major version */
#define CVMX_BOOTMEM_DESC_MAJ_VER 3
/* CVMX bootmem descriptor minor version */
#define CVMX_BOOTMEM_DESC_MIN_VER 0
/* First three members of cvmx_bootmem_desc_t are left in original
* positions for backwards compatibility.
*/
struct cvmx_bootmem_desc {
/* spinlock to control access to list */
uint32_t lock;
/* flags for indicating various conditions */
uint32_t flags;
uint64_t head_addr;
/* Incremented when incompatible changes made */
uint32_t major_version;
/*
* Incremented when compatible changes are made, reset to
* zero when major incremented.
*/
uint32_t minor_version;
uint64_t app_data_addr;
uint64_t app_data_size;
/* number of elements in named blocks array */
uint32_t named_block_num_blocks;
/* length of name array in bootmem blocks */
uint32_t named_block_name_len;
/* address of named memory block descriptors */
uint64_t named_block_array_addr;
};
/**
* Initialize the boot alloc memory structures. This is
* normally called inside of cvmx_user_app_init()
*
* @mem_desc_ptr: Address of the free memory list
*/
extern int cvmx_bootmem_init(void *mem_desc_ptr);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader.
* This is an allocate-only algorithm, so freeing memory is not possible.
*
* @size: Size in bytes of block to allocate
* @alignment: Alignment required - must be power of 2
*
* Returns pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
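/*
 * Illustrative sketch, not part of the original header: grabbing a
 * one-time scratch area at application start.  The size and alignment
 * are made up; the block can never be freed.
 */
static inline void *cvmx_example_alloc_scratch(void)
{
	return cvmx_bootmem_alloc(1ull << 20, 128);	/* 1 MB, 128-byte aligned */
}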
/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader at a specific
* address. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated at the specified address.
*
* @size: Size in bytes of block to allocate
* @address: Physical address to allocate memory at. If this memory is not
* available, the allocation fails.
* @alignment: Alignment required - must be power of 2
* Returns pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
uint64_t alignment);
/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader within a specified
* address range. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated in the requested range.
*
* @size: Size in bytes of block to allocate
* @min_addr: defines the minimum address of the range
* @max_addr: defines the maximum address of the range
* @alignment: Alignment required - must be power of 2
* Returns pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
uint64_t min_addr, uint64_t max_addr);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @size: Size in bytes of block to allocate
* @alignment: Alignment required - must be power of 2
* @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* Returns a pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment,
char *name);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @size: Size in bytes of block to allocate
* @address: Physical address to allocate memory at. If this
* memory is not available, the allocation fails.
* @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN
* bytes
*
* Returns a pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address,
char *name);
/**
* Allocate a block of memory from a specific range of the free list
* that was passed to the application by the bootloader, and assign it
* a name in the global named block table. (part of the
* cvmx_bootmem_descriptor_t structure) Named blocks can later be
* freed. If request cannot be satisfied within the address range
* specified, NULL is returned
*
* @size: Size in bytes of block to allocate
* @min_addr: minimum address of range
* @max_addr: maximum address of range
* @align: Alignment of memory to be allocated. (must be a power of 2)
* @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* Returns a pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
uint64_t max_addr, uint64_t align,
char *name);
/**
 * Frees a previously allocated named bootmem block.
 *
 * @name: name of block to free
 *
 * Returns 0 on failure,
 * !0 on success
 */
extern int cvmx_bootmem_free_named(char *name);
/**
* Finds a named bootmem block by name.
*
* @name: name of block to find
*
* Returns pointer to named block descriptor on success
* 0 on failure
*/
struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
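/*
 * Illustrative sketch, not part of the original header: named blocks
 * let separate applications share memory by agreeing on a name.  The
 * block name here is made up.
 */
static inline uint64_t cvmx_example_named_block_base(void)
{
	struct cvmx_bootmem_named_block_desc *desc =
		cvmx_bootmem_find_named_block("example_block");

	/* physical base address of the block, or 0 if it does not exist */
	return desc ? desc->base_addr : 0;
}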
/**
* Allocates a block of physical memory from the free list, at
* (optional) requested address and alignment.
*
* @req_size: size of region to allocate. All requests are rounded up
* to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes
*
* @address_min: Minimum address that block can occupy.
*
* @address_max: Specifies the maximum address (inclusive) that
* the allocation can use.
*
* @alignment: Requested alignment of the block. If this alignment
* cannot be met, the allocation fails. This must be a
* power of 2. (Note: Alignment of
* CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
* internally enforced. Requested alignments of less than
* CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
* CVMX_BOOTMEM_ALIGNMENT_SIZE.)
*
* @flags: Flags to control options for the allocation.
*
* Returns physical address of block allocated, or -1 on failure
*/
int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
uint64_t address_max, uint64_t alignment,
uint32_t flags);
/**
* Allocates a named block of physical memory from the free list, at
* (optional) requested address and alignment.
*
* @param size size of region to allocate. All requests are rounded
* up to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE
* bytes
* @param min_addr Minimum address that block can occupy.
* @param max_addr Specifies the maximum address (inclusive) that
* the allocation can use.
* @param alignment Requested alignment of the block. If this
* alignment cannot be met, the allocation fails.
* This must be a power of 2. (Note: Alignment of
* CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
* internally enforced. Requested alignments of less
* than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
* CVMX_BOOTMEM_ALIGNMENT_SIZE.)
* @param name name to assign to named block
* @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
uint64_t max_addr,
uint64_t alignment,
char *name, uint32_t flags);
/**
* Finds a named memory block by name.
* Also used for finding an unused entry in the named block table.
*
* @name: Name of memory block to find. If NULL pointer given, then
* finds unused descriptor, if available.
*
* @flags: Flags to control options for the allocation.
*
* Returns Pointer to memory block descriptor, NULL if not found.
* If NULL returned when name parameter is NULL, then no memory
* block descriptors are available.
*/
struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
/**
* Frees a named block.
*
* @name: name of block to free
* @flags: flags for passing options
*
* Returns 0 on failure
* 1 on success
*/
int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
/**
* Frees a block to the bootmem allocator list. This must
* be used with care, as the size provided must match the size
* of the block that was allocated, or the list will become
* corrupted.
*
* IMPORTANT: This is only intended to be used as part of named block
* frees and initial population of the free memory list.
*
* @phy_addr: physical address of block
* @size: size of block in bytes.
* @flags: flags for passing options
*
* Returns 1 on success,
* 0 on failure
*/
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
/**
* Locks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
*/
void cvmx_bootmem_lock(void);
/**
* Unlocks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
*/
void cvmx_bootmem_unlock(void);
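/*
 * Illustrative sketch, not part of the original header: holding the
 * allocator lock across two allocations so no other core can allocate
 * in between.  The sizes, address range, and alignment are made up.
 */
static inline void cvmx_example_paired_alloc(int64_t *first, int64_t *second)
{
	cvmx_bootmem_lock();
	*first = cvmx_bootmem_phy_alloc(4096, 0, 1ull << 30, 128,
					CVMX_BOOTMEM_FLAG_NO_LOCKING);
	*second = cvmx_bootmem_phy_alloc(4096, 0, 1ull << 30, 128,
					 CVMX_BOOTMEM_FLAG_NO_LOCKING);
	cvmx_bootmem_unlock();
}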
extern struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void);
#endif /* __CVMX_BOOTMEM_H__ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,617 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Support functions for managing command queues used for
* various hardware blocks.
*
* The common command queue infrastructure abstracts out the
* software necessary for adding to Octeon's chained queue
* structures. These structures are used for commands to the
* PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
* hardware unit takes commands and CSRs of different types,
* they all use basic linked command buffers to store the
* pending request. In general, users of the CVMX API don't
* call cvmx-cmd-queue functions directly. Instead the hardware
* unit specific wrapper should be used. The wrappers perform
* unit specific validation and CSR writes to submit the
* commands.
*
* Even though most software will never directly interact with
* cvmx-cmd-queue, knowledge of its internal working can help
* in diagnosing performance problems and help with debugging.
*
* Command queue pointers are stored in a global named block
* called "cvmx_cmd_queues". Except for the PKO queues, each
* hardware queue is stored in its own cache line to reduce SMP
* contention on spin locks. The PKO queues are stored such that
* every 16th queue is next to each other in memory. This scheme
 * allows for queues being in separate cache lines when there
 * is a low number of queues per port. With 16 queues per port,
* the first queue for each port is in the same cache area. The
* second queues for each port are in another area, etc. This
* allows software to implement very efficient lockless PKO with
* 16 queues per port using a minimum of cache lines per core.
* All queues for a given core will be isolated in the same
* cache area.
*
* In addition to the memory pointer layout, cvmx-cmd-queue
* provides an optimized fair ll/sc locking mechanism for the
* queues. The lock uses a "ticket / now serving" model to
* maintain fair order on contended locks. In addition, it uses
* predicted locking time to limit cache contention. When a core
 * knows it must wait in line for a lock, it spins on the
* internal cycle counter to completely eliminate any causes of
* bus traffic.
*
*/
#ifndef __CVMX_CMD_QUEUE_H__
#define __CVMX_CMD_QUEUE_H__
#include <linux/prefetch.h>
#include <asm/octeon/cvmx-fpa.h>
/**
* By default we disable the max depth support. Most programs
* don't use it and it slows down the command queue processing
* significantly.
*/
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif
/**
* Enumeration representing all hardware blocks that use command
* queues. Each hardware block has up to 65536 sub identifiers for
* multiple command queues. Not all chips support all hardware
* units.
*/
typedef enum {
CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
#define CVMX_CMD_QUEUE_PKO(queue) \
((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
CVMX_CMD_QUEUE_ZIP = 0x10000,
CVMX_CMD_QUEUE_DFA = 0x20000,
CVMX_CMD_QUEUE_RAID = 0x30000,
CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
#define CVMX_CMD_QUEUE_DMA(queue) \
((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
CVMX_CMD_QUEUE_END = 0x50000,
} cvmx_cmd_queue_id_t;
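/*
 * Illustrative sketch, not part of the original header: how the helper
 * macros above compose a full queue id from the block base and a 16 bit
 * sub identifier. The queue numbers chosen here are arbitrary.
 */
#if 0	/* example only */
static inline void example_queue_ids(void)
{
	cvmx_cmd_queue_id_t pko_q = CVMX_CMD_QUEUE_PKO(5);	/* 0x00005 */
	cvmx_cmd_queue_id_t dma_q = CVMX_CMD_QUEUE_DMA(1);	/* 0x40001 */

	(void)pko_q;
	(void)dma_q;
}
#endif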
/**
* Command write operations can fail if the command queue needs
* a new buffer and the associated FPA pool is empty. It can also
* fail if the number of queued command words reaches the maximum
* set at initialization.
*/
typedef enum {
CVMX_CMD_QUEUE_SUCCESS = 0,
CVMX_CMD_QUEUE_NO_MEMORY = -1,
CVMX_CMD_QUEUE_FULL = -2,
CVMX_CMD_QUEUE_INVALID_PARAM = -3,
CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;
typedef struct {
	/* You have the lock when this equals your ticket */
uint8_t now_serving;
uint64_t unused1:24;
/* Maximum outstanding command words */
uint32_t max_depth;
/* FPA pool buffers come from */
uint64_t fpa_pool:3;
/* Top of command buffer pointer shifted 7 */
uint64_t base_ptr_div128:29;
uint64_t unused2:6;
/* FPA buffer size in 64bit words minus 1 */
uint64_t pool_size_m1:13;
/* Number of commands already used in buffer */
uint64_t index:13;
} __cvmx_cmd_queue_state_t;
/**
* This structure contains the global state of all command queues.
* It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than the queue information to reduce contention on the
 * ll/sc used to get a ticket. If this were not the case, the update
 * of queue state would cause the ll/sc to fail quite often.
*/
typedef struct {
uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
} __cvmx_cmd_queue_all_state_t;
/**
* Initialize a command queue for use. The initial FPA buffer is
* allocated and the hardware unit is configured to point to the
* new command queue.
*
* @queue_id: Hardware command queue to initialize.
* @max_depth: Maximum outstanding commands that can be queued.
* @fpa_pool: FPA pool the command queues should come from.
* @pool_size: Size of each buffer in the FPA pool (bytes)
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
int max_depth, int fpa_pool,
int pool_size);
/**
 * Shutdown a queue and free its command buffers to the FPA. The
* hardware connected to the queue must be stopped before this
* function is called.
*
* @queue_id: Queue to shutdown
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
/**
* Return the number of command words pending in the queue. This
* function may be relatively slow for some hardware units.
*
* @queue_id: Hardware command queue to query
*
* Returns Number of outstanding commands
*/
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
/**
* Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routines access to the low level buffer
* for initial hardware setup. User applications should not call this
* function directly.
*
* @queue_id: Command queue to query
*
* Returns Command buffer or NULL on failure
*/
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
/**
* Get the index into the state arrays for the supplied queue id.
*
* @queue_id: Queue ID to get an index for
*
* Returns Index into the state arrays
*/
static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
{
/*
* Warning: This code currently only works with devices that
* have 256 queues or less. Devices with more than 16 queues
* are laid out in memory to allow cores quick access to
* every 16th queue. This reduces cache thrashing when you are
* running 16 queues per port to support lockless operation.
*/
int unit = queue_id >> 16;
int q = (queue_id >> 4) & 0xf;
int core = queue_id & 0xf;
return unit * 256 + core * 16 + q;
}
/**
* Lock the supplied queue so nobody else is updating it at the same
* time as us.
*
* @queue_id: Queue ID to lock
* @qptr: Pointer to the queue's global state
*/
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
__cvmx_cmd_queue_state_t *qptr)
{
extern __cvmx_cmd_queue_all_state_t
*__cvmx_cmd_queue_state_ptr;
int tmp;
int my_ticket;
prefetch(qptr);
asm volatile (
".set push\n"
".set noreorder\n"
"1:\n"
/* Atomic add one to ticket_ptr */
"ll %[my_ticket], %[ticket_ptr]\n"
/* and store the original value */
"li %[ticket], 1\n"
/* in my_ticket */
"baddu %[ticket], %[my_ticket]\n"
"sc %[ticket], %[ticket_ptr]\n"
"beqz %[ticket], 1b\n"
" nop\n"
/* Load the current now_serving ticket */
"lbu %[ticket], %[now_serving]\n"
"2:\n"
/* Jump out if now_serving == my_ticket */
"beq %[ticket], %[my_ticket], 4f\n"
/* Find out how many tickets are in front of me */
" subu %[ticket], %[my_ticket], %[ticket]\n"
/* Use tickets in front of me minus one to delay */
"subu %[ticket], 1\n"
/* Delay will be ((tickets in front)-1)*32 loops */
"cins %[ticket], %[ticket], 5, 7\n"
"3:\n"
/* Loop here until our ticket might be up */
"bnez %[ticket], 3b\n"
" subu %[ticket], 1\n"
/* Jump back up to check out ticket again */
"b 2b\n"
/* Load the current now_serving ticket */
" lbu %[ticket], %[now_serving]\n"
"4:\n"
".set pop\n" :
[ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
[now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
[my_ticket] "=r"(my_ticket)
);
}
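/*
 * Illustrative C level sketch, not part of the original header, of what
 * the assembly above implements: atomically take a ticket, then spin
 * until now_serving reaches it, delaying roughly 32 loops for every
 * ticket still ahead of us. For understanding only; the real lock must
 * use the ll/sc sequence above. __sync_fetch_and_add stands in for the
 * ll/sc based byte add.
 */
#if 0	/* example only */
static inline void example_ticket_lock(uint64_t *ticket_counter,
				       __cvmx_cmd_queue_state_t *qptr)
{
	uint8_t my_ticket = __sync_fetch_and_add(ticket_counter, 1);

	while (qptr->now_serving != my_ticket) {
		int delay = ((uint8_t)(my_ticket - qptr->now_serving) - 1) * 32;

		while (delay-- > 0)
			;	/* predicted wait before rechecking */
	}
}
#endif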
/**
* Unlock the queue, flushing all writes.
*
* @qptr: Queue to unlock
*/
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
qptr->now_serving++;
CVMX_SYNCWS;
}
/**
* Get the queue state structure for the given queue id
*
* @queue_id: Queue id to get
*
* Returns Queue structure or NULL on failure
*/
static inline __cvmx_cmd_queue_state_t
*__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
{
extern __cvmx_cmd_queue_all_state_t
*__cvmx_cmd_queue_state_ptr;
return &__cvmx_cmd_queue_state_ptr->
state[__cvmx_cmd_queue_get_index(queue_id)];
}
/**
* Write an arbitrary number of command words to a command queue.
* This is a generic function; the fixed number of command word
* functions yield higher performance.
*
* @queue_id: Hardware command queue to write to
* @use_locking:
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @cmd_count: Number of command words to write
* @cmds: Array of commands to write
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
queue_id,
int use_locking,
int cmd_count,
uint64_t *cmds)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
/* Make sure nobody else is updating the same queue */
if (likely(use_locking))
__cvmx_cmd_queue_lock(queue_id, qptr);
/*
* If a max queue length was specified then make sure we don't
* exceed it. If any part of the command would be below the
* limit we allow it.
*/
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
if (unlikely
(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_FULL;
}
}
/*
* Normally there is plenty of room in the current buffer for
* the command.
*/
if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
uint64_t *ptr =
(uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
base_ptr_div128 << 7);
ptr += qptr->index;
qptr->index += cmd_count;
while (cmd_count--)
*ptr++ = *cmds++;
} else {
uint64_t *ptr;
int count;
/*
* We need a new command buffer. Fail if there isn't
* one available.
*/
uint64_t *new_buffer =
(uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
if (unlikely(new_buffer == NULL)) {
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
ptr =
(uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
base_ptr_div128 << 7);
/*
* Figure out how many command words will fit in this
* buffer. One location will be needed for the next
* buffer pointer.
*/
count = qptr->pool_size_m1 - qptr->index;
ptr += qptr->index;
cmd_count -= count;
while (count--)
*ptr++ = *cmds++;
*ptr = cvmx_ptr_to_phys(new_buffer);
/*
* The current buffer is full and has a link to the
* next buffer. Time to write the rest of the commands
* into the new buffer.
*/
qptr->base_ptr_div128 = *ptr >> 7;
qptr->index = cmd_count;
ptr = new_buffer;
while (cmd_count--)
*ptr++ = *cmds++;
}
/* All updates are complete. Release the lock and return */
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_SUCCESS;
}
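/*
 * Illustrative sketch, not part of the original header: submitting a
 * short command with the generic writer above. Real code normally goes
 * through the hardware specific wrapper (PKO, DMA, ...), which builds
 * the actual command words; the values here are placeholders.
 */
#if 0	/* example only */
static inline int example_submit_command(void)
{
	uint64_t cmd[2] = { 0x1ull, 0x2ull };	/* placeholder command words */

	if (cvmx_cmd_queue_write(CVMX_CMD_QUEUE_PKO(0), 1, 2, cmd) !=
	    CVMX_CMD_QUEUE_SUCCESS)
		return -1;	/* queue full or no FPA buffer available */
	return 0;
}
#endif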
/**
* Simple function to write two command words to a command
* queue.
*
* @queue_id: Hardware command queue to write to
* @use_locking:
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
queue_id,
int use_locking,
uint64_t cmd1,
uint64_t cmd2)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
/* Make sure nobody else is updating the same queue */
if (likely(use_locking))
__cvmx_cmd_queue_lock(queue_id, qptr);
/*
* If a max queue length was specified then make sure we don't
* exceed it. If any part of the command would be below the
* limit we allow it.
*/
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
if (unlikely
(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_FULL;
}
}
/*
* Normally there is plenty of room in the current buffer for
* the command.
*/
if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
uint64_t *ptr =
(uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
base_ptr_div128 << 7);
ptr += qptr->index;
qptr->index += 2;
ptr[0] = cmd1;
ptr[1] = cmd2;
} else {
uint64_t *ptr;
/*
* Figure out how many command words will fit in this
* buffer. One location will be needed for the next
* buffer pointer.
*/
int count = qptr->pool_size_m1 - qptr->index;
/*
* We need a new command buffer. Fail if there isn't
* one available.
*/
uint64_t *new_buffer =
(uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
if (unlikely(new_buffer == NULL)) {
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
count--;
ptr =
(uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
base_ptr_div128 << 7);
ptr += qptr->index;
*ptr++ = cmd1;
if (likely(count))
*ptr++ = cmd2;
*ptr = cvmx_ptr_to_phys(new_buffer);
/*
* The current buffer is full and has a link to the
* next buffer. Time to write the rest of the commands
* into the new buffer.
*/
qptr->base_ptr_div128 = *ptr >> 7;
qptr->index = 0;
if (unlikely(count == 0)) {
qptr->index = 1;
new_buffer[0] = cmd2;
}
}
/* All updates are complete. Release the lock and return */
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Simple function to write three command words to a command
* queue.
*
* @queue_id: Hardware command queue to write to
* @use_locking:
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
* @cmd3: Command
*
* Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
queue_id,
int use_locking,
uint64_t cmd1,
uint64_t cmd2,
uint64_t cmd3)
{
__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
/* Make sure nobody else is updating the same queue */
if (likely(use_locking))
__cvmx_cmd_queue_lock(queue_id, qptr);
/*
* If a max queue length was specified then make sure we don't
* exceed it. If any part of the command would be below the
* limit we allow it.
*/
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
if (unlikely
(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_FULL;
}
}
/*
* Normally there is plenty of room in the current buffer for
* the command.
*/
if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
uint64_t *ptr =
(uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
base_ptr_div128 << 7);
ptr += qptr->index;
qptr->index += 3;
ptr[0] = cmd1;
ptr[1] = cmd2;
ptr[2] = cmd3;
} else {
uint64_t *ptr;
/*
* Figure out how many command words will fit in this
* buffer. One location will be needed for the next
* buffer pointer
*/
int count = qptr->pool_size_m1 - qptr->index;
/*
* We need a new command buffer. Fail if there isn't
* one available
*/
uint64_t *new_buffer =
(uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
if (unlikely(new_buffer == NULL)) {
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
count--;
ptr =
(uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
base_ptr_div128 << 7);
ptr += qptr->index;
*ptr++ = cmd1;
if (count) {
*ptr++ = cmd2;
if (count > 1)
*ptr++ = cmd3;
}
*ptr = cvmx_ptr_to_phys(new_buffer);
/*
* The current buffer is full and has a link to the
* next buffer. Time to write the rest of the commands
* into the new buffer.
*/
qptr->base_ptr_div128 = *ptr >> 7;
qptr->index = 0;
ptr = new_buffer;
if (count == 0) {
*ptr++ = cmd2;
qptr->index++;
}
if (count < 2) {
*ptr++ = cmd3;
qptr->index++;
}
}
/* All updates are complete. Release the lock and return */
if (likely(use_locking))
__cvmx_cmd_queue_unlock(qptr);
return CVMX_CMD_QUEUE_SUCCESS;
}
#endif /* __CVMX_CMD_QUEUE_H__ */

View file

@ -0,0 +1,168 @@
#ifndef __CVMX_CONFIG_H__
#define __CVMX_CONFIG_H__
/************************* Config Specific Defines ************************/
#define CVMX_LLM_NUM_PORTS 1
#define CVMX_NULL_POINTER_PROTECT 1
#define CVMX_ENABLE_DEBUG_PRINTS 1
/* PKO queues per port for interface 0 (ports 0-15) */
#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
/* PKO queues per port for interface 1 (ports 16-31) */
#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
/* Limit on the number of PKO ports enabled for interface 0 */
#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
/* Limit on the number of PKO ports enabled for interface 1 */
#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
/* PKO queues per port for PCI (ports 32-35) */
#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
/* PKO queues per port for Loop devices (ports 36-39) */
#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
/************************* FPA allocation *********************************/
/* Pool sizes in bytes, must be multiple of a cache line */
#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE)
#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
/* Pools in use */
/* Packet buffers */
#define CVMX_FPA_PACKET_POOL (0)
#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
/* Work queue entries */
#define CVMX_FPA_WQE_POOL (1)
#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
/* PKO queue command buffers */
#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
/************************* FAU allocation ********************************/
/* The fetch and add registers are allocated here. They are arranged
* in order of descending size so that all alignment constraints are
* automatically met. The enums are linked so that the following enum
* continues allocating where the previous one left off, so the
* numbering within each enum always starts with zero. The macros
* take care of the address increment size, so the values entered
* always increase by 1. FAU registers are accessed with byte
* addresses.
*/
#define CVMX_FAU_REG_64_ADDR(x) ((x << 3) + CVMX_FAU_REG_64_START)
typedef enum {
CVMX_FAU_REG_64_START = 0,
CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(0),
} cvmx_fau_reg_64_t;
#define CVMX_FAU_REG_32_ADDR(x) ((x << 2) + CVMX_FAU_REG_32_START)
typedef enum {
CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
} cvmx_fau_reg_32_t;
#define CVMX_FAU_REG_16_ADDR(x) ((x << 1) + CVMX_FAU_REG_16_START)
typedef enum {
CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
} cvmx_fau_reg_16_t;
#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
typedef enum {
CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
} cvmx_fau_reg_8_t;
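/*
 * Illustrative sketch, not part of the original file: registers are
 * added by inserting names between a chain's START and END markers.
 * A hypothetical 32 bit counter, for example, would extend the 32 bit
 * chain above like this, and CVMX_FAU_REG_32_END (and every chain
 * after it) would move up automatically:
 *
 *	typedef enum {
 *		CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
 *		CVMX_FAU_REG_32_EXAMPLE_COUNTER = CVMX_FAU_REG_32_ADDR(0),
 *		CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(1),
 *	} cvmx_fau_reg_32_t;
 */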
/*
* The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first
* available FAU address that is not allocated in cvmx-config.h. This
* is 64 bit aligned.
*/
#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
#define CVMX_FAU_REG_END (2048)
/********************** scratch memory allocation *************************/
/* Scratchpad memory allocation. Note that these are byte memory
* addresses. Some uses of scratchpad (IOBDMA for example) require
* the use of 8-byte aligned addresses, so proper alignment needs to
* be taken into account.
*/
/* Generic scratch iobdma area */
#define CVMX_SCR_SCRATCH (0)
/* First location available after cvmx-config.h allocated region. */
#define CVMX_SCR_REG_AVAIL_BASE (8)
/*
* CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
* before the beginning of the packet. If necessary, override the
* default here. See the IPD section of the hardware manual for MBUFF
* SKIP details.
*/
#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
/*
* CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve
* in each chained packet element. If necessary, override the default
* here.
*/
#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
/*
* CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is
* enabled for all input ports. This controls if IPD sends
* backpressure to all ports if Octeon's FPA pools don't have enough
* packet or work queue entries. Even when this is off, it is still
* possible to get backpressure from individual hardware ports. When
* configuring backpressure, also check
* CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override
* the default here.
*/
#define CVMX_HELPER_ENABLE_BACK_PRESSURE 1
/*
* CVMX_HELPER_ENABLE_IPD controls if the IPD is enabled in the helper
* function. Once it is enabled the hardware starts accepting
* packets. You might want to skip the IPD enable if configuration
 * changes are needed from the default helper setup. If necessary,
* override the default here.
*/
#define CVMX_HELPER_ENABLE_IPD 0
/*
* CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns
* to incoming packets.
*/
#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
#define CVMX_ENABLE_PARAMETER_CHECKING 0
/*
* The following select which fields are used by the PIP to generate
* the tag on INPUT
* 0: don't include
* 1: include
*/
#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
/* Select skip mode for input ports */
#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
/*
* Force backpressure to be disabled. This overrides all other
* backpressure configuration.
*/
#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0
#endif /* __CVMX_CONFIG_H__ */

View file

@ -0,0 +1,105 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_DBG_DEFS_H__
#define __CVMX_DBG_DEFS_H__
#define CVMX_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F00000001E8ull))
union cvmx_dbg_data {
uint64_t u64;
struct cvmx_dbg_data_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_23_63:41;
uint64_t c_mul:5;
uint64_t dsel_ext:1;
uint64_t data:17;
#else
uint64_t data:17;
uint64_t dsel_ext:1;
uint64_t c_mul:5;
uint64_t reserved_23_63:41;
#endif
} s;
struct cvmx_dbg_data_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63:33;
uint64_t pll_mul:3;
uint64_t reserved_23_27:5;
uint64_t c_mul:5;
uint64_t dsel_ext:1;
uint64_t data:17;
#else
uint64_t data:17;
uint64_t dsel_ext:1;
uint64_t c_mul:5;
uint64_t reserved_23_27:5;
uint64_t pll_mul:3;
uint64_t reserved_31_63:33;
#endif
} cn30xx;
struct cvmx_dbg_data_cn30xx cn31xx;
struct cvmx_dbg_data_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t d_mul:4;
uint64_t dclk_mul2:1;
uint64_t cclk_div2:1;
uint64_t c_mul:5;
uint64_t dsel_ext:1;
uint64_t data:17;
#else
uint64_t data:17;
uint64_t dsel_ext:1;
uint64_t c_mul:5;
uint64_t cclk_div2:1;
uint64_t dclk_mul2:1;
uint64_t d_mul:4;
uint64_t reserved_29_63:35;
#endif
} cn38xx;
struct cvmx_dbg_data_cn38xx cn38xxp2;
struct cvmx_dbg_data_cn30xx cn50xx;
struct cvmx_dbg_data_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t rem:6;
uint64_t c_mul:5;
uint64_t dsel_ext:1;
uint64_t data:17;
#else
uint64_t data:17;
uint64_t dsel_ext:1;
uint64_t c_mul:5;
uint64_t rem:6;
uint64_t reserved_29_63:35;
#endif
} cn58xx;
struct cvmx_dbg_data_cn58xx cn58xxp1;
};
#endif

File diff suppressed because it is too large

View file

@ -0,0 +1,597 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Interface to the hardware Fetch and Add Unit.
*/
#ifndef __CVMX_FAU_H__
#define __CVMX_FAU_H__
/*
* Octeon Fetch and Add Unit (FAU)
*/
#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
#define CVMX_FAU_BITS_SCRADDR 63, 56
#define CVMX_FAU_BITS_LEN 55, 48
#define CVMX_FAU_BITS_INEVAL 35, 14
#define CVMX_FAU_BITS_TAGWAIT 13, 13
#define CVMX_FAU_BITS_NOADD 13, 13
#define CVMX_FAU_BITS_SIZE 12, 11
#define CVMX_FAU_BITS_REGISTER 10, 0
typedef enum {
CVMX_FAU_OP_SIZE_8 = 0,
CVMX_FAU_OP_SIZE_16 = 1,
CVMX_FAU_OP_SIZE_32 = 2,
CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;
/**
* Tagwait return definition. If a timeout occurs, the error
* bit will be set. Otherwise the value of the register before
* the update will be returned.
*/
typedef struct {
uint64_t error:1;
int64_t value:63;
} cvmx_fau_tagwait64_t;
/**
* Tagwait return definition. If a timeout occurs, the error
* bit will be set. Otherwise the value of the register before
* the update will be returned.
*/
typedef struct {
uint64_t error:1;
int32_t value:31;
} cvmx_fau_tagwait32_t;
/**
* Tagwait return definition. If a timeout occurs, the error
* bit will be set. Otherwise the value of the register before
* the update will be returned.
*/
typedef struct {
uint64_t error:1;
int16_t value:15;
} cvmx_fau_tagwait16_t;
/**
* Tagwait return definition. If a timeout occurs, the error
* bit will be set. Otherwise the value of the register before
* the update will be returned.
*/
typedef struct {
uint64_t error:1;
int8_t value:7;
} cvmx_fau_tagwait8_t;
/**
* Asynchronous tagwait return definition. If a timeout occurs,
* the error bit will be set. Otherwise the value of the
* register before the update will be returned.
*/
typedef union {
uint64_t u64;
struct {
uint64_t invalid:1;
uint64_t data:63; /* unpredictable if invalid is set */
} s;
} cvmx_fau_async_tagwait_result_t;
/**
* Builds a store I/O address for writing to the FAU
*
* @noadd: 0 = Store value is atomically added to the current value
* 1 = Store value is atomically written over the current value
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* - Step by 4 for 32 bit access.
* - Step by 8 for 64 bit access.
* Returns Address to store for atomic update
*/
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
{
return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}
/**
 * Builds an I/O address for accessing the FAU
*
* @tagwait: Should the atomic add wait for the current tag switch
* operation to complete.
* - 0 = Don't wait
* - 1 = Wait for tag switch to complete
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* - Step by 4 for 32 bit access.
* - Step by 8 for 64 bit access.
* @value: Signed value to add.
* Note: When performing 32 and 64 bit access, only the low
* 22 bits are available.
* Returns Address to read from for atomic update
*/
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
int64_t value)
{
return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}
/**
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
int64_t value)
{
return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
}
/**
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
int32_t value)
{
return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
}
/**
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Value of the register before the update
*/
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
int16_t value)
{
return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
}
/**
* Perform an atomic 8 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns Value of the register before the update
*/
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
}
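/*
 * Illustrative sketch, not part of the original header: a typical use
 * of the accessors above, keeping a 64 bit packet counter in an FAU
 * register. The register name is a hypothetical placeholder; real code
 * allocates it in cvmx-config.h, with CVMX_FAU_REG_AVAIL_BASE marking
 * the first address left free by that file.
 */
#if 0	/* example only */
#define EXAMPLE_FAU_PACKET_COUNT ((cvmx_fau_reg_64_t)CVMX_FAU_REG_AVAIL_BASE)

static inline int64_t example_count_packet(void)
{
	/* Atomically add 1 and return the value before the update */
	return cvmx_fau_fetch_and_add64(EXAMPLE_FAU_PACKET_COUNT, 1);
}
#endif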
/**
* Perform an atomic 64 bit add after the current tag switch
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
union {
uint64_t i64;
cvmx_fau_tagwait64_t t;
} result;
result.i64 =
cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
return result.t;
}
/**
* Perform an atomic 32 bit add after the current tag switch
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
union {
uint64_t i32;
cvmx_fau_tagwait32_t t;
} result;
result.i32 =
cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
return result.t;
}
/**
* Perform an atomic 16 bit add after the current tag switch
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
union {
uint64_t i16;
cvmx_fau_tagwait16_t t;
} result;
result.i16 =
cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
return result.t;
}
/**
* Perform an atomic 8 bit add after the current tag switch
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
* the value of the register before the update will be
* returned
*/
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
union {
uint64_t i8;
cvmx_fau_tagwait8_t t;
} result;
result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
return result.t;
}
/**
* Builds I/O data for async operations
*
* @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
* @value: Signed value to add.
* Note: When performing 32 and 64 bit access, only the low
* 22 bits are available.
* @tagwait: Should the atomic add wait for the current tag switch
* operation to complete.
* - 0 = Don't wait
* - 1 = Wait for tag switch to complete
* @size: The size of the operation:
* - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
* - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
* - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
* - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* - Step by 4 for 32 bit access.
* - Step by 8 for 64 bit access.
* Returns Data to write using cvmx_send_single
*/
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
uint64_t tagwait,
cvmx_fau_op_size_t size,
uint64_t reg)
{
return CVMX_FAU_LOAD_IO_ADDRESS |
cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}
/**
* Perform an async atomic 64 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
cvmx_fau_reg_64_t reg,
int64_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
}
/**
* Perform an async atomic 32 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
cvmx_fau_reg_32_t reg,
int32_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
}
/**
* Perform an async atomic 16 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
cvmx_fau_reg_16_t reg,
int16_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
}
/**
* Perform an async atomic 8 bit add. The old value is
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
* Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
cvmx_fau_reg_8_t reg,
int8_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
}
/**
* Perform an async atomic 64 bit add after the current tag
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
* 8 byte aligned. If a timeout occurs, the error bit (63)
* will be set. Otherwise the value of the register before
* the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
cvmx_fau_reg_64_t reg,
int64_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
}
/**
* Perform an async atomic 32 bit add after the current tag
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
* 8 byte aligned. If a timeout occurs, the error bit (63)
* will be set. Otherwise the value of the register before
* the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @value: Signed value to add.
* Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
cvmx_fau_reg_32_t reg,
int32_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
}
/**
* Perform an async atomic 16 bit add after the current tag
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
* 8 byte aligned. If a timeout occurs, the error bit (63)
* will be set. Otherwise the value of the register before
* the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @value: Signed value to add.
*
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
cvmx_fau_reg_16_t reg,
int16_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
}
/**
* Perform an async atomic 8 bit add after the current tag
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
* 8 byte aligned. If a timeout occurs, the error bit (63)
* will be set. Otherwise the value of the register before
* the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
*
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
cvmx_fau_reg_8_t reg,
int8_t value)
{
cvmx_send_single(__cvmx_fau_iobdma_data
(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
}
/**
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
}
/**
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
}
/**
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
}
/**
* Perform an atomic 8 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
}
/**
* Perform an atomic 64 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 8 for 64 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
{
cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
}
/**
* Perform an atomic 32 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 4 for 32 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
{
cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
}
/**
* Perform an atomic 16 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* - Step by 2 for 16 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
{
cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
}
/**
* Perform an atomic 8 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
{
cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
}
#endif /* __CVMX_FAU_H__ */

File diff suppressed because it is too large

View file

@ -0,0 +1,299 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* @file
*
* Interface to the hardware Free Pool Allocator.
*
*
*/
#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__
#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>
#define CVMX_FPA_NUM_POOLS 8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
#define CVMX_FPA_ALIGNMENT 128
/**
* Structure describing the data format used for stores to the FPA.
*/
typedef union {
uint64_t u64;
struct {
/*
* the (64-bit word) location in scratchpad to write
* to (if len != 0)
*/
uint64_t scraddr:8;
/* the number of words in the response (0 => no response) */
uint64_t len:8;
/* the ID of the device on the non-coherent bus */
uint64_t did:8;
/*
* the address that will appear in the first tick on
* the NCB bus.
*/
uint64_t addr:40;
} s;
} cvmx_fpa_iobdma_data_t;
/**
* Structure describing the current state of a FPA pool.
*/
typedef struct {
/* Name it was created under */
const char *name;
/* Size of each block */
uint64_t size;
/* The base memory address of whole block */
void *base;
/* The number of elements in the pool at creation */
uint64_t starting_element_count;
} cvmx_fpa_pool_info_t;
/**
* Current state of all the pools. Use access functions
* instead of using it directly.
*/
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
/* CSR typedefs have been moved to cvmx-csr-*.h */
/**
* Return the name of the pool
*
* @pool: Pool to get the name of
* Returns The name
*/
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
return cvmx_fpa_pool_info[pool].name;
}
/**
* Return the base of the pool
*
* @pool: Pool to get the base of
* Returns The base
*/
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
return cvmx_fpa_pool_info[pool].base;
}
/**
* Check if a pointer belongs to an FPA pool. Return non-zero
* if the supplied pointer is inside the memory controlled by
* an FPA pool.
*
* @pool: Pool to check
* @ptr: Pointer to check
* Returns Non-zero if pointer is in the pool. Zero if not
*/
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
((char *)ptr <
((char *)(cvmx_fpa_pool_info[pool].base)) +
cvmx_fpa_pool_info[pool].size *
cvmx_fpa_pool_info[pool].starting_element_count));
}
/**
* Enable the FPA for use. Must be performed after any CSR
* configuration but before any other FPA functions.
*/
static inline void cvmx_fpa_enable(void)
{
union cvmx_fpa_ctl_status status;
status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
if (status.s.enb) {
cvmx_dprintf
("Warning: Enabling FPA when FPA already enabled.\n");
}
/*
* Do runtime check as we allow pass1 compiled code to run on
* pass2 chips.
*/
if (cvmx_octeon_is_pass1()) {
union cvmx_fpa_fpfx_marks marks;
int i;
for (i = 1; i < 8; i++) {
marks.u64 =
cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
marks.s.fpf_wr = 0xe0;
cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
marks.u64);
}
/* Enforce a 10 cycle delay between config and enable */
cvmx_wait(10);
}
/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
status.u64 = 0;
status.s.enb = 1;
cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
}
/**
* Get a new block from the FPA
*
* @pool: Pool to get the block from
* Returns Pointer to the block or NULL on failure
*/
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
uint64_t address =
cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
if (address)
return cvmx_phys_to_ptr(address);
else
return NULL;
}
/**
* Asynchronously get a new block from the FPA
*
* @scr_addr: Local scratch address to put response in. This is a byte address,
* but must be 8 byte aligned.
* @pool: Pool to get the block from
*/
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
cvmx_fpa_iobdma_data_t data;
/*
* Hardware only uses 64 bit aligned locations, so convert
* from byte address to 64-bit index
*/
data.s.scraddr = scr_addr >> 3;
data.s.len = 1;
data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
data.s.addr = 0;
cvmx_send_single(data.u64);
}
/**
 * Free a block allocated with an FPA pool. Does NOT provide memory
* ordering in cases where the memory block was modified by the core.
*
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
* Cache lines to invalidate
*/
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
{
cvmx_addr_t newptr;
newptr.u64 = cvmx_ptr_to_phys(ptr);
newptr.sfilldidspace.didspace =
CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
/* Prevent GCC from reordering around free */
barrier();
/* value written is number of cache lines not written back */
cvmx_write_io(newptr.u64, num_cache_lines);
}
/**
 * Free a block allocated with an FPA pool. Provides required memory
* ordering in cases where memory block was modified by core.
*
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
* Cache lines to invalidate
*/
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
{
cvmx_addr_t newptr;
newptr.u64 = cvmx_ptr_to_phys(ptr);
newptr.sfilldidspace.didspace =
CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
/*
* Make sure that any previous writes to memory go out before
* we free this buffer. This also serves as a barrier to
* prevent GCC from reordering operations to after the
* free.
*/
CVMX_SYNCWS;
/* value written is number of cache lines not written back */
cvmx_write_io(newptr.u64, num_cache_lines);
}
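/*
 * Illustrative sketch, not part of the original header: a simple
 * allocate/use/free round trip using the packet pool defined in
 * cvmx-config.h. Passing 0 cache lines means no write-backs are
 * suppressed when the buffer is returned.
 */
#if 0	/* example only */
static inline int example_fpa_round_trip(void)
{
	void *buf = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);

	if (buf == NULL)
		return -1;	/* pool is empty */
	/* ... use the buffer ... */
	cvmx_fpa_free(buf, CVMX_FPA_PACKET_POOL, 0);
	return 0;
}
#endif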
/**
 * Set up an FPA pool to control a new block of memory.
* This can only be called once per pool. Make sure proper
* locking enforces this.
*
* @pool: Pool to initialize
* 0 <= pool < 8
* @name: Constant character string to name this pool.
* String is not copied.
* @buffer: Pointer to the block of memory to use. This must be
* accessible by all processors and external hardware.
* @block_size: Size for each block controlled by the FPA
* @num_blocks: Number of blocks
*
* Returns 0 on Success,
* -1 on failure
*/
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
uint64_t block_size, uint64_t num_blocks);
/**
 * Shut down a memory pool and validate that it had all of
* the buffers originally placed in it. This should only be
* called by one processor after all hardware has finished
* using the pool.
*
* @pool: Pool to shutdown
* Returns Zero on success
* - Positive is count of missing buffers
* - Negative is too many buffers or corrupted pointers
*/
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
/**
* Get the size of blocks controlled by the pool
* This is resolved to a constant at compile time.
*
* @pool: Pool to access
* Returns Size of the block in bytes
*/
uint64_t cvmx_fpa_get_block_size(uint64_t pool);
#endif /* __CVM_FPA_H__ */

File diff suppressed because it is too large

View file

@ -0,0 +1,511 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_GPIO_DEFS_H__
#define __CVMX_GPIO_DEFS_H__
#define CVMX_GPIO_BIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8)
#define CVMX_GPIO_BOOT_ENA (CVMX_ADD_IO_SEG(0x00010700000008A8ull))
#define CVMX_GPIO_CLK_GENX(offset) (CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8)
#define CVMX_GPIO_CLK_QLMX(offset) (CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8)
#define CVMX_GPIO_DBG_ENA (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
#define CVMX_GPIO_INT_CLR (CVMX_ADD_IO_SEG(0x0001070000000898ull))
#define CVMX_GPIO_MULTI_CAST (CVMX_ADD_IO_SEG(0x00010700000008B0ull))
#define CVMX_GPIO_PIN_ENA (CVMX_ADD_IO_SEG(0x00010700000008B8ull))
#define CVMX_GPIO_RX_DAT (CVMX_ADD_IO_SEG(0x0001070000000880ull))
#define CVMX_GPIO_TIM_CTL (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
#define CVMX_GPIO_TX_CLR (CVMX_ADD_IO_SEG(0x0001070000000890ull))
#define CVMX_GPIO_TX_SET (CVMX_ADD_IO_SEG(0x0001070000000888ull))
#define CVMX_GPIO_XBIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16)
union cvmx_gpio_bit_cfgx {
uint64_t u64;
struct cvmx_gpio_bit_cfgx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t synce_sel:2;
uint64_t clk_gen:1;
uint64_t clk_sel:2;
uint64_t fil_sel:4;
uint64_t fil_cnt:4;
uint64_t int_type:1;
uint64_t int_en:1;
uint64_t rx_xor:1;
uint64_t tx_oe:1;
#else
uint64_t tx_oe:1;
uint64_t rx_xor:1;
uint64_t int_en:1;
uint64_t int_type:1;
uint64_t fil_cnt:4;
uint64_t fil_sel:4;
uint64_t clk_sel:2;
uint64_t clk_gen:1;
uint64_t synce_sel:2;
uint64_t reserved_17_63:47;
#endif
} s;
struct cvmx_gpio_bit_cfgx_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t fil_sel:4;
uint64_t fil_cnt:4;
uint64_t int_type:1;
uint64_t int_en:1;
uint64_t rx_xor:1;
uint64_t tx_oe:1;
#else
uint64_t tx_oe:1;
uint64_t rx_xor:1;
uint64_t int_en:1;
uint64_t int_type:1;
uint64_t fil_cnt:4;
uint64_t fil_sel:4;
uint64_t reserved_12_63:52;
#endif
} cn30xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn31xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
struct cvmx_gpio_bit_cfgx_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_15_63:49;
uint64_t clk_gen:1;
uint64_t clk_sel:2;
uint64_t fil_sel:4;
uint64_t fil_cnt:4;
uint64_t int_type:1;
uint64_t int_en:1;
uint64_t rx_xor:1;
uint64_t tx_oe:1;
#else
uint64_t tx_oe:1;
uint64_t rx_xor:1;
uint64_t int_en:1;
uint64_t int_type:1;
uint64_t fil_cnt:4;
uint64_t fil_sel:4;
uint64_t clk_sel:2;
uint64_t clk_gen:1;
uint64_t reserved_15_63:49;
#endif
} cn52xx;
struct cvmx_gpio_bit_cfgx_cn52xx cn52xxp1;
struct cvmx_gpio_bit_cfgx_cn52xx cn56xx;
struct cvmx_gpio_bit_cfgx_cn52xx cn56xxp1;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
struct cvmx_gpio_bit_cfgx_s cn61xx;
struct cvmx_gpio_bit_cfgx_s cn63xx;
struct cvmx_gpio_bit_cfgx_s cn63xxp1;
struct cvmx_gpio_bit_cfgx_s cn66xx;
struct cvmx_gpio_bit_cfgx_s cn68xx;
struct cvmx_gpio_bit_cfgx_s cn68xxp1;
struct cvmx_gpio_bit_cfgx_s cnf71xx;
};
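/*
 * Illustrative sketch, not part of the original header: driving a GPIO
 * pin as an output using the CSR address macro and register layout
 * above. The pin number is arbitrary; cvmx_read_csr()/cvmx_write_csr()
 * are the usual OCTEON CSR accessors.
 */
#if 0	/* example only */
static inline void example_gpio_output_enable(int pin)
{
	union cvmx_gpio_bit_cfgx cfg;

	cfg.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(pin));
	cfg.s.tx_oe = 1;	/* enable the output driver for this pin */
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(pin), cfg.u64);
}
#endif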
union cvmx_gpio_boot_ena {
uint64_t u64;
struct cvmx_gpio_boot_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t boot_ena:4;
uint64_t reserved_0_7:8;
#else
uint64_t reserved_0_7:8;
uint64_t boot_ena:4;
uint64_t reserved_12_63:52;
#endif
} s;
struct cvmx_gpio_boot_ena_s cn30xx;
struct cvmx_gpio_boot_ena_s cn31xx;
struct cvmx_gpio_boot_ena_s cn50xx;
};
union cvmx_gpio_clk_genx {
uint64_t u64;
struct cvmx_gpio_clk_genx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t n:32;
#else
uint64_t n:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_gpio_clk_genx_s cn52xx;
struct cvmx_gpio_clk_genx_s cn52xxp1;
struct cvmx_gpio_clk_genx_s cn56xx;
struct cvmx_gpio_clk_genx_s cn56xxp1;
struct cvmx_gpio_clk_genx_s cn61xx;
struct cvmx_gpio_clk_genx_s cn63xx;
struct cvmx_gpio_clk_genx_s cn63xxp1;
struct cvmx_gpio_clk_genx_s cn66xx;
struct cvmx_gpio_clk_genx_s cn68xx;
struct cvmx_gpio_clk_genx_s cn68xxp1;
struct cvmx_gpio_clk_genx_s cnf71xx;
};
union cvmx_gpio_clk_qlmx {
uint64_t u64;
struct cvmx_gpio_clk_qlmx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
uint64_t qlm_sel:3;
uint64_t reserved_3_7:5;
uint64_t div:1;
uint64_t lane_sel:2;
#else
uint64_t lane_sel:2;
uint64_t div:1;
uint64_t reserved_3_7:5;
uint64_t qlm_sel:3;
uint64_t reserved_11_63:53;
#endif
} s;
struct cvmx_gpio_clk_qlmx_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
uint64_t qlm_sel:2;
uint64_t reserved_3_7:5;
uint64_t div:1;
uint64_t lane_sel:2;
#else
uint64_t lane_sel:2;
uint64_t div:1;
uint64_t reserved_3_7:5;
uint64_t qlm_sel:2;
uint64_t reserved_10_63:54;
#endif
} cn61xx;
struct cvmx_gpio_clk_qlmx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
uint64_t div:1;
uint64_t lane_sel:2;
#else
uint64_t lane_sel:2;
uint64_t div:1;
uint64_t reserved_3_63:61;
#endif
} cn63xx;
struct cvmx_gpio_clk_qlmx_cn63xx cn63xxp1;
struct cvmx_gpio_clk_qlmx_cn61xx cn66xx;
struct cvmx_gpio_clk_qlmx_s cn68xx;
struct cvmx_gpio_clk_qlmx_s cn68xxp1;
struct cvmx_gpio_clk_qlmx_cn61xx cnf71xx;
};
union cvmx_gpio_dbg_ena {
uint64_t u64;
struct cvmx_gpio_dbg_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_21_63:43;
uint64_t dbg_ena:21;
#else
uint64_t dbg_ena:21;
uint64_t reserved_21_63:43;
#endif
} s;
struct cvmx_gpio_dbg_ena_s cn30xx;
struct cvmx_gpio_dbg_ena_s cn31xx;
struct cvmx_gpio_dbg_ena_s cn50xx;
};
union cvmx_gpio_int_clr {
uint64_t u64;
struct cvmx_gpio_int_clr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t type:16;
#else
uint64_t type:16;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_gpio_int_clr_s cn30xx;
struct cvmx_gpio_int_clr_s cn31xx;
struct cvmx_gpio_int_clr_s cn38xx;
struct cvmx_gpio_int_clr_s cn38xxp2;
struct cvmx_gpio_int_clr_s cn50xx;
struct cvmx_gpio_int_clr_s cn52xx;
struct cvmx_gpio_int_clr_s cn52xxp1;
struct cvmx_gpio_int_clr_s cn56xx;
struct cvmx_gpio_int_clr_s cn56xxp1;
struct cvmx_gpio_int_clr_s cn58xx;
struct cvmx_gpio_int_clr_s cn58xxp1;
struct cvmx_gpio_int_clr_s cn61xx;
struct cvmx_gpio_int_clr_s cn63xx;
struct cvmx_gpio_int_clr_s cn63xxp1;
struct cvmx_gpio_int_clr_s cn66xx;
struct cvmx_gpio_int_clr_s cn68xx;
struct cvmx_gpio_int_clr_s cn68xxp1;
struct cvmx_gpio_int_clr_s cnf71xx;
};
union cvmx_gpio_multi_cast {
uint64_t u64;
struct cvmx_gpio_multi_cast_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t en:1;
#else
uint64_t en:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_gpio_multi_cast_s cn61xx;
struct cvmx_gpio_multi_cast_s cnf71xx;
};
union cvmx_gpio_pin_ena {
uint64_t u64;
struct cvmx_gpio_pin_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t ena19:1;
uint64_t ena18:1;
uint64_t reserved_0_17:18;
#else
uint64_t reserved_0_17:18;
uint64_t ena18:1;
uint64_t ena19:1;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_gpio_pin_ena_s cn66xx;
};
union cvmx_gpio_rx_dat {
uint64_t u64;
struct cvmx_gpio_rx_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
uint64_t dat:24;
#else
uint64_t dat:24;
uint64_t reserved_24_63:40;
#endif
} s;
struct cvmx_gpio_rx_dat_s cn30xx;
struct cvmx_gpio_rx_dat_s cn31xx;
struct cvmx_gpio_rx_dat_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t dat:16;
#else
uint64_t dat:16;
uint64_t reserved_16_63:48;
#endif
} cn38xx;
struct cvmx_gpio_rx_dat_cn38xx cn38xxp2;
struct cvmx_gpio_rx_dat_s cn50xx;
struct cvmx_gpio_rx_dat_cn38xx cn52xx;
struct cvmx_gpio_rx_dat_cn38xx cn52xxp1;
struct cvmx_gpio_rx_dat_cn38xx cn56xx;
struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
struct cvmx_gpio_rx_dat_cn38xx cn58xx;
struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
struct cvmx_gpio_rx_dat_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t dat:20;
#else
uint64_t dat:20;
uint64_t reserved_20_63:44;
#endif
} cn61xx;
struct cvmx_gpio_rx_dat_cn38xx cn63xx;
struct cvmx_gpio_rx_dat_cn38xx cn63xxp1;
struct cvmx_gpio_rx_dat_cn61xx cn66xx;
struct cvmx_gpio_rx_dat_cn38xx cn68xx;
struct cvmx_gpio_rx_dat_cn38xx cn68xxp1;
struct cvmx_gpio_rx_dat_cn61xx cnf71xx;
};
union cvmx_gpio_tim_ctl {
uint64_t u64;
struct cvmx_gpio_tim_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t sel:4;
#else
uint64_t sel:4;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_gpio_tim_ctl_s cn68xx;
struct cvmx_gpio_tim_ctl_s cn68xxp1;
};
union cvmx_gpio_tx_clr {
uint64_t u64;
struct cvmx_gpio_tx_clr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
uint64_t clr:24;
#else
uint64_t clr:24;
uint64_t reserved_24_63:40;
#endif
} s;
struct cvmx_gpio_tx_clr_s cn30xx;
struct cvmx_gpio_tx_clr_s cn31xx;
struct cvmx_gpio_tx_clr_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t clr:16;
#else
uint64_t clr:16;
uint64_t reserved_16_63:48;
#endif
} cn38xx;
struct cvmx_gpio_tx_clr_cn38xx cn38xxp2;
struct cvmx_gpio_tx_clr_s cn50xx;
struct cvmx_gpio_tx_clr_cn38xx cn52xx;
struct cvmx_gpio_tx_clr_cn38xx cn52xxp1;
struct cvmx_gpio_tx_clr_cn38xx cn56xx;
struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
struct cvmx_gpio_tx_clr_cn38xx cn58xx;
struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
struct cvmx_gpio_tx_clr_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t clr:20;
#else
uint64_t clr:20;
uint64_t reserved_20_63:44;
#endif
} cn61xx;
struct cvmx_gpio_tx_clr_cn38xx cn63xx;
struct cvmx_gpio_tx_clr_cn38xx cn63xxp1;
struct cvmx_gpio_tx_clr_cn61xx cn66xx;
struct cvmx_gpio_tx_clr_cn38xx cn68xx;
struct cvmx_gpio_tx_clr_cn38xx cn68xxp1;
struct cvmx_gpio_tx_clr_cn61xx cnf71xx;
};
union cvmx_gpio_tx_set {
uint64_t u64;
struct cvmx_gpio_tx_set_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
uint64_t set:24;
#else
uint64_t set:24;
uint64_t reserved_24_63:40;
#endif
} s;
struct cvmx_gpio_tx_set_s cn30xx;
struct cvmx_gpio_tx_set_s cn31xx;
struct cvmx_gpio_tx_set_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t set:16;
#else
uint64_t set:16;
uint64_t reserved_16_63:48;
#endif
} cn38xx;
struct cvmx_gpio_tx_set_cn38xx cn38xxp2;
struct cvmx_gpio_tx_set_s cn50xx;
struct cvmx_gpio_tx_set_cn38xx cn52xx;
struct cvmx_gpio_tx_set_cn38xx cn52xxp1;
struct cvmx_gpio_tx_set_cn38xx cn56xx;
struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
struct cvmx_gpio_tx_set_cn38xx cn58xx;
struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
struct cvmx_gpio_tx_set_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t set:20;
#else
uint64_t set:20;
uint64_t reserved_20_63:44;
#endif
} cn61xx;
struct cvmx_gpio_tx_set_cn38xx cn63xx;
struct cvmx_gpio_tx_set_cn38xx cn63xxp1;
struct cvmx_gpio_tx_set_cn61xx cn66xx;
struct cvmx_gpio_tx_set_cn38xx cn68xx;
struct cvmx_gpio_tx_set_cn38xx cn68xxp1;
struct cvmx_gpio_tx_set_cn61xx cnf71xx;
};
union cvmx_gpio_xbit_cfgx {
uint64_t u64;
struct cvmx_gpio_xbit_cfgx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t synce_sel:2;
uint64_t clk_gen:1;
uint64_t clk_sel:2;
uint64_t fil_sel:4;
uint64_t fil_cnt:4;
uint64_t int_type:1;
uint64_t int_en:1;
uint64_t rx_xor:1;
uint64_t tx_oe:1;
#else
uint64_t tx_oe:1;
uint64_t rx_xor:1;
uint64_t int_en:1;
uint64_t int_type:1;
uint64_t fil_cnt:4;
uint64_t fil_sel:4;
uint64_t clk_sel:2;
uint64_t clk_gen:1;
uint64_t synce_sel:2;
uint64_t reserved_17_63:47;
#endif
} s;
struct cvmx_gpio_xbit_cfgx_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t fil_sel:4;
uint64_t fil_cnt:4;
uint64_t reserved_2_3:2;
uint64_t rx_xor:1;
uint64_t tx_oe:1;
#else
uint64_t tx_oe:1;
uint64_t rx_xor:1;
uint64_t reserved_2_3:2;
uint64_t fil_cnt:4;
uint64_t fil_sel:4;
uint64_t reserved_12_63:52;
#endif
} cn30xx;
struct cvmx_gpio_xbit_cfgx_cn30xx cn31xx;
struct cvmx_gpio_xbit_cfgx_cn30xx cn50xx;
struct cvmx_gpio_xbit_cfgx_s cn61xx;
struct cvmx_gpio_xbit_cfgx_s cn66xx;
struct cvmx_gpio_xbit_cfgx_s cnf71xx;
};
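/*
 * Usage example (illustrative sketch only, not part of the original header).
 * It shows the usual read-modify-write pattern for these register unions and
 * assumes the CVMX_GPIO_* address macros defined earlier in this file plus
 * the standard cvmx_read_csr()/cvmx_write_csr() accessors; the bit number (4)
 * is arbitrary.
 *
 *	union cvmx_gpio_bit_cfgx cfg;
 *	union cvmx_gpio_rx_dat rx;
 *
 *	cfg.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(4));
 *	cfg.s.tx_oe = 1;				// drive GPIO4 as an output
 *	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(4), cfg.u64);
 *	cvmx_write_csr(CVMX_GPIO_TX_SET, 1ull << 4);	// set GPIO4 high
 *	rx.u64 = cvmx_read_csr(CVMX_GPIO_RX_DAT);	// sample all input levels
 */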
#endif

View file

@@ -0,0 +1,166 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* Helper functions to abstract board specific data about
* network ports from the rest of the cvmx-helper files.
*
*/
#ifndef __CVMX_HELPER_BOARD_H__
#define __CVMX_HELPER_BOARD_H__
#include <asm/octeon/cvmx-helper.h>
enum cvmx_helper_board_usb_clock_types {
USB_CLOCK_TYPE_REF_12,
USB_CLOCK_TYPE_REF_24,
USB_CLOCK_TYPE_REF_48,
USB_CLOCK_TYPE_CRYSTAL_12,
};
typedef enum {
set_phy_link_flags_autoneg = 0x1,
set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
set_phy_link_flags_flow_control_enable = 0x1 << 1,
set_phy_link_flags_flow_control_disable = 0x2 << 1,
set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */
} cvmx_helper_board_set_phy_link_flags_types_t;
/*
 * Fake IPD port: the RGMII/MII interface may use a different PHY; use
 * this macro to return the appropriate MIX address to read that PHY.
*/
#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
/**
* cvmx_override_board_link_get(int ipd_port) is a function
* pointer. It is meant to allow customization of the process of
* talking to a PHY to determine link speed. It is called every
* time a PHY must be polled for link status. Users should set
* this pointer to a function before calling any cvmx-helper
* operations.
*/
extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
/**
* Return the MII PHY address associated with the given IPD
* port. A result of -1 means there isn't a MII capable PHY
* connected to this port. On chips supporting multiple MII
* busses the bus number is encoded in bits <15:8>.
*
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @ipd_port: Octeon IPD port to get the MII address for.
*
* Returns MII PHY address and bus number or -1.
*/
extern int cvmx_helper_board_get_mii_address(int ipd_port);
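/*
 * Example (sketch): decoding the value returned by
 * cvmx_helper_board_get_mii_address().  Per the comment above, the bus
 * number lives in bits <15:8> and the PHY address in the low bits.
 *
 *	int r = cvmx_helper_board_get_mii_address(ipd_port);
 *	if (r >= 0) {
 *		int bus = (r >> 8) & 0xff;	// MII/MDIO bus number
 *		int phy = r & 0xff;		// PHY address on that bus
 *		// ... poll PHY 'phy' on bus 'bus' ...
 *	}
 */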
/**
 * This function is the board specific method of changing the PHY
* speed, duplex, and autonegotiation. This programs the PHY and
* not Octeon. This can be used to force Octeon's links to
* specific settings.
*
* @phy_addr: The address of the PHY to program
* @link_flags:
* Flags to control autonegotiation. Bit 0 is autonegotiation
 * enable/disable to maintain backward compatibility.
* @link_info: Link speed to program. If the speed is zero and autonegotiation
* is enabled, all possible negotiation speeds are advertised.
*
* Returns Zero on success, negative on failure
*/
int cvmx_helper_board_link_set_phy(int phy_addr,
cvmx_helper_board_set_phy_link_flags_types_t
link_flags,
cvmx_helper_link_info_t link_info);
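/*
 * Example (sketch; the PHY address and speed are arbitrary): restricting a
 * PHY to 1000 Mbps full duplex with autonegotiation and flow control
 * advertisement enabled.  cvmx_helper_link_info_t comes from cvmx-helper.h,
 * included above.
 *
 *	cvmx_helper_link_info_t li;
 *
 *	li.u64 = 0;
 *	li.s.link_up = 1;
 *	li.s.full_duplex = 1;
 *	li.s.speed = 1000;
 *	cvmx_helper_board_link_set_phy(phy_addr,
 *				       set_phy_link_flags_autoneg |
 *				       set_phy_link_flags_flow_control_enable,
 *				       li);
 */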
/**
* This function is the board specific method of determining an
* ethernet ports link speed. Most Octeon boards have Marvell PHYs
* and are handled by the fall through case. This function must be
* updated for boards that don't have the normal Marvell PHYs.
*
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @ipd_port: IPD input port associated with the port we want to get link
* status for.
*
 * Returns The port's link status. If the link isn't fully resolved, this must
* return zero.
*/
extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
/**
* This function is called by cvmx_helper_interface_probe() after it
* determines the number of ports Octeon can support on a specific
* interface. This function is the per board location to override
* this value. It is called with the number of ports Octeon might
* support and should return the number of actual ports on the
* board.
*
 * This function must be modified for every new Octeon board.
 * Internally it uses switch statements based on the cvmx_sysinfo
 * data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @interface: Interface to probe
* @supported_ports:
* Number of ports Octeon supports.
*
* Returns Number of ports the actual board supports. Many times this will
* simple be "support_ports".
*/
extern int __cvmx_helper_board_interface_probe(int interface,
int supported_ports);
/**
* Enable packet input/output from the hardware. This function is
 * called by cvmx_helper_packet_hardware_enable() to
* perform board specific initialization. For most boards
* nothing is needed.
*
* @interface: Interface to enable
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_board_hardware_enable(int interface);
enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void);
#endif /* __CVMX_HELPER_BOARD_H__ */

View file

@@ -0,0 +1,33 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_HELPER_ERRATA_H__
#define __CVMX_HELPER_ERRATA_H__
extern void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm);
#endif

View file

@@ -0,0 +1,43 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* @file
*
* Helper utilities for qlm_jtag.
*
*/
#ifndef __CVMX_HELPER_JTAG_H__
#define __CVMX_HELPER_JTAG_H__
extern void cvmx_helper_qlm_jtag_init(void);
extern uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data);
extern void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits);
extern void cvmx_helper_qlm_jtag_update(int qlm);
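/*
 * Example (sketch): typical use of the JTAG helpers above -- initialize the
 * interface, shift a field into the QLM JTAG chain, then latch it.  The chain
 * padding length and field value here are hypothetical.
 *
 *	cvmx_helper_qlm_jtag_init();
 *	cvmx_helper_qlm_jtag_shift_zeros(qlm, 32);	// pad ahead of the field
 *	cvmx_helper_qlm_jtag_shift(qlm, 8, 0x5);	// shift in 8 bits of data
 *	cvmx_helper_qlm_jtag_update(qlm);		// latch the chain contents
 */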
#endif /* __CVMX_HELPER_JTAG_H__ */

View file

@@ -0,0 +1,60 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as published by
* the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful,
* but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or NONINFRINGEMENT.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* @file
*
* Functions for LOOP initialization, configuration,
* and monitoring.
*
*/
#ifndef __CVMX_HELPER_LOOP_H__
#define __CVMX_HELPER_LOOP_H__
/**
* Probe a LOOP interface and determine the number of ports
* connected to it. The LOOP interface should still be down after
* this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_loop_probe(int interface);
static inline int __cvmx_helper_loop_enumerate(int interface) { return 4; }
/**
 * Bring up and enable a LOOP interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_loop_enable(int interface);
#endif

View file

@@ -0,0 +1,61 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* @file
*
* Functions for NPI initialization, configuration,
* and monitoring.
*
*/
#ifndef __CVMX_HELPER_NPI_H__
#define __CVMX_HELPER_NPI_H__
/**
 * Probe an NPI interface and determine the number of ports
* connected to it. The NPI interface should still be down after
* this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_npi_probe(int interface);
#define __cvmx_helper_npi_enumerate __cvmx_helper_npi_probe
/**
 * Bring up and enable an NPI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_npi_enable(int interface);
#endif

View file

@@ -0,0 +1,111 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* @file
*
* Functions for RGMII/GMII/MII initialization, configuration,
* and monitoring.
*
*/
#ifndef __CVMX_HELPER_RGMII_H__
#define __CVMX_HELPER_RGMII_H__
/**
* Probe RGMII ports and determine the number present
*
* @interface: Interface to probe
*
* Returns Number of RGMII/GMII/MII ports (0-4).
*/
extern int __cvmx_helper_rgmii_probe(int interface);
#define __cvmx_helper_rgmii_enumerate __cvmx_helper_rgmii_probe
/**
* Put an RGMII interface in loopback mode. Internal packets sent
* out will be received back again on the same port. Externally
* received packets will echo back out.
*
* @port: IPD port number to loop.
*/
extern void cvmx_helper_rgmii_internal_loopback(int port);
/**
 * Configure all of the ASX, GMX, and PKO registers required
* to get RGMII to function on the supplied interface.
*
* @interface: PKO Interface to configure (0 or 1)
*
* Returns Zero on success
*/
extern int __cvmx_helper_rgmii_enable(int interface);
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_rgmii_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
* Non zero if you want internal loopback
* @enable_external:
* Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port,
int enable_internal,
int enable_external);
#endif

View file

@@ -0,0 +1,105 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* @file
*
* Functions for SGMII initialization, configuration,
* and monitoring.
*
*/
#ifndef __CVMX_HELPER_SGMII_H__
#define __CVMX_HELPER_SGMII_H__
/**
 * Probe an SGMII interface and determine the number of ports
* connected to it. The SGMII interface should still be down after
* this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_sgmii_probe(int interface);
extern int __cvmx_helper_sgmii_enumerate(int interface);
/**
 * Bring up and enable an SGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_sgmii_enable(int interface);
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_sgmii_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
* Non zero if you want internal loopback
* @enable_external:
* Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port,
int enable_internal,
int enable_external);
#endif

View file

@@ -0,0 +1,85 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for SPI initialization, configuration,
* and monitoring.
*/
#ifndef __CVMX_HELPER_SPI_H__
#define __CVMX_HELPER_SPI_H__
/**
* Probe a SPI interface and determine the number of ports
* connected to it. The SPI interface should still be down after
* this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_spi_probe(int interface);
extern int __cvmx_helper_spi_enumerate(int interface);
/**
 * Bring up and enable a SPI interface. After this call packet I/O
* should be fully functional. This is called with IPD enabled but
* PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_spi_enable(int interface);
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_spi_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
#endif

View file

@@ -0,0 +1,215 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Small helper utilities.
*
*/
#ifndef __CVMX_HELPER_UTIL_H__
#define __CVMX_HELPER_UTIL_H__
/**
 * Convert an interface mode into a human-readable string
*
* @mode: Mode to convert
*
* Returns String
*/
extern const char
*cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
/**
* Debug routine to dump the packet structure to the console
*
* @work: Work queue entry containing the packet to dump
* Returns
*/
extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
/**
* Setup Random Early Drop on a specific input queue
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @drop_thresh:
* All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
int drop_thresh);
/**
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @drop_thresh:
* All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
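/*
 * Example (sketch; the thresholds are arbitrary): begin randomly dropping
 * packets when fewer than 256 free buffers remain in FPA pool 0, and drop
 * everything once fewer than 128 remain.
 *
 *	cvmx_helper_setup_red(256, 128);
 */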
/**
* Get the version of the CVMX libraries.
*
* Returns Version string. Note this buffer is allocated statically
* and will be shared by all callers.
*/
extern const char *cvmx_helper_get_version(void);
/**
* Setup the common GMX settings that determine the number of
 * ports. These settings apply to almost all configurations of all
* chips.
*
* @interface: Interface to configure
* @num_ports: Number of ports on the interface
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
/**
* Returns the IPD/PKO port number for a port on the given
* interface.
*
* @interface: Interface to use
* @port: Port on the interface
*
* Returns IPD/PKO port number
*/
extern int cvmx_helper_get_ipd_port(int interface, int port);
/**
* Returns the IPD/PKO port number for the first port on the given
* interface.
*
* @interface: Interface to use
*
* Returns IPD/PKO port number
*/
static inline int cvmx_helper_get_first_ipd_port(int interface)
{
return cvmx_helper_get_ipd_port(interface, 0);
}
/**
* Returns the IPD/PKO port number for the last port on the given
* interface.
*
* @interface: Interface to use
*
* Returns IPD/PKO port number
*/
static inline int cvmx_helper_get_last_ipd_port(int interface)
{
extern int cvmx_helper_ports_on_interface(int interface);
return cvmx_helper_get_first_ipd_port(interface) +
cvmx_helper_ports_on_interface(interface) - 1;
}
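/*
 * Example (sketch): iterating over every IPD/PKO port of an interface with
 * the two helpers above.
 *
 *	int ipd_port;
 *
 *	for (ipd_port = cvmx_helper_get_first_ipd_port(interface);
 *	     ipd_port <= cvmx_helper_get_last_ipd_port(interface);
 *	     ipd_port++) {
 *		// e.g. query cvmx_helper_link_get(ipd_port)
 *	}
 */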
/**
* Free the packet buffers contained in a work queue entry.
* The work queue entry is not freed.
*
* @work: Work queue entry with packet to free
*/
static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
{
uint64_t number_buffers;
union cvmx_buf_ptr buffer_ptr;
union cvmx_buf_ptr next_buffer_ptr;
uint64_t start_of_buffer;
number_buffers = work->word2.s.bufs;
if (number_buffers == 0)
return;
buffer_ptr = work->packet_ptr;
/*
* Since the number of buffers is not zero, we know this is
* not a dynamic short packet. We need to check if it is a
* packet received with IPD_CTL_STATUS[NO_WPTR]. If this is
* true, we need to free all buffers except for the first
* one. The caller doesn't expect their WQE pointer to be
* freed
*/
start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
if (cvmx_ptr_to_phys(work) == start_of_buffer) {
next_buffer_ptr =
*(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
buffer_ptr = next_buffer_ptr;
number_buffers--;
}
while (number_buffers--) {
/*
* Remember the back pointer is in cache lines, not
* 64bit words
*/
start_of_buffer =
((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
/*
* Read pointer to next buffer before we free the
* current buffer.
*/
next_buffer_ptr =
*(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer),
buffer_ptr.s.pool, 0);
buffer_ptr = next_buffer_ptr;
}
}
/**
* Returns the interface number for an IPD/PKO port number.
*
* @ipd_port: IPD/PKO port number
*
* Returns Interface number
*/
extern int cvmx_helper_get_interface_num(int ipd_port);
/**
* Returns the interface index number for an IPD/PKO port
* number.
*
* @ipd_port: IPD/PKO port number
*
* Returns Interface index number
*/
extern int cvmx_helper_get_interface_index_num(int ipd_port);
#endif /* __CVMX_HELPER_H__ */

View file

@@ -0,0 +1,104 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* @file
*
* Functions for XAUI initialization, configuration,
* and monitoring.
*
*/
#ifndef __CVMX_HELPER_XAUI_H__
#define __CVMX_HELPER_XAUI_H__
/**
* Probe a XAUI interface and determine the number of ports
* connected to it. The XAUI interface should still be down
* after this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_xaui_probe(int interface);
extern int __cvmx_helper_xaui_enumerate(int interface);
/**
 * Bring up and enable a XAUI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_xaui_enable(int interface);
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
extern int __cvmx_helper_xaui_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
* Non zero if you want internal loopback
* @enable_external:
* Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
int enable_internal,
int enable_external);
#endif

View file

@@ -0,0 +1,226 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Helper functions for common, but complicated tasks.
*
*/
#ifndef __CVMX_HELPER_H__
#define __CVMX_HELPER_H__
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-wqe.h>
typedef enum {
CVMX_HELPER_INTERFACE_MODE_DISABLED,
CVMX_HELPER_INTERFACE_MODE_RGMII,
CVMX_HELPER_INTERFACE_MODE_GMII,
CVMX_HELPER_INTERFACE_MODE_SPI,
CVMX_HELPER_INTERFACE_MODE_PCIE,
CVMX_HELPER_INTERFACE_MODE_XAUI,
CVMX_HELPER_INTERFACE_MODE_SGMII,
CVMX_HELPER_INTERFACE_MODE_PICMG,
CVMX_HELPER_INTERFACE_MODE_NPI,
CVMX_HELPER_INTERFACE_MODE_LOOP,
} cvmx_helper_interface_mode_t;
typedef union {
uint64_t u64;
struct {
uint64_t reserved_20_63:44;
uint64_t link_up:1; /**< Is the physical link up? */
uint64_t full_duplex:1; /**< 1 if the link is full duplex */
uint64_t speed:18; /**< Speed of the link in Mbps */
} s;
} cvmx_helper_link_info_t;
#include <asm/octeon/cvmx-helper-errata.h>
#include <asm/octeon/cvmx-helper-loop.h>
#include <asm/octeon/cvmx-helper-npi.h>
#include <asm/octeon/cvmx-helper-rgmii.h>
#include <asm/octeon/cvmx-helper-sgmii.h>
#include <asm/octeon/cvmx-helper-spi.h>
#include <asm/octeon/cvmx-helper-util.h>
#include <asm/octeon/cvmx-helper-xaui.h>
/**
* cvmx_override_pko_queue_priority(int ipd_port, uint64_t
* priorities[16]) is a function pointer. It is meant to allow
* customization of the PKO queue priorities based on the port
* number. Users should set this pointer to a function before
* calling any cvmx-helper operations.
*/
extern void (*cvmx_override_pko_queue_priority) (int pko_port,
uint64_t priorities[16]);
/**
* cvmx_override_ipd_port_setup(int ipd_port) is a function
* pointer. It is meant to allow customization of the IPD port
* setup before packet input/output comes online. It is called
* after cvmx-helper does the default IPD configuration, but
* before IPD is enabled. Users should set this pointer to a
* function before calling any cvmx-helper operations.
*/
extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
* IPD. This should be called by the user program after any additional
* IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
* is not set in the executive-config.h file.
*
* Returns 0 on success
* -1 on failure
*/
extern int cvmx_helper_ipd_and_packet_input_enable(void);
/**
* Initialize the PIP, IPD, and PKO hardware to support
* simple priority based queues for the ethernet ports. Each
* port is configured with a number of priority queues based
* on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
* priority than the previous.
*
* Returns Zero on success, non-zero on failure
*/
extern int cvmx_helper_initialize_packet_io_global(void);
/**
* Does core local initialization for packet io
*
* Returns Zero on success, non-zero on failure
*/
extern int cvmx_helper_initialize_packet_io_local(void);
/**
* Returns the number of ports on the given interface.
* The interface must be initialized before the port count
* can be returned.
*
* @interface: Which interface to return port count for.
*
* Returns Port count for interface
* -1 for uninitialized interface
*/
extern int cvmx_helper_ports_on_interface(int interface);
/**
* Return the number of interfaces the chip has. Each interface
* may have multiple ports. Most chips support two interfaces,
* but the CNX0XX and CNX1XX are exceptions. These only support
* one interface.
*
* Returns Number of interfaces on chip
*/
extern int cvmx_helper_get_number_of_interfaces(void);
/**
* Get the operating mode of an interface. Depending on the Octeon
* chip and configuration, this function returns an enumeration
* of the type of packet I/O supported by an interface.
*
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
* DISABLED.
*/
extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
interface);
/**
* Auto configure an IPD/PKO port link state and speed. This
* function basically does the equivalent of:
* cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
*
* @ipd_port: IPD/PKO port to auto configure
*
* Returns Link state after configure
*/
extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
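/*
 * Example (sketch): cvmx_helper_link_autoconf() is essentially shorthand for
 * the explicit read-then-apply sequence below, using the link_get()/link_set()
 * routines declared later in this file.
 *
 *	cvmx_helper_link_info_t li = cvmx_helper_link_get(ipd_port);
 *
 *	cvmx_helper_link_set(ipd_port, li);
 */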
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @ipd_port: IPD/PKO port to query
*
* Returns Link state
*/
extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @ipd_port: IPD/PKO port to configure
* @link_info: The new link state
*
* Returns Zero on success, negative on failure
*/
extern int cvmx_helper_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
/**
* This function probes an interface to determine the actual
* number of hardware ports connected to it. It doesn't setup the
* ports or enable them. The main goal here is to set the global
* interface_port_count[interface] correctly. Hardware setup of the
* ports will be performed later.
*
* @interface: Interface to probe
*
* Returns Zero on success, negative on failure
*/
extern int cvmx_helper_interface_probe(int interface);
extern int cvmx_helper_interface_enumerate(int interface);
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
* Non zero if you want internal loopback
* @enable_external:
* Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
int enable_external);
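/*
 * Example (illustrative bring-up sketch only; whether the final IPD enable is
 * needed depends on the executive configuration described above): initialize
 * packet I/O, autoconfigure every port's link, then enable packet input.
 *
 *	int iface, port;
 *
 *	cvmx_helper_initialize_packet_io_global();
 *	cvmx_helper_initialize_packet_io_local();
 *	for (iface = 0; iface < cvmx_helper_get_number_of_interfaces(); iface++)
 *		for (port = 0; port < cvmx_helper_ports_on_interface(iface); port++)
 *			cvmx_helper_link_autoconf(
 *				cvmx_helper_get_ipd_port(iface, port));
 *	cvmx_helper_ipd_and_packet_input_enable();
 */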
#endif /* __CVMX_HELPER_H__ */

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,338 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* Interface to the hardware Input Packet Data unit.
*/
#ifndef __CVMX_IPD_H__
#define __CVMX_IPD_H__
#include <asm/octeon/octeon-feature.h>
#include <asm/octeon/cvmx-ipd-defs.h>
enum cvmx_ipd_mode {
CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
	CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
};
#ifndef CVMX_ENABLE_LEN_M8_FIX
#define CVMX_ENABLE_LEN_M8_FIX 0
#endif
/* CSR typedefs have been moved to cvmx-csr-*.h */
typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;
typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
/**
* Configure IPD
*
 * @mbuff_size: Packet buffer size in 8 byte words
* @first_mbuff_skip:
* Number of 8 byte words to skip in the first buffer
* @not_first_mbuff_skip:
* Number of 8 byte words to skip in each following buffer
* @first_back: Must be same as first_mbuff_skip / 128
* @second_back:
* Must be same as not_first_mbuff_skip / 128
* @wqe_fpa_pool:
* FPA pool to get work entries from
 * @cache_mode: Cache mode for received packet data (see enum cvmx_ipd_mode)
* @back_pres_enable_flag:
* Enable or disable port back pressure
*/
static inline void cvmx_ipd_config(uint64_t mbuff_size,
uint64_t first_mbuff_skip,
uint64_t not_first_mbuff_skip,
uint64_t first_back,
uint64_t second_back,
uint64_t wqe_fpa_pool,
enum cvmx_ipd_mode cache_mode,
uint64_t back_pres_enable_flag)
{
cvmx_ipd_mbuff_first_skip_t first_skip;
cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
union cvmx_ipd_packet_mbuff_size size;
cvmx_ipd_first_next_ptr_back_t first_back_struct;
cvmx_ipd_second_next_ptr_back_t second_back_struct;
union cvmx_ipd_wqe_fpa_queue wqe_pool;
union cvmx_ipd_ctl_status ipd_ctl_reg;
first_skip.u64 = 0;
first_skip.s.skip_sz = first_mbuff_skip;
cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
not_first_skip.u64 = 0;
not_first_skip.s.skip_sz = not_first_mbuff_skip;
cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
size.u64 = 0;
size.s.mb_size = mbuff_size;
cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
first_back_struct.u64 = 0;
first_back_struct.s.back = first_back;
cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
second_back_struct.u64 = 0;
second_back_struct.s.back = second_back;
cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
wqe_pool.u64 = 0;
wqe_pool.s.wqe_pool = wqe_fpa_pool;
cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
ipd_ctl_reg.s.opc_mode = cache_mode;
ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
/* Note: the example RED code that used to be here has been moved to
cvmx_helper_setup_red */
}
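/*
 * Example (illustrative sketch; the buffer size, skip values and pool name
 * are assumptions, not mandated values): 2048 byte packet buffers with a
 * 16-word first-buffer skip, work entries from CVMX_FPA_WQE_POOL, all blocks
 * written straight to DRAM, back pressure enabled.  first_back and
 * second_back follow the "skip / 128" rule from the parameter list above.
 *
 *	cvmx_ipd_config(2048 / 8,		// mbuff_size, in 8 byte words
 *			16,			// first_mbuff_skip
 *			0,			// not_first_mbuff_skip
 *			16 / 128,		// first_back
 *			0,			// second_back
 *			CVMX_FPA_WQE_POOL,	// wqe_fpa_pool
 *			CVMX_IPD_OPC_MODE_STT,	// cache_mode
 *			1);			// back_pres_enable_flag
 */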
/**
* Enable IPD
*/
static inline void cvmx_ipd_enable(void)
{
union cvmx_ipd_ctl_status ipd_reg;
ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
if (ipd_reg.s.ipd_en) {
cvmx_dprintf
("Warning: Enabling IPD when IPD already enabled.\n");
}
ipd_reg.s.ipd_en = 1;
#if CVMX_ENABLE_LEN_M8_FIX
if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
ipd_reg.s.len_m8 = TRUE;
#endif
cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}
/**
* Disable IPD
*/
static inline void cvmx_ipd_disable(void)
{
union cvmx_ipd_ctl_status ipd_reg;
ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
ipd_reg.s.ipd_en = 0;
cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
}
/**
* Supportive function for cvmx_fpa_shutdown_pool.
*/
static inline void cvmx_ipd_free_ptr(void)
{
/* Only CN38XXp{1,2} cannot read pointer out of the IPD */
if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
&& !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
int no_wptr = 0;
union cvmx_ipd_ptr_count ipd_ptr_count;
ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
/* Handle Work Queue Entry in cn56xx and cn52xx */
if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
union cvmx_ipd_ctl_status ipd_ctl_status;
ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
if (ipd_ctl_status.s.no_wptr)
no_wptr = 1;
}
/* Free the prefetched WQE */
if (ipd_ptr_count.s.wqev_cnt) {
union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
ipd_wqe_ptr_valid.u64 =
cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
if (no_wptr)
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t) ipd_wqe_ptr_valid.s.
ptr << 7), CVMX_FPA_PACKET_POOL,
0);
else
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t) ipd_wqe_ptr_valid.s.
ptr << 7), CVMX_FPA_WQE_POOL, 0);
}
/* Free all WQE in the fifo */
if (ipd_ptr_count.s.wqe_pcnt) {
int i;
union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
ipd_pwp_ptr_fifo_ctl.u64 =
cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
ipd_pwp_ptr_fifo_ctl.s.cena = 0;
ipd_pwp_ptr_fifo_ctl.s.raddr =
ipd_pwp_ptr_fifo_ctl.s.max_cnts +
(ipd_pwp_ptr_fifo_ctl.s.wraddr +
i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
ipd_pwp_ptr_fifo_ctl.u64);
ipd_pwp_ptr_fifo_ctl.u64 =
cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
if (no_wptr)
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t)
ipd_pwp_ptr_fifo_ctl.s.
ptr << 7),
CVMX_FPA_PACKET_POOL, 0);
else
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t)
ipd_pwp_ptr_fifo_ctl.s.
ptr << 7),
CVMX_FPA_WQE_POOL, 0);
}
ipd_pwp_ptr_fifo_ctl.s.cena = 1;
cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
ipd_pwp_ptr_fifo_ctl.u64);
}
/* Free the prefetched packet */
if (ipd_ptr_count.s.pktv_cnt) {
union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
ipd_pkt_ptr_valid.u64 =
cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
cvmx_fpa_free(cvmx_phys_to_ptr
(ipd_pkt_ptr_valid.s.ptr << 7),
CVMX_FPA_PACKET_POOL, 0);
}
/* Free the per port prefetched packets */
if (1) {
int i;
union cvmx_ipd_prc_port_ptr_fifo_ctl
ipd_prc_port_ptr_fifo_ctl;
ipd_prc_port_ptr_fifo_ctl.u64 =
cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
i++) {
ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
ipd_prc_port_ptr_fifo_ctl.s.raddr =
i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
ipd_prc_port_ptr_fifo_ctl.u64);
ipd_prc_port_ptr_fifo_ctl.u64 =
cvmx_read_csr
(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t)
ipd_prc_port_ptr_fifo_ctl.s.
ptr << 7), CVMX_FPA_PACKET_POOL,
0);
}
ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
ipd_prc_port_ptr_fifo_ctl.u64);
}
/* Free all packets in the holding fifo */
if (ipd_ptr_count.s.pfif_cnt) {
int i;
union cvmx_ipd_prc_hold_ptr_fifo_ctl
ipd_prc_hold_ptr_fifo_ctl;
ipd_prc_hold_ptr_fifo_ctl.u64 =
cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
ipd_prc_hold_ptr_fifo_ctl.s.raddr =
(ipd_prc_hold_ptr_fifo_ctl.s.praddr +
i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
ipd_prc_hold_ptr_fifo_ctl.u64);
ipd_prc_hold_ptr_fifo_ctl.u64 =
cvmx_read_csr
(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t)
ipd_prc_hold_ptr_fifo_ctl.s.
ptr << 7), CVMX_FPA_PACKET_POOL,
0);
}
ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
ipd_prc_hold_ptr_fifo_ctl.u64);
}
/* Free all packets in the fifo */
if (ipd_ptr_count.s.pkt_pcnt) {
int i;
union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
ipd_pwp_ptr_fifo_ctl.u64 =
cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
ipd_pwp_ptr_fifo_ctl.s.cena = 0;
ipd_pwp_ptr_fifo_ctl.s.raddr =
(ipd_pwp_ptr_fifo_ctl.s.praddr +
i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
ipd_pwp_ptr_fifo_ctl.u64);
ipd_pwp_ptr_fifo_ctl.u64 =
cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
cvmx_fpa_free(cvmx_phys_to_ptr
((uint64_t) ipd_pwp_ptr_fifo_ctl.
s.ptr << 7),
CVMX_FPA_PACKET_POOL, 0);
}
ipd_pwp_ptr_fifo_ctl.s.cena = 1;
cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
ipd_pwp_ptr_fifo_ctl.u64);
}
/* Reset the IPD to get all buffers out of it */
{
union cvmx_ipd_ctl_status ipd_ctl_status;
ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
ipd_ctl_status.s.reset = 1;
cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
}
/* Reset the PIP */
{
union cvmx_pip_sft_rst pip_sft_rst;
pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
pip_sft_rst.s.rst = 1;
cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
}
}
}
#endif /* __CVMX_IPD_H__ */

File diff suppressed because it is too large

View file

@@ -0,0 +1,360 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Interface to the Level 2 Cache (L2C) control, measurement, and debugging
* facilities.
*/
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
#define CVMX_L2C_ALIAS_MASK (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
#define CVMX_L2C_MEMBANK_SELECT_SIZE 4096
/* Defines for Virtualizations, valid only from Octeon II onwards. */
#define CVMX_L2C_VRT_MAX_VIRTID_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 64 : 0)
#define CVMX_L2C_VRT_MAX_MEMSZ_ALLOWED ((OCTEON_IS_MODEL(OCTEON_CN63XX)) ? 32 : 0)
union cvmx_l2c_tag {
uint64_t u64;
struct {
uint64_t reserved:28;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:32; /* Phys mem (not all bits valid) */
} s;
};
/* Number of L2C Tag-and-data sections (TADs) that are connected to LMC. */
#define CVMX_L2C_TADS 1
/* L2C Performance Counter events. */
enum cvmx_l2c_event {
CVMX_L2C_EVENT_CYCLES = 0,
CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
CVMX_L2C_EVENT_DATA_MISS = 3,
CVMX_L2C_EVENT_DATA_HIT = 4,
CVMX_L2C_EVENT_MISS = 5,
CVMX_L2C_EVENT_HIT = 6,
CVMX_L2C_EVENT_VICTIM_HIT = 7,
CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
CVMX_L2C_EVENT_TAG_PROBE = 9,
CVMX_L2C_EVENT_TAG_UPDATE = 10,
CVMX_L2C_EVENT_TAG_COMPLETE = 11,
CVMX_L2C_EVENT_TAG_DIRTY = 12,
CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
CVMX_L2C_EVENT_DATA_STORE_READ = 14,
CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
CVMX_L2C_EVENT_WRITE_REQUEST = 17,
CVMX_L2C_EVENT_READ_REQUEST = 18,
CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
CVMX_L2C_EVENT_XMC_NOP = 20,
CVMX_L2C_EVENT_XMC_LDT = 21,
CVMX_L2C_EVENT_XMC_LDI = 22,
CVMX_L2C_EVENT_XMC_LDD = 23,
CVMX_L2C_EVENT_XMC_STF = 24,
CVMX_L2C_EVENT_XMC_STT = 25,
CVMX_L2C_EVENT_XMC_STP = 26,
CVMX_L2C_EVENT_XMC_STC = 27,
CVMX_L2C_EVENT_XMC_DWB = 28,
CVMX_L2C_EVENT_XMC_PL2 = 29,
CVMX_L2C_EVENT_XMC_PSL1 = 30,
CVMX_L2C_EVENT_XMC_IOBLD = 31,
CVMX_L2C_EVENT_XMC_IOBST = 32,
CVMX_L2C_EVENT_XMC_IOBDMA = 33,
CVMX_L2C_EVENT_XMC_IOBRSP = 34,
CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
CVMX_L2C_EVENT_RSC_NOP = 39,
CVMX_L2C_EVENT_RSC_STDN = 40,
CVMX_L2C_EVENT_RSC_FILL = 41,
CVMX_L2C_EVENT_RSC_REFL = 42,
CVMX_L2C_EVENT_RSC_STIN = 43,
CVMX_L2C_EVENT_RSC_SCIN = 44,
CVMX_L2C_EVENT_RSC_SCFL = 45,
CVMX_L2C_EVENT_RSC_SCDN = 46,
CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
CVMX_L2C_EVENT_LRF_REQ = 51,
CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
CVMX_L2C_EVENT_DT_WR_INVAL = 53,
CVMX_L2C_EVENT_MAX
};
/* L2C Performance Counter events for Octeon2. */
enum cvmx_l2c_tad_event {
CVMX_L2C_TAD_EVENT_NONE = 0,
CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
CVMX_L2C_TAD_EVENT_SC_PASS = 6,
CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
CVMX_L2C_TAD_EVENT_MAX
};
/**
* Configure one of the four L2 Cache performance counters to capture event
* occurrences.
*
* @counter: The counter to configure. Range 0..3.
* @event: The type of L2 Cache event occurrence to count.
* @clear_on_read: When asserted, any read of the performance counter
* clears the counter.
*
* @note The routine does not clear the counter.
*/
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event, uint32_t clear_on_read);
/**
* Read the given L2 Cache performance counter. The counter must be configured
* before reading, but this routine does not enforce this requirement.
*
 * @counter: The counter to read. Range 0..3.
*
* Returns The current counter value.
*/
uint64_t cvmx_l2c_read_perf(uint32_t counter);
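/*
 * Illustrative sketch, not part of the Cavium SDK API: one possible way to
 * use the two routines above to count L2 data misses around a workload.
 * Counter 0 and the event are arbitrary choices, and workload_fn is a
 * hypothetical caller-supplied function.
 */
static inline uint64_t cvmx_l2c_example_count_data_misses(void (*workload_fn)(void))
{
	uint64_t before, after;

	/* Count data misses on counter 0; do not clear the counter on read */
	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_DATA_MISS, 0);
	before = cvmx_l2c_read_perf(0);
	workload_fn();
	after = cvmx_l2c_read_perf(0);

	return after - before;
}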
/**
* Return the L2 Cache way partitioning for a given core.
*
* @core: The core processor of interest.
*
 * Returns The mask specifying the partitioning. A 0 bit in the mask
 * indicates a cache 'way' that the core can evict from.
* -1 on error
*/
int cvmx_l2c_get_core_way_partition(uint32_t core);
/**
* Partitions the L2 cache for a core
*
* @core: The core that the partitioning applies to.
* @mask: The partitioning of the ways expressed as a binary
* mask. A 0 bit allows the core to evict cache lines from
* a way, while a 1 bit blocks the core from evicting any
* lines from that way. There must be at least one allowed
* way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
* those ways will never have any cache lines evicted from them.
* All cores and the hardware blocks are free to read from all
* ways regardless of the partitioning.
*/
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
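/*
 * Illustrative sketch, not part of the Cavium SDK API: building a partition
 * mask for the routine above. A 0 bit allows the core to evict from a way
 * and a 1 bit blocks it, so this restricts the given core to ways 0 and 1
 * and blocks it from the rest. num_ways is expected to come from
 * cvmx_l2c_get_num_assoc(), which is declared later in this file.
 */
static inline int cvmx_l2c_example_restrict_core_to_two_ways(uint32_t core, int num_ways)
{
	uint32_t mask;

	if (num_ways <= 2 || num_ways >= 32)
		return -1;
	/* Set (block) every way bit, then clear (allow) ways 0 and 1 */
	mask = ((1u << num_ways) - 1) & ~0x3u;
	return cvmx_l2c_set_core_way_partition(core, mask);
}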
/**
* Return the L2 Cache way partitioning for the hw blocks.
*
 * Returns The mask specifying the partitioning for the hardware blocks.
 * A 0 bit in the mask indicates a cache 'way' that the hardware blocks
 * can evict from.
* -1 on error
*/
int cvmx_l2c_get_hw_way_partition(void);
/**
* Partitions the L2 cache for the hardware blocks.
*
* @mask: The partitioning of the ways expressed as a binary
* mask. A 0 bit allows the core to evict cache lines from
* a way, while a 1 bit blocks the core from evicting any
* lines from that way. There must be at least one allowed
* way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
* those ways will never have any cache lines evicted from them.
* All cores and the hardware blocks are free to read from all
* ways regardless of the partitioning.
*/
int cvmx_l2c_set_hw_way_partition(uint32_t mask);
/**
* Locks a line in the L2 cache at the specified physical address
*
* @addr: physical address of line to lock
*
* Returns 0 on success,
* 1 if line not locked.
*/
int cvmx_l2c_lock_line(uint64_t addr);
/**
* Locks a specified memory region in the L2 cache.
*
* Note that if not all lines can be locked, that means that all
* but one of the ways (associations) available to the locking
 * core are locked. Having only one association available for
 * normal caching may have a significant adverse effect on performance.
* Care should be taken to ensure that enough of the L2 cache is left
* unlocked to allow for normal caching of DRAM.
*
* @start: Physical address of the start of the region to lock
* @len: Length (in bytes) of region to lock
*
 * Returns Number of requested lines that were not locked.
* 0 on success (all locked)
*/
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
/**
* Unlock and flush a cache line from the L2 cache.
* IMPORTANT: Must only be run by one core at a time due to use
* of L2C debug features.
* Note that this function will flush a matching but unlocked cache line.
* (If address is not in L2, no lines are flushed.)
*
* @address: Physical address to unlock
*
* Returns 0: line not unlocked
* 1: line unlocked
*/
int cvmx_l2c_unlock_line(uint64_t address);
/**
* Unlocks a region of memory that is locked in the L2 cache
*
* @start: start physical address
* @len: length (in bytes) to unlock
*
* Returns Number of locked lines that the call unlocked
*/
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
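/*
 * Illustrative sketch, not part of the Cavium SDK API: pinning a buffer in
 * the L2 cache with cvmx_l2c_lock_mem_region() and backing the lock out
 * again if any line could not be locked. buffer_phys and buffer_len are
 * hypothetical caller-supplied values (physical start address and length).
 */
static inline int cvmx_l2c_example_pin_buffer(uint64_t buffer_phys,
					      uint64_t buffer_len)
{
	int not_locked = cvmx_l2c_lock_mem_region(buffer_phys, buffer_len);

	if (not_locked != 0) {
		/* Some lines did not lock; release whatever did get locked */
		cvmx_l2c_unlock_mem_region(buffer_phys, buffer_len);
		return -1;
	}
	return 0;
}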
/**
* Read the L2 controller tag for a given location in L2
*
 * @association: Which association (way) to read the line from.
 * @index: Which set index to read from.
*
* Returns l2c tag structure for line requested.
*/
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
/* Wrapper providing a deprecated old function name */
static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index) __attribute__((deprecated));
static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association, uint32_t index)
{
return cvmx_l2c_get_tag(association, index);
}
/**
* Returns the cache index for a given physical address
*
* @addr: physical address
*
* Returns L2 cache index
*/
uint32_t cvmx_l2c_address_to_index(uint64_t addr);
/**
* Flushes (and unlocks) the entire L2 cache.
* IMPORTANT: Must only be run by one core at a time due to use
* of L2C debug features.
*/
void cvmx_l2c_flush(void);
/**
 * Return the size of the L2 cache.
 *
 * Returns The size of the L2 cache in bytes,
* -1 on error (unrecognized model)
*/
int cvmx_l2c_get_cache_size_bytes(void);
/**
* Return the number of sets in the L2 Cache
*
 * Returns The number of sets in the L2 cache
*/
int cvmx_l2c_get_num_sets(void);
/**
* Return log base 2 of the number of sets in the L2 cache
 *
 * Returns Log base 2 of the number of sets in the L2 cache
*/
int cvmx_l2c_get_set_bits(void);
/**
* Return the number of associations in the L2 Cache
*
 * Returns The number of associations (ways) in the L2 cache
*/
int cvmx_l2c_get_num_assoc(void);
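/*
 * Illustrative sketch, not part of the Cavium SDK API: combining several of
 * the query routines above. For a given physical address, this walks every
 * association (way) of the set the address maps to and counts how many of
 * those ways currently hold a valid line, using the V bit of
 * union cvmx_l2c_tag.
 */
static inline int cvmx_l2c_example_count_valid_ways(uint64_t phys_addr)
{
	uint32_t index = cvmx_l2c_address_to_index(phys_addr);
	int num_assoc = cvmx_l2c_get_num_assoc();
	int assoc, valid = 0;

	for (assoc = 0; assoc < num_assoc; assoc++) {
		union cvmx_l2c_tag tag = cvmx_l2c_get_tag(assoc, index);

		if (tag.s.V)
			valid++;
	}
	return valid;
}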
/**
* Flush a line from the L2 cache
* This should only be called from one core at a time, as this routine
* sets the core to the 'debug' core in order to flush the line.
*
* @assoc: Association (or way) to flush
* @index: Index to flush
*/
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);
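/*
 * Illustrative sketch, not part of the Cavium SDK API: evicting a single
 * physical address from the L2 cache with the routines above. Rather than
 * trying to identify the matching way, this simply flushes every
 * association at the address's index, which also evicts any other lines in
 * that set. The same single-core caveat as cvmx_l2c_flush_line() applies.
 */
static inline void cvmx_l2c_example_flush_address(uint64_t phys_addr)
{
	uint32_t index = cvmx_l2c_address_to_index(phys_addr);
	int num_assoc = cvmx_l2c_get_num_assoc();
	int assoc;

	for (assoc = 0; assoc < num_assoc; assoc++)
		cvmx_l2c_flush_line(assoc, index);
}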
#endif /* __CVMX_L2C_H__ */

View file

@@ -0,0 +1,526 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_L2D_DEFS_H__
#define __CVMX_L2D_DEFS_H__
#define CVMX_L2D_BST0 (CVMX_ADD_IO_SEG(0x0001180080000780ull))
#define CVMX_L2D_BST1 (CVMX_ADD_IO_SEG(0x0001180080000788ull))
#define CVMX_L2D_BST2 (CVMX_ADD_IO_SEG(0x0001180080000790ull))
#define CVMX_L2D_BST3 (CVMX_ADD_IO_SEG(0x0001180080000798ull))
#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull))
#define CVMX_L2D_FADR (CVMX_ADD_IO_SEG(0x0001180080000018ull))
#define CVMX_L2D_FSYN0 (CVMX_ADD_IO_SEG(0x0001180080000020ull))
#define CVMX_L2D_FSYN1 (CVMX_ADD_IO_SEG(0x0001180080000028ull))
#define CVMX_L2D_FUS0 (CVMX_ADD_IO_SEG(0x00011800800007A0ull))
#define CVMX_L2D_FUS1 (CVMX_ADD_IO_SEG(0x00011800800007A8ull))
#define CVMX_L2D_FUS2 (CVMX_ADD_IO_SEG(0x00011800800007B0ull))
#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull))
union cvmx_l2d_bst0 {
uint64_t u64;
struct cvmx_l2d_bst0_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
uint64_t ftl:1;
uint64_t q0stat:34;
#else
uint64_t q0stat:34;
uint64_t ftl:1;
uint64_t reserved_35_63:29;
#endif
} s;
struct cvmx_l2d_bst0_s cn30xx;
struct cvmx_l2d_bst0_s cn31xx;
struct cvmx_l2d_bst0_s cn38xx;
struct cvmx_l2d_bst0_s cn38xxp2;
struct cvmx_l2d_bst0_s cn50xx;
struct cvmx_l2d_bst0_s cn52xx;
struct cvmx_l2d_bst0_s cn52xxp1;
struct cvmx_l2d_bst0_s cn56xx;
struct cvmx_l2d_bst0_s cn56xxp1;
struct cvmx_l2d_bst0_s cn58xx;
struct cvmx_l2d_bst0_s cn58xxp1;
};
union cvmx_l2d_bst1 {
uint64_t u64;
struct cvmx_l2d_bst1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q1stat:34;
#else
uint64_t q1stat:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_bst1_s cn30xx;
struct cvmx_l2d_bst1_s cn31xx;
struct cvmx_l2d_bst1_s cn38xx;
struct cvmx_l2d_bst1_s cn38xxp2;
struct cvmx_l2d_bst1_s cn50xx;
struct cvmx_l2d_bst1_s cn52xx;
struct cvmx_l2d_bst1_s cn52xxp1;
struct cvmx_l2d_bst1_s cn56xx;
struct cvmx_l2d_bst1_s cn56xxp1;
struct cvmx_l2d_bst1_s cn58xx;
struct cvmx_l2d_bst1_s cn58xxp1;
};
union cvmx_l2d_bst2 {
uint64_t u64;
struct cvmx_l2d_bst2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q2stat:34;
#else
uint64_t q2stat:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_bst2_s cn30xx;
struct cvmx_l2d_bst2_s cn31xx;
struct cvmx_l2d_bst2_s cn38xx;
struct cvmx_l2d_bst2_s cn38xxp2;
struct cvmx_l2d_bst2_s cn50xx;
struct cvmx_l2d_bst2_s cn52xx;
struct cvmx_l2d_bst2_s cn52xxp1;
struct cvmx_l2d_bst2_s cn56xx;
struct cvmx_l2d_bst2_s cn56xxp1;
struct cvmx_l2d_bst2_s cn58xx;
struct cvmx_l2d_bst2_s cn58xxp1;
};
union cvmx_l2d_bst3 {
uint64_t u64;
struct cvmx_l2d_bst3_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q3stat:34;
#else
uint64_t q3stat:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_bst3_s cn30xx;
struct cvmx_l2d_bst3_s cn31xx;
struct cvmx_l2d_bst3_s cn38xx;
struct cvmx_l2d_bst3_s cn38xxp2;
struct cvmx_l2d_bst3_s cn50xx;
struct cvmx_l2d_bst3_s cn52xx;
struct cvmx_l2d_bst3_s cn52xxp1;
struct cvmx_l2d_bst3_s cn56xx;
struct cvmx_l2d_bst3_s cn56xxp1;
struct cvmx_l2d_bst3_s cn58xx;
struct cvmx_l2d_bst3_s cn58xxp1;
};
union cvmx_l2d_err {
uint64_t u64;
struct cvmx_l2d_err_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t bmhclsel:1;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t bmhclsel:1;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_l2d_err_s cn30xx;
struct cvmx_l2d_err_s cn31xx;
struct cvmx_l2d_err_s cn38xx;
struct cvmx_l2d_err_s cn38xxp2;
struct cvmx_l2d_err_s cn50xx;
struct cvmx_l2d_err_s cn52xx;
struct cvmx_l2d_err_s cn52xxp1;
struct cvmx_l2d_err_s cn56xx;
struct cvmx_l2d_err_s cn56xxp1;
struct cvmx_l2d_err_s cn58xx;
struct cvmx_l2d_err_s cn58xxp1;
};
union cvmx_l2d_fadr {
uint64_t u64;
struct cvmx_l2d_fadr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_19_63:45;
uint64_t fadru:1;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t fadr:11;
#else
uint64_t fadr:11;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t fadru:1;
uint64_t reserved_19_63:45;
#endif
} s;
struct cvmx_l2d_fadr_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t reserved_13_13:1;
uint64_t fset:2;
uint64_t reserved_9_10:2;
uint64_t fadr:9;
#else
uint64_t fadr:9;
uint64_t reserved_9_10:2;
uint64_t fset:2;
uint64_t reserved_13_13:1;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn30xx;
struct cvmx_l2d_fadr_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t reserved_13_13:1;
uint64_t fset:2;
uint64_t reserved_10_10:1;
uint64_t fadr:10;
#else
uint64_t fadr:10;
uint64_t reserved_10_10:1;
uint64_t fset:2;
uint64_t reserved_13_13:1;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn31xx;
struct cvmx_l2d_fadr_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t fadr:11;
#else
uint64_t fadr:11;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn38xx;
struct cvmx_l2d_fadr_cn38xx cn38xxp2;
struct cvmx_l2d_fadr_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t reserved_8_10:3;
uint64_t fadr:8;
#else
uint64_t fadr:8;
uint64_t reserved_8_10:3;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn50xx;
struct cvmx_l2d_fadr_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t fowmsk:4;
uint64_t fset:3;
uint64_t reserved_10_10:1;
uint64_t fadr:10;
#else
uint64_t fadr:10;
uint64_t reserved_10_10:1;
uint64_t fset:3;
uint64_t fowmsk:4;
uint64_t reserved_18_63:46;
#endif
} cn52xx;
struct cvmx_l2d_fadr_cn52xx cn52xxp1;
struct cvmx_l2d_fadr_s cn56xx;
struct cvmx_l2d_fadr_s cn56xxp1;
struct cvmx_l2d_fadr_s cn58xx;
struct cvmx_l2d_fadr_s cn58xxp1;
};
union cvmx_l2d_fsyn0 {
uint64_t u64;
struct cvmx_l2d_fsyn0_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t fsyn_ow1:10;
uint64_t fsyn_ow0:10;
#else
uint64_t fsyn_ow0:10;
uint64_t fsyn_ow1:10;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_l2d_fsyn0_s cn30xx;
struct cvmx_l2d_fsyn0_s cn31xx;
struct cvmx_l2d_fsyn0_s cn38xx;
struct cvmx_l2d_fsyn0_s cn38xxp2;
struct cvmx_l2d_fsyn0_s cn50xx;
struct cvmx_l2d_fsyn0_s cn52xx;
struct cvmx_l2d_fsyn0_s cn52xxp1;
struct cvmx_l2d_fsyn0_s cn56xx;
struct cvmx_l2d_fsyn0_s cn56xxp1;
struct cvmx_l2d_fsyn0_s cn58xx;
struct cvmx_l2d_fsyn0_s cn58xxp1;
};
union cvmx_l2d_fsyn1 {
uint64_t u64;
struct cvmx_l2d_fsyn1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t fsyn_ow3:10;
uint64_t fsyn_ow2:10;
#else
uint64_t fsyn_ow2:10;
uint64_t fsyn_ow3:10;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_l2d_fsyn1_s cn30xx;
struct cvmx_l2d_fsyn1_s cn31xx;
struct cvmx_l2d_fsyn1_s cn38xx;
struct cvmx_l2d_fsyn1_s cn38xxp2;
struct cvmx_l2d_fsyn1_s cn50xx;
struct cvmx_l2d_fsyn1_s cn52xx;
struct cvmx_l2d_fsyn1_s cn52xxp1;
struct cvmx_l2d_fsyn1_s cn56xx;
struct cvmx_l2d_fsyn1_s cn56xxp1;
struct cvmx_l2d_fsyn1_s cn58xx;
struct cvmx_l2d_fsyn1_s cn58xxp1;
};
union cvmx_l2d_fus0 {
uint64_t u64;
struct cvmx_l2d_fus0_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q0fus:34;
#else
uint64_t q0fus:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_fus0_s cn30xx;
struct cvmx_l2d_fus0_s cn31xx;
struct cvmx_l2d_fus0_s cn38xx;
struct cvmx_l2d_fus0_s cn38xxp2;
struct cvmx_l2d_fus0_s cn50xx;
struct cvmx_l2d_fus0_s cn52xx;
struct cvmx_l2d_fus0_s cn52xxp1;
struct cvmx_l2d_fus0_s cn56xx;
struct cvmx_l2d_fus0_s cn56xxp1;
struct cvmx_l2d_fus0_s cn58xx;
struct cvmx_l2d_fus0_s cn58xxp1;
};
union cvmx_l2d_fus1 {
uint64_t u64;
struct cvmx_l2d_fus1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q1fus:34;
#else
uint64_t q1fus:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_fus1_s cn30xx;
struct cvmx_l2d_fus1_s cn31xx;
struct cvmx_l2d_fus1_s cn38xx;
struct cvmx_l2d_fus1_s cn38xxp2;
struct cvmx_l2d_fus1_s cn50xx;
struct cvmx_l2d_fus1_s cn52xx;
struct cvmx_l2d_fus1_s cn52xxp1;
struct cvmx_l2d_fus1_s cn56xx;
struct cvmx_l2d_fus1_s cn56xxp1;
struct cvmx_l2d_fus1_s cn58xx;
struct cvmx_l2d_fus1_s cn58xxp1;
};
union cvmx_l2d_fus2 {
uint64_t u64;
struct cvmx_l2d_fus2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
uint64_t q2fus:34;
#else
uint64_t q2fus:34;
uint64_t reserved_34_63:30;
#endif
} s;
struct cvmx_l2d_fus2_s cn30xx;
struct cvmx_l2d_fus2_s cn31xx;
struct cvmx_l2d_fus2_s cn38xx;
struct cvmx_l2d_fus2_s cn38xxp2;
struct cvmx_l2d_fus2_s cn50xx;
struct cvmx_l2d_fus2_s cn52xx;
struct cvmx_l2d_fus2_s cn52xxp1;
struct cvmx_l2d_fus2_s cn56xx;
struct cvmx_l2d_fus2_s cn56xxp1;
struct cvmx_l2d_fus2_s cn58xx;
struct cvmx_l2d_fus2_s cn58xxp1;
};
union cvmx_l2d_fus3 {
uint64_t u64;
struct cvmx_l2d_fus3_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_34_36:3;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t reserved_34_36:3;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} s;
struct cvmx_l2d_fus3_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
uint64_t crip_64k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_64k:1;
uint64_t reserved_35_63:29;
#endif
} cn30xx;
struct cvmx_l2d_fus3_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
uint64_t crip_128k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_128k:1;
uint64_t reserved_35_63:29;
#endif
} cn31xx;
struct cvmx_l2d_fus3_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_36_63:28;
uint64_t crip_256k:1;
uint64_t crip_512k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_512k:1;
uint64_t crip_256k:1;
uint64_t reserved_36_63:28;
#endif
} cn38xx;
struct cvmx_l2d_fus3_cn38xx cn38xxp2;
struct cvmx_l2d_fus3_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_36_36:1;
uint64_t crip_32k:1;
uint64_t crip_64k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_64k:1;
uint64_t crip_32k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} cn50xx;
struct cvmx_l2d_fus3_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_36_36:1;
uint64_t crip_128k:1;
uint64_t crip_256k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_256k:1;
uint64_t crip_128k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} cn52xx;
struct cvmx_l2d_fus3_cn52xx cn52xxp1;
struct cvmx_l2d_fus3_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ema_ctl:3;
uint64_t reserved_36_36:1;
uint64_t crip_512k:1;
uint64_t crip_1024k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_1024k:1;
uint64_t crip_512k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:3;
uint64_t reserved_40_63:24;
#endif
} cn56xx;
struct cvmx_l2d_fus3_cn56xx cn56xxp1;
struct cvmx_l2d_fus3_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_39_63:25;
uint64_t ema_ctl:2;
uint64_t reserved_36_36:1;
uint64_t crip_512k:1;
uint64_t crip_1024k:1;
uint64_t q3fus:34;
#else
uint64_t q3fus:34;
uint64_t crip_1024k:1;
uint64_t crip_512k:1;
uint64_t reserved_36_36:1;
uint64_t ema_ctl:2;
uint64_t reserved_39_63:25;
#endif
} cn58xx;
struct cvmx_l2d_fus3_cn58xx cn58xxp1;
};
#endif

View file

@@ -0,0 +1,243 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_L2T_DEFS_H__
#define __CVMX_L2T_DEFS_H__
#define CVMX_L2T_ERR (CVMX_ADD_IO_SEG(0x0001180080000008ull))
union cvmx_l2t_err {
uint64_t u64;
struct cvmx_l2t_err_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t fadru:1;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t fadr:10;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:10;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t fadru:1;
uint64_t reserved_29_63:35;
#endif
} s;
struct cvmx_l2t_err_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t reserved_23_23:1;
uint64_t fset:2;
uint64_t reserved_19_20:2;
uint64_t fadr:8;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:8;
uint64_t reserved_19_20:2;
uint64_t fset:2;
uint64_t reserved_23_23:1;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
} cn30xx;
struct cvmx_l2t_err_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t reserved_23_23:1;
uint64_t fset:2;
uint64_t reserved_20_20:1;
uint64_t fadr:9;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:9;
uint64_t reserved_20_20:1;
uint64_t fset:2;
uint64_t reserved_23_23:1;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
} cn31xx;
struct cvmx_l2t_err_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t fadr:10;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:10;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
} cn38xx;
struct cvmx_l2t_err_cn38xx cn38xxp2;
struct cvmx_l2t_err_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t reserved_18_20:3;
uint64_t fadr:7;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:7;
uint64_t reserved_18_20:3;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
} cn50xx;
struct cvmx_l2t_err_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t lck_intena2:1;
uint64_t lckerr2:1;
uint64_t lck_intena:1;
uint64_t lckerr:1;
uint64_t fset:3;
uint64_t reserved_20_20:1;
uint64_t fadr:9;
uint64_t fsyn:6;
uint64_t ded_err:1;
uint64_t sec_err:1;
uint64_t ded_intena:1;
uint64_t sec_intena:1;
uint64_t ecc_ena:1;
#else
uint64_t ecc_ena:1;
uint64_t sec_intena:1;
uint64_t ded_intena:1;
uint64_t sec_err:1;
uint64_t ded_err:1;
uint64_t fsyn:6;
uint64_t fadr:9;
uint64_t reserved_20_20:1;
uint64_t fset:3;
uint64_t lckerr:1;
uint64_t lck_intena:1;
uint64_t lckerr2:1;
uint64_t lck_intena2:1;
uint64_t reserved_28_63:36;
#endif
} cn52xx;
struct cvmx_l2t_err_cn52xx cn52xxp1;
struct cvmx_l2t_err_s cn56xx;
struct cvmx_l2t_err_s cn56xxp1;
struct cvmx_l2t_err_s cn58xx;
struct cvmx_l2t_err_s cn58xxp1;
};
#endif

View file

@@ -0,0 +1,292 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_LED_DEFS_H__
#define __CVMX_LED_DEFS_H__
#define CVMX_LED_BLINK (CVMX_ADD_IO_SEG(0x0001180000001A48ull))
#define CVMX_LED_CLK_PHASE (CVMX_ADD_IO_SEG(0x0001180000001A08ull))
#define CVMX_LED_CYLON (CVMX_ADD_IO_SEG(0x0001180000001AF8ull))
#define CVMX_LED_DBG (CVMX_ADD_IO_SEG(0x0001180000001A18ull))
#define CVMX_LED_EN (CVMX_ADD_IO_SEG(0x0001180000001A00ull))
#define CVMX_LED_POLARITY (CVMX_ADD_IO_SEG(0x0001180000001A50ull))
#define CVMX_LED_PRT (CVMX_ADD_IO_SEG(0x0001180000001A10ull))
#define CVMX_LED_PRT_FMT (CVMX_ADD_IO_SEG(0x0001180000001A30ull))
#define CVMX_LED_PRT_STATUSX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A80ull) + ((offset) & 7) * 8)
#define CVMX_LED_UDD_CNTX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A20ull) + ((offset) & 1) * 8)
#define CVMX_LED_UDD_DATX(offset) (CVMX_ADD_IO_SEG(0x0001180000001A38ull) + ((offset) & 1) * 8)
#define CVMX_LED_UDD_DAT_CLRX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC8ull) + ((offset) & 1) * 16)
#define CVMX_LED_UDD_DAT_SETX(offset) (CVMX_ADD_IO_SEG(0x0001180000001AC0ull) + ((offset) & 1) * 16)
union cvmx_led_blink {
uint64_t u64;
struct cvmx_led_blink_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t rate:8;
#else
uint64_t rate:8;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_led_blink_s cn38xx;
struct cvmx_led_blink_s cn38xxp2;
struct cvmx_led_blink_s cn56xx;
struct cvmx_led_blink_s cn56xxp1;
struct cvmx_led_blink_s cn58xx;
struct cvmx_led_blink_s cn58xxp1;
};
union cvmx_led_clk_phase {
uint64_t u64;
struct cvmx_led_clk_phase_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t phase:7;
#else
uint64_t phase:7;
uint64_t reserved_7_63:57;
#endif
} s;
struct cvmx_led_clk_phase_s cn38xx;
struct cvmx_led_clk_phase_s cn38xxp2;
struct cvmx_led_clk_phase_s cn56xx;
struct cvmx_led_clk_phase_s cn56xxp1;
struct cvmx_led_clk_phase_s cn58xx;
struct cvmx_led_clk_phase_s cn58xxp1;
};
union cvmx_led_cylon {
uint64_t u64;
struct cvmx_led_cylon_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t rate:16;
#else
uint64_t rate:16;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_led_cylon_s cn38xx;
struct cvmx_led_cylon_s cn38xxp2;
struct cvmx_led_cylon_s cn56xx;
struct cvmx_led_cylon_s cn56xxp1;
struct cvmx_led_cylon_s cn58xx;
struct cvmx_led_cylon_s cn58xxp1;
};
union cvmx_led_dbg {
uint64_t u64;
struct cvmx_led_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t dbg_en:1;
#else
uint64_t dbg_en:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_led_dbg_s cn38xx;
struct cvmx_led_dbg_s cn38xxp2;
struct cvmx_led_dbg_s cn56xx;
struct cvmx_led_dbg_s cn56xxp1;
struct cvmx_led_dbg_s cn58xx;
struct cvmx_led_dbg_s cn58xxp1;
};
union cvmx_led_en {
uint64_t u64;
struct cvmx_led_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t en:1;
#else
uint64_t en:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_led_en_s cn38xx;
struct cvmx_led_en_s cn38xxp2;
struct cvmx_led_en_s cn56xx;
struct cvmx_led_en_s cn56xxp1;
struct cvmx_led_en_s cn58xx;
struct cvmx_led_en_s cn58xxp1;
};
union cvmx_led_polarity {
uint64_t u64;
struct cvmx_led_polarity_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t polarity:1;
#else
uint64_t polarity:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_led_polarity_s cn38xx;
struct cvmx_led_polarity_s cn38xxp2;
struct cvmx_led_polarity_s cn56xx;
struct cvmx_led_polarity_s cn56xxp1;
struct cvmx_led_polarity_s cn58xx;
struct cvmx_led_polarity_s cn58xxp1;
};
union cvmx_led_prt {
uint64_t u64;
struct cvmx_led_prt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t prt_en:8;
#else
uint64_t prt_en:8;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_led_prt_s cn38xx;
struct cvmx_led_prt_s cn38xxp2;
struct cvmx_led_prt_s cn56xx;
struct cvmx_led_prt_s cn56xxp1;
struct cvmx_led_prt_s cn58xx;
struct cvmx_led_prt_s cn58xxp1;
};
union cvmx_led_prt_fmt {
uint64_t u64;
struct cvmx_led_prt_fmt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t format:4;
#else
uint64_t format:4;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_led_prt_fmt_s cn38xx;
struct cvmx_led_prt_fmt_s cn38xxp2;
struct cvmx_led_prt_fmt_s cn56xx;
struct cvmx_led_prt_fmt_s cn56xxp1;
struct cvmx_led_prt_fmt_s cn58xx;
struct cvmx_led_prt_fmt_s cn58xxp1;
};
union cvmx_led_prt_statusx {
uint64_t u64;
struct cvmx_led_prt_statusx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t status:6;
#else
uint64_t status:6;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_led_prt_statusx_s cn38xx;
struct cvmx_led_prt_statusx_s cn38xxp2;
struct cvmx_led_prt_statusx_s cn56xx;
struct cvmx_led_prt_statusx_s cn56xxp1;
struct cvmx_led_prt_statusx_s cn58xx;
struct cvmx_led_prt_statusx_s cn58xxp1;
};
union cvmx_led_udd_cntx {
uint64_t u64;
struct cvmx_led_udd_cntx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t cnt:6;
#else
uint64_t cnt:6;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_led_udd_cntx_s cn38xx;
struct cvmx_led_udd_cntx_s cn38xxp2;
struct cvmx_led_udd_cntx_s cn56xx;
struct cvmx_led_udd_cntx_s cn56xxp1;
struct cvmx_led_udd_cntx_s cn58xx;
struct cvmx_led_udd_cntx_s cn58xxp1;
};
union cvmx_led_udd_datx {
uint64_t u64;
struct cvmx_led_udd_datx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t dat:32;
#else
uint64_t dat:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_led_udd_datx_s cn38xx;
struct cvmx_led_udd_datx_s cn38xxp2;
struct cvmx_led_udd_datx_s cn56xx;
struct cvmx_led_udd_datx_s cn56xxp1;
struct cvmx_led_udd_datx_s cn58xx;
struct cvmx_led_udd_datx_s cn58xxp1;
};
union cvmx_led_udd_dat_clrx {
uint64_t u64;
struct cvmx_led_udd_dat_clrx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t clr:32;
#else
uint64_t clr:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_led_udd_dat_clrx_s cn38xx;
struct cvmx_led_udd_dat_clrx_s cn38xxp2;
struct cvmx_led_udd_dat_clrx_s cn56xx;
struct cvmx_led_udd_dat_clrx_s cn56xxp1;
struct cvmx_led_udd_dat_clrx_s cn58xx;
struct cvmx_led_udd_dat_clrx_s cn58xxp1;
};
union cvmx_led_udd_dat_setx {
uint64_t u64;
struct cvmx_led_udd_dat_setx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t set:32;
#else
uint64_t set:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_led_udd_dat_setx_s cn38xx;
struct cvmx_led_udd_dat_setx_s cn38xxp2;
struct cvmx_led_udd_dat_setx_s cn56xx;
struct cvmx_led_udd_dat_setx_s cn56xxp1;
struct cvmx_led_udd_dat_setx_s cn58xx;
struct cvmx_led_udd_dat_setx_s cn58xxp1;
};
#endif

File diff suppressed because it is too large

View file

@@ -0,0 +1,506 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
* clause 22 and clause 45 operations.
*
*/
#ifndef __CVMX_MIO_H__
#define __CVMX_MIO_H__
#include <asm/octeon/cvmx-smix-defs.h>
/**
* PHY register 0 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_CONTROL 0
typedef union {
uint16_t u16;
struct {
uint16_t reset:1;
uint16_t loopback:1;
uint16_t speed_lsb:1;
uint16_t autoneg_enable:1;
uint16_t power_down:1;
uint16_t isolate:1;
uint16_t restart_autoneg:1;
uint16_t duplex:1;
uint16_t collision_test:1;
uint16_t speed_msb:1;
uint16_t unidirectional_enable:1;
uint16_t reserved_0_4:5;
} s;
} cvmx_mdio_phy_reg_control_t;
/**
* PHY register 1 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_STATUS 1
typedef union {
uint16_t u16;
struct {
uint16_t capable_100base_t4:1;
uint16_t capable_100base_x_full:1;
uint16_t capable_100base_x_half:1;
uint16_t capable_10_full:1;
uint16_t capable_10_half:1;
uint16_t capable_100base_t2_full:1;
uint16_t capable_100base_t2_half:1;
uint16_t capable_extended_status:1;
uint16_t capable_unidirectional:1;
uint16_t capable_mf_preamble_suppression:1;
uint16_t autoneg_complete:1;
uint16_t remote_fault:1;
uint16_t capable_autoneg:1;
uint16_t link_status:1;
uint16_t jabber_detect:1;
uint16_t capable_extended_registers:1;
} s;
} cvmx_mdio_phy_reg_status_t;
/**
* PHY register 2 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_ID1 2
typedef union {
uint16_t u16;
struct {
uint16_t oui_bits_3_18;
} s;
} cvmx_mdio_phy_reg_id1_t;
/**
* PHY register 3 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_ID2 3
typedef union {
uint16_t u16;
struct {
uint16_t oui_bits_19_24:6;
uint16_t model:6;
uint16_t revision:4;
} s;
} cvmx_mdio_phy_reg_id2_t;
/**
* PHY register 4 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
typedef union {
uint16_t u16;
struct {
uint16_t next_page:1;
uint16_t reserved_14:1;
uint16_t remote_fault:1;
uint16_t reserved_12:1;
uint16_t asymmetric_pause:1;
uint16_t pause:1;
uint16_t advert_100base_t4:1;
uint16_t advert_100base_tx_full:1;
uint16_t advert_100base_tx_half:1;
uint16_t advert_10base_tx_full:1;
uint16_t advert_10base_tx_half:1;
uint16_t selector:5;
} s;
} cvmx_mdio_phy_reg_autoneg_adver_t;
/**
* PHY register 5 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
typedef union {
uint16_t u16;
struct {
uint16_t next_page:1;
uint16_t ack:1;
uint16_t remote_fault:1;
uint16_t reserved_12:1;
uint16_t asymmetric_pause:1;
uint16_t pause:1;
uint16_t advert_100base_t4:1;
uint16_t advert_100base_tx_full:1;
uint16_t advert_100base_tx_half:1;
uint16_t advert_10base_tx_full:1;
uint16_t advert_10base_tx_half:1;
uint16_t selector:5;
} s;
} cvmx_mdio_phy_reg_link_partner_ability_t;
/**
* PHY register 6 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
typedef union {
uint16_t u16;
struct {
uint16_t reserved_5_15:11;
uint16_t parallel_detection_fault:1;
uint16_t link_partner_next_page_capable:1;
uint16_t local_next_page_capable:1;
uint16_t page_received:1;
uint16_t link_partner_autoneg_capable:1;
} s;
} cvmx_mdio_phy_reg_autoneg_expansion_t;
/**
* PHY register 9 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
typedef union {
uint16_t u16;
struct {
uint16_t test_mode:3;
uint16_t manual_master_slave:1;
uint16_t master:1;
uint16_t port_type:1;
uint16_t advert_1000base_t_full:1;
uint16_t advert_1000base_t_half:1;
uint16_t reserved_0_7:8;
} s;
} cvmx_mdio_phy_reg_control_1000_t;
/**
* PHY register 10 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_STATUS_1000 10
typedef union {
uint16_t u16;
struct {
uint16_t master_slave_fault:1;
uint16_t is_master:1;
uint16_t local_receiver_ok:1;
uint16_t remote_receiver_ok:1;
uint16_t remote_capable_1000base_t_full:1;
uint16_t remote_capable_1000base_t_half:1;
uint16_t reserved_8_9:2;
uint16_t idle_error_count:8;
} s;
} cvmx_mdio_phy_reg_status_1000_t;
/**
* PHY register 15 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
typedef union {
uint16_t u16;
struct {
uint16_t capable_1000base_x_full:1;
uint16_t capable_1000base_x_half:1;
uint16_t capable_1000base_t_full:1;
uint16_t capable_1000base_t_half:1;
uint16_t reserved_0_11:12;
} s;
} cvmx_mdio_phy_reg_extended_status_t;
/**
* PHY register 13 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
typedef union {
uint16_t u16;
struct {
uint16_t function:2;
uint16_t reserved_5_13:9;
uint16_t devad:5;
} s;
} cvmx_mdio_phy_reg_mmd_control_t;
/**
* PHY register 14 from the 802.3 spec
*/
#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
typedef union {
uint16_t u16;
struct {
uint16_t address_data:16;
} s;
} cvmx_mdio_phy_reg_mmd_address_data_t;
/* Operating request encodings. */
#define MDIO_CLAUSE_22_WRITE 0
#define MDIO_CLAUSE_22_READ 1
#define MDIO_CLAUSE_45_ADDRESS 0
#define MDIO_CLAUSE_45_WRITE 1
#define MDIO_CLAUSE_45_READ_INC 2
#define MDIO_CLAUSE_45_READ 3
/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
#define CVMX_MMD_DEVICE_PMA_PMD 1
#define CVMX_MMD_DEVICE_WIS 2
#define CVMX_MMD_DEVICE_PCS 3
#define CVMX_MMD_DEVICE_PHY_XS 4
#define CVMX_MMD_DEVICE_DTS_XS 5
#define CVMX_MMD_DEVICE_TC 6
#define CVMX_MMD_DEVICE_CL22_EXT 29
#define CVMX_MMD_DEVICE_VENDOR_1 30
#define CVMX_MMD_DEVICE_VENDOR_2 31
/* Helper function to put MDIO interface into clause 45 mode */
static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
{
union cvmx_smix_clk smi_clk;
/* Put bus into clause 45 mode */
smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
smi_clk.s.mode = 1;
smi_clk.s.preamble = 1;
cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
}
/* Helper function to put MDIO interface into clause 22 mode */
static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
{
union cvmx_smix_clk smi_clk;
/* Put bus into clause 22 mode */
smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
smi_clk.s.mode = 0;
cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
}
/**
* Perform an MII read. This function is used to read PHY
* registers controlling auto negotiation.
*
 * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
* support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to read
*
* Returns Result from the read or -1 on failure
*/
static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
{
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_rd_dat smi_rd;
int timeout = 1000;
if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
__cvmx_mdio_set_clause22_mode(bus_id);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = location;
cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
do {
cvmx_wait(1000);
smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
} while (smi_rd.s.pending && timeout--);
if (smi_rd.s.val)
return smi_rd.s.dat;
else
return -1;
}
/**
* Perform an MII write. This function is used to write PHY
* registers controlling auto negotiation.
*
 * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
* support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
* 0 on success
*/
static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
{
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_wr_dat smi_wr;
int timeout = 1000;
if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
__cvmx_mdio_set_clause22_mode(bus_id);
smi_wr.u64 = 0;
smi_wr.s.dat = val;
cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = location;
cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
do {
cvmx_wait(1000);
smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
return -1;
return 0;
}
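/*
 * Illustrative sketch, not part of the Cavium SDK API: a typical clause 22
 * read-modify-write using the helpers above together with the
 * cvmx_mdio_phy_reg_control_t layout defined earlier in this file. It
 * restarts auto-negotiation on the given PHY; bus_id and phy_id are
 * whatever the board wiring dictates.
 */
static inline int cvmx_mdio_example_restart_autoneg(int bus_id, int phy_id)
{
	cvmx_mdio_phy_reg_control_t control;
	int val = cvmx_mdio_read(bus_id, phy_id, CVMX_MDIO_PHY_REG_CONTROL);

	if (val < 0)
		return -1;
	control.u16 = (uint16_t)val;
	control.s.autoneg_enable = 1;
	control.s.restart_autoneg = 1;
	return cvmx_mdio_write(bus_id, phy_id, CVMX_MDIO_PHY_REG_CONTROL,
			       control.u16);
}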
/**
* Perform an IEEE 802.3 clause 45 MII read. This function is used to
* read PHY registers controlling auto negotiation.
*
 * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
* support multiple busses.
* @phy_id: The MII phy id
 * @device: MDIO Manageable Device (MMD) id
* @location: Register location to read
*
* Returns Result from the read or -1 on failure
*/
static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
int location)
{
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_rd_dat smi_rd;
union cvmx_smix_wr_dat smi_wr;
int timeout = 1000;
if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
return -1;
__cvmx_mdio_set_clause45_mode(bus_id);
smi_wr.u64 = 0;
smi_wr.s.dat = location;
cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
do {
cvmx_wait(1000);
smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
"device %2d register %2d TIME OUT(address)\n",
bus_id, phy_id, device, location);
return -1;
}
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
do {
cvmx_wait(1000);
smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
} while (smi_rd.s.pending && --timeout);
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
"device %2d register %2d TIME OUT(data)\n",
bus_id, phy_id, device, location);
return -1;
}
if (smi_rd.s.val)
return smi_rd.s.dat;
else {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
"device %2d register %2d INVALID READ\n",
bus_id, phy_id, device, location);
return -1;
}
}
/**
* Perform an IEEE 802.3 clause 45 MII write. This function is used to
* write PHY registers controlling auto negotiation.
*
 * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
* support multiple busses.
* @phy_id: The MII phy id
 * @device: MDIO Manageable Device (MMD) id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
* 0 on success
*/
static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
int location, int val)
{
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_wr_dat smi_wr;
int timeout = 1000;
if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
return -1;
__cvmx_mdio_set_clause45_mode(bus_id);
smi_wr.u64 = 0;
smi_wr.s.dat = location;
cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
do {
cvmx_wait(1000);
smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
return -1;
smi_wr.u64 = 0;
smi_wr.s.dat = val;
cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = device;
cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
do {
cvmx_wait(1000);
smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
return -1;
return 0;
}
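/*
 * Illustrative sketch, not part of the Cavium SDK API: a clause 45 access
 * using the helper above, which hides the separate address and read phases.
 * It reads one register from the PMA/PMD MMD of a PHY; the register number
 * used here (0) is only an example, and bus_id/phy_id depend on the board.
 */
static inline int cvmx_mdio_example_read_pma_reg0(int bus_id, int phy_id)
{
	return cvmx_mdio_45_read(bus_id, phy_id, CVMX_MMD_DEVICE_PMA_PMD, 0);
}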
#endif

File diff suppressed because it is too large

View file

@@ -0,0 +1,566 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_MIXX_DEFS_H__
#define __CVMX_MIXX_DEFS_H__
#define CVMX_MIXX_BIST(offset) (CVMX_ADD_IO_SEG(0x0001070000100078ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100020ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_INTENA(offset) (CVMX_ADD_IO_SEG(0x0001070000100050ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_IRCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100030ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_IRHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100028ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_IRING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100010ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_IRING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100018ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_ISR(offset) (CVMX_ADD_IO_SEG(0x0001070000100048ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_ORCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100040ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_ORHWM(offset) (CVMX_ADD_IO_SEG(0x0001070000100038ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_ORING1(offset) (CVMX_ADD_IO_SEG(0x0001070000100000ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_ORING2(offset) (CVMX_ADD_IO_SEG(0x0001070000100008ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_REMCNT(offset) (CVMX_ADD_IO_SEG(0x0001070000100058ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_TSCTL(offset) (CVMX_ADD_IO_SEG(0x0001070000100068ull) + ((offset) & 1) * 2048)
#define CVMX_MIXX_TSTAMP(offset) (CVMX_ADD_IO_SEG(0x0001070000100060ull) + ((offset) & 1) * 2048)
union cvmx_mixx_bist {
uint64_t u64;
struct cvmx_mixx_bist_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t opfdat:1;
uint64_t mrgdat:1;
uint64_t mrqdat:1;
uint64_t ipfdat:1;
uint64_t irfdat:1;
uint64_t orfdat:1;
#else
uint64_t orfdat:1;
uint64_t irfdat:1;
uint64_t ipfdat:1;
uint64_t mrqdat:1;
uint64_t mrgdat:1;
uint64_t opfdat:1;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_mixx_bist_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t mrqdat:1;
uint64_t ipfdat:1;
uint64_t irfdat:1;
uint64_t orfdat:1;
#else
uint64_t orfdat:1;
uint64_t irfdat:1;
uint64_t ipfdat:1;
uint64_t mrqdat:1;
uint64_t reserved_4_63:60;
#endif
} cn52xx;
struct cvmx_mixx_bist_cn52xx cn52xxp1;
struct cvmx_mixx_bist_cn52xx cn56xx;
struct cvmx_mixx_bist_cn52xx cn56xxp1;
struct cvmx_mixx_bist_s cn61xx;
struct cvmx_mixx_bist_s cn63xx;
struct cvmx_mixx_bist_s cn63xxp1;
struct cvmx_mixx_bist_s cn66xx;
struct cvmx_mixx_bist_s cn68xx;
struct cvmx_mixx_bist_s cn68xxp1;
};
union cvmx_mixx_ctl {
uint64_t u64;
struct cvmx_mixx_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t ts_thresh:4;
uint64_t crc_strip:1;
uint64_t busy:1;
uint64_t en:1;
uint64_t reset:1;
uint64_t lendian:1;
uint64_t nbtarb:1;
uint64_t mrq_hwm:2;
#else
uint64_t mrq_hwm:2;
uint64_t nbtarb:1;
uint64_t lendian:1;
uint64_t reset:1;
uint64_t en:1;
uint64_t busy:1;
uint64_t crc_strip:1;
uint64_t ts_thresh:4;
uint64_t reserved_12_63:52;
#endif
} s;
struct cvmx_mixx_ctl_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t crc_strip:1;
uint64_t busy:1;
uint64_t en:1;
uint64_t reset:1;
uint64_t lendian:1;
uint64_t nbtarb:1;
uint64_t mrq_hwm:2;
#else
uint64_t mrq_hwm:2;
uint64_t nbtarb:1;
uint64_t lendian:1;
uint64_t reset:1;
uint64_t en:1;
uint64_t busy:1;
uint64_t crc_strip:1;
uint64_t reserved_8_63:56;
#endif
} cn52xx;
struct cvmx_mixx_ctl_cn52xx cn52xxp1;
struct cvmx_mixx_ctl_cn52xx cn56xx;
struct cvmx_mixx_ctl_cn52xx cn56xxp1;
struct cvmx_mixx_ctl_s cn61xx;
struct cvmx_mixx_ctl_s cn63xx;
struct cvmx_mixx_ctl_s cn63xxp1;
struct cvmx_mixx_ctl_s cn66xx;
struct cvmx_mixx_ctl_s cn68xx;
struct cvmx_mixx_ctl_s cn68xxp1;
};
union cvmx_mixx_intena {
uint64_t u64;
struct cvmx_mixx_intena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t tsena:1;
uint64_t orunena:1;
uint64_t irunena:1;
uint64_t data_drpena:1;
uint64_t ithena:1;
uint64_t othena:1;
uint64_t ivfena:1;
uint64_t ovfena:1;
#else
uint64_t ovfena:1;
uint64_t ivfena:1;
uint64_t othena:1;
uint64_t ithena:1;
uint64_t data_drpena:1;
uint64_t irunena:1;
uint64_t orunena:1;
uint64_t tsena:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_mixx_intena_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t orunena:1;
uint64_t irunena:1;
uint64_t data_drpena:1;
uint64_t ithena:1;
uint64_t othena:1;
uint64_t ivfena:1;
uint64_t ovfena:1;
#else
uint64_t ovfena:1;
uint64_t ivfena:1;
uint64_t othena:1;
uint64_t ithena:1;
uint64_t data_drpena:1;
uint64_t irunena:1;
uint64_t orunena:1;
uint64_t reserved_7_63:57;
#endif
} cn52xx;
struct cvmx_mixx_intena_cn52xx cn52xxp1;
struct cvmx_mixx_intena_cn52xx cn56xx;
struct cvmx_mixx_intena_cn52xx cn56xxp1;
struct cvmx_mixx_intena_s cn61xx;
struct cvmx_mixx_intena_s cn63xx;
struct cvmx_mixx_intena_s cn63xxp1;
struct cvmx_mixx_intena_s cn66xx;
struct cvmx_mixx_intena_s cn68xx;
struct cvmx_mixx_intena_s cn68xxp1;
};
union cvmx_mixx_ircnt {
uint64_t u64;
struct cvmx_mixx_ircnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t ircnt:20;
#else
uint64_t ircnt:20;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_mixx_ircnt_s cn52xx;
struct cvmx_mixx_ircnt_s cn52xxp1;
struct cvmx_mixx_ircnt_s cn56xx;
struct cvmx_mixx_ircnt_s cn56xxp1;
struct cvmx_mixx_ircnt_s cn61xx;
struct cvmx_mixx_ircnt_s cn63xx;
struct cvmx_mixx_ircnt_s cn63xxp1;
struct cvmx_mixx_ircnt_s cn66xx;
struct cvmx_mixx_ircnt_s cn68xx;
struct cvmx_mixx_ircnt_s cn68xxp1;
};
union cvmx_mixx_irhwm {
uint64_t u64;
struct cvmx_mixx_irhwm_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
uint64_t ibplwm:20;
uint64_t irhwm:20;
#else
uint64_t irhwm:20;
uint64_t ibplwm:20;
uint64_t reserved_40_63:24;
#endif
} s;
struct cvmx_mixx_irhwm_s cn52xx;
struct cvmx_mixx_irhwm_s cn52xxp1;
struct cvmx_mixx_irhwm_s cn56xx;
struct cvmx_mixx_irhwm_s cn56xxp1;
struct cvmx_mixx_irhwm_s cn61xx;
struct cvmx_mixx_irhwm_s cn63xx;
struct cvmx_mixx_irhwm_s cn63xxp1;
struct cvmx_mixx_irhwm_s cn66xx;
struct cvmx_mixx_irhwm_s cn68xx;
struct cvmx_mixx_irhwm_s cn68xxp1;
};
union cvmx_mixx_iring1 {
uint64_t u64;
struct cvmx_mixx_iring1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_60_63:4;
uint64_t isize:20;
uint64_t ibase:37;
uint64_t reserved_0_2:3;
#else
uint64_t reserved_0_2:3;
uint64_t ibase:37;
uint64_t isize:20;
uint64_t reserved_60_63:4;
#endif
} s;
struct cvmx_mixx_iring1_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_60_63:4;
uint64_t isize:20;
uint64_t reserved_36_39:4;
uint64_t ibase:33;
uint64_t reserved_0_2:3;
#else
uint64_t reserved_0_2:3;
uint64_t ibase:33;
uint64_t reserved_36_39:4;
uint64_t isize:20;
uint64_t reserved_60_63:4;
#endif
} cn52xx;
struct cvmx_mixx_iring1_cn52xx cn52xxp1;
struct cvmx_mixx_iring1_cn52xx cn56xx;
struct cvmx_mixx_iring1_cn52xx cn56xxp1;
struct cvmx_mixx_iring1_s cn61xx;
struct cvmx_mixx_iring1_s cn63xx;
struct cvmx_mixx_iring1_s cn63xxp1;
struct cvmx_mixx_iring1_s cn66xx;
struct cvmx_mixx_iring1_s cn68xx;
struct cvmx_mixx_iring1_s cn68xxp1;
};
union cvmx_mixx_iring2 {
uint64_t u64;
struct cvmx_mixx_iring2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_52_63:12;
uint64_t itlptr:20;
uint64_t reserved_20_31:12;
uint64_t idbell:20;
#else
uint64_t idbell:20;
uint64_t reserved_20_31:12;
uint64_t itlptr:20;
uint64_t reserved_52_63:12;
#endif
} s;
struct cvmx_mixx_iring2_s cn52xx;
struct cvmx_mixx_iring2_s cn52xxp1;
struct cvmx_mixx_iring2_s cn56xx;
struct cvmx_mixx_iring2_s cn56xxp1;
struct cvmx_mixx_iring2_s cn61xx;
struct cvmx_mixx_iring2_s cn63xx;
struct cvmx_mixx_iring2_s cn63xxp1;
struct cvmx_mixx_iring2_s cn66xx;
struct cvmx_mixx_iring2_s cn68xx;
struct cvmx_mixx_iring2_s cn68xxp1;
};
union cvmx_mixx_isr {
uint64_t u64;
struct cvmx_mixx_isr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t ts:1;
uint64_t orun:1;
uint64_t irun:1;
uint64_t data_drp:1;
uint64_t irthresh:1;
uint64_t orthresh:1;
uint64_t idblovf:1;
uint64_t odblovf:1;
#else
uint64_t odblovf:1;
uint64_t idblovf:1;
uint64_t orthresh:1;
uint64_t irthresh:1;
uint64_t data_drp:1;
uint64_t irun:1;
uint64_t orun:1;
uint64_t ts:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_mixx_isr_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t orun:1;
uint64_t irun:1;
uint64_t data_drp:1;
uint64_t irthresh:1;
uint64_t orthresh:1;
uint64_t idblovf:1;
uint64_t odblovf:1;
#else
uint64_t odblovf:1;
uint64_t idblovf:1;
uint64_t orthresh:1;
uint64_t irthresh:1;
uint64_t data_drp:1;
uint64_t irun:1;
uint64_t orun:1;
uint64_t reserved_7_63:57;
#endif
} cn52xx;
struct cvmx_mixx_isr_cn52xx cn52xxp1;
struct cvmx_mixx_isr_cn52xx cn56xx;
struct cvmx_mixx_isr_cn52xx cn56xxp1;
struct cvmx_mixx_isr_s cn61xx;
struct cvmx_mixx_isr_s cn63xx;
struct cvmx_mixx_isr_s cn63xxp1;
struct cvmx_mixx_isr_s cn66xx;
struct cvmx_mixx_isr_s cn68xx;
struct cvmx_mixx_isr_s cn68xxp1;
};
union cvmx_mixx_orcnt {
uint64_t u64;
struct cvmx_mixx_orcnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t orcnt:20;
#else
uint64_t orcnt:20;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_mixx_orcnt_s cn52xx;
struct cvmx_mixx_orcnt_s cn52xxp1;
struct cvmx_mixx_orcnt_s cn56xx;
struct cvmx_mixx_orcnt_s cn56xxp1;
struct cvmx_mixx_orcnt_s cn61xx;
struct cvmx_mixx_orcnt_s cn63xx;
struct cvmx_mixx_orcnt_s cn63xxp1;
struct cvmx_mixx_orcnt_s cn66xx;
struct cvmx_mixx_orcnt_s cn68xx;
struct cvmx_mixx_orcnt_s cn68xxp1;
};
union cvmx_mixx_orhwm {
uint64_t u64;
struct cvmx_mixx_orhwm_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t orhwm:20;
#else
uint64_t orhwm:20;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_mixx_orhwm_s cn52xx;
struct cvmx_mixx_orhwm_s cn52xxp1;
struct cvmx_mixx_orhwm_s cn56xx;
struct cvmx_mixx_orhwm_s cn56xxp1;
struct cvmx_mixx_orhwm_s cn61xx;
struct cvmx_mixx_orhwm_s cn63xx;
struct cvmx_mixx_orhwm_s cn63xxp1;
struct cvmx_mixx_orhwm_s cn66xx;
struct cvmx_mixx_orhwm_s cn68xx;
struct cvmx_mixx_orhwm_s cn68xxp1;
};
union cvmx_mixx_oring1 {
uint64_t u64;
struct cvmx_mixx_oring1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_60_63:4;
uint64_t osize:20;
uint64_t obase:37;
uint64_t reserved_0_2:3;
#else
uint64_t reserved_0_2:3;
uint64_t obase:37;
uint64_t osize:20;
uint64_t reserved_60_63:4;
#endif
} s;
struct cvmx_mixx_oring1_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_60_63:4;
uint64_t osize:20;
uint64_t reserved_36_39:4;
uint64_t obase:33;
uint64_t reserved_0_2:3;
#else
uint64_t reserved_0_2:3;
uint64_t obase:33;
uint64_t reserved_36_39:4;
uint64_t osize:20;
uint64_t reserved_60_63:4;
#endif
} cn52xx;
struct cvmx_mixx_oring1_cn52xx cn52xxp1;
struct cvmx_mixx_oring1_cn52xx cn56xx;
struct cvmx_mixx_oring1_cn52xx cn56xxp1;
struct cvmx_mixx_oring1_s cn61xx;
struct cvmx_mixx_oring1_s cn63xx;
struct cvmx_mixx_oring1_s cn63xxp1;
struct cvmx_mixx_oring1_s cn66xx;
struct cvmx_mixx_oring1_s cn68xx;
struct cvmx_mixx_oring1_s cn68xxp1;
};
union cvmx_mixx_oring2 {
uint64_t u64;
struct cvmx_mixx_oring2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_52_63:12;
uint64_t otlptr:20;
uint64_t reserved_20_31:12;
uint64_t odbell:20;
#else
uint64_t odbell:20;
uint64_t reserved_20_31:12;
uint64_t otlptr:20;
uint64_t reserved_52_63:12;
#endif
} s;
struct cvmx_mixx_oring2_s cn52xx;
struct cvmx_mixx_oring2_s cn52xxp1;
struct cvmx_mixx_oring2_s cn56xx;
struct cvmx_mixx_oring2_s cn56xxp1;
struct cvmx_mixx_oring2_s cn61xx;
struct cvmx_mixx_oring2_s cn63xx;
struct cvmx_mixx_oring2_s cn63xxp1;
struct cvmx_mixx_oring2_s cn66xx;
struct cvmx_mixx_oring2_s cn68xx;
struct cvmx_mixx_oring2_s cn68xxp1;
};
union cvmx_mixx_remcnt {
uint64_t u64;
struct cvmx_mixx_remcnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_52_63:12;
uint64_t iremcnt:20;
uint64_t reserved_20_31:12;
uint64_t oremcnt:20;
#else
uint64_t oremcnt:20;
uint64_t reserved_20_31:12;
uint64_t iremcnt:20;
uint64_t reserved_52_63:12;
#endif
} s;
struct cvmx_mixx_remcnt_s cn52xx;
struct cvmx_mixx_remcnt_s cn52xxp1;
struct cvmx_mixx_remcnt_s cn56xx;
struct cvmx_mixx_remcnt_s cn56xxp1;
struct cvmx_mixx_remcnt_s cn61xx;
struct cvmx_mixx_remcnt_s cn63xx;
struct cvmx_mixx_remcnt_s cn63xxp1;
struct cvmx_mixx_remcnt_s cn66xx;
struct cvmx_mixx_remcnt_s cn68xx;
struct cvmx_mixx_remcnt_s cn68xxp1;
};
union cvmx_mixx_tsctl {
uint64_t u64;
struct cvmx_mixx_tsctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_21_63:43;
uint64_t tsavl:5;
uint64_t reserved_13_15:3;
uint64_t tstot:5;
uint64_t reserved_5_7:3;
uint64_t tscnt:5;
#else
uint64_t tscnt:5;
uint64_t reserved_5_7:3;
uint64_t tstot:5;
uint64_t reserved_13_15:3;
uint64_t tsavl:5;
uint64_t reserved_21_63:43;
#endif
} s;
struct cvmx_mixx_tsctl_s cn61xx;
struct cvmx_mixx_tsctl_s cn63xx;
struct cvmx_mixx_tsctl_s cn63xxp1;
struct cvmx_mixx_tsctl_s cn66xx;
struct cvmx_mixx_tsctl_s cn68xx;
struct cvmx_mixx_tsctl_s cn68xxp1;
};
union cvmx_mixx_tstamp {
uint64_t u64;
struct cvmx_mixx_tstamp_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t tstamp:64;
#else
uint64_t tstamp:64;
#endif
} s;
struct cvmx_mixx_tstamp_s cn61xx;
struct cvmx_mixx_tstamp_s cn63xx;
struct cvmx_mixx_tstamp_s cn63xxp1;
struct cvmx_mixx_tstamp_s cn66xx;
struct cvmx_mixx_tstamp_s cn68xx;
struct cvmx_mixx_tstamp_s cn68xxp1;
};
#endif
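
A minimal usage sketch (not part of the original header): the bitfield unions above are normally filled by reading the raw 64-bit CSR into .u64 and then picking out the named fields. cvmx_read_csr() and the CVMX_MIXX_IRHWM(port) address macro are assumed to come from earlier in the SDK headers and are not shown in this excerpt.

/* Illustrative sketch only: decode the MIX input-ring high-water mark.
 * Assumes cvmx_read_csr() and CVMX_MIXX_IRHWM(port) are provided by the
 * surrounding SDK headers (not part of this excerpt). */
static inline uint64_t example_mix_irhwm(int port)
{
	union cvmx_mixx_irhwm irhwm;

	irhwm.u64 = cvmx_read_csr(CVMX_MIXX_IRHWM(port));
	return irhwm.s.irhwm;	/* input-ring entry high-water mark field */
}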

View file

@@ -0,0 +1,328 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_MPI_DEFS_H__
#define __CVMX_MPI_DEFS_H__
#define CVMX_MPI_CFG (CVMX_ADD_IO_SEG(0x0001070000001000ull))
#define CVMX_MPI_DATX(offset) (CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8)
#define CVMX_MPI_STS (CVMX_ADD_IO_SEG(0x0001070000001008ull))
#define CVMX_MPI_TX (CVMX_ADD_IO_SEG(0x0001070000001010ull))
union cvmx_mpi_cfg {
uint64_t u64;
struct cvmx_mpi_cfg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t clkdiv:13;
uint64_t csena3:1;
uint64_t csena2:1;
uint64_t csena1:1;
uint64_t csena0:1;
uint64_t cslate:1;
uint64_t tritx:1;
uint64_t idleclks:2;
uint64_t cshi:1;
uint64_t csena:1;
uint64_t int_ena:1;
uint64_t lsbfirst:1;
uint64_t wireor:1;
uint64_t clk_cont:1;
uint64_t idlelo:1;
uint64_t enable:1;
#else
uint64_t enable:1;
uint64_t idlelo:1;
uint64_t clk_cont:1;
uint64_t wireor:1;
uint64_t lsbfirst:1;
uint64_t int_ena:1;
uint64_t csena:1;
uint64_t cshi:1;
uint64_t idleclks:2;
uint64_t tritx:1;
uint64_t cslate:1;
uint64_t csena0:1;
uint64_t csena1:1;
uint64_t csena2:1;
uint64_t csena3:1;
uint64_t clkdiv:13;
uint64_t reserved_29_63:35;
#endif
} s;
struct cvmx_mpi_cfg_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t clkdiv:13;
uint64_t reserved_12_15:4;
uint64_t cslate:1;
uint64_t tritx:1;
uint64_t idleclks:2;
uint64_t cshi:1;
uint64_t csena:1;
uint64_t int_ena:1;
uint64_t lsbfirst:1;
uint64_t wireor:1;
uint64_t clk_cont:1;
uint64_t idlelo:1;
uint64_t enable:1;
#else
uint64_t enable:1;
uint64_t idlelo:1;
uint64_t clk_cont:1;
uint64_t wireor:1;
uint64_t lsbfirst:1;
uint64_t int_ena:1;
uint64_t csena:1;
uint64_t cshi:1;
uint64_t idleclks:2;
uint64_t tritx:1;
uint64_t cslate:1;
uint64_t reserved_12_15:4;
uint64_t clkdiv:13;
uint64_t reserved_29_63:35;
#endif
} cn30xx;
struct cvmx_mpi_cfg_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t clkdiv:13;
uint64_t reserved_11_15:5;
uint64_t tritx:1;
uint64_t idleclks:2;
uint64_t cshi:1;
uint64_t csena:1;
uint64_t int_ena:1;
uint64_t lsbfirst:1;
uint64_t wireor:1;
uint64_t clk_cont:1;
uint64_t idlelo:1;
uint64_t enable:1;
#else
uint64_t enable:1;
uint64_t idlelo:1;
uint64_t clk_cont:1;
uint64_t wireor:1;
uint64_t lsbfirst:1;
uint64_t int_ena:1;
uint64_t csena:1;
uint64_t cshi:1;
uint64_t idleclks:2;
uint64_t tritx:1;
uint64_t reserved_11_15:5;
uint64_t clkdiv:13;
uint64_t reserved_29_63:35;
#endif
} cn31xx;
struct cvmx_mpi_cfg_cn30xx cn50xx;
struct cvmx_mpi_cfg_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t clkdiv:13;
uint64_t reserved_14_15:2;
uint64_t csena1:1;
uint64_t csena0:1;
uint64_t cslate:1;
uint64_t tritx:1;
uint64_t idleclks:2;
uint64_t cshi:1;
uint64_t reserved_6_6:1;
uint64_t int_ena:1;
uint64_t lsbfirst:1;
uint64_t wireor:1;
uint64_t clk_cont:1;
uint64_t idlelo:1;
uint64_t enable:1;
#else
uint64_t enable:1;
uint64_t idlelo:1;
uint64_t clk_cont:1;
uint64_t wireor:1;
uint64_t lsbfirst:1;
uint64_t int_ena:1;
uint64_t reserved_6_6:1;
uint64_t cshi:1;
uint64_t idleclks:2;
uint64_t tritx:1;
uint64_t cslate:1;
uint64_t csena0:1;
uint64_t csena1:1;
uint64_t reserved_14_15:2;
uint64_t clkdiv:13;
uint64_t reserved_29_63:35;
#endif
} cn61xx;
struct cvmx_mpi_cfg_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
uint64_t clkdiv:13;
uint64_t csena3:1;
uint64_t csena2:1;
uint64_t reserved_12_13:2;
uint64_t cslate:1;
uint64_t tritx:1;
uint64_t idleclks:2;
uint64_t cshi:1;
uint64_t reserved_6_6:1;
uint64_t int_ena:1;
uint64_t lsbfirst:1;
uint64_t wireor:1;
uint64_t clk_cont:1;
uint64_t idlelo:1;
uint64_t enable:1;
#else
uint64_t enable:1;
uint64_t idlelo:1;
uint64_t clk_cont:1;
uint64_t wireor:1;
uint64_t lsbfirst:1;
uint64_t int_ena:1;
uint64_t reserved_6_6:1;
uint64_t cshi:1;
uint64_t idleclks:2;
uint64_t tritx:1;
uint64_t cslate:1;
uint64_t reserved_12_13:2;
uint64_t csena2:1;
uint64_t csena3:1;
uint64_t clkdiv:13;
uint64_t reserved_29_63:35;
#endif
} cn66xx;
struct cvmx_mpi_cfg_cn61xx cnf71xx;
};
union cvmx_mpi_datx {
uint64_t u64;
struct cvmx_mpi_datx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t data:8;
#else
uint64_t data:8;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_mpi_datx_s cn30xx;
struct cvmx_mpi_datx_s cn31xx;
struct cvmx_mpi_datx_s cn50xx;
struct cvmx_mpi_datx_s cn61xx;
struct cvmx_mpi_datx_s cn66xx;
struct cvmx_mpi_datx_s cnf71xx;
};
union cvmx_mpi_sts {
uint64_t u64;
struct cvmx_mpi_sts_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t rxnum:5;
uint64_t reserved_1_7:7;
uint64_t busy:1;
#else
uint64_t busy:1;
uint64_t reserved_1_7:7;
uint64_t rxnum:5;
uint64_t reserved_13_63:51;
#endif
} s;
struct cvmx_mpi_sts_s cn30xx;
struct cvmx_mpi_sts_s cn31xx;
struct cvmx_mpi_sts_s cn50xx;
struct cvmx_mpi_sts_s cn61xx;
struct cvmx_mpi_sts_s cn66xx;
struct cvmx_mpi_sts_s cnf71xx;
};
union cvmx_mpi_tx {
uint64_t u64;
struct cvmx_mpi_tx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_22_63:42;
uint64_t csid:2;
uint64_t reserved_17_19:3;
uint64_t leavecs:1;
uint64_t reserved_13_15:3;
uint64_t txnum:5;
uint64_t reserved_5_7:3;
uint64_t totnum:5;
#else
uint64_t totnum:5;
uint64_t reserved_5_7:3;
uint64_t txnum:5;
uint64_t reserved_13_15:3;
uint64_t leavecs:1;
uint64_t reserved_17_19:3;
uint64_t csid:2;
uint64_t reserved_22_63:42;
#endif
} s;
struct cvmx_mpi_tx_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t leavecs:1;
uint64_t reserved_13_15:3;
uint64_t txnum:5;
uint64_t reserved_5_7:3;
uint64_t totnum:5;
#else
uint64_t totnum:5;
uint64_t reserved_5_7:3;
uint64_t txnum:5;
uint64_t reserved_13_15:3;
uint64_t leavecs:1;
uint64_t reserved_17_63:47;
#endif
} cn30xx;
struct cvmx_mpi_tx_cn30xx cn31xx;
struct cvmx_mpi_tx_cn30xx cn50xx;
struct cvmx_mpi_tx_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_21_63:43;
uint64_t csid:1;
uint64_t reserved_17_19:3;
uint64_t leavecs:1;
uint64_t reserved_13_15:3;
uint64_t txnum:5;
uint64_t reserved_5_7:3;
uint64_t totnum:5;
#else
uint64_t totnum:5;
uint64_t reserved_5_7:3;
uint64_t txnum:5;
uint64_t reserved_13_15:3;
uint64_t leavecs:1;
uint64_t reserved_17_19:3;
uint64_t csid:1;
uint64_t reserved_21_63:43;
#endif
} cn61xx;
struct cvmx_mpi_tx_s cn66xx;
struct cvmx_mpi_tx_cn61xx cnf71xx;
};
#endif
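
A minimal usage sketch (not part of the original header), showing how the CVMX_MPI_CFG address and union cvmx_mpi_cfg defined above are typically combined; cvmx_read_csr()/cvmx_write_csr() are assumed from the wider Octeon SDK.

/* Illustrative sketch only: set the SPI/MPI clock divider and enable the
 * interface. Only CVMX_MPI_CFG and union cvmx_mpi_cfg come from above;
 * the CSR accessors are assumed from the rest of the SDK. */
static inline void example_mpi_enable(unsigned int clkdiv)
{
	union cvmx_mpi_cfg mpi_cfg;

	mpi_cfg.u64 = cvmx_read_csr(CVMX_MPI_CFG);
	mpi_cfg.s.clkdiv = clkdiv & 0x1fff;	/* 13-bit divider field */
	mpi_cfg.s.enable = 1;
	cvmx_write_csr(CVMX_MPI_CFG, mpi_cfg.u64);
}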

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,61 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Packet buffer defines.
*/
#ifndef __CVMX_PACKET_H__
#define __CVMX_PACKET_H__
/**
* This structure defines a buffer pointer on Octeon
*/
union cvmx_buf_ptr {
void *ptr;
uint64_t u64;
struct {
/* if set, invert the "free" pick of the overall
* packet. HW always sets this bit to 0 on inbound
* packet */
uint64_t i:1;
/* Indicates the amount to back up to get to the
* buffer start in cache lines. In most cases this is
* less than one complete cache line, so the value is
* zero */
uint64_t back:4;
/* The pool that the buffer came from / goes to */
uint64_t pool:3;
/* The size of the segment pointed to by addr (in bytes) */
uint64_t size:16;
/* Pointer to the first byte of the data, NOT buffer */
uint64_t addr:40;
} s;
};
#endif /* __CVMX_PACKET_H__ */
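
A minimal usage sketch (not part of the original header): the field comments above imply the start of the underlying buffer can be recovered from addr and back. The 128-byte (1 << 7) Octeon cache-line size used here is an assumption of this sketch.

/* Illustrative sketch only: 'back' counts cache lines from the data
 * pointer back to the buffer start, so shift down, subtract, and shift
 * back up (128-byte cache lines assumed). */
static inline uint64_t example_buf_start(union cvmx_buf_ptr ptr)
{
	return ((ptr.s.addr >> 7) - ptr.s.back) << 7;
}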

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,808 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_PCSXX_DEFS_H__
#define __CVMX_PCSXX_DEFS_H__
static inline uint64_t CVMX_PCSXX_10GBX_STATUS_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000828ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_BIST_STATUS_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000870ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_BIT_LOCK_STATUS_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000850ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_CONTROL1_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000800ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_CONTROL2_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000818ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_INT_EN_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000860ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_INT_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000858ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_LOG_ANL_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000868ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_MISC_CTL_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000848ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_RX_SYNC_STATES_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000838ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_SPD_ABIL_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000810ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_STATUS1_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000808ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_STATUS2_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000820ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_TX_RX_POLARITY_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000840ull) + (block_id) * 0x1000000ull;
}
static inline uint64_t CVMX_PCSXX_TX_RX_STATES_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x8000000ull;
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800B0000830ull) + (block_id) * 0x1000000ull;
}
union cvmx_pcsxx_10gbx_status_reg {
uint64_t u64;
struct cvmx_pcsxx_10gbx_status_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t alignd:1;
uint64_t pattst:1;
uint64_t reserved_4_10:7;
uint64_t l3sync:1;
uint64_t l2sync:1;
uint64_t l1sync:1;
uint64_t l0sync:1;
#else
uint64_t l0sync:1;
uint64_t l1sync:1;
uint64_t l2sync:1;
uint64_t l3sync:1;
uint64_t reserved_4_10:7;
uint64_t pattst:1;
uint64_t alignd:1;
uint64_t reserved_13_63:51;
#endif
} s;
struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
struct cvmx_pcsxx_10gbx_status_reg_s cn61xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn63xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn63xxp1;
struct cvmx_pcsxx_10gbx_status_reg_s cn66xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn68xx;
struct cvmx_pcsxx_10gbx_status_reg_s cn68xxp1;
};
union cvmx_pcsxx_bist_status_reg {
uint64_t u64;
struct cvmx_pcsxx_bist_status_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t bist_status:1;
#else
uint64_t bist_status:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_pcsxx_bist_status_reg_s cn52xx;
struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
struct cvmx_pcsxx_bist_status_reg_s cn56xx;
struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
struct cvmx_pcsxx_bist_status_reg_s cn61xx;
struct cvmx_pcsxx_bist_status_reg_s cn63xx;
struct cvmx_pcsxx_bist_status_reg_s cn63xxp1;
struct cvmx_pcsxx_bist_status_reg_s cn66xx;
struct cvmx_pcsxx_bist_status_reg_s cn68xx;
struct cvmx_pcsxx_bist_status_reg_s cn68xxp1;
};
union cvmx_pcsxx_bit_lock_status_reg {
uint64_t u64;
struct cvmx_pcsxx_bit_lock_status_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t bitlck3:1;
uint64_t bitlck2:1;
uint64_t bitlck1:1;
uint64_t bitlck0:1;
#else
uint64_t bitlck0:1;
uint64_t bitlck1:1;
uint64_t bitlck2:1;
uint64_t bitlck3:1;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
struct cvmx_pcsxx_bit_lock_status_reg_s cn61xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn63xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn63xxp1;
struct cvmx_pcsxx_bit_lock_status_reg_s cn66xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn68xx;
struct cvmx_pcsxx_bit_lock_status_reg_s cn68xxp1;
};
union cvmx_pcsxx_control1_reg {
uint64_t u64;
struct cvmx_pcsxx_control1_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t reset:1;
uint64_t loopbck1:1;
uint64_t spdsel1:1;
uint64_t reserved_12_12:1;
uint64_t lo_pwr:1;
uint64_t reserved_7_10:4;
uint64_t spdsel0:1;
uint64_t spd:4;
uint64_t reserved_0_1:2;
#else
uint64_t reserved_0_1:2;
uint64_t spd:4;
uint64_t spdsel0:1;
uint64_t reserved_7_10:4;
uint64_t lo_pwr:1;
uint64_t reserved_12_12:1;
uint64_t spdsel1:1;
uint64_t loopbck1:1;
uint64_t reset:1;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_pcsxx_control1_reg_s cn52xx;
struct cvmx_pcsxx_control1_reg_s cn52xxp1;
struct cvmx_pcsxx_control1_reg_s cn56xx;
struct cvmx_pcsxx_control1_reg_s cn56xxp1;
struct cvmx_pcsxx_control1_reg_s cn61xx;
struct cvmx_pcsxx_control1_reg_s cn63xx;
struct cvmx_pcsxx_control1_reg_s cn63xxp1;
struct cvmx_pcsxx_control1_reg_s cn66xx;
struct cvmx_pcsxx_control1_reg_s cn68xx;
struct cvmx_pcsxx_control1_reg_s cn68xxp1;
};
union cvmx_pcsxx_control2_reg {
uint64_t u64;
struct cvmx_pcsxx_control2_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
uint64_t type:2;
#else
uint64_t type:2;
uint64_t reserved_2_63:62;
#endif
} s;
struct cvmx_pcsxx_control2_reg_s cn52xx;
struct cvmx_pcsxx_control2_reg_s cn52xxp1;
struct cvmx_pcsxx_control2_reg_s cn56xx;
struct cvmx_pcsxx_control2_reg_s cn56xxp1;
struct cvmx_pcsxx_control2_reg_s cn61xx;
struct cvmx_pcsxx_control2_reg_s cn63xx;
struct cvmx_pcsxx_control2_reg_s cn63xxp1;
struct cvmx_pcsxx_control2_reg_s cn66xx;
struct cvmx_pcsxx_control2_reg_s cn68xx;
struct cvmx_pcsxx_control2_reg_s cn68xxp1;
};
union cvmx_pcsxx_int_en_reg {
uint64_t u64;
struct cvmx_pcsxx_int_en_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t dbg_sync_en:1;
uint64_t algnlos_en:1;
uint64_t synlos_en:1;
uint64_t bitlckls_en:1;
uint64_t rxsynbad_en:1;
uint64_t rxbad_en:1;
uint64_t txflt_en:1;
#else
uint64_t txflt_en:1;
uint64_t rxbad_en:1;
uint64_t rxsynbad_en:1;
uint64_t bitlckls_en:1;
uint64_t synlos_en:1;
uint64_t algnlos_en:1;
uint64_t dbg_sync_en:1;
uint64_t reserved_7_63:57;
#endif
} s;
struct cvmx_pcsxx_int_en_reg_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t algnlos_en:1;
uint64_t synlos_en:1;
uint64_t bitlckls_en:1;
uint64_t rxsynbad_en:1;
uint64_t rxbad_en:1;
uint64_t txflt_en:1;
#else
uint64_t txflt_en:1;
uint64_t rxbad_en:1;
uint64_t rxsynbad_en:1;
uint64_t bitlckls_en:1;
uint64_t synlos_en:1;
uint64_t algnlos_en:1;
uint64_t reserved_6_63:58;
#endif
} cn52xx;
struct cvmx_pcsxx_int_en_reg_cn52xx cn52xxp1;
struct cvmx_pcsxx_int_en_reg_cn52xx cn56xx;
struct cvmx_pcsxx_int_en_reg_cn52xx cn56xxp1;
struct cvmx_pcsxx_int_en_reg_s cn61xx;
struct cvmx_pcsxx_int_en_reg_s cn63xx;
struct cvmx_pcsxx_int_en_reg_s cn63xxp1;
struct cvmx_pcsxx_int_en_reg_s cn66xx;
struct cvmx_pcsxx_int_en_reg_s cn68xx;
struct cvmx_pcsxx_int_en_reg_s cn68xxp1;
};
union cvmx_pcsxx_int_reg {
uint64_t u64;
struct cvmx_pcsxx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t dbg_sync:1;
uint64_t algnlos:1;
uint64_t synlos:1;
uint64_t bitlckls:1;
uint64_t rxsynbad:1;
uint64_t rxbad:1;
uint64_t txflt:1;
#else
uint64_t txflt:1;
uint64_t rxbad:1;
uint64_t rxsynbad:1;
uint64_t bitlckls:1;
uint64_t synlos:1;
uint64_t algnlos:1;
uint64_t dbg_sync:1;
uint64_t reserved_7_63:57;
#endif
} s;
struct cvmx_pcsxx_int_reg_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t algnlos:1;
uint64_t synlos:1;
uint64_t bitlckls:1;
uint64_t rxsynbad:1;
uint64_t rxbad:1;
uint64_t txflt:1;
#else
uint64_t txflt:1;
uint64_t rxbad:1;
uint64_t rxsynbad:1;
uint64_t bitlckls:1;
uint64_t synlos:1;
uint64_t algnlos:1;
uint64_t reserved_6_63:58;
#endif
} cn52xx;
struct cvmx_pcsxx_int_reg_cn52xx cn52xxp1;
struct cvmx_pcsxx_int_reg_cn52xx cn56xx;
struct cvmx_pcsxx_int_reg_cn52xx cn56xxp1;
struct cvmx_pcsxx_int_reg_s cn61xx;
struct cvmx_pcsxx_int_reg_s cn63xx;
struct cvmx_pcsxx_int_reg_s cn63xxp1;
struct cvmx_pcsxx_int_reg_s cn66xx;
struct cvmx_pcsxx_int_reg_s cn68xx;
struct cvmx_pcsxx_int_reg_s cn68xxp1;
};
union cvmx_pcsxx_log_anl_reg {
uint64_t u64;
struct cvmx_pcsxx_log_anl_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t enc_mode:1;
uint64_t drop_ln:2;
uint64_t lafifovfl:1;
uint64_t la_en:1;
uint64_t pkt_sz:2;
#else
uint64_t pkt_sz:2;
uint64_t la_en:1;
uint64_t lafifovfl:1;
uint64_t drop_ln:2;
uint64_t enc_mode:1;
uint64_t reserved_7_63:57;
#endif
} s;
struct cvmx_pcsxx_log_anl_reg_s cn52xx;
struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
struct cvmx_pcsxx_log_anl_reg_s cn56xx;
struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
struct cvmx_pcsxx_log_anl_reg_s cn61xx;
struct cvmx_pcsxx_log_anl_reg_s cn63xx;
struct cvmx_pcsxx_log_anl_reg_s cn63xxp1;
struct cvmx_pcsxx_log_anl_reg_s cn66xx;
struct cvmx_pcsxx_log_anl_reg_s cn68xx;
struct cvmx_pcsxx_log_anl_reg_s cn68xxp1;
};
union cvmx_pcsxx_misc_ctl_reg {
uint64_t u64;
struct cvmx_pcsxx_misc_ctl_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t tx_swap:1;
uint64_t rx_swap:1;
uint64_t xaui:1;
uint64_t gmxeno:1;
#else
uint64_t gmxeno:1;
uint64_t xaui:1;
uint64_t rx_swap:1;
uint64_t tx_swap:1;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
struct cvmx_pcsxx_misc_ctl_reg_s cn61xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn63xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn63xxp1;
struct cvmx_pcsxx_misc_ctl_reg_s cn66xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn68xx;
struct cvmx_pcsxx_misc_ctl_reg_s cn68xxp1;
};
union cvmx_pcsxx_rx_sync_states_reg {
uint64_t u64;
struct cvmx_pcsxx_rx_sync_states_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t sync3st:4;
uint64_t sync2st:4;
uint64_t sync1st:4;
uint64_t sync0st:4;
#else
uint64_t sync0st:4;
uint64_t sync1st:4;
uint64_t sync2st:4;
uint64_t sync3st:4;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
struct cvmx_pcsxx_rx_sync_states_reg_s cn61xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn63xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn63xxp1;
struct cvmx_pcsxx_rx_sync_states_reg_s cn66xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn68xx;
struct cvmx_pcsxx_rx_sync_states_reg_s cn68xxp1;
};
union cvmx_pcsxx_spd_abil_reg {
uint64_t u64;
struct cvmx_pcsxx_spd_abil_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
uint64_t tenpasst:1;
uint64_t tengb:1;
#else
uint64_t tengb:1;
uint64_t tenpasst:1;
uint64_t reserved_2_63:62;
#endif
} s;
struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
struct cvmx_pcsxx_spd_abil_reg_s cn61xx;
struct cvmx_pcsxx_spd_abil_reg_s cn63xx;
struct cvmx_pcsxx_spd_abil_reg_s cn63xxp1;
struct cvmx_pcsxx_spd_abil_reg_s cn66xx;
struct cvmx_pcsxx_spd_abil_reg_s cn68xx;
struct cvmx_pcsxx_spd_abil_reg_s cn68xxp1;
};
union cvmx_pcsxx_status1_reg {
uint64_t u64;
struct cvmx_pcsxx_status1_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t flt:1;
uint64_t reserved_3_6:4;
uint64_t rcv_lnk:1;
uint64_t lpable:1;
uint64_t reserved_0_0:1;
#else
uint64_t reserved_0_0:1;
uint64_t lpable:1;
uint64_t rcv_lnk:1;
uint64_t reserved_3_6:4;
uint64_t flt:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_pcsxx_status1_reg_s cn52xx;
struct cvmx_pcsxx_status1_reg_s cn52xxp1;
struct cvmx_pcsxx_status1_reg_s cn56xx;
struct cvmx_pcsxx_status1_reg_s cn56xxp1;
struct cvmx_pcsxx_status1_reg_s cn61xx;
struct cvmx_pcsxx_status1_reg_s cn63xx;
struct cvmx_pcsxx_status1_reg_s cn63xxp1;
struct cvmx_pcsxx_status1_reg_s cn66xx;
struct cvmx_pcsxx_status1_reg_s cn68xx;
struct cvmx_pcsxx_status1_reg_s cn68xxp1;
};
union cvmx_pcsxx_status2_reg {
uint64_t u64;
struct cvmx_pcsxx_status2_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t dev:2;
uint64_t reserved_12_13:2;
uint64_t xmtflt:1;
uint64_t rcvflt:1;
uint64_t reserved_3_9:7;
uint64_t tengb_w:1;
uint64_t tengb_x:1;
uint64_t tengb_r:1;
#else
uint64_t tengb_r:1;
uint64_t tengb_x:1;
uint64_t tengb_w:1;
uint64_t reserved_3_9:7;
uint64_t rcvflt:1;
uint64_t xmtflt:1;
uint64_t reserved_12_13:2;
uint64_t dev:2;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_pcsxx_status2_reg_s cn52xx;
struct cvmx_pcsxx_status2_reg_s cn52xxp1;
struct cvmx_pcsxx_status2_reg_s cn56xx;
struct cvmx_pcsxx_status2_reg_s cn56xxp1;
struct cvmx_pcsxx_status2_reg_s cn61xx;
struct cvmx_pcsxx_status2_reg_s cn63xx;
struct cvmx_pcsxx_status2_reg_s cn63xxp1;
struct cvmx_pcsxx_status2_reg_s cn66xx;
struct cvmx_pcsxx_status2_reg_s cn68xx;
struct cvmx_pcsxx_status2_reg_s cn68xxp1;
};
union cvmx_pcsxx_tx_rx_polarity_reg {
uint64_t u64;
struct cvmx_pcsxx_tx_rx_polarity_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
uint64_t xor_rxplrt:4;
uint64_t xor_txplrt:4;
uint64_t rxplrt:1;
uint64_t txplrt:1;
#else
uint64_t txplrt:1;
uint64_t rxplrt:1;
uint64_t xor_txplrt:4;
uint64_t xor_rxplrt:4;
uint64_t reserved_10_63:54;
#endif
} s;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
uint64_t rxplrt:1;
uint64_t txplrt:1;
#else
uint64_t txplrt:1;
uint64_t rxplrt:1;
uint64_t reserved_2_63:62;
#endif
} cn52xxp1;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn61xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xxp1;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn66xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xxp1;
};
union cvmx_pcsxx_tx_rx_states_reg {
uint64_t u64;
struct cvmx_pcsxx_tx_rx_states_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
uint64_t term_err:1;
uint64_t syn3bad:1;
uint64_t syn2bad:1;
uint64_t syn1bad:1;
uint64_t syn0bad:1;
uint64_t rxbad:1;
uint64_t algn_st:3;
uint64_t rx_st:2;
uint64_t tx_st:3;
#else
uint64_t tx_st:3;
uint64_t rx_st:2;
uint64_t algn_st:3;
uint64_t rxbad:1;
uint64_t syn0bad:1;
uint64_t syn1bad:1;
uint64_t syn2bad:1;
uint64_t syn3bad:1;
uint64_t term_err:1;
uint64_t reserved_14_63:50;
#endif
} s;
struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t syn3bad:1;
uint64_t syn2bad:1;
uint64_t syn1bad:1;
uint64_t syn0bad:1;
uint64_t rxbad:1;
uint64_t algn_st:3;
uint64_t rx_st:2;
uint64_t tx_st:3;
#else
uint64_t tx_st:3;
uint64_t rx_st:2;
uint64_t algn_st:3;
uint64_t rxbad:1;
uint64_t syn0bad:1;
uint64_t syn1bad:1;
uint64_t syn2bad:1;
uint64_t syn3bad:1;
uint64_t reserved_13_63:51;
#endif
} cn52xxp1;
struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
struct cvmx_pcsxx_tx_rx_states_reg_s cn61xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn63xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn63xxp1;
struct cvmx_pcsxx_tx_rx_states_reg_s cn66xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn68xx;
struct cvmx_pcsxx_tx_rx_states_reg_s cn68xxp1;
};
#endif
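
A minimal usage sketch (not part of the original header), combining the CVMX_PCSXX_STATUS1_REG() address helper and union cvmx_pcsxx_status1_reg defined above; cvmx_read_csr() is assumed from the wider SDK.

/* Illustrative sketch only: report whether the XAUI receive link for one
 * PCS block is up, per the rcv_lnk bit of STATUS1. */
static inline int example_pcsxx_link_up(unsigned long block)
{
	union cvmx_pcsxx_status1_reg status1;

	status1.u64 = cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(block));
	return status1.s.rcv_lnk != 0;
}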

View file

@@ -0,0 +1,795 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_PEMX_DEFS_H__
#define __CVMX_PEMX_DEFS_H__
#define CVMX_PEMX_BAR1_INDEXX(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C00000A8ull) + (((offset) & 15) + ((block_id) & 1) * 0x200000ull) * 8)
#define CVMX_PEMX_BAR2_MASK(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000130ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_BAR_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000128ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000018ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000420ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000030ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000028ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000098ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000000ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000008ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C00000A0ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000020ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_INB_READ_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000138ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_INT_ENB(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000410ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_INT_ENB_INT(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000418ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_INT_SUM(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000408ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000080ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000088ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000090ull) + ((block_id) & 1) * 0x1000000ull)
#define CVMX_PEMX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C0000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16)
#define CVMX_PEMX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C0000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x100000ull) * 16)
#define CVMX_PEMX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C0000038ull) + ((block_id) & 1) * 0x1000000ull)
union cvmx_pemx_bar1_indexx {
uint64_t u64;
struct cvmx_pemx_bar1_indexx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t addr_idx:16;
uint64_t ca:1;
uint64_t end_swp:2;
uint64_t addr_v:1;
#else
uint64_t addr_v:1;
uint64_t end_swp:2;
uint64_t ca:1;
uint64_t addr_idx:16;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_pemx_bar1_indexx_s cn61xx;
struct cvmx_pemx_bar1_indexx_s cn63xx;
struct cvmx_pemx_bar1_indexx_s cn63xxp1;
struct cvmx_pemx_bar1_indexx_s cn66xx;
struct cvmx_pemx_bar1_indexx_s cn68xx;
struct cvmx_pemx_bar1_indexx_s cn68xxp1;
struct cvmx_pemx_bar1_indexx_s cnf71xx;
};
union cvmx_pemx_bar2_mask {
uint64_t u64;
struct cvmx_pemx_bar2_mask_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_38_63:26;
uint64_t mask:35;
uint64_t reserved_0_2:3;
#else
uint64_t reserved_0_2:3;
uint64_t mask:35;
uint64_t reserved_38_63:26;
#endif
} s;
struct cvmx_pemx_bar2_mask_s cn61xx;
struct cvmx_pemx_bar2_mask_s cn66xx;
struct cvmx_pemx_bar2_mask_s cn68xx;
struct cvmx_pemx_bar2_mask_s cn68xxp1;
struct cvmx_pemx_bar2_mask_s cnf71xx;
};
union cvmx_pemx_bar_ctl {
uint64_t u64;
struct cvmx_pemx_bar_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
uint64_t bar1_siz:3;
uint64_t bar2_enb:1;
uint64_t bar2_esx:2;
uint64_t bar2_cax:1;
#else
uint64_t bar2_cax:1;
uint64_t bar2_esx:2;
uint64_t bar2_enb:1;
uint64_t bar1_siz:3;
uint64_t reserved_7_63:57;
#endif
} s;
struct cvmx_pemx_bar_ctl_s cn61xx;
struct cvmx_pemx_bar_ctl_s cn63xx;
struct cvmx_pemx_bar_ctl_s cn63xxp1;
struct cvmx_pemx_bar_ctl_s cn66xx;
struct cvmx_pemx_bar_ctl_s cn68xx;
struct cvmx_pemx_bar_ctl_s cn68xxp1;
struct cvmx_pemx_bar_ctl_s cnf71xx;
};
union cvmx_pemx_bist_status {
uint64_t u64;
struct cvmx_pemx_bist_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t retry:1;
uint64_t rqdata0:1;
uint64_t rqdata1:1;
uint64_t rqdata2:1;
uint64_t rqdata3:1;
uint64_t rqhdr1:1;
uint64_t rqhdr0:1;
uint64_t sot:1;
#else
uint64_t sot:1;
uint64_t rqhdr0:1;
uint64_t rqhdr1:1;
uint64_t rqdata3:1;
uint64_t rqdata2:1;
uint64_t rqdata1:1;
uint64_t rqdata0:1;
uint64_t retry:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_pemx_bist_status_s cn61xx;
struct cvmx_pemx_bist_status_s cn63xx;
struct cvmx_pemx_bist_status_s cn63xxp1;
struct cvmx_pemx_bist_status_s cn66xx;
struct cvmx_pemx_bist_status_s cn68xx;
struct cvmx_pemx_bist_status_s cn68xxp1;
struct cvmx_pemx_bist_status_s cnf71xx;
};
union cvmx_pemx_bist_status2 {
uint64_t u64;
struct cvmx_pemx_bist_status2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
uint64_t e2p_cpl:1;
uint64_t e2p_n:1;
uint64_t e2p_p:1;
uint64_t peai_p2e:1;
uint64_t pef_tpf1:1;
uint64_t pef_tpf0:1;
uint64_t pef_tnf:1;
uint64_t pef_tcf1:1;
uint64_t pef_tc0:1;
uint64_t ppf:1;
#else
uint64_t ppf:1;
uint64_t pef_tc0:1;
uint64_t pef_tcf1:1;
uint64_t pef_tnf:1;
uint64_t pef_tpf0:1;
uint64_t pef_tpf1:1;
uint64_t peai_p2e:1;
uint64_t e2p_p:1;
uint64_t e2p_n:1;
uint64_t e2p_cpl:1;
uint64_t reserved_10_63:54;
#endif
} s;
struct cvmx_pemx_bist_status2_s cn61xx;
struct cvmx_pemx_bist_status2_s cn63xx;
struct cvmx_pemx_bist_status2_s cn63xxp1;
struct cvmx_pemx_bist_status2_s cn66xx;
struct cvmx_pemx_bist_status2_s cn68xx;
struct cvmx_pemx_bist_status2_s cn68xxp1;
struct cvmx_pemx_bist_status2_s cnf71xx;
};
union cvmx_pemx_cfg_rd {
uint64_t u64;
struct cvmx_pemx_cfg_rd_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t data:32;
uint64_t addr:32;
#else
uint64_t addr:32;
uint64_t data:32;
#endif
} s;
struct cvmx_pemx_cfg_rd_s cn61xx;
struct cvmx_pemx_cfg_rd_s cn63xx;
struct cvmx_pemx_cfg_rd_s cn63xxp1;
struct cvmx_pemx_cfg_rd_s cn66xx;
struct cvmx_pemx_cfg_rd_s cn68xx;
struct cvmx_pemx_cfg_rd_s cn68xxp1;
struct cvmx_pemx_cfg_rd_s cnf71xx;
};
union cvmx_pemx_cfg_wr {
uint64_t u64;
struct cvmx_pemx_cfg_wr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t data:32;
uint64_t addr:32;
#else
uint64_t addr:32;
uint64_t data:32;
#endif
} s;
struct cvmx_pemx_cfg_wr_s cn61xx;
struct cvmx_pemx_cfg_wr_s cn63xx;
struct cvmx_pemx_cfg_wr_s cn63xxp1;
struct cvmx_pemx_cfg_wr_s cn66xx;
struct cvmx_pemx_cfg_wr_s cn68xx;
struct cvmx_pemx_cfg_wr_s cn68xxp1;
struct cvmx_pemx_cfg_wr_s cnf71xx;
};
union cvmx_pemx_cpl_lut_valid {
uint64_t u64;
struct cvmx_pemx_cpl_lut_valid_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t tag:32;
#else
uint64_t tag:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_pemx_cpl_lut_valid_s cn61xx;
struct cvmx_pemx_cpl_lut_valid_s cn63xx;
struct cvmx_pemx_cpl_lut_valid_s cn63xxp1;
struct cvmx_pemx_cpl_lut_valid_s cn66xx;
struct cvmx_pemx_cpl_lut_valid_s cn68xx;
struct cvmx_pemx_cpl_lut_valid_s cn68xxp1;
struct cvmx_pemx_cpl_lut_valid_s cnf71xx;
};
union cvmx_pemx_ctl_status {
uint64_t u64;
struct cvmx_pemx_ctl_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_48_63:16;
uint64_t auto_sd:1;
uint64_t dnum:5;
uint64_t pbus:8;
uint64_t reserved_32_33:2;
uint64_t cfg_rtry:16;
uint64_t reserved_12_15:4;
uint64_t pm_xtoff:1;
uint64_t pm_xpme:1;
uint64_t ob_p_cmd:1;
uint64_t reserved_7_8:2;
uint64_t nf_ecrc:1;
uint64_t dly_one:1;
uint64_t lnk_enb:1;
uint64_t ro_ctlp:1;
uint64_t fast_lm:1;
uint64_t inv_ecrc:1;
uint64_t inv_lcrc:1;
#else
uint64_t inv_lcrc:1;
uint64_t inv_ecrc:1;
uint64_t fast_lm:1;
uint64_t ro_ctlp:1;
uint64_t lnk_enb:1;
uint64_t dly_one:1;
uint64_t nf_ecrc:1;
uint64_t reserved_7_8:2;
uint64_t ob_p_cmd:1;
uint64_t pm_xpme:1;
uint64_t pm_xtoff:1;
uint64_t reserved_12_15:4;
uint64_t cfg_rtry:16;
uint64_t reserved_32_33:2;
uint64_t pbus:8;
uint64_t dnum:5;
uint64_t auto_sd:1;
uint64_t reserved_48_63:16;
#endif
} s;
struct cvmx_pemx_ctl_status_s cn61xx;
struct cvmx_pemx_ctl_status_s cn63xx;
struct cvmx_pemx_ctl_status_s cn63xxp1;
struct cvmx_pemx_ctl_status_s cn66xx;
struct cvmx_pemx_ctl_status_s cn68xx;
struct cvmx_pemx_ctl_status_s cn68xxp1;
struct cvmx_pemx_ctl_status_s cnf71xx;
};
union cvmx_pemx_dbg_info {
uint64_t u64;
struct cvmx_pemx_dbg_info_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63:33;
uint64_t ecrc_e:1;
uint64_t rawwpp:1;
uint64_t racpp:1;
uint64_t ramtlp:1;
uint64_t rarwdns:1;
uint64_t caar:1;
uint64_t racca:1;
uint64_t racur:1;
uint64_t rauc:1;
uint64_t rqo:1;
uint64_t fcuv:1;
uint64_t rpe:1;
uint64_t fcpvwt:1;
uint64_t dpeoosd:1;
uint64_t rtwdle:1;
uint64_t rdwdle:1;
uint64_t mre:1;
uint64_t rte:1;
uint64_t acto:1;
uint64_t rvdm:1;
uint64_t rumep:1;
uint64_t rptamrc:1;
uint64_t rpmerc:1;
uint64_t rfemrc:1;
uint64_t rnfemrc:1;
uint64_t rcemrc:1;
uint64_t rpoison:1;
uint64_t recrce:1;
uint64_t rtlplle:1;
uint64_t rtlpmal:1;
uint64_t spoison:1;
#else
uint64_t spoison:1;
uint64_t rtlpmal:1;
uint64_t rtlplle:1;
uint64_t recrce:1;
uint64_t rpoison:1;
uint64_t rcemrc:1;
uint64_t rnfemrc:1;
uint64_t rfemrc:1;
uint64_t rpmerc:1;
uint64_t rptamrc:1;
uint64_t rumep:1;
uint64_t rvdm:1;
uint64_t acto:1;
uint64_t rte:1;
uint64_t mre:1;
uint64_t rdwdle:1;
uint64_t rtwdle:1;
uint64_t dpeoosd:1;
uint64_t fcpvwt:1;
uint64_t rpe:1;
uint64_t fcuv:1;
uint64_t rqo:1;
uint64_t rauc:1;
uint64_t racur:1;
uint64_t racca:1;
uint64_t caar:1;
uint64_t rarwdns:1;
uint64_t ramtlp:1;
uint64_t racpp:1;
uint64_t rawwpp:1;
uint64_t ecrc_e:1;
uint64_t reserved_31_63:33;
#endif
} s;
struct cvmx_pemx_dbg_info_s cn61xx;
struct cvmx_pemx_dbg_info_s cn63xx;
struct cvmx_pemx_dbg_info_s cn63xxp1;
struct cvmx_pemx_dbg_info_s cn66xx;
struct cvmx_pemx_dbg_info_s cn68xx;
struct cvmx_pemx_dbg_info_s cn68xxp1;
struct cvmx_pemx_dbg_info_s cnf71xx;
};
union cvmx_pemx_dbg_info_en {
uint64_t u64;
struct cvmx_pemx_dbg_info_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63:33;
uint64_t ecrc_e:1;
uint64_t rawwpp:1;
uint64_t racpp:1;
uint64_t ramtlp:1;
uint64_t rarwdns:1;
uint64_t caar:1;
uint64_t racca:1;
uint64_t racur:1;
uint64_t rauc:1;
uint64_t rqo:1;
uint64_t fcuv:1;
uint64_t rpe:1;
uint64_t fcpvwt:1;
uint64_t dpeoosd:1;
uint64_t rtwdle:1;
uint64_t rdwdle:1;
uint64_t mre:1;
uint64_t rte:1;
uint64_t acto:1;
uint64_t rvdm:1;
uint64_t rumep:1;
uint64_t rptamrc:1;
uint64_t rpmerc:1;
uint64_t rfemrc:1;
uint64_t rnfemrc:1;
uint64_t rcemrc:1;
uint64_t rpoison:1;
uint64_t recrce:1;
uint64_t rtlplle:1;
uint64_t rtlpmal:1;
uint64_t spoison:1;
#else
uint64_t spoison:1;
uint64_t rtlpmal:1;
uint64_t rtlplle:1;
uint64_t recrce:1;
uint64_t rpoison:1;
uint64_t rcemrc:1;
uint64_t rnfemrc:1;
uint64_t rfemrc:1;
uint64_t rpmerc:1;
uint64_t rptamrc:1;
uint64_t rumep:1;
uint64_t rvdm:1;
uint64_t acto:1;
uint64_t rte:1;
uint64_t mre:1;
uint64_t rdwdle:1;
uint64_t rtwdle:1;
uint64_t dpeoosd:1;
uint64_t fcpvwt:1;
uint64_t rpe:1;
uint64_t fcuv:1;
uint64_t rqo:1;
uint64_t rauc:1;
uint64_t racur:1;
uint64_t racca:1;
uint64_t caar:1;
uint64_t rarwdns:1;
uint64_t ramtlp:1;
uint64_t racpp:1;
uint64_t rawwpp:1;
uint64_t ecrc_e:1;
uint64_t reserved_31_63:33;
#endif
} s;
struct cvmx_pemx_dbg_info_en_s cn61xx;
struct cvmx_pemx_dbg_info_en_s cn63xx;
struct cvmx_pemx_dbg_info_en_s cn63xxp1;
struct cvmx_pemx_dbg_info_en_s cn66xx;
struct cvmx_pemx_dbg_info_en_s cn68xx;
struct cvmx_pemx_dbg_info_en_s cn68xxp1;
struct cvmx_pemx_dbg_info_en_s cnf71xx;
};
union cvmx_pemx_diag_status {
uint64_t u64;
struct cvmx_pemx_diag_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t pm_dst:1;
uint64_t pm_stat:1;
uint64_t pm_en:1;
uint64_t aux_en:1;
#else
uint64_t aux_en:1;
uint64_t pm_en:1;
uint64_t pm_stat:1;
uint64_t pm_dst:1;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_pemx_diag_status_s cn61xx;
struct cvmx_pemx_diag_status_s cn63xx;
struct cvmx_pemx_diag_status_s cn63xxp1;
struct cvmx_pemx_diag_status_s cn66xx;
struct cvmx_pemx_diag_status_s cn68xx;
struct cvmx_pemx_diag_status_s cn68xxp1;
struct cvmx_pemx_diag_status_s cnf71xx;
};
union cvmx_pemx_inb_read_credits {
uint64_t u64;
struct cvmx_pemx_inb_read_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t num:6;
#else
uint64_t num:6;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_pemx_inb_read_credits_s cn61xx;
struct cvmx_pemx_inb_read_credits_s cn66xx;
struct cvmx_pemx_inb_read_credits_s cn68xx;
struct cvmx_pemx_inb_read_credits_s cnf71xx;
};
union cvmx_pemx_int_enb {
uint64_t u64;
struct cvmx_pemx_int_enb_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
uint64_t crs_dr:1;
uint64_t crs_er:1;
uint64_t rdlk:1;
uint64_t exc:1;
uint64_t un_bx:1;
uint64_t un_b2:1;
uint64_t un_b1:1;
uint64_t up_bx:1;
uint64_t up_b2:1;
uint64_t up_b1:1;
uint64_t pmem:1;
uint64_t pmei:1;
uint64_t se:1;
uint64_t aeri:1;
#else
uint64_t aeri:1;
uint64_t se:1;
uint64_t pmei:1;
uint64_t pmem:1;
uint64_t up_b1:1;
uint64_t up_b2:1;
uint64_t up_bx:1;
uint64_t un_b1:1;
uint64_t un_b2:1;
uint64_t un_bx:1;
uint64_t exc:1;
uint64_t rdlk:1;
uint64_t crs_er:1;
uint64_t crs_dr:1;
uint64_t reserved_14_63:50;
#endif
} s;
struct cvmx_pemx_int_enb_s cn61xx;
struct cvmx_pemx_int_enb_s cn63xx;
struct cvmx_pemx_int_enb_s cn63xxp1;
struct cvmx_pemx_int_enb_s cn66xx;
struct cvmx_pemx_int_enb_s cn68xx;
struct cvmx_pemx_int_enb_s cn68xxp1;
struct cvmx_pemx_int_enb_s cnf71xx;
};
union cvmx_pemx_int_enb_int {
uint64_t u64;
struct cvmx_pemx_int_enb_int_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
uint64_t crs_dr:1;
uint64_t crs_er:1;
uint64_t rdlk:1;
uint64_t exc:1;
uint64_t un_bx:1;
uint64_t un_b2:1;
uint64_t un_b1:1;
uint64_t up_bx:1;
uint64_t up_b2:1;
uint64_t up_b1:1;
uint64_t pmem:1;
uint64_t pmei:1;
uint64_t se:1;
uint64_t aeri:1;
#else
uint64_t aeri:1;
uint64_t se:1;
uint64_t pmei:1;
uint64_t pmem:1;
uint64_t up_b1:1;
uint64_t up_b2:1;
uint64_t up_bx:1;
uint64_t un_b1:1;
uint64_t un_b2:1;
uint64_t un_bx:1;
uint64_t exc:1;
uint64_t rdlk:1;
uint64_t crs_er:1;
uint64_t crs_dr:1;
uint64_t reserved_14_63:50;
#endif
} s;
struct cvmx_pemx_int_enb_int_s cn61xx;
struct cvmx_pemx_int_enb_int_s cn63xx;
struct cvmx_pemx_int_enb_int_s cn63xxp1;
struct cvmx_pemx_int_enb_int_s cn66xx;
struct cvmx_pemx_int_enb_int_s cn68xx;
struct cvmx_pemx_int_enb_int_s cn68xxp1;
struct cvmx_pemx_int_enb_int_s cnf71xx;
};
union cvmx_pemx_int_sum {
uint64_t u64;
struct cvmx_pemx_int_sum_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
uint64_t crs_dr:1;
uint64_t crs_er:1;
uint64_t rdlk:1;
uint64_t exc:1;
uint64_t un_bx:1;
uint64_t un_b2:1;
uint64_t un_b1:1;
uint64_t up_bx:1;
uint64_t up_b2:1;
uint64_t up_b1:1;
uint64_t pmem:1;
uint64_t pmei:1;
uint64_t se:1;
uint64_t aeri:1;
#else
uint64_t aeri:1;
uint64_t se:1;
uint64_t pmei:1;
uint64_t pmem:1;
uint64_t up_b1:1;
uint64_t up_b2:1;
uint64_t up_bx:1;
uint64_t un_b1:1;
uint64_t un_b2:1;
uint64_t un_bx:1;
uint64_t exc:1;
uint64_t rdlk:1;
uint64_t crs_er:1;
uint64_t crs_dr:1;
uint64_t reserved_14_63:50;
#endif
} s;
struct cvmx_pemx_int_sum_s cn61xx;
struct cvmx_pemx_int_sum_s cn63xx;
struct cvmx_pemx_int_sum_s cn63xxp1;
struct cvmx_pemx_int_sum_s cn66xx;
struct cvmx_pemx_int_sum_s cn68xx;
struct cvmx_pemx_int_sum_s cn68xxp1;
struct cvmx_pemx_int_sum_s cnf71xx;
};
union cvmx_pemx_p2n_bar0_start {
uint64_t u64;
struct cvmx_pemx_p2n_bar0_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:50;
uint64_t reserved_0_13:14;
#else
uint64_t reserved_0_13:14;
uint64_t addr:50;
#endif
} s;
struct cvmx_pemx_p2n_bar0_start_s cn61xx;
struct cvmx_pemx_p2n_bar0_start_s cn63xx;
struct cvmx_pemx_p2n_bar0_start_s cn63xxp1;
struct cvmx_pemx_p2n_bar0_start_s cn66xx;
struct cvmx_pemx_p2n_bar0_start_s cn68xx;
struct cvmx_pemx_p2n_bar0_start_s cn68xxp1;
struct cvmx_pemx_p2n_bar0_start_s cnf71xx;
};
union cvmx_pemx_p2n_bar1_start {
uint64_t u64;
struct cvmx_pemx_p2n_bar1_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:38;
uint64_t reserved_0_25:26;
#else
uint64_t reserved_0_25:26;
uint64_t addr:38;
#endif
} s;
struct cvmx_pemx_p2n_bar1_start_s cn61xx;
struct cvmx_pemx_p2n_bar1_start_s cn63xx;
struct cvmx_pemx_p2n_bar1_start_s cn63xxp1;
struct cvmx_pemx_p2n_bar1_start_s cn66xx;
struct cvmx_pemx_p2n_bar1_start_s cn68xx;
struct cvmx_pemx_p2n_bar1_start_s cn68xxp1;
struct cvmx_pemx_p2n_bar1_start_s cnf71xx;
};
union cvmx_pemx_p2n_bar2_start {
uint64_t u64;
struct cvmx_pemx_p2n_bar2_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:23;
uint64_t reserved_0_40:41;
#else
uint64_t reserved_0_40:41;
uint64_t addr:23;
#endif
} s;
struct cvmx_pemx_p2n_bar2_start_s cn61xx;
struct cvmx_pemx_p2n_bar2_start_s cn63xx;
struct cvmx_pemx_p2n_bar2_start_s cn63xxp1;
struct cvmx_pemx_p2n_bar2_start_s cn66xx;
struct cvmx_pemx_p2n_bar2_start_s cn68xx;
struct cvmx_pemx_p2n_bar2_start_s cn68xxp1;
struct cvmx_pemx_p2n_bar2_start_s cnf71xx;
};
union cvmx_pemx_p2p_barx_end {
uint64_t u64;
struct cvmx_pemx_p2p_barx_end_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:52;
uint64_t reserved_0_11:12;
#else
uint64_t reserved_0_11:12;
uint64_t addr:52;
#endif
} s;
struct cvmx_pemx_p2p_barx_end_s cn63xx;
struct cvmx_pemx_p2p_barx_end_s cn63xxp1;
struct cvmx_pemx_p2p_barx_end_s cn66xx;
struct cvmx_pemx_p2p_barx_end_s cn68xx;
struct cvmx_pemx_p2p_barx_end_s cn68xxp1;
};
union cvmx_pemx_p2p_barx_start {
uint64_t u64;
struct cvmx_pemx_p2p_barx_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:52;
uint64_t reserved_0_11:12;
#else
uint64_t reserved_0_11:12;
uint64_t addr:52;
#endif
} s;
struct cvmx_pemx_p2p_barx_start_s cn63xx;
struct cvmx_pemx_p2p_barx_start_s cn63xxp1;
struct cvmx_pemx_p2p_barx_start_s cn66xx;
struct cvmx_pemx_p2p_barx_start_s cn68xx;
struct cvmx_pemx_p2p_barx_start_s cn68xxp1;
};
union cvmx_pemx_tlp_credits {
uint64_t u64;
struct cvmx_pemx_tlp_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_56_63:8;
uint64_t peai_ppf:8;
uint64_t pem_cpl:8;
uint64_t pem_np:8;
uint64_t pem_p:8;
uint64_t sli_cpl:8;
uint64_t sli_np:8;
uint64_t sli_p:8;
#else
uint64_t sli_p:8;
uint64_t sli_np:8;
uint64_t sli_cpl:8;
uint64_t pem_p:8;
uint64_t pem_np:8;
uint64_t pem_cpl:8;
uint64_t peai_ppf:8;
uint64_t reserved_56_63:8;
#endif
} s;
struct cvmx_pemx_tlp_credits_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_56_63:8;
uint64_t peai_ppf:8;
uint64_t reserved_24_47:24;
uint64_t sli_cpl:8;
uint64_t sli_np:8;
uint64_t sli_p:8;
#else
uint64_t sli_p:8;
uint64_t sli_np:8;
uint64_t sli_cpl:8;
uint64_t reserved_24_47:24;
uint64_t peai_ppf:8;
uint64_t reserved_56_63:8;
#endif
} cn61xx;
struct cvmx_pemx_tlp_credits_s cn63xx;
struct cvmx_pemx_tlp_credits_s cn63xxp1;
struct cvmx_pemx_tlp_credits_s cn66xx;
struct cvmx_pemx_tlp_credits_s cn68xx;
struct cvmx_pemx_tlp_credits_s cn68xxp1;
struct cvmx_pemx_tlp_credits_cn61xx cnf71xx;
};
#endif


@ -0,0 +1,638 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_PESCX_DEFS_H__
#define __CVMX_PESCX_DEFS_H__
#define CVMX_PESCX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000018ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_BIST_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000418ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_CFG_RD(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000030ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_CFG_WR(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000028ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_CPL_LUT_VALID(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000098ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000000ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_CTL_STATUS2(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000400ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_DBG_INFO(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000008ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_DBG_INFO_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800C80000A0ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_DIAG_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000020ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_P2N_BAR0_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000080ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_P2N_BAR1_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000088ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_P2N_BAR2_START(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000090ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_PESCX_P2P_BARX_END(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000048ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
#define CVMX_PESCX_P2P_BARX_START(offset, block_id) (CVMX_ADD_IO_SEG(0x00011800C8000040ull) + (((offset) & 3) + ((block_id) & 1) * 0x800000ull) * 16)
#define CVMX_PESCX_TLP_CREDITS(block_id) (CVMX_ADD_IO_SEG(0x00011800C8000038ull) + ((block_id) & 1) * 0x8000000ull)
union cvmx_pescx_bist_status {
uint64_t u64;
struct cvmx_pescx_bist_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t rqdata5:1;
uint64_t ctlp_or:1;
uint64_t ntlp_or:1;
uint64_t ptlp_or:1;
uint64_t retry:1;
uint64_t rqdata0:1;
uint64_t rqdata1:1;
uint64_t rqdata2:1;
uint64_t rqdata3:1;
uint64_t rqdata4:1;
uint64_t rqhdr1:1;
uint64_t rqhdr0:1;
uint64_t sot:1;
#else
uint64_t sot:1;
uint64_t rqhdr0:1;
uint64_t rqhdr1:1;
uint64_t rqdata4:1;
uint64_t rqdata3:1;
uint64_t rqdata2:1;
uint64_t rqdata1:1;
uint64_t rqdata0:1;
uint64_t retry:1;
uint64_t ptlp_or:1;
uint64_t ntlp_or:1;
uint64_t ctlp_or:1;
uint64_t rqdata5:1;
uint64_t reserved_13_63:51;
#endif
} s;
struct cvmx_pescx_bist_status_s cn52xx;
struct cvmx_pescx_bist_status_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t ctlp_or:1;
uint64_t ntlp_or:1;
uint64_t ptlp_or:1;
uint64_t retry:1;
uint64_t rqdata0:1;
uint64_t rqdata1:1;
uint64_t rqdata2:1;
uint64_t rqdata3:1;
uint64_t rqdata4:1;
uint64_t rqhdr1:1;
uint64_t rqhdr0:1;
uint64_t sot:1;
#else
uint64_t sot:1;
uint64_t rqhdr0:1;
uint64_t rqhdr1:1;
uint64_t rqdata4:1;
uint64_t rqdata3:1;
uint64_t rqdata2:1;
uint64_t rqdata1:1;
uint64_t rqdata0:1;
uint64_t retry:1;
uint64_t ptlp_or:1;
uint64_t ntlp_or:1;
uint64_t ctlp_or:1;
uint64_t reserved_12_63:52;
#endif
} cn52xxp1;
struct cvmx_pescx_bist_status_s cn56xx;
struct cvmx_pescx_bist_status_cn52xxp1 cn56xxp1;
};
union cvmx_pescx_bist_status2 {
uint64_t u64;
struct cvmx_pescx_bist_status2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
uint64_t cto_p2e:1;
uint64_t e2p_cpl:1;
uint64_t e2p_n:1;
uint64_t e2p_p:1;
uint64_t e2p_rsl:1;
uint64_t dbg_p2e:1;
uint64_t peai_p2e:1;
uint64_t rsl_p2e:1;
uint64_t pef_tpf1:1;
uint64_t pef_tpf0:1;
uint64_t pef_tnf:1;
uint64_t pef_tcf1:1;
uint64_t pef_tc0:1;
uint64_t ppf:1;
#else
uint64_t ppf:1;
uint64_t pef_tc0:1;
uint64_t pef_tcf1:1;
uint64_t pef_tnf:1;
uint64_t pef_tpf0:1;
uint64_t pef_tpf1:1;
uint64_t rsl_p2e:1;
uint64_t peai_p2e:1;
uint64_t dbg_p2e:1;
uint64_t e2p_rsl:1;
uint64_t e2p_p:1;
uint64_t e2p_n:1;
uint64_t e2p_cpl:1;
uint64_t cto_p2e:1;
uint64_t reserved_14_63:50;
#endif
} s;
struct cvmx_pescx_bist_status2_s cn52xx;
struct cvmx_pescx_bist_status2_s cn52xxp1;
struct cvmx_pescx_bist_status2_s cn56xx;
struct cvmx_pescx_bist_status2_s cn56xxp1;
};
union cvmx_pescx_cfg_rd {
uint64_t u64;
struct cvmx_pescx_cfg_rd_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t data:32;
uint64_t addr:32;
#else
uint64_t addr:32;
uint64_t data:32;
#endif
} s;
struct cvmx_pescx_cfg_rd_s cn52xx;
struct cvmx_pescx_cfg_rd_s cn52xxp1;
struct cvmx_pescx_cfg_rd_s cn56xx;
struct cvmx_pescx_cfg_rd_s cn56xxp1;
};
union cvmx_pescx_cfg_wr {
uint64_t u64;
struct cvmx_pescx_cfg_wr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t data:32;
uint64_t addr:32;
#else
uint64_t addr:32;
uint64_t data:32;
#endif
} s;
struct cvmx_pescx_cfg_wr_s cn52xx;
struct cvmx_pescx_cfg_wr_s cn52xxp1;
struct cvmx_pescx_cfg_wr_s cn56xx;
struct cvmx_pescx_cfg_wr_s cn56xxp1;
};
union cvmx_pescx_cpl_lut_valid {
uint64_t u64;
struct cvmx_pescx_cpl_lut_valid_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t tag:32;
#else
uint64_t tag:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_pescx_cpl_lut_valid_s cn52xx;
struct cvmx_pescx_cpl_lut_valid_s cn52xxp1;
struct cvmx_pescx_cpl_lut_valid_s cn56xx;
struct cvmx_pescx_cpl_lut_valid_s cn56xxp1;
};
union cvmx_pescx_ctl_status {
uint64_t u64;
struct cvmx_pescx_ctl_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t dnum:5;
uint64_t pbus:8;
uint64_t qlm_cfg:2;
uint64_t lane_swp:1;
uint64_t pm_xtoff:1;
uint64_t pm_xpme:1;
uint64_t ob_p_cmd:1;
uint64_t reserved_7_8:2;
uint64_t nf_ecrc:1;
uint64_t dly_one:1;
uint64_t lnk_enb:1;
uint64_t ro_ctlp:1;
uint64_t reserved_2_2:1;
uint64_t inv_ecrc:1;
uint64_t inv_lcrc:1;
#else
uint64_t inv_lcrc:1;
uint64_t inv_ecrc:1;
uint64_t reserved_2_2:1;
uint64_t ro_ctlp:1;
uint64_t lnk_enb:1;
uint64_t dly_one:1;
uint64_t nf_ecrc:1;
uint64_t reserved_7_8:2;
uint64_t ob_p_cmd:1;
uint64_t pm_xpme:1;
uint64_t pm_xtoff:1;
uint64_t lane_swp:1;
uint64_t qlm_cfg:2;
uint64_t pbus:8;
uint64_t dnum:5;
uint64_t reserved_28_63:36;
#endif
} s;
struct cvmx_pescx_ctl_status_s cn52xx;
struct cvmx_pescx_ctl_status_s cn52xxp1;
struct cvmx_pescx_ctl_status_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
uint64_t dnum:5;
uint64_t pbus:8;
uint64_t qlm_cfg:2;
uint64_t reserved_12_12:1;
uint64_t pm_xtoff:1;
uint64_t pm_xpme:1;
uint64_t ob_p_cmd:1;
uint64_t reserved_7_8:2;
uint64_t nf_ecrc:1;
uint64_t dly_one:1;
uint64_t lnk_enb:1;
uint64_t ro_ctlp:1;
uint64_t reserved_2_2:1;
uint64_t inv_ecrc:1;
uint64_t inv_lcrc:1;
#else
uint64_t inv_lcrc:1;
uint64_t inv_ecrc:1;
uint64_t reserved_2_2:1;
uint64_t ro_ctlp:1;
uint64_t lnk_enb:1;
uint64_t dly_one:1;
uint64_t nf_ecrc:1;
uint64_t reserved_7_8:2;
uint64_t ob_p_cmd:1;
uint64_t pm_xpme:1;
uint64_t pm_xtoff:1;
uint64_t reserved_12_12:1;
uint64_t qlm_cfg:2;
uint64_t pbus:8;
uint64_t dnum:5;
uint64_t reserved_28_63:36;
#endif
} cn56xx;
struct cvmx_pescx_ctl_status_cn56xx cn56xxp1;
};
union cvmx_pescx_ctl_status2 {
uint64_t u64;
struct cvmx_pescx_ctl_status2_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
uint64_t pclk_run:1;
uint64_t pcierst:1;
#else
uint64_t pcierst:1;
uint64_t pclk_run:1;
uint64_t reserved_2_63:62;
#endif
} s;
struct cvmx_pescx_ctl_status2_s cn52xx;
struct cvmx_pescx_ctl_status2_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t pcierst:1;
#else
uint64_t pcierst:1;
uint64_t reserved_1_63:63;
#endif
} cn52xxp1;
struct cvmx_pescx_ctl_status2_s cn56xx;
struct cvmx_pescx_ctl_status2_cn52xxp1 cn56xxp1;
};
union cvmx_pescx_dbg_info {
uint64_t u64;
struct cvmx_pescx_dbg_info_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63:33;
uint64_t ecrc_e:1;
uint64_t rawwpp:1;
uint64_t racpp:1;
uint64_t ramtlp:1;
uint64_t rarwdns:1;
uint64_t caar:1;
uint64_t racca:1;
uint64_t racur:1;
uint64_t rauc:1;
uint64_t rqo:1;
uint64_t fcuv:1;
uint64_t rpe:1;
uint64_t fcpvwt:1;
uint64_t dpeoosd:1;
uint64_t rtwdle:1;
uint64_t rdwdle:1;
uint64_t mre:1;
uint64_t rte:1;
uint64_t acto:1;
uint64_t rvdm:1;
uint64_t rumep:1;
uint64_t rptamrc:1;
uint64_t rpmerc:1;
uint64_t rfemrc:1;
uint64_t rnfemrc:1;
uint64_t rcemrc:1;
uint64_t rpoison:1;
uint64_t recrce:1;
uint64_t rtlplle:1;
uint64_t rtlpmal:1;
uint64_t spoison:1;
#else
uint64_t spoison:1;
uint64_t rtlpmal:1;
uint64_t rtlplle:1;
uint64_t recrce:1;
uint64_t rpoison:1;
uint64_t rcemrc:1;
uint64_t rnfemrc:1;
uint64_t rfemrc:1;
uint64_t rpmerc:1;
uint64_t rptamrc:1;
uint64_t rumep:1;
uint64_t rvdm:1;
uint64_t acto:1;
uint64_t rte:1;
uint64_t mre:1;
uint64_t rdwdle:1;
uint64_t rtwdle:1;
uint64_t dpeoosd:1;
uint64_t fcpvwt:1;
uint64_t rpe:1;
uint64_t fcuv:1;
uint64_t rqo:1;
uint64_t rauc:1;
uint64_t racur:1;
uint64_t racca:1;
uint64_t caar:1;
uint64_t rarwdns:1;
uint64_t ramtlp:1;
uint64_t racpp:1;
uint64_t rawwpp:1;
uint64_t ecrc_e:1;
uint64_t reserved_31_63:33;
#endif
} s;
struct cvmx_pescx_dbg_info_s cn52xx;
struct cvmx_pescx_dbg_info_s cn52xxp1;
struct cvmx_pescx_dbg_info_s cn56xx;
struct cvmx_pescx_dbg_info_s cn56xxp1;
};
union cvmx_pescx_dbg_info_en {
uint64_t u64;
struct cvmx_pescx_dbg_info_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63:33;
uint64_t ecrc_e:1;
uint64_t rawwpp:1;
uint64_t racpp:1;
uint64_t ramtlp:1;
uint64_t rarwdns:1;
uint64_t caar:1;
uint64_t racca:1;
uint64_t racur:1;
uint64_t rauc:1;
uint64_t rqo:1;
uint64_t fcuv:1;
uint64_t rpe:1;
uint64_t fcpvwt:1;
uint64_t dpeoosd:1;
uint64_t rtwdle:1;
uint64_t rdwdle:1;
uint64_t mre:1;
uint64_t rte:1;
uint64_t acto:1;
uint64_t rvdm:1;
uint64_t rumep:1;
uint64_t rptamrc:1;
uint64_t rpmerc:1;
uint64_t rfemrc:1;
uint64_t rnfemrc:1;
uint64_t rcemrc:1;
uint64_t rpoison:1;
uint64_t recrce:1;
uint64_t rtlplle:1;
uint64_t rtlpmal:1;
uint64_t spoison:1;
#else
uint64_t spoison:1;
uint64_t rtlpmal:1;
uint64_t rtlplle:1;
uint64_t recrce:1;
uint64_t rpoison:1;
uint64_t rcemrc:1;
uint64_t rnfemrc:1;
uint64_t rfemrc:1;
uint64_t rpmerc:1;
uint64_t rptamrc:1;
uint64_t rumep:1;
uint64_t rvdm:1;
uint64_t acto:1;
uint64_t rte:1;
uint64_t mre:1;
uint64_t rdwdle:1;
uint64_t rtwdle:1;
uint64_t dpeoosd:1;
uint64_t fcpvwt:1;
uint64_t rpe:1;
uint64_t fcuv:1;
uint64_t rqo:1;
uint64_t rauc:1;
uint64_t racur:1;
uint64_t racca:1;
uint64_t caar:1;
uint64_t rarwdns:1;
uint64_t ramtlp:1;
uint64_t racpp:1;
uint64_t rawwpp:1;
uint64_t ecrc_e:1;
uint64_t reserved_31_63:33;
#endif
} s;
struct cvmx_pescx_dbg_info_en_s cn52xx;
struct cvmx_pescx_dbg_info_en_s cn52xxp1;
struct cvmx_pescx_dbg_info_en_s cn56xx;
struct cvmx_pescx_dbg_info_en_s cn56xxp1;
};
union cvmx_pescx_diag_status {
uint64_t u64;
struct cvmx_pescx_diag_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t pm_dst:1;
uint64_t pm_stat:1;
uint64_t pm_en:1;
uint64_t aux_en:1;
#else
uint64_t aux_en:1;
uint64_t pm_en:1;
uint64_t pm_stat:1;
uint64_t pm_dst:1;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_pescx_diag_status_s cn52xx;
struct cvmx_pescx_diag_status_s cn52xxp1;
struct cvmx_pescx_diag_status_s cn56xx;
struct cvmx_pescx_diag_status_s cn56xxp1;
};
union cvmx_pescx_p2n_bar0_start {
uint64_t u64;
struct cvmx_pescx_p2n_bar0_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:50;
uint64_t reserved_0_13:14;
#else
uint64_t reserved_0_13:14;
uint64_t addr:50;
#endif
} s;
struct cvmx_pescx_p2n_bar0_start_s cn52xx;
struct cvmx_pescx_p2n_bar0_start_s cn52xxp1;
struct cvmx_pescx_p2n_bar0_start_s cn56xx;
struct cvmx_pescx_p2n_bar0_start_s cn56xxp1;
};
union cvmx_pescx_p2n_bar1_start {
uint64_t u64;
struct cvmx_pescx_p2n_bar1_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:38;
uint64_t reserved_0_25:26;
#else
uint64_t reserved_0_25:26;
uint64_t addr:38;
#endif
} s;
struct cvmx_pescx_p2n_bar1_start_s cn52xx;
struct cvmx_pescx_p2n_bar1_start_s cn52xxp1;
struct cvmx_pescx_p2n_bar1_start_s cn56xx;
struct cvmx_pescx_p2n_bar1_start_s cn56xxp1;
};
union cvmx_pescx_p2n_bar2_start {
uint64_t u64;
struct cvmx_pescx_p2n_bar2_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:25;
uint64_t reserved_0_38:39;
#else
uint64_t reserved_0_38:39;
uint64_t addr:25;
#endif
} s;
struct cvmx_pescx_p2n_bar2_start_s cn52xx;
struct cvmx_pescx_p2n_bar2_start_s cn52xxp1;
struct cvmx_pescx_p2n_bar2_start_s cn56xx;
struct cvmx_pescx_p2n_bar2_start_s cn56xxp1;
};
union cvmx_pescx_p2p_barx_end {
uint64_t u64;
struct cvmx_pescx_p2p_barx_end_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:52;
uint64_t reserved_0_11:12;
#else
uint64_t reserved_0_11:12;
uint64_t addr:52;
#endif
} s;
struct cvmx_pescx_p2p_barx_end_s cn52xx;
struct cvmx_pescx_p2p_barx_end_s cn52xxp1;
struct cvmx_pescx_p2p_barx_end_s cn56xx;
struct cvmx_pescx_p2p_barx_end_s cn56xxp1;
};
union cvmx_pescx_p2p_barx_start {
uint64_t u64;
struct cvmx_pescx_p2p_barx_start_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t addr:52;
uint64_t reserved_0_11:12;
#else
uint64_t reserved_0_11:12;
uint64_t addr:52;
#endif
} s;
struct cvmx_pescx_p2p_barx_start_s cn52xx;
struct cvmx_pescx_p2p_barx_start_s cn52xxp1;
struct cvmx_pescx_p2p_barx_start_s cn56xx;
struct cvmx_pescx_p2p_barx_start_s cn56xxp1;
};
union cvmx_pescx_tlp_credits {
uint64_t u64;
struct cvmx_pescx_tlp_credits_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_0_63:64;
#else
uint64_t reserved_0_63:64;
#endif
} s;
struct cvmx_pescx_tlp_credits_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_56_63:8;
uint64_t peai_ppf:8;
uint64_t pesc_cpl:8;
uint64_t pesc_np:8;
uint64_t pesc_p:8;
uint64_t npei_cpl:8;
uint64_t npei_np:8;
uint64_t npei_p:8;
#else
uint64_t npei_p:8;
uint64_t npei_np:8;
uint64_t npei_cpl:8;
uint64_t pesc_p:8;
uint64_t pesc_np:8;
uint64_t pesc_cpl:8;
uint64_t peai_ppf:8;
uint64_t reserved_56_63:8;
#endif
} cn52xx;
struct cvmx_pescx_tlp_credits_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_38_63:26;
uint64_t peai_ppf:8;
uint64_t pesc_cpl:5;
uint64_t pesc_np:5;
uint64_t pesc_p:5;
uint64_t npei_cpl:5;
uint64_t npei_np:5;
uint64_t npei_p:5;
#else
uint64_t npei_p:5;
uint64_t npei_np:5;
uint64_t npei_cpl:5;
uint64_t pesc_p:5;
uint64_t pesc_np:5;
uint64_t pesc_cpl:5;
uint64_t peai_ppf:8;
uint64_t reserved_38_63:26;
#endif
} cn52xxp1;
struct cvmx_pescx_tlp_credits_cn52xx cn56xx;
struct cvmx_pescx_tlp_credits_cn52xxp1 cn56xxp1;
};
#endif


@ -0,0 +1,224 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_PEXP_DEFS_H__
#define __CVMX_PEXP_DEFS_H__
#define CVMX_PEXP_NPEI_BAR1_INDEXX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008000ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008580ull))
#define CVMX_PEXP_NPEI_BIST_STATUS2 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
#define CVMX_PEXP_NPEI_CTL_PORT0 (CVMX_ADD_IO_SEG(0x00011F0000008250ull))
#define CVMX_PEXP_NPEI_CTL_PORT1 (CVMX_ADD_IO_SEG(0x00011F0000008260ull))
#define CVMX_PEXP_NPEI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000008570ull))
#define CVMX_PEXP_NPEI_CTL_STATUS2 (CVMX_ADD_IO_SEG(0x00011F000000BC00ull))
#define CVMX_PEXP_NPEI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000085F0ull))
#define CVMX_PEXP_NPEI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000008510ull))
#define CVMX_PEXP_NPEI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000008500ull))
#define CVMX_PEXP_NPEI_DMA0_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085C0ull))
#define CVMX_PEXP_NPEI_DMA1_INT_LEVEL (CVMX_ADD_IO_SEG(0x00011F00000085D0ull))
#define CVMX_PEXP_NPEI_DMAX_COUNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000008450ull) + ((offset) & 7) * 16)
#define CVMX_PEXP_NPEI_DMAX_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F00000083B0ull) + ((offset) & 7) * 16)
#define CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000008400ull) + ((offset) & 7) * 16)
#define CVMX_PEXP_NPEI_DMAX_NADDR(offset) (CVMX_ADD_IO_SEG(0x00011F00000084A0ull) + ((offset) & 7) * 16)
#define CVMX_PEXP_NPEI_DMA_CNTS (CVMX_ADD_IO_SEG(0x00011F00000085E0ull))
#define CVMX_PEXP_NPEI_DMA_CONTROL (CVMX_ADD_IO_SEG(0x00011F00000083A0ull))
#define CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM (CVMX_ADD_IO_SEG(0x00011F00000085B0ull))
#define CVMX_PEXP_NPEI_DMA_STATE1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
#define CVMX_PEXP_NPEI_DMA_STATE1_P1 (CVMX_ADD_IO_SEG(0x00011F0000008680ull))
#define CVMX_PEXP_NPEI_DMA_STATE2 (CVMX_ADD_IO_SEG(0x00011F00000086D0ull))
#define CVMX_PEXP_NPEI_DMA_STATE2_P1 (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
#define CVMX_PEXP_NPEI_DMA_STATE3_P1 (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
#define CVMX_PEXP_NPEI_DMA_STATE4_P1 (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
#define CVMX_PEXP_NPEI_DMA_STATE5_P1 (CVMX_ADD_IO_SEG(0x00011F00000086C0ull))
#define CVMX_PEXP_NPEI_INT_A_ENB (CVMX_ADD_IO_SEG(0x00011F0000008560ull))
#define CVMX_PEXP_NPEI_INT_A_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCE0ull))
#define CVMX_PEXP_NPEI_INT_A_SUM (CVMX_ADD_IO_SEG(0x00011F0000008550ull))
#define CVMX_PEXP_NPEI_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000008540ull))
#define CVMX_PEXP_NPEI_INT_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BCD0ull))
#define CVMX_PEXP_NPEI_INT_INFO (CVMX_ADD_IO_SEG(0x00011F0000008590ull))
#define CVMX_PEXP_NPEI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000008530ull))
#define CVMX_PEXP_NPEI_INT_SUM2 (CVMX_ADD_IO_SEG(0x00011F000000BCC0ull))
#define CVMX_PEXP_NPEI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000008600ull))
#define CVMX_PEXP_NPEI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000008610ull))
#define CVMX_PEXP_NPEI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000084F0ull))
#define CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F0000008280ull) + ((offset) & 31) * 16 - 16*12)
#define CVMX_PEXP_NPEI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BC50ull))
#define CVMX_PEXP_NPEI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BC60ull))
#define CVMX_PEXP_NPEI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BC70ull))
#define CVMX_PEXP_NPEI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BC80ull))
#define CVMX_PEXP_NPEI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F000000BC10ull))
#define CVMX_PEXP_NPEI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F000000BC20ull))
#define CVMX_PEXP_NPEI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F000000BC30ull))
#define CVMX_PEXP_NPEI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F000000BC40ull))
#define CVMX_PEXP_NPEI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F000000BCA0ull))
#define CVMX_PEXP_NPEI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BCF0ull))
#define CVMX_PEXP_NPEI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD00ull))
#define CVMX_PEXP_NPEI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD10ull))
#define CVMX_PEXP_NPEI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD20ull))
#define CVMX_PEXP_NPEI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F000000BD30ull))
#define CVMX_PEXP_NPEI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F000000BD40ull))
#define CVMX_PEXP_NPEI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F000000BD50ull))
#define CVMX_PEXP_NPEI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F000000BD60ull))
#define CVMX_PEXP_NPEI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F000000BC90ull))
#define CVMX_PEXP_NPEI_PCIE_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F000000BD70ull))
#define CVMX_PEXP_NPEI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F000000BCB0ull))
#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000008650ull))
#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000008660ull))
#define CVMX_PEXP_NPEI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000008670ull))
#define CVMX_PEXP_NPEI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A400ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F000000A800ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F000000AC00ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F000000B000ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F000000B400ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F000000B800ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000009400ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000009800ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000009C00ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000009110ull))
#define CVMX_PEXP_NPEI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009130ull))
#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000090B0ull))
#define CVMX_PEXP_NPEI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000090A0ull))
#define CVMX_PEXP_NPEI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000009090ull))
#define CVMX_PEXP_NPEI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000009080ull))
#define CVMX_PEXP_NPEI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000009150ull))
#define CVMX_PEXP_NPEI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000009000ull))
#define CVMX_PEXP_NPEI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009190ull))
#define CVMX_PEXP_NPEI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009020ull))
#define CVMX_PEXP_NPEI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000009100ull))
#define CVMX_PEXP_NPEI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F00000086B0ull))
#define CVMX_PEXP_NPEI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F000000A000ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_NPEI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F00000086A0ull))
#define CVMX_PEXP_NPEI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000091A0ull))
#define CVMX_PEXP_NPEI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000009070ull))
#define CVMX_PEXP_NPEI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000009160ull))
#define CVMX_PEXP_NPEI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000090D0ull))
#define CVMX_PEXP_NPEI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009010ull))
#define CVMX_PEXP_NPEI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000090E0ull))
#define CVMX_PEXP_NPEI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F0000008690ull))
#define CVMX_PEXP_NPEI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000009050ull))
#define CVMX_PEXP_NPEI_PKT_SLIST_ID_SIZE (CVMX_ADD_IO_SEG(0x00011F0000009180ull))
#define CVMX_PEXP_NPEI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000009040ull))
#define CVMX_PEXP_NPEI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000009030ull))
#define CVMX_PEXP_NPEI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000009120ull))
#define CVMX_PEXP_NPEI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000009140ull))
#define CVMX_PEXP_NPEI_RSL_INT_BLOCKS (CVMX_ADD_IO_SEG(0x00011F0000008520ull))
#define CVMX_PEXP_NPEI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F0000008270ull))
#define CVMX_PEXP_NPEI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000008620ull))
#define CVMX_PEXP_NPEI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000008630ull))
#define CVMX_PEXP_NPEI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000008640ull))
#define CVMX_PEXP_NPEI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F0000008380ull))
#define CVMX_PEXP_SLI_BIST_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010580ull))
#define CVMX_PEXP_SLI_CTL_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010050ull) + ((offset) & 3) * 16)
#define CVMX_PEXP_SLI_CTL_STATUS (CVMX_ADD_IO_SEG(0x00011F0000010570ull))
#define CVMX_PEXP_SLI_DATA_OUT_CNT (CVMX_ADD_IO_SEG(0x00011F00000105F0ull))
#define CVMX_PEXP_SLI_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F0000010310ull))
#define CVMX_PEXP_SLI_DBG_SELECT (CVMX_ADD_IO_SEG(0x00011F0000010300ull))
#define CVMX_PEXP_SLI_DMAX_CNT(offset) (CVMX_ADD_IO_SEG(0x00011F0000010400ull) + ((offset) & 1) * 16)
#define CVMX_PEXP_SLI_DMAX_INT_LEVEL(offset) (CVMX_ADD_IO_SEG(0x00011F00000103E0ull) + ((offset) & 1) * 16)
#define CVMX_PEXP_SLI_DMAX_TIM(offset) (CVMX_ADD_IO_SEG(0x00011F0000010420ull) + ((offset) & 1) * 16)
#define CVMX_PEXP_SLI_INT_ENB_CIU (CVMX_ADD_IO_SEG(0x00011F0000013CD0ull))
#define CVMX_PEXP_SLI_INT_ENB_PORTX(offset) (CVMX_ADD_IO_SEG(0x00011F0000010340ull) + ((offset) & 1) * 16)
#define CVMX_PEXP_SLI_INT_SUM (CVMX_ADD_IO_SEG(0x00011F0000010330ull))
#define CVMX_PEXP_SLI_LAST_WIN_RDATA0 (CVMX_ADD_IO_SEG(0x00011F0000010600ull))
#define CVMX_PEXP_SLI_LAST_WIN_RDATA1 (CVMX_ADD_IO_SEG(0x00011F0000010610ull))
#define CVMX_PEXP_SLI_LAST_WIN_RDATA2 (CVMX_ADD_IO_SEG(0x00011F00000106C0ull))
#define CVMX_PEXP_SLI_LAST_WIN_RDATA3 (CVMX_ADD_IO_SEG(0x00011F00000106D0ull))
#define CVMX_PEXP_SLI_MAC_CREDIT_CNT (CVMX_ADD_IO_SEG(0x00011F0000013D70ull))
#define CVMX_PEXP_SLI_MAC_CREDIT_CNT2 (CVMX_ADD_IO_SEG(0x00011F0000013E10ull))
#define CVMX_PEXP_SLI_MEM_ACCESS_CTL (CVMX_ADD_IO_SEG(0x00011F00000102F0ull))
#define CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(offset) (CVMX_ADD_IO_SEG(0x00011F00000100E0ull) + ((offset) & 31) * 16 - 16*12)
#define CVMX_PEXP_SLI_MSI_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013C50ull))
#define CVMX_PEXP_SLI_MSI_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013C60ull))
#define CVMX_PEXP_SLI_MSI_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013C70ull))
#define CVMX_PEXP_SLI_MSI_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013C80ull))
#define CVMX_PEXP_SLI_MSI_RCV0 (CVMX_ADD_IO_SEG(0x00011F0000013C10ull))
#define CVMX_PEXP_SLI_MSI_RCV1 (CVMX_ADD_IO_SEG(0x00011F0000013C20ull))
#define CVMX_PEXP_SLI_MSI_RCV2 (CVMX_ADD_IO_SEG(0x00011F0000013C30ull))
#define CVMX_PEXP_SLI_MSI_RCV3 (CVMX_ADD_IO_SEG(0x00011F0000013C40ull))
#define CVMX_PEXP_SLI_MSI_RD_MAP (CVMX_ADD_IO_SEG(0x00011F0000013CA0ull))
#define CVMX_PEXP_SLI_MSI_W1C_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013CF0ull))
#define CVMX_PEXP_SLI_MSI_W1C_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D00ull))
#define CVMX_PEXP_SLI_MSI_W1C_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D10ull))
#define CVMX_PEXP_SLI_MSI_W1C_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D20ull))
#define CVMX_PEXP_SLI_MSI_W1S_ENB0 (CVMX_ADD_IO_SEG(0x00011F0000013D30ull))
#define CVMX_PEXP_SLI_MSI_W1S_ENB1 (CVMX_ADD_IO_SEG(0x00011F0000013D40ull))
#define CVMX_PEXP_SLI_MSI_W1S_ENB2 (CVMX_ADD_IO_SEG(0x00011F0000013D50ull))
#define CVMX_PEXP_SLI_MSI_W1S_ENB3 (CVMX_ADD_IO_SEG(0x00011F0000013D60ull))
#define CVMX_PEXP_SLI_MSI_WR_MAP (CVMX_ADD_IO_SEG(0x00011F0000013C90ull))
#define CVMX_PEXP_SLI_PCIE_MSI_RCV (CVMX_ADD_IO_SEG(0x00011F0000013CB0ull))
#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B1 (CVMX_ADD_IO_SEG(0x00011F0000010650ull))
#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B2 (CVMX_ADD_IO_SEG(0x00011F0000010660ull))
#define CVMX_PEXP_SLI_PCIE_MSI_RCV_B3 (CVMX_ADD_IO_SEG(0x00011F0000010670ull))
#define CVMX_PEXP_SLI_PKTX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012400ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_INSTR_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000012800ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_INSTR_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000012C00ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_INSTR_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000013000ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_INSTR_HEADER(offset) (CVMX_ADD_IO_SEG(0x00011F0000013400ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_IN_BP(offset) (CVMX_ADD_IO_SEG(0x00011F0000013800ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_OUT_SIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000010C00ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_SLIST_BADDR(offset) (CVMX_ADD_IO_SEG(0x00011F0000011400ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_SLIST_BAOFF_DBELL(offset) (CVMX_ADD_IO_SEG(0x00011F0000011800ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKTX_SLIST_FIFO_RSIZE(offset) (CVMX_ADD_IO_SEG(0x00011F0000011C00ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKT_CNT_INT (CVMX_ADD_IO_SEG(0x00011F0000011130ull))
#define CVMX_PEXP_SLI_PKT_CNT_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011150ull))
#define CVMX_PEXP_SLI_PKT_CTL (CVMX_ADD_IO_SEG(0x00011F0000011220ull))
#define CVMX_PEXP_SLI_PKT_DATA_OUT_ES (CVMX_ADD_IO_SEG(0x00011F00000110B0ull))
#define CVMX_PEXP_SLI_PKT_DATA_OUT_NS (CVMX_ADD_IO_SEG(0x00011F00000110A0ull))
#define CVMX_PEXP_SLI_PKT_DATA_OUT_ROR (CVMX_ADD_IO_SEG(0x00011F0000011090ull))
#define CVMX_PEXP_SLI_PKT_DPADDR (CVMX_ADD_IO_SEG(0x00011F0000011080ull))
#define CVMX_PEXP_SLI_PKT_INPUT_CONTROL (CVMX_ADD_IO_SEG(0x00011F0000011170ull))
#define CVMX_PEXP_SLI_PKT_INSTR_ENB (CVMX_ADD_IO_SEG(0x00011F0000011000ull))
#define CVMX_PEXP_SLI_PKT_INSTR_RD_SIZE (CVMX_ADD_IO_SEG(0x00011F00000111A0ull))
#define CVMX_PEXP_SLI_PKT_INSTR_SIZE (CVMX_ADD_IO_SEG(0x00011F0000011020ull))
#define CVMX_PEXP_SLI_PKT_INT_LEVELS (CVMX_ADD_IO_SEG(0x00011F0000011120ull))
#define CVMX_PEXP_SLI_PKT_IN_BP (CVMX_ADD_IO_SEG(0x00011F0000011210ull))
#define CVMX_PEXP_SLI_PKT_IN_DONEX_CNTS(offset) (CVMX_ADD_IO_SEG(0x00011F0000012000ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_PKT_IN_INSTR_COUNTS (CVMX_ADD_IO_SEG(0x00011F0000011200ull))
#define CVMX_PEXP_SLI_PKT_IN_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000111B0ull))
#define CVMX_PEXP_SLI_PKT_IPTR (CVMX_ADD_IO_SEG(0x00011F0000011070ull))
#define CVMX_PEXP_SLI_PKT_OUTPUT_WMARK (CVMX_ADD_IO_SEG(0x00011F0000011180ull))
#define CVMX_PEXP_SLI_PKT_OUT_BMODE (CVMX_ADD_IO_SEG(0x00011F00000110D0ull))
#define CVMX_PEXP_SLI_PKT_OUT_BP_EN (CVMX_ADD_IO_SEG(0x00011F0000011240ull))
#define CVMX_PEXP_SLI_PKT_OUT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011010ull))
#define CVMX_PEXP_SLI_PKT_PCIE_PORT (CVMX_ADD_IO_SEG(0x00011F00000110E0ull))
#define CVMX_PEXP_SLI_PKT_PORT_IN_RST (CVMX_ADD_IO_SEG(0x00011F00000111F0ull))
#define CVMX_PEXP_SLI_PKT_SLIST_ES (CVMX_ADD_IO_SEG(0x00011F0000011050ull))
#define CVMX_PEXP_SLI_PKT_SLIST_NS (CVMX_ADD_IO_SEG(0x00011F0000011040ull))
#define CVMX_PEXP_SLI_PKT_SLIST_ROR (CVMX_ADD_IO_SEG(0x00011F0000011030ull))
#define CVMX_PEXP_SLI_PKT_TIME_INT (CVMX_ADD_IO_SEG(0x00011F0000011140ull))
#define CVMX_PEXP_SLI_PKT_TIME_INT_ENB (CVMX_ADD_IO_SEG(0x00011F0000011160ull))
#define CVMX_PEXP_SLI_PORTX_PKIND(offset) (CVMX_ADD_IO_SEG(0x00011F0000010800ull) + ((offset) & 31) * 16)
#define CVMX_PEXP_SLI_S2M_PORTX_CTL(offset) (CVMX_ADD_IO_SEG(0x00011F0000013D80ull) + ((offset) & 3) * 16)
#define CVMX_PEXP_SLI_SCRATCH_1 (CVMX_ADD_IO_SEG(0x00011F00000103C0ull))
#define CVMX_PEXP_SLI_SCRATCH_2 (CVMX_ADD_IO_SEG(0x00011F00000103D0ull))
#define CVMX_PEXP_SLI_STATE1 (CVMX_ADD_IO_SEG(0x00011F0000010620ull))
#define CVMX_PEXP_SLI_STATE2 (CVMX_ADD_IO_SEG(0x00011F0000010630ull))
#define CVMX_PEXP_SLI_STATE3 (CVMX_ADD_IO_SEG(0x00011F0000010640ull))
#define CVMX_PEXP_SLI_TX_PIPE (CVMX_ADD_IO_SEG(0x00011F0000011230ull))
#define CVMX_PEXP_SLI_WINDOW_CTL (CVMX_ADD_IO_SEG(0x00011F00000102E0ull))
#endif

File diff suppressed because it is too large


@ -0,0 +1,524 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Interface to the hardware Packet Input Processing unit.
*
*/
#ifndef __CVMX_PIP_H__
#define __CVMX_PIP_H__
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip-defs.h>
#define CVMX_PIP_NUM_INPUT_PORTS 40
#define CVMX_PIP_NUM_WATCHERS 4
/*
* Encodes the different error and exception codes
*/
typedef enum {
CVMX_PIP_L4_NO_ERR = 0ull,
/*
* 1 = TCP (UDP) packet not long enough to cover TCP (UDP)
* header
*/
CVMX_PIP_L4_MAL_ERR = 1ull,
/* 2 = TCP/UDP checksum failure */
CVMX_PIP_CHK_ERR = 2ull,
/*
* 3 = TCP/UDP length check (TCP/UDP length does not match IP
* length).
*/
CVMX_PIP_L4_LENGTH_ERR = 3ull,
/* 4 = illegal TCP/UDP port (either source or dest port is zero) */
CVMX_PIP_BAD_PRT_ERR = 4ull,
/* 8 = TCP flags = FIN only */
CVMX_PIP_TCP_FLG8_ERR = 8ull,
/* 9 = TCP flags = 0 */
CVMX_PIP_TCP_FLG9_ERR = 9ull,
/* 10 = TCP flags = FIN+RST+* */
CVMX_PIP_TCP_FLG10_ERR = 10ull,
/* 11 = TCP flags = SYN+URG+* */
CVMX_PIP_TCP_FLG11_ERR = 11ull,
/* 12 = TCP flags = SYN+RST+* */
CVMX_PIP_TCP_FLG12_ERR = 12ull,
/* 13 = TCP flags = SYN+FIN+* */
CVMX_PIP_TCP_FLG13_ERR = 13ull
} cvmx_pip_l4_err_t;
typedef enum {
CVMX_PIP_IP_NO_ERR = 0ull,
/* 1 = not IPv4 or IPv6 */
CVMX_PIP_NOT_IP = 1ull,
/* 2 = IPv4 header checksum violation */
CVMX_PIP_IPV4_HDR_CHK = 2ull,
/* 3 = malformed (packet not long enough to cover IP hdr) */
CVMX_PIP_IP_MAL_HDR = 3ull,
/* 4 = malformed (packet not long enough to cover len in IP hdr) */
CVMX_PIP_IP_MAL_PKT = 4ull,
/* 5 = TTL / hop count equal zero */
CVMX_PIP_TTL_HOP = 5ull,
/* 6 = IPv4 options / IPv6 early extension headers */
CVMX_PIP_OPTS = 6ull
} cvmx_pip_ip_exc_t;
/**
* NOTES
 *       Late collision (data received before the collision):
 *       late collisions cannot be detected by the receiver;
 *       they would appear as JAM bits, which would show up as bad FCS
 *       or as a carrier extend error (CVMX_PIP_EXTEND_ERR).
*/
typedef enum {
/* No error */
CVMX_PIP_RX_NO_ERR = 0ull,
/* RGM+SPI 1 = partially received packet (buffering/bandwidth
* not adequate) */
CVMX_PIP_PARTIAL_ERR = 1ull,
/* RGM+SPI 2 = receive packet too large and truncated */
CVMX_PIP_JABBER_ERR = 2ull,
/*
* RGM 3 = max frame error (pkt len > max frame len) (with FCS
* error)
*/
CVMX_PIP_OVER_FCS_ERR = 3ull,
/* RGM+SPI 4 = max frame error (pkt len > max frame len) */
CVMX_PIP_OVER_ERR = 4ull,
/*
* RGM 5 = nibble error (data not byte multiple - 100M and 10M
* only)
*/
CVMX_PIP_ALIGN_ERR = 5ull,
/*
* RGM 6 = min frame error (pkt len < min frame len) (with FCS
* error)
*/
CVMX_PIP_UNDER_FCS_ERR = 6ull,
/* RGM 7 = FCS error */
CVMX_PIP_GMX_FCS_ERR = 7ull,
/* RGM+SPI 8 = min frame error (pkt len < min frame len) */
CVMX_PIP_UNDER_ERR = 8ull,
/* RGM 9 = Frame carrier extend error */
CVMX_PIP_EXTEND_ERR = 9ull,
/*
* RGM 10 = length mismatch (len did not match len in L2
* length/type)
*/
CVMX_PIP_LENGTH_ERR = 10ull,
/* RGM 11 = Frame error (some or all data bits marked err) */
CVMX_PIP_DAT_ERR = 11ull,
/* SPI 11 = DIP4 error */
CVMX_PIP_DIP_ERR = 11ull,
/*
* RGM 12 = packet was not large enough to pass the skipper -
* no inspection could occur.
*/
CVMX_PIP_SKIP_ERR = 12ull,
/*
	 * RGM 13 = stutter error (data not repeated - 100M and 10M
* only)
*/
CVMX_PIP_NIBBLE_ERR = 13ull,
/* RGM+SPI 16 = FCS error */
CVMX_PIP_PIP_FCS = 16L,
/*
* RGM+SPI+PCI 17 = packet was not large enough to pass the
* skipper - no inspection could occur.
*/
CVMX_PIP_PIP_SKIP_ERR = 17L,
/*
* RGM+SPI+PCI 18 = malformed l2 (packet not long enough to
* cover L2 hdr).
*/
CVMX_PIP_PIP_L2_MAL_HDR = 18L
/*
	 * NOTES: xx = late collision (data received before the collision);
	 *       late collisions cannot be detected by the receiver.
	 *       They would appear as JAM bits, which would show up as
	 *       bad FCS or as a carrier extend error
	 *       (CVMX_PIP_EXTEND_ERR).
*/
} cvmx_pip_rcv_err_t;
/**
* This defines the err_code field errors in the work Q entry
*/
typedef union {
cvmx_pip_l4_err_t l4_err;
cvmx_pip_ip_exc_t ip_exc;
cvmx_pip_rcv_err_t rcv_err;
} cvmx_pip_err_t;
/**
* Status statistics for a port
*/
typedef struct {
/* Inbound octets marked to be dropped by the IPD */
uint32_t dropped_octets;
/* Inbound packets marked to be dropped by the IPD */
uint32_t dropped_packets;
/* RAW PCI Packets received by PIP per port */
uint32_t pci_raw_packets;
/* Number of octets processed by PIP */
uint32_t octets;
/* Number of packets processed by PIP */
uint32_t packets;
/*
* Number of identified L2 multicast packets. Does not
* include broadcast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
uint32_t multicast_packets;
/*
* Number of identified L2 broadcast packets. Does not
* include multicast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
uint32_t broadcast_packets;
/* Number of 64B packets */
uint32_t len_64_packets;
/* Number of 65-127B packets */
uint32_t len_65_127_packets;
/* Number of 128-255B packets */
uint32_t len_128_255_packets;
/* Number of 256-511B packets */
uint32_t len_256_511_packets;
/* Number of 512-1023B packets */
uint32_t len_512_1023_packets;
/* Number of 1024-1518B packets */
uint32_t len_1024_1518_packets;
/* Number of 1519-max packets */
uint32_t len_1519_max_packets;
/* Number of packets with FCS or Align opcode errors */
uint32_t fcs_align_err_packets;
/* Number of packets with length < min */
uint32_t runt_packets;
/* Number of packets with length < min and FCS error */
uint32_t runt_crc_packets;
/* Number of packets with length > max */
uint32_t oversize_packets;
/* Number of packets with length > max and FCS error */
uint32_t oversize_crc_packets;
/* Number of packets without GMX/SPX/PCI errors received by PIP */
uint32_t inb_packets;
/*
* Total number of octets from all packets received by PIP,
* including CRC
*/
uint64_t inb_octets;
/* Number of packets with GMX/SPX/PCI errors received by PIP */
uint16_t inb_errors;
} cvmx_pip_port_status_t;
/**
* Definition of the PIP custom header that can be prepended
* to a packet by external hardware.
*/
typedef union {
uint64_t u64;
struct {
/*
* Documented as R - Set if the Packet is RAWFULL. If
* set, this header must be the full 8 bytes.
*/
uint64_t rawfull:1;
/* Must be zero */
uint64_t reserved0:5;
/* PIP parse mode for this packet */
uint64_t parse_mode:2;
/* Must be zero */
uint64_t reserved1:1;
/*
* Skip amount, including this header, to the
* beginning of the packet
*/
uint64_t skip_len:7;
/* Must be zero */
uint64_t reserved2:6;
/* POW input queue for this packet */
uint64_t qos:3;
/* POW input group for this packet */
uint64_t grp:4;
/*
* Flag to store this packet in the work queue entry,
* if possible
*/
uint64_t rs:1;
/* POW input tag type */
uint64_t tag_type:2;
/* POW input tag */
uint64_t tag:32;
} s;
} cvmx_pip_pkt_inst_hdr_t;
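/*
 * Illustrative sketch, not part of the original SDK header: build a
 * packet instruction header for a RAWFULL packet that skips its own 8
 * bytes and requests an ordered POW tag. The helper name and the
 * chosen field values are example assumptions; only fields documented
 * in the union above are used.
 */
static inline uint64_t cvmx_pip_example_pkt_inst_hdr(uint32_t tag)
{
	cvmx_pip_pkt_inst_hdr_t hdr;

	hdr.u64 = 0;		/* clears all "must be zero" fields */
	hdr.s.rawfull = 1;	/* full 8 byte header is present */
	hdr.s.parse_mode = 0;	/* leave the hardware default parse mode */
	hdr.s.skip_len = 8;	/* skip this header to reach the packet */
	hdr.s.qos = 0;		/* POW input queue */
	hdr.s.grp = 0;		/* POW input group */
	hdr.s.tag_type = 0;	/* 0 is the ordered tag type encoding */
	hdr.s.tag = tag;	/* caller supplied POW tag */
	return hdr.u64;
}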
/* CSR typedefs have been moved to cvmx-csr-*.h */
/**
* Configure an ethernet input port
*
* @port_num: Port number to configure
* @port_cfg: Port hardware configuration
* @port_tag_cfg:
* Port POW tagging configuration
*/
static inline void cvmx_pip_config_port(uint64_t port_num,
union cvmx_pip_prt_cfgx port_cfg,
union cvmx_pip_prt_tagx port_tag_cfg)
{
cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64);
cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64);
}
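/*
 * Illustrative sketch, not part of the original SDK header: a typical
 * read-modify-write pattern around cvmx_pip_config_port(). The current
 * CSR values are read back, individual fields from cvmx-pip-defs.h
 * would be adjusted by the caller, and both registers are then written
 * in one call. The helper name is an example assumption.
 */
static inline void cvmx_pip_example_reconfig_port(uint64_t port_num)
{
	union cvmx_pip_prt_cfgx port_cfg;
	union cvmx_pip_prt_tagx port_tag_cfg;

	port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(port_num));
	port_tag_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(port_num));
	/* adjust port_cfg.s / port_tag_cfg.s fields here as needed */
	cvmx_pip_config_port(port_num, port_cfg, port_tag_cfg);
}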
#if 0
/**
* @deprecated This function is a thin wrapper around the Pass1 version
* of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
 * setting the group that is incompatible with this function;
 * the preferred upgrade path is to use the CSR directly.
*
* Configure the global QoS packet watchers. Each watcher is
* capable of matching a field in a packet to determine the
* QoS queue for scheduling.
*
* @watcher: Watcher number to configure (0 - 3).
* @match_type: Watcher match type
* @match_value:
* Value the watcher will match against
* @qos: QoS queue for packets matching this watcher
*/
static inline void cvmx_pip_config_watcher(uint64_t watcher,
cvmx_pip_qos_watch_types match_type,
uint64_t match_value, uint64_t qos)
{
cvmx_pip_port_watcher_cfg_t watcher_config;
watcher_config.u64 = 0;
watcher_config.s.match_type = match_type;
watcher_config.s.match_value = match_value;
watcher_config.s.qos = qos;
cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), watcher_config.u64);
}
#endif
/**
* Configure the VLAN priority to QoS queue mapping.
*
* @vlan_priority:
* VLAN priority (0-7)
 * @qos: QoS queue for packets with this VLAN priority
*/
static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
uint64_t qos)
{
union cvmx_pip_qos_vlanx pip_qos_vlanx;
pip_qos_vlanx.u64 = 0;
pip_qos_vlanx.s.qos = qos;
cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
}
/**
* Configure the Diffserv to QoS queue mapping.
*
* @diffserv: Diffserv field value (0-63)
 * @qos: QoS queue for packets with this Diffserv value
*/
static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos)
{
union cvmx_pip_qos_diffx pip_qos_diffx;
pip_qos_diffx.u64 = 0;
pip_qos_diffx.s.qos = qos;
cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
}
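/*
 * Illustrative sketch, not part of the original SDK header: map every
 * VLAN priority (0-7) and every Diffserv code point (0-63) to a single
 * QoS queue using the two helpers above. The helper name and the flat
 * mapping are example assumptions.
 */
static inline void cvmx_pip_example_flat_qos(uint64_t qos)
{
	uint64_t i;

	for (i = 0; i < 8; i++)
		cvmx_pip_config_vlan_qos(i, qos);
	for (i = 0; i < 64; i++)
		cvmx_pip_config_diffserv_qos(i, qos);
}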
/**
* Get the status counters for a port.
*
* @port_num: Port number to get statistics for.
* @clear: Set to 1 to clear the counters after they are read
* @status: Where to put the results.
*/
static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
cvmx_pip_port_status_t *status)
{
union cvmx_pip_stat_ctl pip_stat_ctl;
union cvmx_pip_stat0_prtx stat0;
union cvmx_pip_stat1_prtx stat1;
union cvmx_pip_stat2_prtx stat2;
union cvmx_pip_stat3_prtx stat3;
union cvmx_pip_stat4_prtx stat4;
union cvmx_pip_stat5_prtx stat5;
union cvmx_pip_stat6_prtx stat6;
union cvmx_pip_stat7_prtx stat7;
union cvmx_pip_stat8_prtx stat8;
union cvmx_pip_stat9_prtx stat9;
union cvmx_pip_stat_inb_pktsx pip_stat_inb_pktsx;
union cvmx_pip_stat_inb_octsx pip_stat_inb_octsx;
union cvmx_pip_stat_inb_errsx pip_stat_inb_errsx;
pip_stat_ctl.u64 = 0;
pip_stat_ctl.s.rdclr = clear;
cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);
stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num));
stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num));
stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num));
stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num));
stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num));
stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num));
stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num));
stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num));
stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num));
stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num));
pip_stat_inb_pktsx.u64 =
cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num));
pip_stat_inb_octsx.u64 =
cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num));
pip_stat_inb_errsx.u64 =
cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num));
status->dropped_octets = stat0.s.drp_octs;
status->dropped_packets = stat0.s.drp_pkts;
status->octets = stat1.s.octs;
status->pci_raw_packets = stat2.s.raw;
status->packets = stat2.s.pkts;
status->multicast_packets = stat3.s.mcst;
status->broadcast_packets = stat3.s.bcst;
status->len_64_packets = stat4.s.h64;
status->len_65_127_packets = stat4.s.h65to127;
status->len_128_255_packets = stat5.s.h128to255;
status->len_256_511_packets = stat5.s.h256to511;
status->len_512_1023_packets = stat6.s.h512to1023;
status->len_1024_1518_packets = stat6.s.h1024to1518;
status->len_1519_max_packets = stat7.s.h1519;
status->fcs_align_err_packets = stat7.s.fcs;
status->runt_packets = stat8.s.undersz;
status->runt_crc_packets = stat8.s.frag;
status->oversize_packets = stat9.s.oversz;
status->oversize_crc_packets = stat9.s.jabber;
status->inb_packets = pip_stat_inb_pktsx.s.pkts;
status->inb_octets = pip_stat_inb_octsx.s.octs;
status->inb_errors = pip_stat_inb_errsx.s.errs;
if (cvmx_octeon_is_pass1()) {
/*
* Kludge to fix Octeon Pass 1 errata - Drop counts
* don't work.
*/
if (status->inb_packets > status->packets)
status->dropped_packets =
status->inb_packets - status->packets;
else
status->dropped_packets = 0;
if (status->inb_octets - status->inb_packets * 4 >
status->octets)
status->dropped_octets =
status->inb_octets - status->inb_packets * 4 -
status->octets;
else
status->dropped_octets = 0;
}
}
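/*
 * Illustrative sketch, not part of the original SDK header: read and
 * clear the counters for a port and report the drop totals.
 * cvmx_dprintf() is assumed to be available, as in other cvmx code;
 * the helper name is an example assumption.
 */
static inline void cvmx_pip_example_dump_drops(uint64_t port_num)
{
	cvmx_pip_port_status_t status;

	cvmx_pip_get_port_status(port_num, 1, &status);
	cvmx_dprintf("port %llu: %u packets, %u dropped packets, %u dropped octets\n",
		     (unsigned long long)port_num, status.packets,
		     status.dropped_packets, status.dropped_octets);
}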
/**
* Configure the hardware CRC engine
*
* @interface: Interface to configure (0 or 1)
* @invert_result:
* Invert the result of the CRC
* @reflect: Reflect
* @initialization_vector:
* CRC initialization vector
*/
static inline void cvmx_pip_config_crc(uint64_t interface,
uint64_t invert_result, uint64_t reflect,
uint32_t initialization_vector)
{
if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
union cvmx_pip_crc_ctlx config;
union cvmx_pip_crc_ivx pip_crc_ivx;
config.u64 = 0;
config.s.invres = invert_result;
config.s.reflect = reflect;
cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64);
pip_crc_ivx.u64 = 0;
pip_crc_ivx.s.iv = initialization_vector;
cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64);
}
}
/**
* Clear all bits in a tag mask. This should be called on
* startup before any calls to cvmx_pip_tag_mask_set. Each bit
 * set in the final mask represents a byte used in the packet for
* tag generation.
*
* @mask_index: Which tag mask to clear (0..3)
*/
static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
{
uint64_t index;
union cvmx_pip_tag_incx pip_tag_incx;
pip_tag_incx.u64 = 0;
pip_tag_incx.s.en = 0;
for (index = mask_index * 16; index < (mask_index + 1) * 16; index++)
cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
}
/**
* Sets a range of bits in the tag mask. The tag mask is used
* when the cvmx_pip_port_tag_cfg_t tag_mode is non zero.
* There are four separate masks that can be configured.
*
* @mask_index: Which tag mask to modify (0..3)
* @offset: Offset into the bitmask to set bits at. Use the GCC macro
* offsetof() to determine the offsets into packet headers.
* For example, offsetof(ethhdr, protocol) returns the offset
* of the ethernet protocol field. The bitmask selects which
 * bytes to include in the tag, with bit offset X selecting
* byte at offset X from the beginning of the packet data.
* @len: Number of bytes to include. Usually this is the sizeof()
* the field.
*/
static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
uint64_t len)
{
while (len--) {
union cvmx_pip_tag_incx pip_tag_incx;
uint64_t index = mask_index * 16 + offset / 8;
pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index));
pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
offset++;
}
}
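/*
 * Illustrative sketch, not part of the original SDK header: restrict
 * tag generation mask 0 to the two bytes of the Ethernet type field at
 * byte offset 12, following the offsetof() note above. The helper name
 * and the choice of mask index are example assumptions.
 */
static inline void cvmx_pip_example_tag_on_ethertype(void)
{
	cvmx_pip_tag_mask_clear(0);
	cvmx_pip_tag_mask_set(0, 12, 2);	/* bytes 12-13 of the packet */
}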
#endif /* __CVMX_PIP_H__ */

File diff suppressed because it is too large


@ -0,0 +1,610 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* Interface to the hardware Packet Output unit.
*
* Starting with SDK 1.7.0, the PKO output functions now support
* two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
* function similarly to previous SDKs by using POW atomic tags
* to preserve ordering and exclusivity. As a new option, you
 * can now pass CVMX_PKO_LOCK_CMD_QUEUE, which uses an ll/sc
 * memory based lock instead. This locking has the advantage
 * of not affecting the tag state but doesn't preserve packet
 * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
 * generic code, while CVMX_PKO_LOCK_ATOMIC_TAG should be used
 * with hand tuned fast path code.
*
 * Some other SDK differences visible with the command
 * queuing:
* - PKO indexes are no longer stored in the FAU. A large
* percentage of the FAU register block used to be tied up
* maintaining PKO queue pointers. These are now stored in a
* global named block.
* - The PKO <b>use_locking</b> parameter can now have a global
 *     effect. Since all applications use the same named block,
* queue locking correctly applies across all operating
* systems when using CVMX_PKO_LOCK_CMD_QUEUE.
* - PKO 3 word commands are now supported. Use
* cvmx_pko_send_packet_finish3().
*
*/
#ifndef __CVMX_PKO_H__
#define __CVMX_PKO_H__
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-cmd-queue.h>
#include <asm/octeon/cvmx-pko-defs.h>
/* Adjust the command buffer size by 1 word so that in the case of using only
 * two word PKO commands no command words straddle buffers. The useful values
* for this are 0 and 1. */
#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
(OCTEON_IS_MODEL(OCTEON_CN58XX) || \
OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
#define CVMX_PKO_NUM_OUTPUT_PORTS 40
/* use this for queues that are not used */
#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
#define CVMX_PKO_MAX_QUEUE_DEPTH 0
typedef enum {
CVMX_PKO_SUCCESS,
CVMX_PKO_INVALID_PORT,
CVMX_PKO_INVALID_QUEUE,
CVMX_PKO_INVALID_PRIORITY,
CVMX_PKO_NO_MEMORY,
CVMX_PKO_PORT_ALREADY_SETUP,
CVMX_PKO_CMD_QUEUE_INIT_ERROR
} cvmx_pko_status_t;
/**
 * This enumeration represents the different locking modes supported by PKO.
*/
typedef enum {
/*
* PKO doesn't do any locking. It is the responsibility of the
* application to make sure that no other core is accessing
* the same queue at the same time
*/
CVMX_PKO_LOCK_NONE = 0,
/*
	 * PKO performs an atomic tagswitch to ensure exclusive access
* to the output queue. This will maintain packet ordering on
* output.
*/
CVMX_PKO_LOCK_ATOMIC_TAG = 1,
/*
	 * PKO uses the common command queue locks to ensure exclusive
* access to the output queue. This is a memory based
* ll/sc. This is the most portable locking mechanism.
*/
CVMX_PKO_LOCK_CMD_QUEUE = 2,
} cvmx_pko_lock_t;
typedef struct {
uint32_t packets;
uint64_t octets;
uint64_t doorbell;
} cvmx_pko_port_status_t;
/**
* This structure defines the address to use on a packet enqueue
*/
typedef union {
uint64_t u64;
struct {
/* Must CVMX_IO_SEG */
uint64_t mem_space:2;
/* Must be zero */
uint64_t reserved:13;
/* Must be one */
uint64_t is_io:1;
/* The ID of the device on the non-coherent bus */
uint64_t did:8;
/* Must be zero */
uint64_t reserved2:4;
/* Must be zero */
uint64_t reserved3:18;
/*
* The hardware likes to have the output port in
		 * addition to the output queue.
*/
uint64_t port:6;
/*
* The output queue to send the packet to (0-127 are
* legal)
*/
uint64_t queue:9;
/* Must be zero */
uint64_t reserved4:3;
} s;
} cvmx_pko_doorbell_address_t;
/**
* Structure of the first packet output command word.
*/
typedef union {
uint64_t u64;
struct {
/*
* The size of the reg1 operation - could be 8, 16,
* 32, or 64 bits.
*/
uint64_t size1:2;
/*
* The size of the reg0 operation - could be 8, 16,
* 32, or 64 bits.
*/
uint64_t size0:2;
/*
* If set, subtract 1, if clear, subtract packet
* size.
*/
uint64_t subone1:1;
/*
* The register, subtract will be done if reg1 is
* non-zero.
*/
uint64_t reg1:11;
/* If set, subtract 1, if clear, subtract packet size */
uint64_t subone0:1;
/* The register, subtract will be done if reg0 is non-zero */
uint64_t reg0:11;
/*
* When set, interpret segment pointer and segment
* bytes in little endian order.
*/
uint64_t le:1;
/*
* When set, packet data not allocated in L2 cache by
* PKO.
*/
uint64_t n2:1;
/*
* If set and rsp is set, word3 contains a pointer to
* a work queue entry.
*/
uint64_t wqp:1;
/* If set, the hardware will send a response when done */
uint64_t rsp:1;
/*
* If set, the supplied pkt_ptr is really a pointer to
* a list of pkt_ptr's.
*/
uint64_t gather:1;
/*
* If ipoffp1 is non zero, (ipoffp1-1) is the number
* of bytes to IP header, and the hardware will
* calculate and insert the UDP/TCP checksum.
*/
uint64_t ipoffp1:7;
/*
* If set, ignore the I bit (force to zero) from all
* pointer structures.
*/
uint64_t ignore_i:1;
/*
* If clear, the hardware will attempt to free the
* buffers containing the packet.
*/
uint64_t dontfree:1;
/*
* The total number of segs in the packet, if gather
* set, also gather list length.
*/
uint64_t segs:6;
/* Including L2, but no trailing CRC */
uint64_t total_bytes:16;
} s;
} cvmx_pko_command_word0_t;
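/*
 * Illustrative sketch, not part of the original SDK header: fill in
 * command word 0 for a single-segment packet whose buffer the hardware
 * frees after transmit. Only fields documented in the union above are
 * used; the helper name and byte count are example assumptions.
 */
static inline uint64_t cvmx_pko_example_command_word0(uint64_t total_bytes)
{
	cvmx_pko_command_word0_t pko_command;

	pko_command.u64 = 0;
	pko_command.s.segs = 1;			/* one buffer, no gather list */
	pko_command.s.total_bytes = total_bytes; /* L2 included, no trailing CRC */
	pko_command.s.dontfree = 0;		/* hardware frees the buffer */
	return pko_command.u64;
}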
/* CSR typedefs have been moved to cvmx-csr-*.h */
/**
* Definition of internal state for Packet output processing
*/
typedef struct {
/* ptr to start of buffer, offset kept in FAU reg */
uint64_t *start_ptr;
} cvmx_pko_state_elem_t;
/**
* Call before any other calls to initialize the packet
* output system.
*/
extern void cvmx_pko_initialize_global(void);
extern int cvmx_pko_initialize_local(void);
/**
* Enables the packet output hardware. It must already be
* configured.
*/
extern void cvmx_pko_enable(void);
/**
* Disables the packet output. Does not affect any configuration.
*/
extern void cvmx_pko_disable(void);
/**
* Shutdown and free resources required by packet output.
*/
extern void cvmx_pko_shutdown(void);
/**
 * Configure an output port and the associated queues for use.
*
* @port: Port to configure.
* @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
* @priority: Array of priority levels for each queue. Values are
 *              allowed to be 1-8. A value of 8 gets 8 times the traffic
* of a value of 1. There must be num_queues elements in the
* array.
*/
extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
uint64_t base_queue,
uint64_t num_queues,
const uint64_t priority[]);
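/*
 * Illustrative sketch (not in the original header): configure a port
 * with a single queue of maximum weight.  The function name and the
 * priority value are assumptions for the example; base_queue must be a
 * queue number reserved for this port.
 */
static inline cvmx_pko_status_t
cvmx_pko_example_config(uint64_t port, uint64_t base_queue)
{
	/* one queue, weight 8 (the maximum share per the comment above) */
	const uint64_t priority[1] = { 8 };
	return cvmx_pko_config_port(port, base_queue, 1, priority);
}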
/**
* Ring the packet output doorbell. This tells the packet
* output hardware that "len" command words have been added
* to its pending list. This command includes the required
* CVMX_SYNCWS before the doorbell ring.
*
* @port: Port the packet is for
* @queue: Queue the packet is for
* @len: Length of the command in 64 bit words
*/
static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
uint64_t len)
{
cvmx_pko_doorbell_address_t ptr;
ptr.u64 = 0;
ptr.s.mem_space = CVMX_IO_SEG;
ptr.s.did = CVMX_OCT_DID_PKT_SEND;
ptr.s.is_io = 1;
ptr.s.port = port;
ptr.s.queue = queue;
/*
* Need to make sure output queue data is in DRAM before
* doorbell write.
*/
CVMX_SYNCWS;
cvmx_write_io(ptr.u64, len);
}
/**
* Prepare to send a packet. This may initiate a tag switch to
* get exclusive access to the output queue structure, and
* performs other prep work for the packet send operation.
*
* cvmx_pko_send_packet_finish() MUST be called after this function is called,
* and must be called with the same port/queue/use_locking arguments.
*
* The use_locking parameter allows the caller to use three
* possible locking modes.
* - CVMX_PKO_LOCK_NONE
* - PKO doesn't do any locking. It is the responsibility
* of the application to make sure that no other core
* is accessing the same queue at the same time.
* - CVMX_PKO_LOCK_ATOMIC_TAG
 *       - PKO performs an atomic tagswitch to ensure exclusive
* access to the output queue. This will maintain
* packet ordering on output.
* - CVMX_PKO_LOCK_CMD_QUEUE
 *       - PKO uses the common command queue locks to ensure
* exclusive access to the output queue. This is a
* memory based ll/sc. This is the most portable
* locking mechanism.
*
* NOTE: If atomic locking is used, the POW entry CANNOT be
* descheduled, as it does not contain a valid WQE pointer.
*
* @port: Port to send it on
* @queue: Queue to use
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
* CVMX_PKO_LOCK_CMD_QUEUE
*/
static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
cvmx_pko_lock_t use_locking)
{
if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {
/*
* Must do a full switch here to handle all cases. We
* use a fake WQE pointer, as the POW does not access
* this memory. The WQE pointer and group are only
* used if this work is descheduled, which is not
* supported by the
* cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
* combination. Note that this is a special case in
* which these fake values can be used - this is not a
* general technique.
*/
uint32_t tag =
CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |
CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |
(CVMX_TAG_SUBGROUP_MASK & queue);
cvmx_pow_tag_sw_full((cvmx_wqe_t *) cvmx_phys_to_ptr(0x80), tag,
CVMX_POW_TAG_TYPE_ATOMIC, 0);
}
}
/**
* Complete packet output. cvmx_pko_send_packet_prepare() must be
* called exactly once before this, and the same parameters must be
* passed to both cvmx_pko_send_packet_prepare() and
* cvmx_pko_send_packet_finish().
*
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
* PKO HW command word
* @packet: Packet to send
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
* CVMX_PKO_LOCK_CMD_QUEUE
*
 * Returns CVMX_PKO_SUCCESS on success, or an error code on
 * failure of output
*/
static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
uint64_t port,
uint64_t queue,
cvmx_pko_command_word0_t pko_command,
union cvmx_buf_ptr packet,
cvmx_pko_lock_t use_locking)
{
cvmx_cmd_queue_result_t result;
if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
cvmx_pow_tag_sw_wait();
result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
(use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
pko_command.u64, packet.u64);
if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
cvmx_pko_doorbell(port, queue, 2);
return CVMX_PKO_SUCCESS;
} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
|| (result == CVMX_CMD_QUEUE_FULL)) {
return CVMX_PKO_NO_MEMORY;
} else {
return CVMX_PKO_INVALID_QUEUE;
}
}
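/*
 * Illustrative usage (not in the original header): the expected calling
 * sequence for a two-word PKO command.  "packet" is assumed to already
 * describe a valid buffer, and port/queue must be a configured pair;
 * the wrapper name is an assumption for this sketch.
 */
static inline cvmx_pko_status_t
cvmx_pko_example_send(uint64_t port, uint64_t queue,
		      cvmx_pko_command_word0_t cmd,
		      union cvmx_buf_ptr packet)
{
	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);
	/* other per-packet work may happen between prepare and finish */
	return cvmx_pko_send_packet_finish(port, queue, cmd, packet,
					   CVMX_PKO_LOCK_CMD_QUEUE);
}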
/**
* Complete packet output. cvmx_pko_send_packet_prepare() must be
* called exactly once before this, and the same parameters must be
* passed to both cvmx_pko_send_packet_prepare() and
* cvmx_pko_send_packet_finish().
*
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
* PKO HW command word
* @packet: Packet to send
 * @addr: Physical address of a work queue entry or physical address
 *     to zero on completion.
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
* CVMX_PKO_LOCK_CMD_QUEUE
*
 * Returns CVMX_PKO_SUCCESS on success, or an error code on
 * failure of output
*/
static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3(
uint64_t port,
uint64_t queue,
cvmx_pko_command_word0_t pko_command,
union cvmx_buf_ptr packet,
uint64_t addr,
cvmx_pko_lock_t use_locking)
{
cvmx_cmd_queue_result_t result;
if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
cvmx_pow_tag_sw_wait();
result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
(use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
pko_command.u64, packet.u64, addr);
if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
cvmx_pko_doorbell(port, queue, 3);
return CVMX_PKO_SUCCESS;
} else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
|| (result == CVMX_CMD_QUEUE_FULL)) {
return CVMX_PKO_NO_MEMORY;
} else {
return CVMX_PKO_INVALID_QUEUE;
}
}
/**
* Return the pko output queue associated with a port and a specific core.
* In normal mode (PKO lockless operation is disabled), the value returned
* is the base queue.
*
* @port: Port number
* @core: Core to get queue for
*
* Returns Core-specific output queue
*/
static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
{
#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16
#endif
#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16
#endif
if (port < CVMX_PKO_MAX_PORTS_INTERFACE0)
return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core;
else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1)
return CVMX_PKO_MAX_PORTS_INTERFACE0 *
CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + (port -
16) *
CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core;
else if ((port >= 32) && (port < 36))
return CVMX_PKO_MAX_PORTS_INTERFACE0 *
CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
CVMX_PKO_MAX_PORTS_INTERFACE1 *
CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + (port -
32) *
CVMX_PKO_QUEUES_PER_PORT_PCI;
else if ((port >= 36) && (port < 40))
return CVMX_PKO_MAX_PORTS_INTERFACE0 *
CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
CVMX_PKO_MAX_PORTS_INTERFACE1 *
CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
4 * CVMX_PKO_QUEUES_PER_PORT_PCI + (port -
36) *
CVMX_PKO_QUEUES_PER_PORT_LOOP;
else
/* Given the limit on the number of ports we can map to
* CVMX_MAX_OUTPUT_QUEUES_STATIC queues (currently 256,
* divided among all cores), the remaining unmapped ports
* are assigned an illegal queue number */
return CVMX_PKO_ILLEGAL_QUEUE;
}
/**
* For a given port number, return the base pko output queue
* for the port.
*
* @port: Port number
* Returns Base output queue
*/
static inline int cvmx_pko_get_base_queue(int port)
{
return cvmx_pko_get_base_queue_per_core(port, 0);
}
/**
* For a given port number, return the number of pko output queues.
*
* @port: Port number
* Returns Number of output queues
*/
static inline int cvmx_pko_get_num_queues(int port)
{
if (port < 16)
return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
else if (port < 32)
return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
else if (port < 36)
return CVMX_PKO_QUEUES_PER_PORT_PCI;
else if (port < 40)
return CVMX_PKO_QUEUES_PER_PORT_LOOP;
else
return 0;
}
/**
* Get the status counters for a port.
*
* @port_num: Port number to get statistics for.
* @clear: Set to 1 to clear the counters after they are read
* @status: Where to put the results.
*/
static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
cvmx_pko_port_status_t *status)
{
union cvmx_pko_reg_read_idx pko_reg_read_idx;
union cvmx_pko_mem_count0 pko_mem_count0;
union cvmx_pko_mem_count1 pko_mem_count1;
pko_reg_read_idx.u64 = 0;
pko_reg_read_idx.s.index = port_num;
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
status->packets = pko_mem_count0.s.count;
if (clear) {
pko_mem_count0.s.count = port_num;
cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
}
pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
status->octets = pko_mem_count1.s.count;
if (clear) {
pko_mem_count1.s.count = port_num;
cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
}
if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
union cvmx_pko_mem_debug9 debug9;
pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
status->doorbell = debug9.cn38xx.doorbell;
} else {
union cvmx_pko_mem_debug8 debug8;
pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
status->doorbell = debug8.cn58xx.doorbell;
}
}
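/*
 * Illustrative usage (not in the original header): read-and-clear the
 * counters for a port and return the octet count.  The function name
 * is an assumption for this sketch.
 */
static inline uint64_t cvmx_pko_example_read_octets(uint64_t port)
{
	cvmx_pko_port_status_t status;
	cvmx_pko_get_port_status(port, 1, &status);
	return status.octets;
}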
/**
* Rate limit a PKO port to a max packets/sec. This function is only
* supported on CN57XX, CN56XX, CN55XX, and CN54XX.
*
* @port: Port to rate limit
 * @packets_s: Maximum packets/sec
* @burst: Maximum number of packets to burst in a row before rate
* limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
/**
* Rate limit a PKO port to a max bits/sec. This function is only
* supported on CN57XX, CN56XX, CN55XX, and CN54XX.
*
* @port: Port to rate limit
* @bits_s: PKO rate limit in bits/sec
* @burst: Maximum number of bits to burst before rate
* limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst);
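/*
 * Illustrative usage (not in the original header): cap a port at an
 * arbitrary example rate of 10000 packets/sec with a 32-packet burst.
 * Only meaningful on the CN54XX-CN57XX family listed above.
 */
static inline int cvmx_pko_example_rate_limit(int port)
{
	return cvmx_pko_rate_limit_packets(port, 10000, 32);
}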
#endif /* __CVMX_PKO_H__ */

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,224 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_RNM_DEFS_H__
#define __CVMX_RNM_DEFS_H__
#define CVMX_RNM_BIST_STATUS (CVMX_ADD_IO_SEG(0x0001180040000008ull))
#define CVMX_RNM_CTL_STATUS (CVMX_ADD_IO_SEG(0x0001180040000000ull))
#define CVMX_RNM_EER_DBG (CVMX_ADD_IO_SEG(0x0001180040000018ull))
#define CVMX_RNM_EER_KEY (CVMX_ADD_IO_SEG(0x0001180040000010ull))
#define CVMX_RNM_SERIAL_NUM (CVMX_ADD_IO_SEG(0x0001180040000020ull))
union cvmx_rnm_bist_status {
uint64_t u64;
struct cvmx_rnm_bist_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
uint64_t rrc:1;
uint64_t mem:1;
#else
uint64_t mem:1;
uint64_t rrc:1;
uint64_t reserved_2_63:62;
#endif
} s;
struct cvmx_rnm_bist_status_s cn30xx;
struct cvmx_rnm_bist_status_s cn31xx;
struct cvmx_rnm_bist_status_s cn38xx;
struct cvmx_rnm_bist_status_s cn38xxp2;
struct cvmx_rnm_bist_status_s cn50xx;
struct cvmx_rnm_bist_status_s cn52xx;
struct cvmx_rnm_bist_status_s cn52xxp1;
struct cvmx_rnm_bist_status_s cn56xx;
struct cvmx_rnm_bist_status_s cn56xxp1;
struct cvmx_rnm_bist_status_s cn58xx;
struct cvmx_rnm_bist_status_s cn58xxp1;
struct cvmx_rnm_bist_status_s cn61xx;
struct cvmx_rnm_bist_status_s cn63xx;
struct cvmx_rnm_bist_status_s cn63xxp1;
struct cvmx_rnm_bist_status_s cn66xx;
struct cvmx_rnm_bist_status_s cn68xx;
struct cvmx_rnm_bist_status_s cn68xxp1;
struct cvmx_rnm_bist_status_s cnf71xx;
};
union cvmx_rnm_ctl_status {
uint64_t u64;
struct cvmx_rnm_ctl_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t dis_mak:1;
uint64_t eer_lck:1;
uint64_t eer_val:1;
uint64_t ent_sel:4;
uint64_t exp_ent:1;
uint64_t rng_rst:1;
uint64_t rnm_rst:1;
uint64_t rng_en:1;
uint64_t ent_en:1;
#else
uint64_t ent_en:1;
uint64_t rng_en:1;
uint64_t rnm_rst:1;
uint64_t rng_rst:1;
uint64_t exp_ent:1;
uint64_t ent_sel:4;
uint64_t eer_val:1;
uint64_t eer_lck:1;
uint64_t dis_mak:1;
uint64_t reserved_12_63:52;
#endif
} s;
struct cvmx_rnm_ctl_status_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t rng_rst:1;
uint64_t rnm_rst:1;
uint64_t rng_en:1;
uint64_t ent_en:1;
#else
uint64_t ent_en:1;
uint64_t rng_en:1;
uint64_t rnm_rst:1;
uint64_t rng_rst:1;
uint64_t reserved_4_63:60;
#endif
} cn30xx;
struct cvmx_rnm_ctl_status_cn30xx cn31xx;
struct cvmx_rnm_ctl_status_cn30xx cn38xx;
struct cvmx_rnm_ctl_status_cn30xx cn38xxp2;
struct cvmx_rnm_ctl_status_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t ent_sel:4;
uint64_t exp_ent:1;
uint64_t rng_rst:1;
uint64_t rnm_rst:1;
uint64_t rng_en:1;
uint64_t ent_en:1;
#else
uint64_t ent_en:1;
uint64_t rng_en:1;
uint64_t rnm_rst:1;
uint64_t rng_rst:1;
uint64_t exp_ent:1;
uint64_t ent_sel:4;
uint64_t reserved_9_63:55;
#endif
} cn50xx;
struct cvmx_rnm_ctl_status_cn50xx cn52xx;
struct cvmx_rnm_ctl_status_cn50xx cn52xxp1;
struct cvmx_rnm_ctl_status_cn50xx cn56xx;
struct cvmx_rnm_ctl_status_cn50xx cn56xxp1;
struct cvmx_rnm_ctl_status_cn50xx cn58xx;
struct cvmx_rnm_ctl_status_cn50xx cn58xxp1;
struct cvmx_rnm_ctl_status_s cn61xx;
struct cvmx_rnm_ctl_status_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
uint64_t eer_lck:1;
uint64_t eer_val:1;
uint64_t ent_sel:4;
uint64_t exp_ent:1;
uint64_t rng_rst:1;
uint64_t rnm_rst:1;
uint64_t rng_en:1;
uint64_t ent_en:1;
#else
uint64_t ent_en:1;
uint64_t rng_en:1;
uint64_t rnm_rst:1;
uint64_t rng_rst:1;
uint64_t exp_ent:1;
uint64_t ent_sel:4;
uint64_t eer_val:1;
uint64_t eer_lck:1;
uint64_t reserved_11_63:53;
#endif
} cn63xx;
struct cvmx_rnm_ctl_status_cn63xx cn63xxp1;
struct cvmx_rnm_ctl_status_s cn66xx;
struct cvmx_rnm_ctl_status_cn63xx cn68xx;
struct cvmx_rnm_ctl_status_cn63xx cn68xxp1;
struct cvmx_rnm_ctl_status_s cnf71xx;
};
union cvmx_rnm_eer_dbg {
uint64_t u64;
struct cvmx_rnm_eer_dbg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t dat:64;
#else
uint64_t dat:64;
#endif
} s;
struct cvmx_rnm_eer_dbg_s cn61xx;
struct cvmx_rnm_eer_dbg_s cn63xx;
struct cvmx_rnm_eer_dbg_s cn63xxp1;
struct cvmx_rnm_eer_dbg_s cn66xx;
struct cvmx_rnm_eer_dbg_s cn68xx;
struct cvmx_rnm_eer_dbg_s cn68xxp1;
struct cvmx_rnm_eer_dbg_s cnf71xx;
};
union cvmx_rnm_eer_key {
uint64_t u64;
struct cvmx_rnm_eer_key_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t key:64;
#else
uint64_t key:64;
#endif
} s;
struct cvmx_rnm_eer_key_s cn61xx;
struct cvmx_rnm_eer_key_s cn63xx;
struct cvmx_rnm_eer_key_s cn63xxp1;
struct cvmx_rnm_eer_key_s cn66xx;
struct cvmx_rnm_eer_key_s cn68xx;
struct cvmx_rnm_eer_key_s cn68xxp1;
struct cvmx_rnm_eer_key_s cnf71xx;
};
union cvmx_rnm_serial_num {
uint64_t u64;
struct cvmx_rnm_serial_num_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t dat:64;
#else
uint64_t dat:64;
#endif
} s;
struct cvmx_rnm_serial_num_s cn61xx;
struct cvmx_rnm_serial_num_s cn63xx;
struct cvmx_rnm_serial_num_s cn66xx;
struct cvmx_rnm_serial_num_s cn68xx;
struct cvmx_rnm_serial_num_s cn68xxp1;
struct cvmx_rnm_serial_num_s cnf71xx;
};
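/*
 * Illustrative sketch (not part of the original register definitions):
 * turn on the entropy source and the RNG core by setting ENT_EN and
 * RNG_EN in RNM_CTL_STATUS, mirroring what the Octeon RNG driver does.
 * cvmx_read_csr()/cvmx_write_csr() are assumed to be provided by the
 * including file; the function name is an assumption for this example.
 */
static inline void cvmx_rnm_example_enable(void)
{
	union cvmx_rnm_ctl_status ctl;
	ctl.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
	ctl.s.ent_en = 1;	/* enable the entropy source */
	ctl.s.rng_en = 1;	/* enable random number generation */
	cvmx_write_csr(CVMX_RNM_CTL_STATUS, ctl.u64);
}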
#endif


@ -0,0 +1,139 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* This file provides support for the processor local scratch memory.
* Scratch memory is byte addressable - all addresses are byte addresses.
*
*/
#ifndef __CVMX_SCRATCH_H__
#define __CVMX_SCRATCH_H__
/*
* Note: This define must be a long, not a long long in order to
* compile without warnings for both 32bit and 64bit.
*/
#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
/**
* Reads an 8 bit value from the processor local scratchpad memory.
*
* @address: byte address to read from
*
* Returns value read
*/
static inline uint8_t cvmx_scratch_read8(uint64_t address)
{
return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address);
}
/**
* Reads a 16 bit value from the processor local scratchpad memory.
*
* @address: byte address to read from
*
* Returns value read
*/
static inline uint16_t cvmx_scratch_read16(uint64_t address)
{
return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address);
}
/**
* Reads a 32 bit value from the processor local scratchpad memory.
*
* @address: byte address to read from
*
* Returns value read
*/
static inline uint32_t cvmx_scratch_read32(uint64_t address)
{
return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address);
}
/**
* Reads a 64 bit value from the processor local scratchpad memory.
*
* @address: byte address to read from
*
* Returns value read
*/
static inline uint64_t cvmx_scratch_read64(uint64_t address)
{
return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address);
}
/**
* Writes an 8 bit value to the processor local scratchpad memory.
*
* @address: byte address to write to
* @value: value to write
*/
static inline void cvmx_scratch_write8(uint64_t address, uint64_t value)
{
*CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) =
(uint8_t) value;
}
/**
 * Writes a 16 bit value to the processor local scratchpad memory.
*
* @address: byte address to write to
* @value: value to write
*/
static inline void cvmx_scratch_write16(uint64_t address, uint64_t value)
{
*CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) =
(uint16_t) value;
}
/**
 * Writes a 32 bit value to the processor local scratchpad memory.
*
* @address: byte address to write to
* @value: value to write
*/
static inline void cvmx_scratch_write32(uint64_t address, uint64_t value)
{
*CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) =
(uint32_t) value;
}
/**
* Writes a 64 bit value to the processor local scratchpad memory.
*
* @address: byte address to write to
* @value: value to write
*/
static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
{
*CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value;
}
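/*
 * Illustrative usage (not in the original header): scratchpad offsets
 * are plain byte addresses, so two 64-bit values must be placed 8 bytes
 * apart.  The offsets 0 and 8 and the function name are arbitrary
 * choices for this example.
 */
static inline uint64_t cvmx_scratch_example_roundtrip(uint64_t a, uint64_t b)
{
	cvmx_scratch_write64(0, a);	/* first 64-bit slot */
	cvmx_scratch_write64(8, b);	/* next slot, 8 bytes later */
	return cvmx_scratch_read64(0) + cvmx_scratch_read64(8);
}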
#endif /* __CVMX_SCRATCH_H__ */

File diff suppressed because it is too large


@ -0,0 +1,364 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_SMIX_DEFS_H__
#define __CVMX_SMIX_DEFS_H__
static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003818ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003800ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003820ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003810ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256;
}
static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
{
switch (cvmx_get_octeon_family()) {
case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180000003808ull) + (offset) * 128;
}
return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256;
}
union cvmx_smix_clk {
uint64_t u64;
struct cvmx_smix_clk_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
uint64_t mode:1;
uint64_t reserved_21_23:3;
uint64_t sample_hi:5;
uint64_t sample_mode:1;
uint64_t reserved_14_14:1;
uint64_t clk_idle:1;
uint64_t preamble:1;
uint64_t sample:4;
uint64_t phase:8;
#else
uint64_t phase:8;
uint64_t sample:4;
uint64_t preamble:1;
uint64_t clk_idle:1;
uint64_t reserved_14_14:1;
uint64_t sample_mode:1;
uint64_t sample_hi:5;
uint64_t reserved_21_23:3;
uint64_t mode:1;
uint64_t reserved_25_63:39;
#endif
} s;
struct cvmx_smix_clk_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_21_63:43;
uint64_t sample_hi:5;
uint64_t sample_mode:1;
uint64_t reserved_14_14:1;
uint64_t clk_idle:1;
uint64_t preamble:1;
uint64_t sample:4;
uint64_t phase:8;
#else
uint64_t phase:8;
uint64_t sample:4;
uint64_t preamble:1;
uint64_t clk_idle:1;
uint64_t reserved_14_14:1;
uint64_t sample_mode:1;
uint64_t sample_hi:5;
uint64_t reserved_21_63:43;
#endif
} cn30xx;
struct cvmx_smix_clk_cn30xx cn31xx;
struct cvmx_smix_clk_cn30xx cn38xx;
struct cvmx_smix_clk_cn30xx cn38xxp2;
struct cvmx_smix_clk_s cn50xx;
struct cvmx_smix_clk_s cn52xx;
struct cvmx_smix_clk_s cn52xxp1;
struct cvmx_smix_clk_s cn56xx;
struct cvmx_smix_clk_s cn56xxp1;
struct cvmx_smix_clk_cn30xx cn58xx;
struct cvmx_smix_clk_cn30xx cn58xxp1;
struct cvmx_smix_clk_s cn61xx;
struct cvmx_smix_clk_s cn63xx;
struct cvmx_smix_clk_s cn63xxp1;
struct cvmx_smix_clk_s cn66xx;
struct cvmx_smix_clk_s cn68xx;
struct cvmx_smix_clk_s cn68xxp1;
struct cvmx_smix_clk_s cnf71xx;
};
union cvmx_smix_cmd {
uint64_t u64;
struct cvmx_smix_cmd_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t phy_op:2;
uint64_t reserved_13_15:3;
uint64_t phy_adr:5;
uint64_t reserved_5_7:3;
uint64_t reg_adr:5;
#else
uint64_t reg_adr:5;
uint64_t reserved_5_7:3;
uint64_t phy_adr:5;
uint64_t reserved_13_15:3;
uint64_t phy_op:2;
uint64_t reserved_18_63:46;
#endif
} s;
struct cvmx_smix_cmd_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t phy_op:1;
uint64_t reserved_13_15:3;
uint64_t phy_adr:5;
uint64_t reserved_5_7:3;
uint64_t reg_adr:5;
#else
uint64_t reg_adr:5;
uint64_t reserved_5_7:3;
uint64_t phy_adr:5;
uint64_t reserved_13_15:3;
uint64_t phy_op:1;
uint64_t reserved_17_63:47;
#endif
} cn30xx;
struct cvmx_smix_cmd_cn30xx cn31xx;
struct cvmx_smix_cmd_cn30xx cn38xx;
struct cvmx_smix_cmd_cn30xx cn38xxp2;
struct cvmx_smix_cmd_s cn50xx;
struct cvmx_smix_cmd_s cn52xx;
struct cvmx_smix_cmd_s cn52xxp1;
struct cvmx_smix_cmd_s cn56xx;
struct cvmx_smix_cmd_s cn56xxp1;
struct cvmx_smix_cmd_cn30xx cn58xx;
struct cvmx_smix_cmd_cn30xx cn58xxp1;
struct cvmx_smix_cmd_s cn61xx;
struct cvmx_smix_cmd_s cn63xx;
struct cvmx_smix_cmd_s cn63xxp1;
struct cvmx_smix_cmd_s cn66xx;
struct cvmx_smix_cmd_s cn68xx;
struct cvmx_smix_cmd_s cn68xxp1;
struct cvmx_smix_cmd_s cnf71xx;
};
union cvmx_smix_en {
uint64_t u64;
struct cvmx_smix_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t en:1;
#else
uint64_t en:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_smix_en_s cn30xx;
struct cvmx_smix_en_s cn31xx;
struct cvmx_smix_en_s cn38xx;
struct cvmx_smix_en_s cn38xxp2;
struct cvmx_smix_en_s cn50xx;
struct cvmx_smix_en_s cn52xx;
struct cvmx_smix_en_s cn52xxp1;
struct cvmx_smix_en_s cn56xx;
struct cvmx_smix_en_s cn56xxp1;
struct cvmx_smix_en_s cn58xx;
struct cvmx_smix_en_s cn58xxp1;
struct cvmx_smix_en_s cn61xx;
struct cvmx_smix_en_s cn63xx;
struct cvmx_smix_en_s cn63xxp1;
struct cvmx_smix_en_s cn66xx;
struct cvmx_smix_en_s cn68xx;
struct cvmx_smix_en_s cn68xxp1;
struct cvmx_smix_en_s cnf71xx;
};
union cvmx_smix_rd_dat {
uint64_t u64;
struct cvmx_smix_rd_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t pending:1;
uint64_t val:1;
uint64_t dat:16;
#else
uint64_t dat:16;
uint64_t val:1;
uint64_t pending:1;
uint64_t reserved_18_63:46;
#endif
} s;
struct cvmx_smix_rd_dat_s cn30xx;
struct cvmx_smix_rd_dat_s cn31xx;
struct cvmx_smix_rd_dat_s cn38xx;
struct cvmx_smix_rd_dat_s cn38xxp2;
struct cvmx_smix_rd_dat_s cn50xx;
struct cvmx_smix_rd_dat_s cn52xx;
struct cvmx_smix_rd_dat_s cn52xxp1;
struct cvmx_smix_rd_dat_s cn56xx;
struct cvmx_smix_rd_dat_s cn56xxp1;
struct cvmx_smix_rd_dat_s cn58xx;
struct cvmx_smix_rd_dat_s cn58xxp1;
struct cvmx_smix_rd_dat_s cn61xx;
struct cvmx_smix_rd_dat_s cn63xx;
struct cvmx_smix_rd_dat_s cn63xxp1;
struct cvmx_smix_rd_dat_s cn66xx;
struct cvmx_smix_rd_dat_s cn68xx;
struct cvmx_smix_rd_dat_s cn68xxp1;
struct cvmx_smix_rd_dat_s cnf71xx;
};
union cvmx_smix_wr_dat {
uint64_t u64;
struct cvmx_smix_wr_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
uint64_t pending:1;
uint64_t val:1;
uint64_t dat:16;
#else
uint64_t dat:16;
uint64_t val:1;
uint64_t pending:1;
uint64_t reserved_18_63:46;
#endif
} s;
struct cvmx_smix_wr_dat_s cn30xx;
struct cvmx_smix_wr_dat_s cn31xx;
struct cvmx_smix_wr_dat_s cn38xx;
struct cvmx_smix_wr_dat_s cn38xxp2;
struct cvmx_smix_wr_dat_s cn50xx;
struct cvmx_smix_wr_dat_s cn52xx;
struct cvmx_smix_wr_dat_s cn52xxp1;
struct cvmx_smix_wr_dat_s cn56xx;
struct cvmx_smix_wr_dat_s cn56xxp1;
struct cvmx_smix_wr_dat_s cn58xx;
struct cvmx_smix_wr_dat_s cn58xxp1;
struct cvmx_smix_wr_dat_s cn61xx;
struct cvmx_smix_wr_dat_s cn63xx;
struct cvmx_smix_wr_dat_s cn63xxp1;
struct cvmx_smix_wr_dat_s cn66xx;
struct cvmx_smix_wr_dat_s cn68xx;
struct cvmx_smix_wr_dat_s cn68xxp1;
struct cvmx_smix_wr_dat_s cnf71xx;
};
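/*
 * Illustrative sketch (not part of the original register definitions):
 * a clause-22 MDIO read built from the SMI registers above, following
 * the pattern used by the Octeon MDIO bus driver.  Assumptions for
 * this example: phy_op == 1 selects a read, the bus was already
 * enabled through SMIX_EN, and cvmx_read_csr()/cvmx_write_csr() are
 * provided by the including file.  A real caller would also bound the
 * polling loop with a timeout.
 */
static inline int cvmx_smix_example_mdio_read(int bus, int phy, int reg)
{
	union cvmx_smix_cmd cmd;
	union cvmx_smix_rd_dat dat;
	cmd.u64 = 0;
	cmd.s.phy_op = 1;	/* clause-22 read */
	cmd.s.phy_adr = phy;
	cmd.s.reg_adr = reg;
	cvmx_write_csr(CVMX_SMIX_CMD(bus), cmd.u64);
	do {
		dat.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus));
	} while (dat.s.pending);	/* wait for the transaction */
	return dat.s.val ? dat.s.dat : -1;	/* -1 if the read failed */
}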
#endif


@ -0,0 +1,269 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
*
* This file contains defines for the SPI interface
*/
#ifndef __CVMX_SPI_H__
#define __CVMX_SPI_H__
#include <asm/octeon/cvmx-gmxx-defs.h>
/* CSR typedefs have been moved to cvmx-csr-*.h */
typedef enum {
CVMX_SPI_MODE_UNKNOWN = 0,
CVMX_SPI_MODE_TX_HALFPLEX = 1,
CVMX_SPI_MODE_RX_HALFPLEX = 2,
CVMX_SPI_MODE_DUPLEX = 3
} cvmx_spi_mode_t;
/** Callbacks structure to customize SPI4 initialization sequence */
typedef struct {
/** Called to reset SPI4 DLL */
int (*reset_cb) (int interface, cvmx_spi_mode_t mode);
/** Called to setup calendar */
int (*calendar_setup_cb) (int interface, cvmx_spi_mode_t mode,
int num_ports);
/** Called for Tx and Rx clock detection */
int (*clock_detect_cb) (int interface, cvmx_spi_mode_t mode,
int timeout);
/** Called to perform link training */
int (*training_cb) (int interface, cvmx_spi_mode_t mode, int timeout);
/** Called for calendar data synchronization */
int (*calendar_sync_cb) (int interface, cvmx_spi_mode_t mode,
int timeout);
/** Called when interface is up */
int (*interface_up_cb) (int interface, cvmx_spi_mode_t mode);
} cvmx_spi_callbacks_t;
/**
* Return true if the supplied interface is configured for SPI
*
* @interface: Interface to check
* Returns True if interface is SPI
*/
static inline int cvmx_spi_is_spi_interface(int interface)
{
uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
return (gmxState & 0x2) && (gmxState & 0x1);
}
/**
* Initialize and start the SPI interface.
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* @num_ports: Number of SPI ports to configure
*
 * Returns Zero on success, negative on failure.
*/
extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
int timeout, int num_ports);
/**
* This routine restarts the SPI interface after it has lost synchronization
 * with its corresponding system.
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
 * Returns Zero on success, negative on failure.
*/
extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode,
int timeout);
/**
* Return non-zero if the SPI interface has a SPI4000 attached
*
* @interface: SPI interface the SPI4000 is connected to
*
 * Returns Non-zero if a SPI4000 is present, zero if not.
*/
static inline int cvmx_spi4000_is_present(int interface)
{
return 0;
}
/**
* Initialize the SPI4000 for use
*
* @interface: SPI interface the SPI4000 is connected to
*/
static inline int cvmx_spi4000_initialize(int interface)
{
return 0;
}
/**
 * Poll a SPI4000 port and check its speed
*
* @interface: Interface the SPI4000 is on
* @port: Port to poll (0-9)
 * Returns Status of the port. 0=down. Any other value means the port is up.
*/
static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
int interface,
int port)
{
union cvmx_gmxx_rxx_rx_inbnd r;
r.u64 = 0;
return r;
}
/**
* Get current SPI4 initialization callbacks
*
 * @callbacks: Pointer to the callbacks structure to fill
*
* Returns Pointer to cvmx_spi_callbacks_t structure.
*/
extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks);
/**
* Set new SPI4 initialization callbacks
*
* @new_callbacks: Pointer to an updated callbacks structure.
*/
extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
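/*
 * Illustrative usage (not in the original header): replace the link
 * training callback with a board-specific hook while keeping the other
 * defaults.  my_board_training_cb and the wrapper name are hypothetical
 * names for this example.
 */
static inline void cvmx_spi_example_override_training(
	int (*my_board_training_cb)(int, cvmx_spi_mode_t, int))
{
	cvmx_spi_callbacks_t callbacks;
	cvmx_spi_get_callbacks(&callbacks);
	callbacks.training_cb = my_board_training_cb;
	cvmx_spi_set_callbacks(&callbacks);
}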
/**
* Callback to perform SPI4 reset
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
/**
* Callback to setup calendar and miscellaneous settings before clock
* detection
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @num_ports: Number of ports to configure on SPI
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
int num_ports);
/**
* Callback to perform clock detection
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
int timeout);
/**
* Callback to perform link training
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for link to be trained (in seconds)
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
int timeout);
/**
* Callback to perform calendar data synchronization
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for calendar data in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
int timeout);
/**
* Callback to handle interface up
*
* @interface: The identifier of the packet interface to configure and
* use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
* can operate as a full duplex (both Tx and Rx data paths
* active) or as a halfplex (either the Tx data path is
* active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
*/
extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode);
#endif /* __CVMX_SPI_H__ */


@ -0,0 +1,232 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
* Implementation of spinlocks for Octeon CVMX. Although similar in
* function to Linux kernel spinlocks, they are not compatible.
* Octeon CVMX spinlocks are only used to synchronize with the boot
* monitor and other non-Linux programs running in the system.
*/
#ifndef __CVMX_SPINLOCK_H__
#define __CVMX_SPINLOCK_H__
#include <asm/octeon/cvmx-asm.h>
/* Spinlocks for Octeon */
/* define these to enable recursive spinlock debugging */
/*#define CVMX_SPINLOCK_DEBUG */
/**
* Spinlocks for Octeon CVMX
*/
typedef struct {
volatile uint32_t value;
} cvmx_spinlock_t;
/* note - macros not expanded in inline ASM, so values hardcoded */
#define CVMX_SPINLOCK_UNLOCKED_VAL 0
#define CVMX_SPINLOCK_LOCKED_VAL 1
#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
/**
* Initialize a spinlock
*
* @lock: Lock to initialize
*/
static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
{
lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
}
/**
* Return non-zero if the spinlock is currently locked
*
* @lock: Lock to check
* Returns Non-zero if locked
*/
static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
{
return lock->value != CVMX_SPINLOCK_UNLOCKED_VAL;
}
/**
* Releases lock
*
* @lock: pointer to lock structure
*/
static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
{
CVMX_SYNCWS;
lock->value = 0;
CVMX_SYNCWS;
}
/**
* Attempts to take the lock, but does not spin if lock is not available.
* May take some time to acquire the lock even if it is available
* due to the ll/sc not succeeding.
*
* @lock: pointer to lock structure
*
* Returns 0: lock successfully taken
* 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
__asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
" bnez %[tmp], 2f \n"
" li %[tmp], 1 \n"
" sc %[tmp], %[val] \n"
" beqz %[tmp], 1b \n"
" li %[tmp], 0 \n"
"2: \n"
".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
return tmp != 0; /* normalize to 0 or 1 */
}
/**
* Gets lock, spins until lock is taken
*
* @lock: pointer to lock structure
*/
static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
__asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
" bnez %[tmp], 1b \n"
" li %[tmp], 1 \n"
" sc %[tmp], %[val] \n"
" beqz %[tmp], 1b \n"
" nop \n"
".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
}
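/*
 * Illustrative usage (not in the original header): protect a shared
 * counter with a CVMX spinlock.  The lock, counter, and function name
 * are hypothetical names for this example.
 */
static inline void cvmx_spinlock_example_increment(cvmx_spinlock_t *lock,
						   uint64_t *counter)
{
	cvmx_spinlock_lock(lock);
	(*counter)++;
	cvmx_spinlock_unlock(lock);
}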
/** ********************************************************************
* Bit spinlocks
* These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
* The rest of the bits in the word are left undisturbed. This enables more
* compact data structures as only 1 bit is consumed for the lock.
*
*/
/**
* Gets lock, spins until lock is taken
* Preserves the low 31 bits of the 32 bit
* word used for the lock.
*
*
* @word: word to lock bit 31 of
*/
static inline void cvmx_spinlock_bit_lock(uint32_t *word)
{
unsigned int tmp;
unsigned int sav;
__asm__ __volatile__(".set noreorder \n"
".set noat \n"
"1: ll %[tmp], %[val] \n"
" bbit1 %[tmp], 31, 1b \n"
" li $at, 1 \n"
" ins %[tmp], $at, 31, 1 \n"
" sc %[tmp], %[val] \n"
" beqz %[tmp], 1b \n"
" nop \n"
".set at \n"
".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
: : "memory");
}
/**
* Attempts to get lock, returns immediately with success/failure
* Preserves the low 31 bits of the 32 bit
* word used for the lock.
*
*
* @word: word to lock bit 31 of
* Returns 0: lock successfully taken
* 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
{
unsigned int tmp;
__asm__ __volatile__(".set noreorder\n\t"
".set noat\n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
" bbit1 %[tmp], 31, 2f \n"
" li $at, 1 \n"
" ins %[tmp], $at, 31, 1 \n"
" sc %[tmp], %[val] \n"
" beqz %[tmp], 1b \n"
" li %[tmp], 0 \n"
"2: \n"
".set at \n"
".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp)
: : "memory");
return tmp != 0; /* normalize to 0 or 1 */
}
/**
* Releases bit lock
*
* Unconditionally clears bit 31 of the lock word. Note that this is
* done non-atomically, as this implementation assumes that the rest
* of the bits in the word are protected by the lock.
*
* @word: word to unlock bit 31 in
*/
static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
{
CVMX_SYNCWS;
*word &= ~(1UL << 31);
CVMX_SYNCWS;
}
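/*
 * Illustrative usage (not in the original header): bit 31 of *word is
 * the lock and bits 0..30 carry data, as described above.  "low_bits"
 * and the function name are assumptions for this example.
 */
static inline void cvmx_spinlock_example_bit_update(uint32_t *word,
						    uint32_t low_bits)
{
	cvmx_spinlock_bit_lock(word);
	/* keep the lock bit, replace the data bits */
	*word = (*word & (1UL << 31)) | (low_bits & 0x7fffffff);
	cvmx_spinlock_bit_unlock(word);
}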
#endif /* __CVMX_SPINLOCK_H__ */


@ -0,0 +1,506 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_SPXX_DEFS_H__
#define __CVMX_SPXX_DEFS_H__
#define CVMX_SPXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000340ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_BIST_STAT(block_id) (CVMX_ADD_IO_SEG(0x00011800900007F8ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_CLK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000348ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_CLK_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000350ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000368ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) (CVMX_ADD_IO_SEG(0x0001180090000370ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_DRV_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000358ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_ERR_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000320ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000318ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180090000308ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000300ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000310ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TPA_ACC(block_id) (CVMX_ADD_IO_SEG(0x0001180090000338ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TPA_MAX(block_id) (CVMX_ADD_IO_SEG(0x0001180090000330ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TPA_SEL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000328ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SPXX_TRN4_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000360ull) + ((block_id) & 1) * 0x8000000ull)
union cvmx_spxx_bckprs_cnt {
uint64_t u64;
struct cvmx_spxx_bckprs_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_spxx_bckprs_cnt_s cn38xx;
struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
struct cvmx_spxx_bckprs_cnt_s cn58xx;
struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
};
union cvmx_spxx_bist_stat {
uint64_t u64;
struct cvmx_spxx_bist_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
uint64_t stat2:1;
uint64_t stat1:1;
uint64_t stat0:1;
#else
uint64_t stat0:1;
uint64_t stat1:1;
uint64_t stat2:1;
uint64_t reserved_3_63:61;
#endif
} s;
struct cvmx_spxx_bist_stat_s cn38xx;
struct cvmx_spxx_bist_stat_s cn38xxp2;
struct cvmx_spxx_bist_stat_s cn58xx;
struct cvmx_spxx_bist_stat_s cn58xxp1;
};
union cvmx_spxx_clk_ctl {
uint64_t u64;
struct cvmx_spxx_clk_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t seetrn:1;
uint64_t reserved_12_15:4;
uint64_t clkdly:5;
uint64_t runbist:1;
uint64_t statdrv:1;
uint64_t statrcv:1;
uint64_t sndtrn:1;
uint64_t drptrn:1;
uint64_t rcvtrn:1;
uint64_t srxdlck:1;
#else
uint64_t srxdlck:1;
uint64_t rcvtrn:1;
uint64_t drptrn:1;
uint64_t sndtrn:1;
uint64_t statrcv:1;
uint64_t statdrv:1;
uint64_t runbist:1;
uint64_t clkdly:5;
uint64_t reserved_12_15:4;
uint64_t seetrn:1;
uint64_t reserved_17_63:47;
#endif
} s;
struct cvmx_spxx_clk_ctl_s cn38xx;
struct cvmx_spxx_clk_ctl_s cn38xxp2;
struct cvmx_spxx_clk_ctl_s cn58xx;
struct cvmx_spxx_clk_ctl_s cn58xxp1;
};
union cvmx_spxx_clk_stat {
uint64_t u64;
struct cvmx_spxx_clk_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
uint64_t stxcal:1;
uint64_t reserved_9_9:1;
uint64_t srxtrn:1;
uint64_t s4clk1:1;
uint64_t s4clk0:1;
uint64_t d4clk1:1;
uint64_t d4clk0:1;
uint64_t reserved_0_3:4;
#else
uint64_t reserved_0_3:4;
uint64_t d4clk0:1;
uint64_t d4clk1:1;
uint64_t s4clk0:1;
uint64_t s4clk1:1;
uint64_t srxtrn:1;
uint64_t reserved_9_9:1;
uint64_t stxcal:1;
uint64_t reserved_11_63:53;
#endif
} s;
struct cvmx_spxx_clk_stat_s cn38xx;
struct cvmx_spxx_clk_stat_s cn38xxp2;
struct cvmx_spxx_clk_stat_s cn58xx;
struct cvmx_spxx_clk_stat_s cn58xxp1;
};
union cvmx_spxx_dbg_deskew_ctl {
uint64_t u64;
struct cvmx_spxx_dbg_deskew_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_30_63:34;
uint64_t fallnop:1;
uint64_t fall8:1;
uint64_t reserved_26_27:2;
uint64_t sstep_go:1;
uint64_t sstep:1;
uint64_t reserved_22_23:2;
uint64_t clrdly:1;
uint64_t dec:1;
uint64_t inc:1;
uint64_t mux:1;
uint64_t offset:5;
uint64_t bitsel:5;
uint64_t offdly:6;
uint64_t dllfrc:1;
uint64_t dlldis:1;
#else
uint64_t dlldis:1;
uint64_t dllfrc:1;
uint64_t offdly:6;
uint64_t bitsel:5;
uint64_t offset:5;
uint64_t mux:1;
uint64_t inc:1;
uint64_t dec:1;
uint64_t clrdly:1;
uint64_t reserved_22_23:2;
uint64_t sstep:1;
uint64_t sstep_go:1;
uint64_t reserved_26_27:2;
uint64_t fall8:1;
uint64_t fallnop:1;
uint64_t reserved_30_63:34;
#endif
} s;
struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
};
union cvmx_spxx_dbg_deskew_state {
uint64_t u64;
struct cvmx_spxx_dbg_deskew_state_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t testres:1;
uint64_t unxterm:1;
uint64_t muxsel:2;
uint64_t offset:5;
#else
uint64_t offset:5;
uint64_t muxsel:2;
uint64_t unxterm:1;
uint64_t testres:1;
uint64_t reserved_9_63:55;
#endif
} s;
struct cvmx_spxx_dbg_deskew_state_s cn38xx;
struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
struct cvmx_spxx_dbg_deskew_state_s cn58xx;
struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
};
union cvmx_spxx_drv_ctl {
uint64_t u64;
struct cvmx_spxx_drv_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_0_63:64;
#else
uint64_t reserved_0_63:64;
#endif
} s;
struct cvmx_spxx_drv_ctl_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t stx4ncmp:4;
uint64_t stx4pcmp:4;
uint64_t srx4cmp:8;
#else
uint64_t srx4cmp:8;
uint64_t stx4pcmp:4;
uint64_t stx4ncmp:4;
uint64_t reserved_16_63:48;
#endif
} cn38xx;
struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
struct cvmx_spxx_drv_ctl_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
uint64_t stx4ncmp:4;
uint64_t stx4pcmp:4;
uint64_t reserved_10_15:6;
uint64_t srx4cmp:10;
#else
uint64_t srx4cmp:10;
uint64_t reserved_10_15:6;
uint64_t stx4pcmp:4;
uint64_t stx4ncmp:4;
uint64_t reserved_24_63:40;
#endif
} cn58xx;
struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
};
union cvmx_spxx_err_ctl {
uint64_t u64;
struct cvmx_spxx_err_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t prtnxa:1;
uint64_t dipcls:1;
uint64_t dippay:1;
uint64_t reserved_4_5:2;
uint64_t errcnt:4;
#else
uint64_t errcnt:4;
uint64_t reserved_4_5:2;
uint64_t dippay:1;
uint64_t dipcls:1;
uint64_t prtnxa:1;
uint64_t reserved_9_63:55;
#endif
} s;
struct cvmx_spxx_err_ctl_s cn38xx;
struct cvmx_spxx_err_ctl_s cn38xxp2;
struct cvmx_spxx_err_ctl_s cn58xx;
struct cvmx_spxx_err_ctl_s cn58xxp1;
};
union cvmx_spxx_int_dat {
uint64_t u64;
struct cvmx_spxx_int_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t mul:1;
uint64_t reserved_14_30:17;
uint64_t calbnk:2;
uint64_t rsvop:4;
uint64_t prt:8;
#else
uint64_t prt:8;
uint64_t rsvop:4;
uint64_t calbnk:2;
uint64_t reserved_14_30:17;
uint64_t mul:1;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_spxx_int_dat_s cn38xx;
struct cvmx_spxx_int_dat_s cn38xxp2;
struct cvmx_spxx_int_dat_s cn58xx;
struct cvmx_spxx_int_dat_s cn58xxp1;
};
union cvmx_spxx_int_msk {
uint64_t u64;
struct cvmx_spxx_int_msk_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t calerr:1;
uint64_t syncerr:1;
uint64_t diperr:1;
uint64_t tpaovr:1;
uint64_t rsverr:1;
uint64_t drwnng:1;
uint64_t clserr:1;
uint64_t spiovr:1;
uint64_t reserved_2_3:2;
uint64_t abnorm:1;
uint64_t prtnxa:1;
#else
uint64_t prtnxa:1;
uint64_t abnorm:1;
uint64_t reserved_2_3:2;
uint64_t spiovr:1;
uint64_t clserr:1;
uint64_t drwnng:1;
uint64_t rsverr:1;
uint64_t tpaovr:1;
uint64_t diperr:1;
uint64_t syncerr:1;
uint64_t calerr:1;
uint64_t reserved_12_63:52;
#endif
} s;
struct cvmx_spxx_int_msk_s cn38xx;
struct cvmx_spxx_int_msk_s cn38xxp2;
struct cvmx_spxx_int_msk_s cn58xx;
struct cvmx_spxx_int_msk_s cn58xxp1;
};
union cvmx_spxx_int_reg {
uint64_t u64;
struct cvmx_spxx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t spf:1;
uint64_t reserved_12_30:19;
uint64_t calerr:1;
uint64_t syncerr:1;
uint64_t diperr:1;
uint64_t tpaovr:1;
uint64_t rsverr:1;
uint64_t drwnng:1;
uint64_t clserr:1;
uint64_t spiovr:1;
uint64_t reserved_2_3:2;
uint64_t abnorm:1;
uint64_t prtnxa:1;
#else
uint64_t prtnxa:1;
uint64_t abnorm:1;
uint64_t reserved_2_3:2;
uint64_t spiovr:1;
uint64_t clserr:1;
uint64_t drwnng:1;
uint64_t rsverr:1;
uint64_t tpaovr:1;
uint64_t diperr:1;
uint64_t syncerr:1;
uint64_t calerr:1;
uint64_t reserved_12_30:19;
uint64_t spf:1;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_spxx_int_reg_s cn38xx;
struct cvmx_spxx_int_reg_s cn38xxp2;
struct cvmx_spxx_int_reg_s cn58xx;
struct cvmx_spxx_int_reg_s cn58xxp1;
};
union cvmx_spxx_int_sync {
uint64_t u64;
struct cvmx_spxx_int_sync_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
uint64_t calerr:1;
uint64_t syncerr:1;
uint64_t diperr:1;
uint64_t tpaovr:1;
uint64_t rsverr:1;
uint64_t drwnng:1;
uint64_t clserr:1;
uint64_t spiovr:1;
uint64_t reserved_2_3:2;
uint64_t abnorm:1;
uint64_t prtnxa:1;
#else
uint64_t prtnxa:1;
uint64_t abnorm:1;
uint64_t reserved_2_3:2;
uint64_t spiovr:1;
uint64_t clserr:1;
uint64_t drwnng:1;
uint64_t rsverr:1;
uint64_t tpaovr:1;
uint64_t diperr:1;
uint64_t syncerr:1;
uint64_t calerr:1;
uint64_t reserved_12_63:52;
#endif
} s;
struct cvmx_spxx_int_sync_s cn38xx;
struct cvmx_spxx_int_sync_s cn38xxp2;
struct cvmx_spxx_int_sync_s cn58xx;
struct cvmx_spxx_int_sync_s cn58xxp1;
};
union cvmx_spxx_tpa_acc {
uint64_t u64;
struct cvmx_spxx_tpa_acc_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_spxx_tpa_acc_s cn38xx;
struct cvmx_spxx_tpa_acc_s cn38xxp2;
struct cvmx_spxx_tpa_acc_s cn58xx;
struct cvmx_spxx_tpa_acc_s cn58xxp1;
};
union cvmx_spxx_tpa_max {
uint64_t u64;
struct cvmx_spxx_tpa_max_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t max:32;
#else
uint64_t max:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_spxx_tpa_max_s cn38xx;
struct cvmx_spxx_tpa_max_s cn38xxp2;
struct cvmx_spxx_tpa_max_s cn58xx;
struct cvmx_spxx_tpa_max_s cn58xxp1;
};
union cvmx_spxx_tpa_sel {
uint64_t u64;
struct cvmx_spxx_tpa_sel_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t prtsel:4;
#else
uint64_t prtsel:4;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_spxx_tpa_sel_s cn38xx;
struct cvmx_spxx_tpa_sel_s cn38xxp2;
struct cvmx_spxx_tpa_sel_s cn58xx;
struct cvmx_spxx_tpa_sel_s cn58xxp1;
};
union cvmx_spxx_trn4_ctl {
uint64_t u64;
struct cvmx_spxx_trn4_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
uint64_t trntest:1;
uint64_t jitter:3;
uint64_t clr_boot:1;
uint64_t set_boot:1;
uint64_t maxdist:5;
uint64_t macro_en:1;
uint64_t mux_en:1;
#else
uint64_t mux_en:1;
uint64_t macro_en:1;
uint64_t maxdist:5;
uint64_t set_boot:1;
uint64_t clr_boot:1;
uint64_t jitter:3;
uint64_t trntest:1;
uint64_t reserved_13_63:51;
#endif
} s;
struct cvmx_spxx_trn4_ctl_s cn38xx;
struct cvmx_spxx_trn4_ctl_s cn38xxp2;
struct cvmx_spxx_trn4_ctl_s cn58xx;
struct cvmx_spxx_trn4_ctl_s cn58xxp1;
};
#endif

File diff suppressed because it is too large


@ -0,0 +1,162 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_SRXX_DEFS_H__
#define __CVMX_SRXX_DEFS_H__
#define CVMX_SRXX_COM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000200ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SRXX_IGN_RX_FULL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000218ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SRXX_SPI4_CALX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180090000000ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_SRXX_SPI4_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000208ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SRXX_SW_TICK_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000220ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_SRXX_SW_TICK_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000228ull) + ((block_id) & 1) * 0x8000000ull)
union cvmx_srxx_com_ctl {
uint64_t u64;
struct cvmx_srxx_com_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t prts:4;
uint64_t st_en:1;
uint64_t reserved_1_2:2;
uint64_t inf_en:1;
#else
uint64_t inf_en:1;
uint64_t reserved_1_2:2;
uint64_t st_en:1;
uint64_t prts:4;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_srxx_com_ctl_s cn38xx;
struct cvmx_srxx_com_ctl_s cn38xxp2;
struct cvmx_srxx_com_ctl_s cn58xx;
struct cvmx_srxx_com_ctl_s cn58xxp1;
};
union cvmx_srxx_ign_rx_full {
uint64_t u64;
struct cvmx_srxx_ign_rx_full_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t ignore:16;
#else
uint64_t ignore:16;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_srxx_ign_rx_full_s cn38xx;
struct cvmx_srxx_ign_rx_full_s cn38xxp2;
struct cvmx_srxx_ign_rx_full_s cn58xx;
struct cvmx_srxx_ign_rx_full_s cn58xxp1;
};
union cvmx_srxx_spi4_calx {
uint64_t u64;
struct cvmx_srxx_spi4_calx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t oddpar:1;
uint64_t prt3:4;
uint64_t prt2:4;
uint64_t prt1:4;
uint64_t prt0:4;
#else
uint64_t prt0:4;
uint64_t prt1:4;
uint64_t prt2:4;
uint64_t prt3:4;
uint64_t oddpar:1;
uint64_t reserved_17_63:47;
#endif
} s;
struct cvmx_srxx_spi4_calx_s cn38xx;
struct cvmx_srxx_spi4_calx_s cn38xxp2;
struct cvmx_srxx_spi4_calx_s cn58xx;
struct cvmx_srxx_spi4_calx_s cn58xxp1;
};
union cvmx_srxx_spi4_stat {
uint64_t u64;
struct cvmx_srxx_spi4_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t m:8;
uint64_t reserved_7_7:1;
uint64_t len:7;
#else
uint64_t len:7;
uint64_t reserved_7_7:1;
uint64_t m:8;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_srxx_spi4_stat_s cn38xx;
struct cvmx_srxx_spi4_stat_s cn38xxp2;
struct cvmx_srxx_spi4_stat_s cn58xx;
struct cvmx_srxx_spi4_stat_s cn58xxp1;
};
union cvmx_srxx_sw_tick_ctl {
uint64_t u64;
struct cvmx_srxx_sw_tick_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
uint64_t eop:1;
uint64_t sop:1;
uint64_t mod:4;
uint64_t opc:4;
uint64_t adr:4;
#else
uint64_t adr:4;
uint64_t opc:4;
uint64_t mod:4;
uint64_t sop:1;
uint64_t eop:1;
uint64_t reserved_14_63:50;
#endif
} s;
struct cvmx_srxx_sw_tick_ctl_s cn38xx;
struct cvmx_srxx_sw_tick_ctl_s cn58xx;
struct cvmx_srxx_sw_tick_ctl_s cn58xxp1;
};
union cvmx_srxx_sw_tick_dat {
uint64_t u64;
struct cvmx_srxx_sw_tick_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t dat:64;
#else
uint64_t dat:64;
#endif
} s;
struct cvmx_srxx_sw_tick_dat_s cn38xx;
struct cvmx_srxx_sw_tick_dat_s cn58xx;
struct cvmx_srxx_sw_tick_dat_s cn58xxp1;
};
#endif


@ -0,0 +1,392 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_STXX_DEFS_H__
#define __CVMX_STXX_DEFS_H__
#define CVMX_STXX_ARB_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000608ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_BCKPRS_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000688ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_COM_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000600ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_DIP_CNT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000690ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_IGN_CAL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000610ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_INT_MSK(block_id) (CVMX_ADD_IO_SEG(0x00011800900006A0ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x0001180090000698ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_INT_SYNC(block_id) (CVMX_ADD_IO_SEG(0x00011800900006A8ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_MIN_BST(block_id) (CVMX_ADD_IO_SEG(0x0001180090000618ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_SPI4_CALX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180090000400ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_STXX_SPI4_DAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000628ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_SPI4_STAT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000630ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_STAT_BYTES_HI(block_id) (CVMX_ADD_IO_SEG(0x0001180090000648ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_STAT_BYTES_LO(block_id) (CVMX_ADD_IO_SEG(0x0001180090000680ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_STAT_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180090000638ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_STXX_STAT_PKT_XMT(block_id) (CVMX_ADD_IO_SEG(0x0001180090000640ull) + ((block_id) & 1) * 0x8000000ull)
union cvmx_stxx_arb_ctl {
uint64_t u64;
struct cvmx_stxx_arb_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t mintrn:1;
uint64_t reserved_4_4:1;
uint64_t igntpa:1;
uint64_t reserved_0_2:3;
#else
uint64_t reserved_0_2:3;
uint64_t igntpa:1;
uint64_t reserved_4_4:1;
uint64_t mintrn:1;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_stxx_arb_ctl_s cn38xx;
struct cvmx_stxx_arb_ctl_s cn38xxp2;
struct cvmx_stxx_arb_ctl_s cn58xx;
struct cvmx_stxx_arb_ctl_s cn58xxp1;
};
union cvmx_stxx_bckprs_cnt {
uint64_t u64;
struct cvmx_stxx_bckprs_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_stxx_bckprs_cnt_s cn38xx;
struct cvmx_stxx_bckprs_cnt_s cn38xxp2;
struct cvmx_stxx_bckprs_cnt_s cn58xx;
struct cvmx_stxx_bckprs_cnt_s cn58xxp1;
};
union cvmx_stxx_com_ctl {
uint64_t u64;
struct cvmx_stxx_com_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
uint64_t st_en:1;
uint64_t reserved_1_2:2;
uint64_t inf_en:1;
#else
uint64_t inf_en:1;
uint64_t reserved_1_2:2;
uint64_t st_en:1;
uint64_t reserved_4_63:60;
#endif
} s;
struct cvmx_stxx_com_ctl_s cn38xx;
struct cvmx_stxx_com_ctl_s cn38xxp2;
struct cvmx_stxx_com_ctl_s cn58xx;
struct cvmx_stxx_com_ctl_s cn58xxp1;
};
union cvmx_stxx_dip_cnt {
uint64_t u64;
struct cvmx_stxx_dip_cnt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t frmmax:4;
uint64_t dipmax:4;
#else
uint64_t dipmax:4;
uint64_t frmmax:4;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_stxx_dip_cnt_s cn38xx;
struct cvmx_stxx_dip_cnt_s cn38xxp2;
struct cvmx_stxx_dip_cnt_s cn58xx;
struct cvmx_stxx_dip_cnt_s cn58xxp1;
};
union cvmx_stxx_ign_cal {
uint64_t u64;
struct cvmx_stxx_ign_cal_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t igntpa:16;
#else
uint64_t igntpa:16;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_stxx_ign_cal_s cn38xx;
struct cvmx_stxx_ign_cal_s cn38xxp2;
struct cvmx_stxx_ign_cal_s cn58xx;
struct cvmx_stxx_ign_cal_s cn58xxp1;
};
union cvmx_stxx_int_msk {
uint64_t u64;
struct cvmx_stxx_int_msk_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t frmerr:1;
uint64_t unxfrm:1;
uint64_t nosync:1;
uint64_t diperr:1;
uint64_t datovr:1;
uint64_t ovrbst:1;
uint64_t calpar1:1;
uint64_t calpar0:1;
#else
uint64_t calpar0:1;
uint64_t calpar1:1;
uint64_t ovrbst:1;
uint64_t datovr:1;
uint64_t diperr:1;
uint64_t nosync:1;
uint64_t unxfrm:1;
uint64_t frmerr:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_stxx_int_msk_s cn38xx;
struct cvmx_stxx_int_msk_s cn38xxp2;
struct cvmx_stxx_int_msk_s cn58xx;
struct cvmx_stxx_int_msk_s cn58xxp1;
};
union cvmx_stxx_int_reg {
uint64_t u64;
struct cvmx_stxx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t syncerr:1;
uint64_t frmerr:1;
uint64_t unxfrm:1;
uint64_t nosync:1;
uint64_t diperr:1;
uint64_t datovr:1;
uint64_t ovrbst:1;
uint64_t calpar1:1;
uint64_t calpar0:1;
#else
uint64_t calpar0:1;
uint64_t calpar1:1;
uint64_t ovrbst:1;
uint64_t datovr:1;
uint64_t diperr:1;
uint64_t nosync:1;
uint64_t unxfrm:1;
uint64_t frmerr:1;
uint64_t syncerr:1;
uint64_t reserved_9_63:55;
#endif
} s;
struct cvmx_stxx_int_reg_s cn38xx;
struct cvmx_stxx_int_reg_s cn38xxp2;
struct cvmx_stxx_int_reg_s cn58xx;
struct cvmx_stxx_int_reg_s cn58xxp1;
};
union cvmx_stxx_int_sync {
uint64_t u64;
struct cvmx_stxx_int_sync_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t frmerr:1;
uint64_t unxfrm:1;
uint64_t nosync:1;
uint64_t diperr:1;
uint64_t datovr:1;
uint64_t ovrbst:1;
uint64_t calpar1:1;
uint64_t calpar0:1;
#else
uint64_t calpar0:1;
uint64_t calpar1:1;
uint64_t ovrbst:1;
uint64_t datovr:1;
uint64_t diperr:1;
uint64_t nosync:1;
uint64_t unxfrm:1;
uint64_t frmerr:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_stxx_int_sync_s cn38xx;
struct cvmx_stxx_int_sync_s cn38xxp2;
struct cvmx_stxx_int_sync_s cn58xx;
struct cvmx_stxx_int_sync_s cn58xxp1;
};
union cvmx_stxx_min_bst {
uint64_t u64;
struct cvmx_stxx_min_bst_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
uint64_t minb:9;
#else
uint64_t minb:9;
uint64_t reserved_9_63:55;
#endif
} s;
struct cvmx_stxx_min_bst_s cn38xx;
struct cvmx_stxx_min_bst_s cn38xxp2;
struct cvmx_stxx_min_bst_s cn58xx;
struct cvmx_stxx_min_bst_s cn58xxp1;
};
union cvmx_stxx_spi4_calx {
uint64_t u64;
struct cvmx_stxx_spi4_calx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
uint64_t oddpar:1;
uint64_t prt3:4;
uint64_t prt2:4;
uint64_t prt1:4;
uint64_t prt0:4;
#else
uint64_t prt0:4;
uint64_t prt1:4;
uint64_t prt2:4;
uint64_t prt3:4;
uint64_t oddpar:1;
uint64_t reserved_17_63:47;
#endif
} s;
struct cvmx_stxx_spi4_calx_s cn38xx;
struct cvmx_stxx_spi4_calx_s cn38xxp2;
struct cvmx_stxx_spi4_calx_s cn58xx;
struct cvmx_stxx_spi4_calx_s cn58xxp1;
};
union cvmx_stxx_spi4_dat {
uint64_t u64;
struct cvmx_stxx_spi4_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t alpha:16;
uint64_t max_t:16;
#else
uint64_t max_t:16;
uint64_t alpha:16;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_stxx_spi4_dat_s cn38xx;
struct cvmx_stxx_spi4_dat_s cn38xxp2;
struct cvmx_stxx_spi4_dat_s cn58xx;
struct cvmx_stxx_spi4_dat_s cn58xxp1;
};
union cvmx_stxx_spi4_stat {
uint64_t u64;
struct cvmx_stxx_spi4_stat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
uint64_t m:8;
uint64_t reserved_7_7:1;
uint64_t len:7;
#else
uint64_t len:7;
uint64_t reserved_7_7:1;
uint64_t m:8;
uint64_t reserved_16_63:48;
#endif
} s;
struct cvmx_stxx_spi4_stat_s cn38xx;
struct cvmx_stxx_spi4_stat_s cn38xxp2;
struct cvmx_stxx_spi4_stat_s cn58xx;
struct cvmx_stxx_spi4_stat_s cn58xxp1;
};
union cvmx_stxx_stat_bytes_hi {
uint64_t u64;
struct cvmx_stxx_stat_bytes_hi_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_stxx_stat_bytes_hi_s cn38xx;
struct cvmx_stxx_stat_bytes_hi_s cn38xxp2;
struct cvmx_stxx_stat_bytes_hi_s cn58xx;
struct cvmx_stxx_stat_bytes_hi_s cn58xxp1;
};
union cvmx_stxx_stat_bytes_lo {
uint64_t u64;
struct cvmx_stxx_stat_bytes_lo_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_stxx_stat_bytes_lo_s cn38xx;
struct cvmx_stxx_stat_bytes_lo_s cn38xxp2;
struct cvmx_stxx_stat_bytes_lo_s cn58xx;
struct cvmx_stxx_stat_bytes_lo_s cn58xxp1;
};
union cvmx_stxx_stat_ctl {
uint64_t u64;
struct cvmx_stxx_stat_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t clr:1;
uint64_t bckprs:4;
#else
uint64_t bckprs:4;
uint64_t clr:1;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_stxx_stat_ctl_s cn38xx;
struct cvmx_stxx_stat_ctl_s cn38xxp2;
struct cvmx_stxx_stat_ctl_s cn58xx;
struct cvmx_stxx_stat_ctl_s cn58xxp1;
};
union cvmx_stxx_stat_pkt_xmt {
uint64_t u64;
struct cvmx_stxx_stat_pkt_xmt_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t cnt:32;
#else
uint64_t cnt:32;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_stxx_stat_pkt_xmt_s cn38xx;
struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2;
struct cvmx_stxx_stat_pkt_xmt_s cn58xx;
struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1;
};
#endif


@ -0,0 +1,152 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* This module provides system/board information obtained by the bootloader.
*/
#ifndef __CVMX_SYSINFO_H__
#define __CVMX_SYSINFO_H__
#define OCTEON_SERIAL_LEN 20
/**
* Structure describing application specific information.
* __cvmx_app_init() populates this from the cvmx boot descriptor.
* This structure is private to simple executive applications, so
* no versioning is required.
*
* This structure must be provided with some fields set in order to
* use simple executive functions in other applications (Linux kernel,
* u-boot, etc.) The cvmx_sysinfo_minimal_initialize() function is
* provided to set the required values in these cases.
*/
struct cvmx_sysinfo {
/* System wide variables */
/* installed DRAM in system, in bytes */
uint64_t system_dram_size;
/* ptr to memory descriptor block */
void *phy_mem_desc_ptr;
/* Application image specific variables */
/* stack top address (virtual) */
uint64_t stack_top;
/* heap base address (virtual) */
uint64_t heap_base;
/* stack size in bytes */
uint32_t stack_size;
/* heap size in bytes */
uint32_t heap_size;
/* coremask defining cores running application */
uint32_t core_mask;
/* Deprecated, use cvmx_coremask_first_core() to select init core */
uint32_t init_core;
/* exception base address, as set by bootloader */
uint64_t exception_base_addr;
/* cpu clock speed in hz */
uint32_t cpu_clock_hz;
/* dram data rate in hz (data rate = 2 * clock rate) */
uint32_t dram_data_rate_hz;
uint16_t board_type;
uint8_t board_rev_major;
uint8_t board_rev_minor;
uint8_t mac_addr_base[6];
uint8_t mac_addr_count;
char board_serial_number[OCTEON_SERIAL_LEN];
/*
* Several boards support compact flash on the Octeon boot
* bus. The CF memory spaces may be mapped to different
* addresses on different boards. These values will be 0 if
* CF is not present. Note that these addresses are physical
* addresses, and it is up to the application to use the
* proper addressing mode (XKPHYS, KSEG0, etc.)
*/
uint64_t compact_flash_common_base_addr;
uint64_t compact_flash_attribute_base_addr;
/*
* Base address of the LED display (as on EBT3000 board) This
* will be 0 if LED display not present. Note that this
* address is a physical address, and it is up to the
* application to use the proper addressing mode (XKPHYS,
* KSEG0, etc.)
*/
uint64_t led_display_base_addr;
/* DFA reference clock in hz (if applicable)*/
uint32_t dfa_ref_clock_hz;
/* configuration flags from bootloader */
uint32_t bootloader_config_flags;
/* Uart number used for console */
uint8_t console_uart_num;
};
/**
* This function returns the system/board information as obtained
* by the bootloader.
*
*
* Returns Pointer to the boot information structure
*
*/
extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
/**
* This function is used in non-simple executive environments (such as
* Linux kernel, u-boot, etc.) to configure the minimal fields that
* are required to use simple executive files directly.
*
* Locking (if required) must be handled outside of this
* function
*
* @phy_mem_desc_ptr: Pointer to global physical memory descriptor
*                    (bootmem descriptor)
* @board_type: Octeon board type enumeration
*
* @board_rev_major: Board major revision
* @board_rev_minor: Board minor revision
* @cpu_clock_hz: CPU clock frequency in hertz
*
* Returns 0: Failure
* 1: success
*/
extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
uint16_t board_type,
uint8_t board_rev_major,
uint8_t board_rev_minor,
uint32_t cpu_clock_hz);
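/*
 * Illustrative sketch (not part of the Cavium API): how a host
 * environment such as a kernel or bootloader might seed the minimal
 * sysinfo fields and then read them back.  The helper name and the
 * board values below are hypothetical placeholders.
 */
static inline struct cvmx_sysinfo *example_sysinfo_setup(void *bootmem_desc,
							 uint32_t cpu_hz)
{
	/* Provide the fields the simple executive requires. */
	if (!cvmx_sysinfo_minimal_initialize(bootmem_desc,
					     0 /* board_type */,
					     1 /* board_rev_major */,
					     0 /* board_rev_minor */,
					     cpu_hz))
		return (struct cvmx_sysinfo *)0;

	/* Later callers retrieve the same data through the getter. */
	return cvmx_sysinfo_get();
}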
#endif /* __CVMX_SYSINFO_H__ */


@ -0,0 +1,475 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2012 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_UCTLX_DEFS_H__
#define __CVMX_UCTLX_DEFS_H__
#define CVMX_UCTLX_BIST_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A0ull))
#define CVMX_UCTLX_CLK_RST_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000000ull))
#define CVMX_UCTLX_EHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000080ull))
#define CVMX_UCTLX_EHCI_FLA(block_id) (CVMX_ADD_IO_SEG(0x000118006F0000A8ull))
#define CVMX_UCTLX_ERTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000090ull))
#define CVMX_UCTLX_IF_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000030ull))
#define CVMX_UCTLX_INT_ENA(block_id) (CVMX_ADD_IO_SEG(0x000118006F000028ull))
#define CVMX_UCTLX_INT_REG(block_id) (CVMX_ADD_IO_SEG(0x000118006F000020ull))
#define CVMX_UCTLX_OHCI_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000088ull))
#define CVMX_UCTLX_ORTO_CTL(block_id) (CVMX_ADD_IO_SEG(0x000118006F000098ull))
#define CVMX_UCTLX_PPAF_WM(block_id) (CVMX_ADD_IO_SEG(0x000118006F000038ull))
#define CVMX_UCTLX_UPHY_CTL_STATUS(block_id) (CVMX_ADD_IO_SEG(0x000118006F000008ull))
#define CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(offset, block_id) (CVMX_ADD_IO_SEG(0x000118006F000010ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
union cvmx_uctlx_bist_status {
uint64_t u64;
struct cvmx_uctlx_bist_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t data_bis:1;
uint64_t desc_bis:1;
uint64_t erbm_bis:1;
uint64_t orbm_bis:1;
uint64_t wrbm_bis:1;
uint64_t ppaf_bis:1;
#else
uint64_t ppaf_bis:1;
uint64_t wrbm_bis:1;
uint64_t orbm_bis:1;
uint64_t erbm_bis:1;
uint64_t desc_bis:1;
uint64_t data_bis:1;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_uctlx_bist_status_s cn61xx;
struct cvmx_uctlx_bist_status_s cn63xx;
struct cvmx_uctlx_bist_status_s cn63xxp1;
struct cvmx_uctlx_bist_status_s cn66xx;
struct cvmx_uctlx_bist_status_s cn68xx;
struct cvmx_uctlx_bist_status_s cn68xxp1;
struct cvmx_uctlx_bist_status_s cnf71xx;
};
union cvmx_uctlx_clk_rst_ctl {
uint64_t u64;
struct cvmx_uctlx_clk_rst_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
uint64_t clear_bist:1;
uint64_t start_bist:1;
uint64_t ehci_sm:1;
uint64_t ohci_clkcktrst:1;
uint64_t ohci_sm:1;
uint64_t ohci_susp_lgcy:1;
uint64_t app_start_clk:1;
uint64_t o_clkdiv_rst:1;
uint64_t h_clkdiv_byp:1;
uint64_t h_clkdiv_rst:1;
uint64_t h_clkdiv_en:1;
uint64_t o_clkdiv_en:1;
uint64_t h_div:4;
uint64_t p_refclk_sel:2;
uint64_t p_refclk_div:2;
uint64_t reserved_4_4:1;
uint64_t p_com_on:1;
uint64_t p_por:1;
uint64_t p_prst:1;
uint64_t hrst:1;
#else
uint64_t hrst:1;
uint64_t p_prst:1;
uint64_t p_por:1;
uint64_t p_com_on:1;
uint64_t reserved_4_4:1;
uint64_t p_refclk_div:2;
uint64_t p_refclk_sel:2;
uint64_t h_div:4;
uint64_t o_clkdiv_en:1;
uint64_t h_clkdiv_en:1;
uint64_t h_clkdiv_rst:1;
uint64_t h_clkdiv_byp:1;
uint64_t o_clkdiv_rst:1;
uint64_t app_start_clk:1;
uint64_t ohci_susp_lgcy:1;
uint64_t ohci_sm:1;
uint64_t ohci_clkcktrst:1;
uint64_t ehci_sm:1;
uint64_t start_bist:1;
uint64_t clear_bist:1;
uint64_t reserved_25_63:39;
#endif
} s;
struct cvmx_uctlx_clk_rst_ctl_s cn61xx;
struct cvmx_uctlx_clk_rst_ctl_s cn63xx;
struct cvmx_uctlx_clk_rst_ctl_s cn63xxp1;
struct cvmx_uctlx_clk_rst_ctl_s cn66xx;
struct cvmx_uctlx_clk_rst_ctl_s cn68xx;
struct cvmx_uctlx_clk_rst_ctl_s cn68xxp1;
struct cvmx_uctlx_clk_rst_ctl_s cnf71xx;
};
union cvmx_uctlx_ehci_ctl {
uint64_t u64;
struct cvmx_uctlx_ehci_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
uint64_t desc_rbm:1;
uint64_t reg_nb:1;
uint64_t l2c_dc:1;
uint64_t l2c_bc:1;
uint64_t l2c_0pag:1;
uint64_t l2c_stt:1;
uint64_t l2c_buff_emod:2;
uint64_t l2c_desc_emod:2;
uint64_t inv_reg_a2:1;
uint64_t ehci_64b_addr_en:1;
uint64_t l2c_addr_msb:8;
#else
uint64_t l2c_addr_msb:8;
uint64_t ehci_64b_addr_en:1;
uint64_t inv_reg_a2:1;
uint64_t l2c_desc_emod:2;
uint64_t l2c_buff_emod:2;
uint64_t l2c_stt:1;
uint64_t l2c_0pag:1;
uint64_t l2c_bc:1;
uint64_t l2c_dc:1;
uint64_t reg_nb:1;
uint64_t desc_rbm:1;
uint64_t reserved_20_63:44;
#endif
} s;
struct cvmx_uctlx_ehci_ctl_s cn61xx;
struct cvmx_uctlx_ehci_ctl_s cn63xx;
struct cvmx_uctlx_ehci_ctl_s cn63xxp1;
struct cvmx_uctlx_ehci_ctl_s cn66xx;
struct cvmx_uctlx_ehci_ctl_s cn68xx;
struct cvmx_uctlx_ehci_ctl_s cn68xxp1;
struct cvmx_uctlx_ehci_ctl_s cnf71xx;
};
union cvmx_uctlx_ehci_fla {
uint64_t u64;
struct cvmx_uctlx_ehci_fla_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
uint64_t fla:6;
#else
uint64_t fla:6;
uint64_t reserved_6_63:58;
#endif
} s;
struct cvmx_uctlx_ehci_fla_s cn61xx;
struct cvmx_uctlx_ehci_fla_s cn63xx;
struct cvmx_uctlx_ehci_fla_s cn63xxp1;
struct cvmx_uctlx_ehci_fla_s cn66xx;
struct cvmx_uctlx_ehci_fla_s cn68xx;
struct cvmx_uctlx_ehci_fla_s cn68xxp1;
struct cvmx_uctlx_ehci_fla_s cnf71xx;
};
union cvmx_uctlx_erto_ctl {
uint64_t u64;
struct cvmx_uctlx_erto_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t to_val:27;
uint64_t reserved_0_4:5;
#else
uint64_t reserved_0_4:5;
uint64_t to_val:27;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_uctlx_erto_ctl_s cn61xx;
struct cvmx_uctlx_erto_ctl_s cn63xx;
struct cvmx_uctlx_erto_ctl_s cn63xxp1;
struct cvmx_uctlx_erto_ctl_s cn66xx;
struct cvmx_uctlx_erto_ctl_s cn68xx;
struct cvmx_uctlx_erto_ctl_s cn68xxp1;
struct cvmx_uctlx_erto_ctl_s cnf71xx;
};
union cvmx_uctlx_if_ena {
uint64_t u64;
struct cvmx_uctlx_if_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
uint64_t en:1;
#else
uint64_t en:1;
uint64_t reserved_1_63:63;
#endif
} s;
struct cvmx_uctlx_if_ena_s cn61xx;
struct cvmx_uctlx_if_ena_s cn63xx;
struct cvmx_uctlx_if_ena_s cn63xxp1;
struct cvmx_uctlx_if_ena_s cn66xx;
struct cvmx_uctlx_if_ena_s cn68xx;
struct cvmx_uctlx_if_ena_s cn68xxp1;
struct cvmx_uctlx_if_ena_s cnf71xx;
};
union cvmx_uctlx_int_ena {
uint64_t u64;
struct cvmx_uctlx_int_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t ec_ovf_e:1;
uint64_t oc_ovf_e:1;
uint64_t wb_pop_e:1;
uint64_t wb_psh_f:1;
uint64_t cf_psh_f:1;
uint64_t or_psh_f:1;
uint64_t er_psh_f:1;
uint64_t pp_psh_f:1;
#else
uint64_t pp_psh_f:1;
uint64_t er_psh_f:1;
uint64_t or_psh_f:1;
uint64_t cf_psh_f:1;
uint64_t wb_psh_f:1;
uint64_t wb_pop_e:1;
uint64_t oc_ovf_e:1;
uint64_t ec_ovf_e:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_uctlx_int_ena_s cn61xx;
struct cvmx_uctlx_int_ena_s cn63xx;
struct cvmx_uctlx_int_ena_s cn63xxp1;
struct cvmx_uctlx_int_ena_s cn66xx;
struct cvmx_uctlx_int_ena_s cn68xx;
struct cvmx_uctlx_int_ena_s cn68xxp1;
struct cvmx_uctlx_int_ena_s cnf71xx;
};
union cvmx_uctlx_int_reg {
uint64_t u64;
struct cvmx_uctlx_int_reg_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
uint64_t ec_ovf_e:1;
uint64_t oc_ovf_e:1;
uint64_t wb_pop_e:1;
uint64_t wb_psh_f:1;
uint64_t cf_psh_f:1;
uint64_t or_psh_f:1;
uint64_t er_psh_f:1;
uint64_t pp_psh_f:1;
#else
uint64_t pp_psh_f:1;
uint64_t er_psh_f:1;
uint64_t or_psh_f:1;
uint64_t cf_psh_f:1;
uint64_t wb_psh_f:1;
uint64_t wb_pop_e:1;
uint64_t oc_ovf_e:1;
uint64_t ec_ovf_e:1;
uint64_t reserved_8_63:56;
#endif
} s;
struct cvmx_uctlx_int_reg_s cn61xx;
struct cvmx_uctlx_int_reg_s cn63xx;
struct cvmx_uctlx_int_reg_s cn63xxp1;
struct cvmx_uctlx_int_reg_s cn66xx;
struct cvmx_uctlx_int_reg_s cn68xx;
struct cvmx_uctlx_int_reg_s cn68xxp1;
struct cvmx_uctlx_int_reg_s cnf71xx;
};
union cvmx_uctlx_ohci_ctl {
uint64_t u64;
struct cvmx_uctlx_ohci_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_19_63:45;
uint64_t reg_nb:1;
uint64_t l2c_dc:1;
uint64_t l2c_bc:1;
uint64_t l2c_0pag:1;
uint64_t l2c_stt:1;
uint64_t l2c_buff_emod:2;
uint64_t l2c_desc_emod:2;
uint64_t inv_reg_a2:1;
uint64_t reserved_8_8:1;
uint64_t l2c_addr_msb:8;
#else
uint64_t l2c_addr_msb:8;
uint64_t reserved_8_8:1;
uint64_t inv_reg_a2:1;
uint64_t l2c_desc_emod:2;
uint64_t l2c_buff_emod:2;
uint64_t l2c_stt:1;
uint64_t l2c_0pag:1;
uint64_t l2c_bc:1;
uint64_t l2c_dc:1;
uint64_t reg_nb:1;
uint64_t reserved_19_63:45;
#endif
} s;
struct cvmx_uctlx_ohci_ctl_s cn61xx;
struct cvmx_uctlx_ohci_ctl_s cn63xx;
struct cvmx_uctlx_ohci_ctl_s cn63xxp1;
struct cvmx_uctlx_ohci_ctl_s cn66xx;
struct cvmx_uctlx_ohci_ctl_s cn68xx;
struct cvmx_uctlx_ohci_ctl_s cn68xxp1;
struct cvmx_uctlx_ohci_ctl_s cnf71xx;
};
union cvmx_uctlx_orto_ctl {
uint64_t u64;
struct cvmx_uctlx_orto_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
uint64_t to_val:24;
uint64_t reserved_0_7:8;
#else
uint64_t reserved_0_7:8;
uint64_t to_val:24;
uint64_t reserved_32_63:32;
#endif
} s;
struct cvmx_uctlx_orto_ctl_s cn61xx;
struct cvmx_uctlx_orto_ctl_s cn63xx;
struct cvmx_uctlx_orto_ctl_s cn63xxp1;
struct cvmx_uctlx_orto_ctl_s cn66xx;
struct cvmx_uctlx_orto_ctl_s cn68xx;
struct cvmx_uctlx_orto_ctl_s cn68xxp1;
struct cvmx_uctlx_orto_ctl_s cnf71xx;
};
union cvmx_uctlx_ppaf_wm {
uint64_t u64;
struct cvmx_uctlx_ppaf_wm_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
uint64_t wm:5;
#else
uint64_t wm:5;
uint64_t reserved_5_63:59;
#endif
} s;
struct cvmx_uctlx_ppaf_wm_s cn61xx;
struct cvmx_uctlx_ppaf_wm_s cn63xx;
struct cvmx_uctlx_ppaf_wm_s cn63xxp1;
struct cvmx_uctlx_ppaf_wm_s cn66xx;
struct cvmx_uctlx_ppaf_wm_s cnf71xx;
};
union cvmx_uctlx_uphy_ctl_status {
uint64_t u64;
struct cvmx_uctlx_uphy_ctl_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
uint64_t bist_done:1;
uint64_t bist_err:1;
uint64_t hsbist:1;
uint64_t fsbist:1;
uint64_t lsbist:1;
uint64_t siddq:1;
uint64_t vtest_en:1;
uint64_t uphy_bist:1;
uint64_t bist_en:1;
uint64_t ate_reset:1;
#else
uint64_t ate_reset:1;
uint64_t bist_en:1;
uint64_t uphy_bist:1;
uint64_t vtest_en:1;
uint64_t siddq:1;
uint64_t lsbist:1;
uint64_t fsbist:1;
uint64_t hsbist:1;
uint64_t bist_err:1;
uint64_t bist_done:1;
uint64_t reserved_10_63:54;
#endif
} s;
struct cvmx_uctlx_uphy_ctl_status_s cn61xx;
struct cvmx_uctlx_uphy_ctl_status_s cn63xx;
struct cvmx_uctlx_uphy_ctl_status_s cn63xxp1;
struct cvmx_uctlx_uphy_ctl_status_s cn66xx;
struct cvmx_uctlx_uphy_ctl_status_s cn68xx;
struct cvmx_uctlx_uphy_ctl_status_s cn68xxp1;
struct cvmx_uctlx_uphy_ctl_status_s cnf71xx;
};
union cvmx_uctlx_uphy_portx_ctl_status {
uint64_t u64;
struct cvmx_uctlx_uphy_portx_ctl_status_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_43_63:21;
uint64_t tdata_out:4;
uint64_t txbiststuffenh:1;
uint64_t txbiststuffen:1;
uint64_t dmpulldown:1;
uint64_t dppulldown:1;
uint64_t vbusvldext:1;
uint64_t portreset:1;
uint64_t txhsvxtune:2;
uint64_t txvreftune:4;
uint64_t txrisetune:1;
uint64_t txpreemphasistune:1;
uint64_t txfslstune:4;
uint64_t sqrxtune:3;
uint64_t compdistune:3;
uint64_t loop_en:1;
uint64_t tclk:1;
uint64_t tdata_sel:1;
uint64_t taddr_in:4;
uint64_t tdata_in:8;
#else
uint64_t tdata_in:8;
uint64_t taddr_in:4;
uint64_t tdata_sel:1;
uint64_t tclk:1;
uint64_t loop_en:1;
uint64_t compdistune:3;
uint64_t sqrxtune:3;
uint64_t txfslstune:4;
uint64_t txpreemphasistune:1;
uint64_t txrisetune:1;
uint64_t txvreftune:4;
uint64_t txhsvxtune:2;
uint64_t portreset:1;
uint64_t vbusvldext:1;
uint64_t dppulldown:1;
uint64_t dmpulldown:1;
uint64_t txbiststuffen:1;
uint64_t txbiststuffenh:1;
uint64_t tdata_out:4;
uint64_t reserved_43_63:21;
#endif
} s;
struct cvmx_uctlx_uphy_portx_ctl_status_s cn61xx;
struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xx;
struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xxp1;
struct cvmx_uctlx_uphy_portx_ctl_status_s cn66xx;
struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xx;
struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xxp1;
struct cvmx_uctlx_uphy_portx_ctl_status_s cnf71xx;
};
#endif


@ -0,0 +1,397 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* This header file defines the work queue entry (wqe) data structure.
* Since this is a commonly used structure that depends on structures
* from several hardware blocks, those definitions have been placed
* in this file to create a single point of definition of the wqe
* format.
* Data structures are still named according to the block that they
* relate to.
*
*/
#ifndef __CVMX_WQE_H__
#define __CVMX_WQE_H__
#include <asm/octeon/cvmx-packet.h>
#define OCT_TAG_TYPE_STRING(x) \
(((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \
(((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \
(((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : \
"NULL_NULL")))
/**
* HW decode / err_code in work queue entry
*/
typedef union {
uint64_t u64;
/* Use this struct if the hardware determines that the packet is IP */
struct {
/* HW sets this to the number of buffers used by this packet */
uint64_t bufs:8;
/* HW sets to the number of L2 bytes prior to the IP */
uint64_t ip_offset:8;
/* set to 1 if we found DSA/VLAN in the L2 */
uint64_t vlan_valid:1;
/* Set to 1 if the DSA/VLAN tag is stacked */
uint64_t vlan_stacked:1;
uint64_t unassigned:1;
/* HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
uint64_t vlan_cfi:1;
/* HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
uint64_t vlan_id:12;
/* Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
uint64_t pr:4;
uint64_t unassigned2:8;
/* the packet needs to be decompressed */
uint64_t dec_ipcomp:1;
/* the packet is either TCP or UDP */
uint64_t tcp_or_udp:1;
/* the packet needs to be decrypted (ESP or AH) */
uint64_t dec_ipsec:1;
/* the packet is IPv6 */
uint64_t is_v6:1;
/*
* (rcv_error, not_IP, IP_exc, is_frag, L4_error,
* software, etc.).
*/
/*
* reserved for software use, hardware will clear on
* packet creation.
*/
uint64_t software:1;
/* exceptional conditions below */
/* the receive interface hardware detected an L4 error
* (only applies if !is_frag) (only applies if
* !rcv_error && !not_IP && !IP_exc && !is_frag)
* failure indicated in err_code below, decode:
*
* - 1 = Malformed L4
* - 2 = L4 Checksum Error: the L4 checksum value is incorrect.
* - 3 = UDP Length Error: The UDP length field would
* make the UDP data longer than what remains in
* the IP packet (as defined by the IP header
* length field).
* - 4 = Bad L4 Port: either the source or destination
* TCP/UDP port is 0.
* - 8 = TCP FIN Only: the packet is TCP and only the
* FIN flag is set.
* - 9 = TCP No Flags: the packet is TCP and no flags
* are set.
* - 10 = TCP FIN RST: the packet is TCP and both FIN
* and RST are set.
* - 11 = TCP SYN URG: the packet is TCP and both SYN
* and URG are set.
* - 12 = TCP SYN RST: the packet is TCP and both SYN
* and RST are set.
* - 13 = TCP SYN FIN: the packet is TCP and both SYN
* and FIN are set.
*/
uint64_t L4_error:1;
/* set if the packet is a fragment */
uint64_t is_frag:1;
/* the receive interface hardware detected an IP error
* / exception (only applies if !rcv_error && !not_IP)
* failure indicated in err_code below, decode:
*
* - 1 = Not IP: the IP version field is neither 4 nor
* 6.
* - 2 = IPv4 Header Checksum Error: the IPv4 header
* has a checksum violation.
* - 3 = IP Malformed Header: the packet is not long
* enough to contain the IP header.
* - 4 = IP Malformed: the packet is not long enough
* to contain the bytes indicated by the IP
* header. Pad is allowed.
* - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
* Hop Count field are zero.
* - 6 = IP Options
*/
uint64_t IP_exc:1;
/*
* Set if the hardware determined that the packet is a
* broadcast.
*/
uint64_t is_bcast:1;
/*
* Set if the hardware determined that the packet is a
* multi-cast.
*/
uint64_t is_mcast:1;
/*
* Set if the packet may not be IP (must be zero in
* this case).
*/
uint64_t not_IP:1;
/*
* The receive interface hardware detected a receive
* error (must be zero in this case).
*/
uint64_t rcv_error:1;
/* lower err_code = first-level descriptor of the
* work */
/* zero for packet submitted by hardware that isn't on
* the slow path */
/* type is cvmx_pip_err_t */
uint64_t err_code:8;
} s;
/* use this to get at the 16 vlan bits */
struct {
uint64_t unused1:16;
uint64_t vlan:16;
uint64_t unused2:32;
} svlan;
/*
* use this struct if the hardware could not determine that
* the packet is ip.
*/
struct {
/*
* HW sets this to the number of buffers used by this
* packet.
*/
uint64_t bufs:8;
uint64_t unused:8;
/* set to 1 if we found DSA/VLAN in the L2 */
uint64_t vlan_valid:1;
/* Set to 1 if the DSA/VLAN tag is stacked */
uint64_t vlan_stacked:1;
uint64_t unassigned:1;
/*
* HW sets to the DSA/VLAN CFI flag (valid when
* vlan_valid)
*/
uint64_t vlan_cfi:1;
/*
* HW sets to the DSA/VLAN_ID field (valid when
* vlan_valid).
*/
uint64_t vlan_id:12;
/*
* Ring Identifier (if PCIe). Requires
* PIP_GBL_CTL[RING_EN]=1
*/
uint64_t pr:4;
uint64_t unassigned2:12;
/*
* reserved for software use, hardware will clear on
* packet creation.
*/
uint64_t software:1;
uint64_t unassigned3:1;
/*
* set if the hardware determined that the packet is
* rarp.
*/
uint64_t is_rarp:1;
/*
* set if the hardware determined that the packet is
* arp
*/
uint64_t is_arp:1;
/*
* set if the hardware determined that the packet is a
* broadcast.
*/
uint64_t is_bcast:1;
/*
* set if the hardware determined that the packet is a
* multi-cast
*/
uint64_t is_mcast:1;
/*
* set if the packet may not be IP (must be one in
* this case)
*/
uint64_t not_IP:1;
/* The receive interface hardware detected a receive
* error. Failure indicated in err_code below,
* decode:
*
* - 1 = partial error: a packet was partially
* received, but internal buffering / bandwidth
* was not adequate to receive the entire
* packet.
* - 2 = jabber error: the RGMII packet was too large
* and is truncated.
* - 3 = overrun error: the RGMII packet is longer
* than allowed and had an FCS error.
* - 4 = oversize error: the RGMII packet is longer
* than allowed.
* - 5 = alignment error: the RGMII packet is not an
* integer number of bytes
* and had an FCS error (100M and 10M only).
* - 6 = fragment error: the RGMII packet is shorter
* than allowed and had an FCS error.
* - 7 = GMX FCS error: the RGMII packet had an FCS
* error.
* - 8 = undersize error: the RGMII packet is shorter
* than allowed.
* - 9 = extend error: the RGMII packet had an extend
* error.
* - 10 = length mismatch error: the RGMII packet had
* a length that did not match the length field
* in the L2 HDR.
* - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
* packet had one or more data reception errors
* (RXERR) or the SPI4 packet had one or more
* DIP4 errors.
* - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
* packet was not large enough to cover the
* skipped bytes or the SPI4 packet was
* terminated with an Abort EOPS.
* - 13 = RGMII nibble error/SPI4 Port NXA Error: the
* RGMII packet had a studder error (data not
* repeated - 10/100M only) or the SPI4 packet
* was sent to an NXA.
* - 16 = FCS error: a SPI4.2 packet had an FCS error.
* - 17 = Skip error: a packet was not large enough to
* cover the skipped bytes.
* - 18 = L2 header malformed: the packet is not long
* enough to contain the L2.
*/
uint64_t rcv_error:1;
/*
* lower err_code = first-level descriptor of the
* work
*/
/*
* zero for packet submitted by hardware that isn't on
* the slow path
*/
/* type is cvmx_pip_err_t (union, so can't use directly) */
uint64_t err_code:8;
} snoip;
} cvmx_pip_wqe_word2;
/**
* Work queue entry format
*
* must be 8-byte aligned
*/
typedef struct {
/*****************************************************************
* WORD 0
* HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
* raw chksum result generated by the HW
*/
uint16_t hw_chksum;
/**
* Field unused by hardware - available for software
*/
uint8_t unused;
/**
* Next pointer used by hardware for list maintenance.
* May be written/read by HW before the work queue
* entry is scheduled to a PP
* (Only 36 bits used in Octeon 1)
*/
uint64_t next_ptr:40;
/*****************************************************************
* WORD 1
* HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
* HW sets to the total number of bytes in the packet
*/
uint64_t len:16;
/**
* HW sets this to input physical port
*/
uint64_t ipprt:6;
/**
* HW sets this to what it thought the priority of the input packet was
*/
uint64_t qos:3;
/**
* the group that the work queue entry will be scheduled to
*/
uint64_t grp:4;
/**
* the type of the tag (ORDERED, ATOMIC, NULL)
*/
uint64_t tag_type:3;
/**
* the synchronization/ordering tag
*/
uint64_t tag:32;
/**
* WORD 2 HW WRITE: the following 64-bits are filled in by
* hardware when a packet arrives This indicates a variety of
* status and error conditions.
*/
cvmx_pip_wqe_word2 word2;
/**
* Pointer to the first segment of the packet.
*/
union cvmx_buf_ptr packet_ptr;
/**
* HW WRITE: octeon will fill in a programmable amount from the
* packet, up to (at most, but perhaps less) the amount
* needed to fill the work queue entry to 128 bytes
*
* If the packet is recognized to be IP, the hardware starts
* writing here at the point where the IP header starts (except
* that the IPv4 header is padded for appropriate alignment). If
* the packet is not recognized to be IP, the hardware starts
* writing the beginning of the packet here.
*/
uint8_t packet_data[96];
/**
* If desired, SW can make the work Q entry any length. For the
* purposes of discussion here, assume 128B always, as this is all that
* the hardware deals with.
*
*/
} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
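/*
 * Illustrative sketch (not part of the Cavium API): a minimal check of
 * the word2 decode above, as a consumer of work queue entries might
 * perform before touching packet data.  The helper name is invented.
 */
static inline int example_wqe_has_error(const cvmx_wqe_t *work)
{
	/* Non-IP packets report receive problems through the snoip view. */
	if (work->word2.s.not_IP)
		return work->word2.snoip.rcv_error ||
		       work->word2.snoip.err_code != 0;

	/* IP packets additionally flag IP header and L4 exceptions. */
	return work->word2.s.rcv_error || work->word2.s.IP_exc ||
	       work->word2.s.L4_error;
}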
#endif /* __CVMX_WQE_H__ */


@ -0,0 +1,517 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __CVMX_H__
#define __CVMX_H__
#include <linux/kernel.h>
#include <linux/string.h>
enum cvmx_mips_space {
CVMX_MIPS_SPACE_XKSEG = 3LL,
CVMX_MIPS_SPACE_XKPHYS = 2LL,
CVMX_MIPS_SPACE_XSSEG = 1LL,
CVMX_MIPS_SPACE_XUSEG = 0LL
};
/* These macros are for use when using 32 bit pointers. */
#define CVMX_MIPS32_SPACE_KSEG0 1l
#define CVMX_ADD_SEG32(segment, add) \
(((int32_t)segment << 31) | (int32_t)(add))
#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
/* These macros simplify the process of creating common IO addresses */
#define CVMX_ADD_SEG(segment, add) \
((((uint64_t)segment) << 62) | (add))
#ifndef CVMX_ADD_IO_SEG
#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
#endif
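/*
 * Worked example (illustrative): CVMX_ADD_IO_SEG() ORs the XKPHYS
 * segment (2 << 62) onto a CSR offset, so for instance
 *
 *   CVMX_ADD_IO_SEG(0x0001180090000600ull) == 0x8001180090000600ull
 *
 * which is the 64-bit address form consumed by the cvmx_read_csr()
 * and cvmx_write_csr() helpers defined later in this header.
 */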
#include <asm/octeon/cvmx-asm.h>
#include <asm/octeon/cvmx-packet.h>
#include <asm/octeon/cvmx-sysinfo.h>
#include <asm/octeon/cvmx-ciu-defs.h>
#include <asm/octeon/cvmx-gpio-defs.h>
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-l2c-defs.h>
#include <asm/octeon/cvmx-l2d-defs.h>
#include <asm/octeon/cvmx-l2t-defs.h>
#include <asm/octeon/cvmx-led-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-pow-defs.h>
#include <asm/octeon/cvmx-bootinfo.h>
#include <asm/octeon/cvmx-bootmem.h>
#include <asm/octeon/cvmx-l2c.h>
#ifndef CVMX_ENABLE_DEBUG_PRINTS
#define CVMX_ENABLE_DEBUG_PRINTS 1
#endif
#if CVMX_ENABLE_DEBUG_PRINTS
#define cvmx_dprintf printk
#else
#define cvmx_dprintf(...) {}
#endif
#define CVMX_MAX_CORES (16)
#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
#define CAST64(v) ((long long)(long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))
/*
* Returns processor ID, different Linux and simple exec versions
* provided in the cvmx-app-init*.c files.
*/
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
static inline uint32_t cvmx_get_proc_id(void)
{
uint32_t id;
asm("mfc0 %0, $15,0" : "=r"(id));
return id;
}
/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
/**
* Builds a bit mask given the required size in bits.
*
* @bits: Number of bits in the mask
* Returns The mask
*/
static inline uint64_t cvmx_build_mask(uint64_t bits)
{
return ~((~0x0ull) << bits);
}
/**
* Builds a memory address for I/O based on the Major and Sub DID.
*
* @major_did: 5 bit major did
* @sub_did: 3 bit sub did
* Returns I/O base address
*/
static inline uint64_t cvmx_build_io_address(uint64_t major_did,
uint64_t sub_did)
{
return (0x1ull << 48) | (major_did << 43) | (sub_did << 40);
}
/**
* Perform mask and shift to place the supplied value into
* the supplied bit range.
*
* Example: cvmx_build_bits(39,24,value)
* <pre>
* 6 5 4 3 3 2 1
* 3 5 7 9 1 3 5 7 0
* +-------+-------+-------+-------+-------+-------+-------+------+
* 000000000000000000000000___________value000000000000000000000000
* </pre>
*
* @high_bit: Highest bit value can occupy (inclusive) 0-63
* @low_bit: Lowest bit value can occupy inclusive 0-high_bit
* @value: Value to use
* Returns Value masked and shifted
*/
static inline uint64_t cvmx_build_bits(uint64_t high_bit,
uint64_t low_bit, uint64_t value)
{
return (value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit;
}
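/*
 * Worked examples (illustrative) for the three helpers above:
 *
 *   cvmx_build_mask(8)               == 0xffull
 *   cvmx_build_io_address(1, 2)      == 0x00010a0000000000ull
 *   cvmx_build_bits(39, 24, 0xabull) == 0xab000000ull
 */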
/**
* Convert a memory pointer (void*) into a hardware compatible
* memory address (uint64_t). Octeon hardware widgets don't
* understand logical addresses.
*
* @ptr: C style memory pointer
* Returns Hardware physical address
*/
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
if (sizeof(void *) == 8) {
/*
* We're running in 64 bit mode. Normally this means
* that we can use 40 bits of address space (the
* hardware limit). Unfortunately there is one case
* where we need to limit this to 30 bits, sign
* extended 32 bit. Although these are 64 bits wide,
* only 30 bits can be used.
*/
if ((CAST64(ptr) >> 62) == 3)
return CAST64(ptr) & cvmx_build_mask(30);
else
return CAST64(ptr) & cvmx_build_mask(40);
} else {
return (long)(ptr) & 0x1fffffff;
}
}
/**
* Convert a hardware physical address (uint64_t) into a
* memory pointer (void *).
*
* @physical_address:
* Hardware physical address to memory
* Returns Pointer to memory
*/
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
if (sizeof(void *) == 8) {
/* Just set the top bit, avoiding any TLB ugliness */
return CASTPTR(void,
CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
physical_address));
} else {
return CASTPTR(void,
CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
physical_address));
}
}
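/*
 * Illustrative round trip (64-bit ABI): an XKPHYS pointer such as
 * (void *)0x8000000001000000 converts to physical address 0x1000000
 * via cvmx_ptr_to_phys(), and cvmx_phys_to_ptr(0x1000000) rebuilds an
 * XKPHYS pointer to the same memory.
 */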
/* The following #if controls the definition of the macro
CVMX_BUILD_WRITE64. This macro is used to build a store operation to
a full 64bit address. With a 64bit ABI, this can be done with a simple
pointer access. 32bit ABIs require more complicated assembly */
/* We have a full 64bit ABI. Writing to a 64bit address can be done with
a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST) \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
{ \
*CASTPTR(volatile TYPE##_t, addr) = val; \
}
/* The following #if controls the definition of the macro
CVMX_BUILD_READ64. This macro is used to build a load operation from
a full 64bit address. With a 64bit ABI, this can be done with a simple
pointer access. 32bit ABIs require more complicated assembly */
/* We have a full 64bit ABI. Reading from a 64bit address can be done with
a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT) \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
{ \
return *CASTPTR(volatile TYPE##_t, addr); \
}
/* The following defines 8 functions for writing to a 64bit address. Each
takes two arguments, the address and the value to write.
cvmx_write64_int64 cvmx_write64_uint64
cvmx_write64_int32 cvmx_write64_uint32
cvmx_write64_int16 cvmx_write64_uint16
cvmx_write64_int8 cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");
#define cvmx_write64 cvmx_write64_uint64
/* The following defines 8 functions for reading from a 64bit address. Each
takes the address as the only argument
cvmx_read64_int64 cvmx_read64_uint64
cvmx_read64_int32 cvmx_read64_uint32
cvmx_read64_int16 cvmx_read64_uint16
cvmx_read64_int8 cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
#define cvmx_read64 cvmx_read64_uint64
static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
cvmx_write64(csr_addr, val);
/*
* Perform an immediate read after every write to an RSL
* register to force the write to complete. It doesn't matter
* what RSL read we do, so we choose CVMX_MIO_BOOT_BIST_STAT
* because it is fast and harmless.
*/
if (((csr_addr >> 40) & 0x7ffff) == (0x118))
cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
}
static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
cvmx_write64(io_addr, val);
}
static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
uint64_t val = cvmx_read64(csr_addr);
return val;
}
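/*
 * Illustrative sketch of the usual CSR access pattern built on
 * cvmx_read_csr()/cvmx_write_csr(): read the 64-bit register into its
 * union from the matching *-defs.h header, update named bitfields,
 * then write it back.  This assumes cvmx-stxx-defs.h (shown earlier in
 * this series) is in scope; the helper name and the fields touched are
 * only for demonstration.
 */
static inline void example_csr_read_modify_write(int interface)
{
	union cvmx_stxx_com_ctl com_ctl;

	com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
	com_ctl.s.st_en = 1;	/* enable transmit statistics */
	com_ctl.s.inf_en = 1;	/* enable the Spi4 transmit interface */
	cvmx_write_csr(CVMX_STXX_COM_CTL(interface), com_ctl.u64);
}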
static inline void cvmx_send_single(uint64_t data)
{
const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
cvmx_write64(CVMX_IOBDMA_SENDSINGLE, data);
}
static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
union {
uint64_t u64;
struct {
uint64_t scraddr:8;
uint64_t len:8;
uint64_t addr:48;
} s;
} addr;
addr.u64 = csr_addr;
addr.s.scraddr = scraddr >> 3;
addr.s.len = 1;
cvmx_send_single(addr.u64);
}
/* Return true if Octeon is CN38XX pass 1 */
static inline int cvmx_octeon_is_pass1(void)
{
#if OCTEON_IS_COMMON_BINARY()
return 0; /* Pass 1 isn't supported for common binaries */
#else
/* Now that we know we're built for a specific model, only check CN38XX */
#if OCTEON_IS_MODEL(OCTEON_CN38XX)
return cvmx_get_proc_id() == OCTEON_CN38XX_PASS1;
#else
return 0; /* Built for non CN38XX chip, we're not CN38XX pass1 */
#endif
#endif
}
static inline unsigned int cvmx_get_core_num(void)
{
unsigned int core_num;
CVMX_RDHWRNV(core_num, 0);
return core_num;
}
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for POP instruction.
*
* @val: 32 bit value to count set bits in
*
* Returns Number of bits set
*/
static inline uint32_t cvmx_pop(uint32_t val)
{
uint32_t pop;
CVMX_POP(pop, val);
return pop;
}
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for DPOP instruction.
*
* @val: 64 bit value to count set bits in
*
* Returns Number of bits set
*/
static inline int cvmx_dpop(uint64_t val)
{
int pop;
CVMX_DPOP(pop, val);
return pop;
}
/**
* Provide current cycle counter as a return value
*
* Returns current cycle counter
*/
static inline uint64_t cvmx_get_cycle(void)
{
uint64_t cycle;
CVMX_RDHWR(cycle, 31);
return cycle;
}
/**
* Wait for the specified number of cycles
*
*/
static inline void cvmx_wait(uint64_t cycles)
{
uint64_t done = cvmx_get_cycle() + cycles;
while (cvmx_get_cycle() < done)
; /* Spin */
}
/**
* Reads a chip global cycle counter. This counts CPU cycles since
* chip reset. The counter is 64 bit.
* This register does not exist on CN38XX pass 1 silicon
*
* Returns Global chip cycle count since chip reset.
*/
static inline uint64_t cvmx_get_cycle_global(void)
{
if (cvmx_octeon_is_pass1())
return 0;
else
return cvmx_read64(CVMX_IPD_CLK_COUNT);
}
/**
* This macro spins on a field waiting for it to reach a value. It
* is common in code to need to wait for a specific field in a CSR
* to match a specific value. Conceptually this macro expands to:
*
* 1) read csr at "address" with a csr typedef of "type"
* 2) Check if ("type".s."field" "op" "value")
* 3) If #2 isn't true loop to #1 unless too much time has passed.
*/
#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec)\
( \
{ \
int result; \
do { \
uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
cvmx_sysinfo_get()->cpu_clock_hz / 1000000; \
type c; \
while (1) { \
c.u64 = cvmx_read_csr(address); \
if ((c.s.field) op(value)) { \
result = 0; \
break; \
} else if (cvmx_get_cycle() > done) { \
result = -1; \
break; \
} else \
cvmx_wait(100); \
} \
} while (0); \
result; \
})
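/*
 * Illustrative use of CVMX_WAIT_FOR_FIELD64 (hypothetical helper,
 * assumes cvmx-stxx-defs.h is in scope): spin until the Spi4 transmit
 * status reports a non-zero calendar length, giving up after 1000
 * microseconds.  Evaluates to 0 on success, -1 on timeout.
 */
static inline int example_wait_for_spi4_calendar(int interface)
{
	return CVMX_WAIT_FOR_FIELD64(CVMX_STXX_SPI4_STAT(interface),
				     union cvmx_stxx_spi4_stat,
				     len, !=, 0, 1000);
}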
/***************************************************************************/
static inline void cvmx_reset_octeon(void)
{
union cvmx_ciu_soft_rst ciu_soft_rst;
ciu_soft_rst.u64 = 0;
ciu_soft_rst.s.soft_rst = 1;
cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}
/* Return the number of cores available in the chip */
static inline uint32_t cvmx_octeon_num_cores(void)
{
uint32_t ciu_fuse = (uint32_t) cvmx_read_csr(CVMX_CIU_FUSE) & 0xffff;
return cvmx_pop(ciu_fuse);
}
/**
* Read a byte of fuse data
* @byte_addr: address to read
*
* Returns the fuse data byte (8 fuse bits)
*/
static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
{
union cvmx_mio_fus_rcmd read_cmd;
read_cmd.u64 = 0;
read_cmd.s.addr = byte_addr;
read_cmd.s.pend = 1;
cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
&& read_cmd.s.pend)
;
return read_cmd.s.dat;
}
/**
* Read a single fuse bit
*
* @fuse: Fuse number (0-1024)
*
* Returns fuse value: 0 or 1
*/
static inline int cvmx_fuse_read(int fuse)
{
return (cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1;
}
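/*
 * Usage sketch (illustrative): fuse 264 is the one consulted by
 * cvmx_octeon_model_CN36XX() below. Reading it via cvmx_fuse_read()
 * is the same as extracting bit (264 & 0x7) of fuse byte (264 >> 3);
 * the wrapper name is made up for demonstration.
 */
static inline int cvmx_example_cn36xx_fuse(void)
{
	return cvmx_fuse_read(264);
}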
static inline int cvmx_octeon_model_CN36XX(void)
{
return OCTEON_IS_MODEL(OCTEON_CN38XX)
&& !cvmx_octeon_is_pass1()
&& cvmx_fuse_read(264);
}
static inline int cvmx_octeon_zip_present(void)
{
return octeon_has_feature(OCTEON_FEATURE_ZIP);
}
static inline int cvmx_octeon_dfa_present(void)
{
if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
&& !OCTEON_IS_MODEL(OCTEON_CN31XX)
&& !OCTEON_IS_MODEL(OCTEON_CN58XX))
return 0;
else if (OCTEON_IS_MODEL(OCTEON_CN3020))
return 0;
else if (cvmx_octeon_is_pass1())
return 1;
else
return !cvmx_fuse_read(120);
}
static inline int cvmx_octeon_crypto_present(void)
{
return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
}
#endif /* __CVMX_H__ */

@@ -0,0 +1,238 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* File defining checks for different Octeon features.
*/
#ifndef __OCTEON_FEATURE_H__
#define __OCTEON_FEATURE_H__
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rnm-defs.h>
enum octeon_feature {
/* CN68XX uses port kinds for packet interface */
OCTEON_FEATURE_PKND,
/* CN68XX has different fields in word0 - word2 */
OCTEON_FEATURE_CN68XX_WQE,
/*
* Octeon models in the CN5XXX family and higher support
* atomic add instructions to memory (saa/saad).
*/
OCTEON_FEATURE_SAAD,
/* Does this Octeon support the ZIP offload engine? */
OCTEON_FEATURE_ZIP,
/* Does this Octeon support crypto acceleration using COP2? */
OCTEON_FEATURE_CRYPTO,
OCTEON_FEATURE_DORM_CRYPTO,
/* Does this Octeon support PCI express? */
OCTEON_FEATURE_PCIE,
/* Does this Octeon support SRIOs */
OCTEON_FEATURE_SRIO,
/* Does this Octeon support Interlaken */
OCTEON_FEATURE_ILK,
/* Some Octeon models support internal memory for storing
* cryptographic keys */
OCTEON_FEATURE_KEY_MEMORY,
/* Octeon has a LED controller for banks of external LEDs */
OCTEON_FEATURE_LED_CONTROLLER,
/* Octeon has a trace buffer */
OCTEON_FEATURE_TRA,
/* Octeon has a management port */
OCTEON_FEATURE_MGMT_PORT,
/* Octeon has a raid unit */
OCTEON_FEATURE_RAID,
/* Octeon has a builtin USB */
OCTEON_FEATURE_USB,
/* Octeon IPD can run without using work queue entries */
OCTEON_FEATURE_NO_WPTR,
/* Octeon has DFA state machines */
OCTEON_FEATURE_DFA,
/* Octeon MDIO block supports clause 45 transactions for 10
* Gig support */
OCTEON_FEATURE_MDIO_CLAUSE_45,
/*
* CN52XX and CN56XX used a block named NPEI for PCIe
* access. Newer chips replaced this with SLI+DPI.
*/
OCTEON_FEATURE_NPEI,
OCTEON_FEATURE_HFA,
OCTEON_FEATURE_DFM,
OCTEON_FEATURE_CIU2,
OCTEON_MAX_FEATURE
};
static inline int cvmx_fuse_read(int fuse);
/**
* Determine if the current Octeon supports a specific feature. These
* checks have been optimized to be fairly quick, but they should still
* be kept out of fast path code.
*
* @feature: Feature to check for. This should always be a constant so the
* compiler can remove the switch statement through optimization.
*
* Returns Non zero if the feature exists. Zero if the feature does not
* exist.
*/
static inline int octeon_has_feature(enum octeon_feature feature)
{
switch (feature) {
case OCTEON_FEATURE_SAAD:
return !OCTEON_IS_MODEL(OCTEON_CN3XXX);
case OCTEON_FEATURE_ZIP:
if (OCTEON_IS_MODEL(OCTEON_CN30XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX))
return 0;
else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
return 1;
else
return !cvmx_fuse_read(121);
case OCTEON_FEATURE_CRYPTO:
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
union cvmx_mio_fus_dat2 fus_2;
fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
if (fus_2.s.nocrypto || fus_2.s.nomul) {
return 0;
} else if (!fus_2.s.dorm_crypto) {
return 1;
} else {
union cvmx_rnm_ctl_status st;
st.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
return st.s.eer_val;
}
} else {
return !cvmx_fuse_read(90);
}
case OCTEON_FEATURE_DORM_CRYPTO:
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
union cvmx_mio_fus_dat2 fus_2;
fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
return !fus_2.s.nocrypto && !fus_2.s.nomul && fus_2.s.dorm_crypto;
} else {
return 0;
}
case OCTEON_FEATURE_PCIE:
return OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)
|| OCTEON_IS_MODEL(OCTEON_CN6XXX);
case OCTEON_FEATURE_SRIO:
return OCTEON_IS_MODEL(OCTEON_CN63XX)
|| OCTEON_IS_MODEL(OCTEON_CN66XX);
case OCTEON_FEATURE_ILK:
return (OCTEON_IS_MODEL(OCTEON_CN68XX));
case OCTEON_FEATURE_KEY_MEMORY:
return OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)
|| OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN6XXX);
case OCTEON_FEATURE_LED_CONTROLLER:
return OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)
|| OCTEON_IS_MODEL(OCTEON_CN56XX);
case OCTEON_FEATURE_TRA:
return !(OCTEON_IS_MODEL(OCTEON_CN30XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX));
case OCTEON_FEATURE_MGMT_PORT:
return OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)
|| OCTEON_IS_MODEL(OCTEON_CN6XXX);
case OCTEON_FEATURE_RAID:
return OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)
|| OCTEON_IS_MODEL(OCTEON_CN6XXX);
case OCTEON_FEATURE_USB:
return !(OCTEON_IS_MODEL(OCTEON_CN38XX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX));
case OCTEON_FEATURE_NO_WPTR:
return (OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)
|| OCTEON_IS_MODEL(OCTEON_CN6XXX))
&& !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
&& !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X);
case OCTEON_FEATURE_DFA:
if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
&& !OCTEON_IS_MODEL(OCTEON_CN31XX)
&& !OCTEON_IS_MODEL(OCTEON_CN58XX))
return 0;
else if (OCTEON_IS_MODEL(OCTEON_CN3020))
return 0;
else
return !cvmx_fuse_read(120);
case OCTEON_FEATURE_HFA:
if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
return 0;
else
return !cvmx_fuse_read(90);
case OCTEON_FEATURE_DFM:
if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)
|| OCTEON_IS_MODEL(OCTEON_CN66XX)))
return 0;
else
return !cvmx_fuse_read(90);
case OCTEON_FEATURE_MDIO_CLAUSE_45:
return !(OCTEON_IS_MODEL(OCTEON_CN3XXX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX));
case OCTEON_FEATURE_NPEI:
return OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX);
case OCTEON_FEATURE_PKND:
return OCTEON_IS_MODEL(OCTEON_CN68XX);
case OCTEON_FEATURE_CN68XX_WQE:
return OCTEON_IS_MODEL(OCTEON_CN68XX);
case OCTEON_FEATURE_CIU2:
return OCTEON_IS_MODEL(OCTEON_CN68XX);
default:
break;
}
return 0;
}
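/*
 * Usage sketch (illustrative only): combine feature checks the way a
 * driver might. Per the comment above, each check should be given a
 * constant so the switch folds away; the helper name is hypothetical.
 */
static inline int octeon_example_pcie_uses_npei(void)
{
	/* Only CN52XX/CN56XX route PCIe through the NPEI block. */
	return octeon_has_feature(OCTEON_FEATURE_PCIE) &&
	       octeon_has_feature(OCTEON_FEATURE_NPEI);
}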
#endif /* __OCTEON_FEATURE_H__ */

@@ -0,0 +1,342 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2010 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
#ifndef __OCTEON_MODEL_H__
#define __OCTEON_MODEL_H__
/*
* The defines below should be used with the OCTEON_IS_MODEL() macro
* to determine what model of chip the software is running on. Models
* ending in 'XX' match multiple models (families), while specific
* models match only that model. If a pass (revision) is specified,
* then only that revision will be matched. Care should be taken when
* checking for both specific models and families that the specific
* models are checked for first. While these defines are similar to
* the processor ID, they are not intended to be used by anything
 * other than the OCTEON_IS_MODEL framework, and the values are
 * subject to change at any time without notice.
*
* NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
* macros should be used outside of this file. All other macros are
* for internal use only, and may change without notice.
*/
#define OCTEON_FAMILY_MASK 0x00ffff00
/* Flag bits in top byte */
/* Ignores revision in model checks */
#define OM_IGNORE_REVISION 0x01000000
/* Check submodels */
#define OM_CHECK_SUBMODEL 0x02000000
/* Match all models prior to the one specified */
#define OM_MATCH_PREVIOUS_MODELS 0x04000000
/* Ignores the minor revision on newer parts */
#define OM_IGNORE_MINOR_REVISION 0x08000000
#define OM_FLAG_MASK 0xff000000
/* Match all cn5XXX Octeon models. */
#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
/* Match all cn6XXX Octeon models. */
#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
/* Match all cnf7XXX Octeon models. */
#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
/*
* CNF7XXX models with new revision encoding
*/
#define OCTEON_CNF71XX_PASS1_0 0x000d9400
#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN6XXX models with new revision encoding
*/
#define OCTEON_CN68XX_PASS1_0 0x000d9100
#define OCTEON_CN68XX_PASS1_1 0x000d9101
#define OCTEON_CN68XX_PASS1_2 0x000d9102
#define OCTEON_CN68XX_PASS2_0 0x000d9108
#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
#define OCTEON_CN66XX_PASS1_0 0x000d9200
#define OCTEON_CN66XX_PASS1_2 0x000d9202
#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN63XX_PASS1_0 0x000d9000
#define OCTEON_CN63XX_PASS1_1 0x000d9001
#define OCTEON_CN63XX_PASS1_2 0x000d9002
#define OCTEON_CN63XX_PASS2_0 0x000d9008
#define OCTEON_CN63XX_PASS2_1 0x000d9009
#define OCTEON_CN63XX_PASS2_2 0x000d900a
#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN61XX_PASS1_0 0x000d9300
#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN5XXX models with new revision encoding
*/
#define OCTEON_CN58XX_PASS1_0 0x000d0300
#define OCTEON_CN58XX_PASS1_1 0x000d0301
#define OCTEON_CN58XX_PASS1_2 0x000d0303
#define OCTEON_CN58XX_PASS2_0 0x000d0308
#define OCTEON_CN58XX_PASS2_1 0x000d0309
#define OCTEON_CN58XX_PASS2_2 0x000d030a
#define OCTEON_CN58XX_PASS2_3 0x000d030b
#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
#define OCTEON_CN56XX_PASS1_0 0x000d0400
#define OCTEON_CN56XX_PASS1_1 0x000d0401
#define OCTEON_CN56XX_PASS2_0 0x000d0408
#define OCTEON_CN56XX_PASS2_1 0x000d0409
#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
#define OCTEON_CN57XX OCTEON_CN56XX
#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
#define OCTEON_CN55XX OCTEON_CN56XX
#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
#define OCTEON_CN54XX OCTEON_CN56XX
#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
#define OCTEON_CN50XX_PASS1_0 0x000d0600
#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
/*
 * NOTE: The Octeon CN5000F model is not identifiable using the
 * OCTEON_IS_MODEL() functions, but is treated as CN50XX.
*/
#define OCTEON_CN52XX_PASS1_0 0x000d0700
#define OCTEON_CN52XX_PASS2_0 0x000d0708
#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
/*
 * CN3XXX models with old revision encoding
*/
#define OCTEON_CN38XX_PASS1 0x000d0000
#define OCTEON_CN38XX_PASS2 0x000d0001
#define OCTEON_CN38XX_PASS3 0x000d0003
#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
#define OCTEON_CN36XX OCTEON_CN38XX
#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
#define OCTEON_CN31XX_PASS1 0x000d0100
#define OCTEON_CN31XX_PASS1_1 0x000d0102
#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
/*
 * This model is only used for internal checks; it is not a valid
* model for the OCTEON_MODEL environment variable. This matches the
* CN3010 and CN3005 but NOT the CN3020.
*/
#define OCTEON_CN30XX_PASS1 0x000d0200
#define OCTEON_CN30XX_PASS1_1 0x000d0202
#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
/*
* This matches the complete family of CN3xxx CPUs, and not subsequent
* models
*/
#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
/* These are used to cover entire families of OCTEON processors */
#define OCTEON_FAM_1 (OCTEON_CN3XXX)
#define OCTEON_FAM_PLUS (OCTEON_CN5XXX)
#define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
#define OCTEON_FAM_2 (OCTEON_CN6XXX)
/* The revision byte (low byte) has two different encodings.
* CN3XXX:
*
* bits
* <7:5>: reserved (0)
* <4>: alternate package
* <3:0>: revision
*
* CN5XXX:
*
* bits
* <7>: reserved (0)
* <6>: alternate package
* <5:3>: major revision
* <2:0>: minor revision
*
*/
/* Masks used for the various types of model/family/revision matching */
#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
#define OCTEON_38XX_MODEL_MASK 0x00ffff10
#define OCTEON_38XX_MODEL_REV_MASK (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
/* CN5XXX and later use different layout of bits in the revision ID field */
#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
/* forward declarations */
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
/* NOTE: This is for internal use only! */
#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_FAMILY_REV_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_FAMILY_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_REV_MASK)) || \
((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
&& (((chip_model) & OCTEON_38XX_MODEL_MASK) < ((arg_model) & OCTEON_38XX_MODEL_MASK))) \
)) || \
(((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_REV_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_MINOR_REVISION) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MINOR_REV_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
&& ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
&& ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \
((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
&& (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
)))
/* NOTE: This is for internal use only! */
static inline int __octeon_is_model_runtime__(uint32_t model)
{
uint32_t cpuid = cvmx_get_proc_id();
/*
* Check for special case of mismarked 3005 samples. We only
* need to check if the sub model isn't being ignored
*/
if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
cpuid |= 0x10;
}
return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
}
/*
* The OCTEON_IS_MODEL macro should be used for all Octeon model checking done
* in a program.
 * Checks should be done at runtime if at all possible, and must be
 * conditionalized with OCTEON_IS_COMMON_BINARY() if runtime checking
 * support is required.
*
* Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
* is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
* I.e.:
* #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
*/
#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
#define OCTEON_IS_COMMON_BINARY() 1
#undef OCTEON_MODEL
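/*
 * Usage sketch (illustrative): a runtime family check, as recommended
 * above, instead of a preprocessor test. OCTEON_CN6XXX matches the
 * whole CN6XXX family via OM_MATCH_6XXX_FAMILY_MODELS; the helper
 * name is hypothetical.
 */
static inline int octeon_example_is_cn6xxx_family(void)
{
	return OCTEON_IS_MODEL(OCTEON_CN6XXX);
}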
const char *octeon_model_get_string(uint32_t chip_id);
const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer);
/*
* Return the octeon family, i.e., ProcessorID of the PrID register.
*/
static inline uint32_t cvmx_get_octeon_family(void)
{
return cvmx_get_proc_id() & OCTEON_FAMILY_MASK;
}
#include <asm/octeon/octeon-feature.h>
#endif /* __OCTEON_MODEL_H__ */

@@ -0,0 +1,255 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2008 Cavium Networks
*/
#ifndef __ASM_OCTEON_OCTEON_H
#define __ASM_OCTEON_OCTEON_H
#include <asm/octeon/cvmx.h>
extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
uint64_t alignment,
uint64_t min_addr,
uint64_t max_addr,
int do_locking);
extern void *octeon_bootmem_alloc(uint64_t size, uint64_t alignment,
int do_locking);
extern void *octeon_bootmem_alloc_range(uint64_t size, uint64_t alignment,
uint64_t min_addr, uint64_t max_addr,
int do_locking);
extern void *octeon_bootmem_alloc_named(uint64_t size, uint64_t alignment,
char *name);
extern void *octeon_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
uint64_t max_addr, uint64_t align,
char *name);
extern void *octeon_bootmem_alloc_named_address(uint64_t size, uint64_t address,
char *name);
extern int octeon_bootmem_free_named(char *name);
extern void octeon_bootmem_lock(void);
extern void octeon_bootmem_unlock(void);
extern int octeon_is_simulation(void);
extern int octeon_is_pci_host(void);
extern int octeon_usb_is_ref_clk(void);
extern uint64_t octeon_get_clock_rate(void);
extern u64 octeon_get_io_clock_rate(void);
extern const char *octeon_board_type_string(void);
extern const char *octeon_get_pci_interrupts(void);
extern int octeon_get_southbridge_interrupt(void);
extern int octeon_get_boot_coremask(void);
extern int octeon_get_boot_num_arguments(void);
extern const char *octeon_get_boot_argument(int arg);
extern void octeon_hal_setup_reserved32(void);
extern void octeon_user_io_init(void);
struct octeon_cop2_state;
extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
extern void octeon_crypto_disable(struct octeon_cop2_state *state,
unsigned long flags);
extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
extern void octeon_init_cvmcount(void);
extern void octeon_setup_delays(void);
extern void octeon_io_clk_delay(unsigned long);
#define OCTEON_ARGV_MAX_ARGS 64
#define OCTOEN_SERIAL_LEN 20
struct octeon_boot_descriptor {
/* Start of block referenced by assembly code - do not change! */
uint32_t desc_version;
uint32_t desc_size;
uint64_t stack_top;
uint64_t heap_base;
uint64_t heap_end;
/* Only used by bootloader */
uint64_t entry_point;
uint64_t desc_vaddr;
	/* End of this block referenced by assembly code - do not change! */
uint32_t exception_base_addr;
uint32_t stack_size;
uint32_t heap_size;
/* Argc count for application. */
uint32_t argc;
uint32_t argv[OCTEON_ARGV_MAX_ARGS];
#define BOOT_FLAG_INIT_CORE (1 << 0)
#define OCTEON_BL_FLAG_DEBUG (1 << 1)
#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
/* If set, use uart1 for console */
#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
/* If set, use PCI console */
#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
/* Call exit on break on serial port */
#define OCTEON_BL_FLAG_BREAK (1 << 5)
uint32_t flags;
uint32_t core_mask;
	/* DRAM size in megabytes. */
uint32_t dram_size;
/* physical address of free memory descriptor block. */
uint32_t phy_mem_desc_addr;
/* used to pass flags from app to debugger. */
uint32_t debugger_flags_base_addr;
/* CPU clock speed, in hz. */
uint32_t eclock_hz;
/* DRAM clock speed, in hz. */
uint32_t dclock_hz;
/* SPI4 clock in hz. */
uint32_t spi_clock_hz;
uint16_t board_type;
uint8_t board_rev_major;
uint8_t board_rev_minor;
uint16_t chip_type;
uint8_t chip_rev_major;
uint8_t chip_rev_minor;
char board_serial_number[OCTOEN_SERIAL_LEN];
uint8_t mac_addr_base[6];
uint8_t mac_addr_count;
uint64_t cvmx_desc_vaddr;
};
union octeon_cvmemctl {
uint64_t u64;
struct {
/* RO 1 = BIST fail, 0 = BIST pass */
uint64_t tlbbist:1;
/* RO 1 = BIST fail, 0 = BIST pass */
uint64_t l1cbist:1;
/* RO 1 = BIST fail, 0 = BIST pass */
uint64_t l1dbist:1;
/* RO 1 = BIST fail, 0 = BIST pass */
uint64_t dcmbist:1;
/* RO 1 = BIST fail, 0 = BIST pass */
uint64_t ptgbist:1;
/* RO 1 = BIST fail, 0 = BIST pass */
uint64_t wbfbist:1;
/* Reserved */
uint64_t reserved:22;
/* R/W If set, marked write-buffer entries time out
		 * the same as other entries; if clear, marked
* write-buffer entries use the maximum timeout. */
uint64_t dismarkwblongto:1;
/* R/W If set, a merged store does not clear the
* write-buffer entry timeout state. */
uint64_t dismrgclrwbto:1;
/* R/W Two bits that are the MSBs of the resultant
* CVMSEG LM word location for an IOBDMA. The other 8
* bits come from the SCRADDR field of the IOBDMA. */
uint64_t iobdmascrmsb:2;
/* R/W If set, SYNCWS and SYNCS only order marked
* stores; if clear, SYNCWS and SYNCS only order
* unmarked stores. SYNCWSMARKED has no effect when
* DISSYNCWS is set. */
uint64_t syncwsmarked:1;
/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
* SYNC. */
uint64_t dissyncws:1;
/* R/W If set, no stall happens on write buffer
* full. */
uint64_t diswbfst:1;
/* R/W If set (and SX set), supervisor-level
* loads/stores can use XKPHYS addresses with
* VA<48>==0 */
uint64_t xkmemenas:1;
/* R/W If set (and UX set), user-level loads/stores
* can use XKPHYS addresses with VA<48>==0 */
uint64_t xkmemenau:1;
/* R/W If set (and SX set), supervisor-level
* loads/stores can use XKPHYS addresses with
* VA<48>==1 */
uint64_t xkioenas:1;
/* R/W If set (and UX set), user-level loads/stores
* can use XKPHYS addresses with VA<48>==1 */
uint64_t xkioenau:1;
/* R/W If set, all stores act as SYNCW (NOMERGE must
* be set when this is set) RW, reset to 0. */
uint64_t allsyncw:1;
/* R/W If set, no stores merge, and all stores reach
* the coherent bus in order. */
uint64_t nomerge:1;
		/* R/W Selects the bit in the counter used for DID
		 * time-outs: 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
		 * 2^14. Actual time-out is between 1x and 2x this
		 * interval. For example, with DIDTTO=3, expiration
		 * interval is between 16K and 32K. */
uint64_t didtto:2;
/* R/W If set, the (mem) CSR clock never turns off. */
uint64_t csrckalwys:1;
/* R/W If set, mclk never turns off. */
uint64_t mclkalwys:1;
		/* R/W Selects the bit in the counter used for write
		 * buffer flush time-outs: (WBFLT+11) is the bit
* position in an internal counter used to determine
* expiration. The write buffer expires between 1x and
* 2x this interval. For example, with WBFLT = 0, a
* write buffer expires between 2K and 4K cycles after
* the write buffer entry is allocated. */
uint64_t wbfltime:3;
/* R/W If set, do not put Istream in the L2 cache. */
uint64_t istrnol2:1;
/* R/W The write buffer threshold. */
uint64_t wbthresh:4;
/* Reserved */
uint64_t reserved2:2;
/* R/W If set, CVMSEG is available for loads/stores in
* kernel/debug mode. */
uint64_t cvmsegenak:1;
/* R/W If set, CVMSEG is available for loads/stores in
* supervisor mode. */
uint64_t cvmsegenas:1;
/* R/W If set, CVMSEG is available for loads/stores in
* user mode. */
uint64_t cvmsegenau:1;
/* R/W Size of local memory in cache blocks, 54 (6912
* bytes) is max legal value. */
uint64_t lmemsz:6;
} s;
};
extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
extern int octeon_get_boot_uart(void);
struct uart_port;
extern unsigned int octeon_serial_in(struct uart_port *, int);
extern void octeon_serial_out(struct uart_port *, int, int);
/**
 * Write a 32-bit value to the Octeon NPI register space
*
* @address: Address to write to
* @val: Value to write
*/
static inline void octeon_npi_write32(uint64_t address, uint32_t val)
{
cvmx_write64_uint32(address ^ 4, val);
cvmx_read64_uint32(address ^ 4);
}
/**
 * Read a 32-bit value from the Octeon NPI register space
*
* @address: Address to read
* Returns The result
*/
static inline uint32_t octeon_npi_read32(uint64_t address)
{
return cvmx_read64_uint32(address ^ 4);
}
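/*
 * Usage sketch (illustrative): a 32-bit read-modify-write through the
 * NPI accessors above. The address parameter is a placeholder rather
 * than a specific NPI register, and the helper name is made up.
 */
static inline void octeon_example_npi_set_bits32(uint64_t address,
						 uint32_t bits)
{
	octeon_npi_write32(address, octeon_npi_read32(address) | bits);
}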
extern struct cvmx_bootinfo *octeon_bootinfo;
extern uint64_t octeon_bootloader_entry_addr;
extern void (*octeon_irq_setup_secondary)(void);
typedef void (*octeon_irq_ip4_handler_t)(void);
void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t);
extern void octeon_fixup_irqs(void);
#endif /* __ASM_OCTEON_OCTEON_H */

@@ -0,0 +1,66 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005-2009 Cavium Networks
*/
#ifndef __PCI_OCTEON_H__
#define __PCI_OCTEON_H__
#include <linux/pci.h>
/*
* The physical memory base mapped by BAR1. 256MB at the end of the
* first 4GB.
*/
#define CVMX_PCIE_BAR1_PHYS_BASE ((1ull << 32) - (1ull << 28))
#define CVMX_PCIE_BAR1_PHYS_SIZE (1ull << 28)
/*
 * The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has a 41-bit BAR2;
 * place BAR1 so it is the same for both.
*/
#define CVMX_PCIE_BAR1_RC_BASE (1ull << 41)
/*
* pcibios_map_irq() is defined inside pci-octeon.c. All it does is
* call the Octeon specific version pointed to by this variable. This
* function needs to change for PCI or PCIe based hosts.
*/
extern int (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
u8 slot, u8 pin);
/*
* For PCI (not PCIe) the BAR2 base address.
*/
#define OCTEON_BAR2_PCI_ADDRESS 0x8000000000ull
/*
* For PCI (not PCIe) the base of the memory mapped by BAR1
*/
extern u64 octeon_bar1_pci_phys;
/*
* The following defines are used when octeon_dma_bar_type =
* OCTEON_DMA_BAR_TYPE_BIG
*/
#define OCTEON_PCI_BAR1_HOLE_BITS 5
#define OCTEON_PCI_BAR1_HOLE_SIZE (1ul<<(OCTEON_PCI_BAR1_HOLE_BITS+3))
enum octeon_dma_bar_type {
OCTEON_DMA_BAR_TYPE_INVALID,
OCTEON_DMA_BAR_TYPE_SMALL,
OCTEON_DMA_BAR_TYPE_BIG,
OCTEON_DMA_BAR_TYPE_PCIE,
OCTEON_DMA_BAR_TYPE_PCIE2
};
/*
* This tells the DMA mapping system in dma-octeon.c how to map PCI
* DMA addresses.
*/
extern enum octeon_dma_bar_type octeon_dma_bar_type;
#endif