Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,605 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* Interface definitions for the mpipe driver.
*/
#ifndef _SYS_HV_DRV_MPIPE_INTF_H
#define _SYS_HV_DRV_MPIPE_INTF_H
#include <arch/mpipe.h>
#include <arch/mpipe_constants.h>
/** Number of mPIPE instances supported */
#define HV_MPIPE_INSTANCE_MAX (2)
/** Number of buffer stacks (32). */
#define HV_MPIPE_NUM_BUFFER_STACKS \
(MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
/** Number of NotifRings (256). */
#define HV_MPIPE_NUM_NOTIF_RINGS (MPIPE_NUM_NOTIF_RINGS)
/** Number of NotifGroups (32). */
#define HV_MPIPE_NUM_NOTIF_GROUPS (MPIPE_NUM_NOTIF_GROUPS)
/** Number of buckets (4160). */
#define HV_MPIPE_NUM_BUCKETS (MPIPE_NUM_BUCKETS)
/** Number of "lo" buckets (4096). */
#define HV_MPIPE_NUM_LO_BUCKETS 4096
/** Number of "hi" buckets (64). */
#define HV_MPIPE_NUM_HI_BUCKETS \
(HV_MPIPE_NUM_BUCKETS - HV_MPIPE_NUM_LO_BUCKETS)
/** Number of edma rings (24). */
#define HV_MPIPE_NUM_EDMA_RINGS \
(MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH)
/** A flag bit indicating a fixed resource allocation. */
#define HV_MPIPE_ALLOC_FIXED 0x01
/** Offset for the config register MMIO region. */
#define HV_MPIPE_CONFIG_MMIO_OFFSET \
(MPIPE_MMIO_ADDR__REGION_VAL_CFG << MPIPE_MMIO_ADDR__REGION_SHIFT)
/** Size of the config register MMIO region. */
#define HV_MPIPE_CONFIG_MMIO_SIZE (64 * 1024)
/** Offset for the fast register MMIO region. */
#define HV_MPIPE_FAST_MMIO_OFFSET \
(MPIPE_MMIO_ADDR__REGION_VAL_IDMA << MPIPE_MMIO_ADDR__REGION_SHIFT)
/** Size of the fast register MMIO region (IDMA, EDMA, buffer stack). */
#define HV_MPIPE_FAST_MMIO_SIZE \
((MPIPE_MMIO_ADDR__REGION_VAL_BSM + 1 - MPIPE_MMIO_ADDR__REGION_VAL_IDMA) \
<< MPIPE_MMIO_ADDR__REGION_SHIFT)
/*
* Each type of resource allocation comes in quantized chunks, where
* XXX_BITS is the number of chunks, and XXX_RES_PER_BIT is the number
* of resources in each chunk.
*/
/** Number of buffer stack chunks available (32). */
#define HV_MPIPE_ALLOC_BUFFER_STACKS_BITS \
MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH
/** Granularity of buffer stack allocation (1). */
#define HV_MPIPE_ALLOC_BUFFER_STACKS_RES_PER_BIT \
(HV_MPIPE_NUM_BUFFER_STACKS / HV_MPIPE_ALLOC_BUFFER_STACKS_BITS)
/** Number of NotifRing chunks available (32). */
#define HV_MPIPE_ALLOC_NOTIF_RINGS_BITS \
MPIPE_MMIO_INIT_DAT_GX36_0__NOTIF_RING_MASK_WIDTH
/** Granularity of NotifRing allocation (8). */
#define HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT \
(HV_MPIPE_NUM_NOTIF_RINGS / HV_MPIPE_ALLOC_NOTIF_RINGS_BITS)
/** Number of NotifGroup chunks available (32). */
#define HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS \
HV_MPIPE_NUM_NOTIF_GROUPS
/** Granularity of NotifGroup allocation (1). */
#define HV_MPIPE_ALLOC_NOTIF_GROUPS_RES_PER_BIT \
(HV_MPIPE_NUM_NOTIF_GROUPS / HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS)
/** Number of lo bucket chunks available (16). */
#define HV_MPIPE_ALLOC_LO_BUCKETS_BITS \
MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_LO_WIDTH
/** Granularity of lo bucket allocation (256). */
#define HV_MPIPE_ALLOC_LO_BUCKETS_RES_PER_BIT \
(HV_MPIPE_NUM_LO_BUCKETS / HV_MPIPE_ALLOC_LO_BUCKETS_BITS)
/** Number of hi bucket chunks available (16). */
#define HV_MPIPE_ALLOC_HI_BUCKETS_BITS \
MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_HI_WIDTH
/** Granularity of hi bucket allocation (4). */
#define HV_MPIPE_ALLOC_HI_BUCKETS_RES_PER_BIT \
(HV_MPIPE_NUM_HI_BUCKETS / HV_MPIPE_ALLOC_HI_BUCKETS_BITS)
/** Number of eDMA ring chunks available (24). */
#define HV_MPIPE_ALLOC_EDMA_RINGS_BITS \
MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH
/** Granularity of eDMA ring allocation (1). */
#define HV_MPIPE_ALLOC_EDMA_RINGS_RES_PER_BIT \
(HV_MPIPE_NUM_EDMA_RINGS / HV_MPIPE_ALLOC_EDMA_RINGS_BITS)
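/* A hypothetical compile-time sanity check (not part of the original
 * header): the chunk model above implies that each resource total must
 * equal its chunk count times its granularity, i.e. each NUM value must
 * be evenly divisible by its BITS value.  C11 _Static_assert is assumed
 * to be available. */
_Static_assert(HV_MPIPE_NUM_NOTIF_RINGS ==
               HV_MPIPE_ALLOC_NOTIF_RINGS_BITS *
               HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT,
               "NotifRing chunks must cover all NotifRings");
_Static_assert(HV_MPIPE_NUM_LO_BUCKETS ==
               HV_MPIPE_ALLOC_LO_BUCKETS_BITS *
               HV_MPIPE_ALLOC_LO_BUCKETS_RES_PER_BIT,
               "lo bucket chunks must cover all lo buckets");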
/** Bit vector encoding which NotifRings are in a NotifGroup. */
typedef struct
{
/** The actual bits. */
uint64_t ring_mask[4];
} gxio_mpipe_notif_group_bits_t;
/** Another name for MPIPE_LBL_INIT_DAT_BSTS_TBL_t. */
typedef MPIPE_LBL_INIT_DAT_BSTS_TBL_t gxio_mpipe_bucket_info_t;
/** Eight buffer stack ids. */
typedef struct
{
/** The stacks. */
uint8_t stacks[8];
} gxio_mpipe_rules_stacks_t;
/** A destination mac address. */
typedef struct
{
/** The octets. */
uint8_t octets[6];
} gxio_mpipe_rules_dmac_t;
/** A vlan. */
typedef uint16_t gxio_mpipe_rules_vlan_t;
/** Maximum number of characters in a link name. */
#define GXIO_MPIPE_LINK_NAME_LEN 32
/** Structure holding a link name. Only needed, and only typedef'ed,
* because the IORPC stub generator only handles types which are single
* words coming before the parameter name. */
typedef struct
{
/** The name itself. */
char name[GXIO_MPIPE_LINK_NAME_LEN];
}
_gxio_mpipe_link_name_t;
/** Maximum number of characters in a symbol name. */
#define GXIO_MPIPE_SYMBOL_NAME_LEN 128
/** Structure holding a symbol name. Only needed, and only typedef'ed,
* because the IORPC stub generator only handles types which are single
* words coming before the parameter name. */
typedef struct
{
/** The name itself. */
char name[GXIO_MPIPE_SYMBOL_NAME_LEN];
}
_gxio_mpipe_symbol_name_t;
/** Structure holding a MAC address. */
typedef struct
{
/** The address. */
uint8_t mac[6];
}
_gxio_mpipe_link_mac_t;
/** Request shared data permission -- that is, the ability to send and
* receive packets -- on the specified link. Other processes may also
* request shared data permission on the same link.
*
* No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
* or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
*/
#define GXIO_MPIPE_LINK_DATA 0x00000001UL
/** Do not request data permission on the specified link.
*
* No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
* or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
*/
#define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL
/** Request exclusive data permission -- that is, the ability to send and
* receive packets -- on the specified link. No other processes may
* request data permission on this link, and if any process already has
* data permission on it, this open will fail.
*
* No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
* or ::GXIO_MPIPE_LINK_EXCL_DATA may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
*/
#define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL
/** Request shared stats permission -- that is, the ability to read and write
* registers which contain link statistics, and to get link attributes --
* on the specified link. Other processes may also request shared stats
* permission on the same link.
*
* No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
* or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
*/
#define GXIO_MPIPE_LINK_STATS 0x00000008UL
/** Do not request stats permission on the specified link.
*
* No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
* or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
*/
#define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL
/** Request exclusive stats permission -- that is, the ability to read and
* write registers which contain link statistics, and to get link
* attributes -- on the specified link. No other processes may request
* stats permission on this link, and if any process already
* has stats permission on it, this open will fail.
*
* Requesting exclusive stats permission is normally a very bad idea, since
* it prevents programs like mpipe-stat from providing information on this
* link. Applications should only do this if they use MAC statistics
* registers, and cannot tolerate any of the clear-on-read registers being
* reset by other statistics programs.
*
* No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
* or ::GXIO_MPIPE_LINK_EXCL_STATS may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
*/
#define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL
/** Request shared control permission -- that is, the ability to modify link
* attributes, and read and write MAC and MDIO registers -- on the
* specified link. Other processes may also request shared control
* permission on the same link.
*
* No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
* or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
*/
#define GXIO_MPIPE_LINK_CTL 0x00000040UL
/** Do not request control permission on the specified link.
*
* No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
* or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
*/
#define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL
/** Request exclusive control permission -- that is, the ability to modify
* link attributes, and read and write MAC and MDIO registers -- on the
* specified link. No other processes may request control permission on
* this link, and if any process already has control permission on it,
* this open will fail.
*
* Requesting exclusive control permission is not always a good idea, since
* it prevents programs like mpipe-link from configuring the link.
*
* No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
* or ::GXIO_MPIPE_LINK_EXCL_CTL may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
*/
#define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL
/** Set the desired state of the link to up, allowing any speeds which are
* supported by the link hardware, as part of this open operation; do not
* change the desired state of the link when it is closed or the process
* exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
* ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
* ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
*/
#define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL
/** Set the desired state of the link to up, allowing any speeds which are
* supported by the link hardware, as part of this open operation; when the
* link is closed or this process exits, if no other process has the link
* open, set the desired state of the link to down. No more than one of
* ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
* ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
* specified in a gxio_mpipe_link_open() call. If none are specified,
* ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
*/
#define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL
/** Do not change the desired state of the link as part of the open
* operation; when the link is closed or this process exits, if no other
* process has the link open, set the desired state of the link to down.
* No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
* ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
* ::GXIO_MPIPE_LINK_AUTO_NONE may be specified in a gxio_mpipe_link_open()
* call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
*/
#define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL
/** Do not change the desired state of the link as part of the open
* operation; do not change the desired state of the link when it is
* closed or the process exits. No more than one of
* ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
* ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
* specified in a gxio_mpipe_link_open() call. If none are specified,
* ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
*/
#define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL
/** Request that this open call not complete until the network link is up.
* The process will wait as long as necessary for this to happen;
* applications which wish to abandon waiting for the link after a
* specific time period should not specify this flag when opening a link,
* but should instead call gxio_mpipe_link_wait() afterward. The link
* must be opened with stats permission. Note that this flag by itself
* does not change the desired link state; if other open flags or previous
* link state changes have not requested a desired state of up, the open
* call will never complete. This flag is not available to kernel
* clients.
*/
#define GXIO_MPIPE_LINK_WAIT 0x00002000UL
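/* A minimal usage sketch (not part of the original header): combining
 * the flags above in a gxio_mpipe_link_open() call.  The
 * gxio_mpipe_context_t and gxio_mpipe_link_t types and the exact
 * function signature are assumptions taken from the gxio mPIPE API
 * (<gxio/mpipe.h>), which this header does not define. */
static inline int open_link_and_wait(gxio_mpipe_context_t *context,
                                     gxio_mpipe_link_t *link,
                                     const char *link_name)
{
  /* DATA, STATS, and CTL are the documented defaults; with no AUTO
   * flag, AUTO_UPDOWN is assumed, so the desired state becomes "up"
   * and GXIO_MPIPE_LINK_WAIT can eventually complete.  WAIT also
   * requires that the link be opened with stats permission. */
  unsigned int flags = GXIO_MPIPE_LINK_DATA | GXIO_MPIPE_LINK_STATS |
                       GXIO_MPIPE_LINK_CTL | GXIO_MPIPE_LINK_WAIT;
  return gxio_mpipe_link_open(link, context, link_name, flags);
}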
/*
* Note: link attributes must fit in 24 bits, since we use the top 8 bits
* of the IORPC offset word for the channel number.
*/
/** Determine whether jumbo frames may be received. If this attribute's
* value is nonzero, the MAC will accept frames of up to 10240 bytes.
* If the value is zero, the MAC will only accept frames of up to 1544
* bytes. The default value is zero. */
#define GXIO_MPIPE_LINK_RECEIVE_JUMBO 0x010000
/** Determine whether to send pause frames on this link if the mPIPE packet
* FIFO is nearly full. If the value is zero, pause frames are not sent.
* If the value is nonzero, it is the delay value which will be sent in any
* pause frames which are output, in units of 512 bit times.
*
* Bear in mind that in almost all circumstances, the mPIPE packet FIFO
* will never fill up, since mPIPE will empty it as fast as or faster than
* the incoming data rate, by either delivering or dropping packets. The
* only situation in which this is not true is if the memory and cache
* subsystem is extremely heavily loaded, and mPIPE cannot perform DMA of
* packet data to memory in a timely fashion. In particular, pause frames
* will <em>not</em> be sent if packets cannot be delivered because
* NotifRings are full, buckets are full, or buffers are not available in
* a buffer stack. */
#define GXIO_MPIPE_LINK_SEND_PAUSE 0x020000
/** Determine whether to suspend output on the receipt of pause frames.
* If the value is nonzero, the mPIPE shim will suspend output on the link's
* channel when a pause frame is received. If the value is zero, pause
* frames will be ignored. The default value is zero. */
#define GXIO_MPIPE_LINK_RECEIVE_PAUSE 0x030000
/** Interface MAC address. The value is a 6-byte MAC address, in the least
* significant 48 bits of the value; in other words, an address which would
* be printed as '12:34:56:78:90:AB' in IEEE 802 canonical format would
* be returned as 0x1234567890ab.
*
* Depending upon the overall system design, a MAC address may or may not
* be available for each interface. Note that the interface's MAC address
* does not limit the packets received on its channel, although the
* classifier's rules could be configured to do that. Similarly, the MAC
* address is not used when transmitting packets, although applications
* could certainly decide to use the assigned address as a source MAC
* address when doing so. This attribute may only be retrieved with
* gxio_mpipe_link_get_attr(); it may not be modified.
*/
#define GXIO_MPIPE_LINK_MAC 0x040000
/** Determine whether to discard egress packets on link down. If this value
* is nonzero, packets sent on this link while the link is down will be
* discarded. If this value is zero, no packets will be sent on this link
* while it is down. The default value is one. */
#define GXIO_MPIPE_LINK_DISCARD_IF_DOWN 0x050000
/** Possible link state. The value is a combination of link state flags,
* ORed together, that indicate link modes which are actually supported by
* the hardware. This attribute may only be retrieved with
* gxio_mpipe_link_get_attr(); it may not be modified. */
#define GXIO_MPIPE_LINK_POSSIBLE_STATE 0x060000
/** Current link state. The value is a combination of link state flags,
* ORed together, that indicate the current state of the hardware. If the
* link is down, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will be zero;
* if the link is up, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will
* result in exactly one of the speed values, indicating the current speed.
* This attribute may only be retrieved with gxio_mpipe_link_get_attr(); it
* may not be modified. */
#define GXIO_MPIPE_LINK_CURRENT_STATE 0x070000
/** Desired link state. The value is a combination of flags, which specify
* the desired state for the link. With gxio_mpipe_link_set_attr(), this
* will, in the background, attempt to bring up the link using whichever of
* the requested flags are reasonable, or take down the link if the flags
* are zero. The actual link up or down operation may happen after this
* call completes. If the link state changes in the future, the system
* will continue to try to get back to the desired link state; for
* instance, if the link is brought up successfully, and then the network
* cable is disconnected, the link will go down. However, the desired
* state of the link is still up, so if the cable is reconnected, the link
* will be brought up again.
*
* With gxio_mpipe_link_get_attr(), this will indicate the desired state
* for the link, as set with a previous gxio_mpipe_link_set_attr() call,
* or implicitly by a gxio_mpipe_link_open() or link close operation.
* This may not reflect the current state of the link; to get that, use
* ::GXIO_MPIPE_LINK_CURRENT_STATE.
*/
#define GXIO_MPIPE_LINK_DESIRED_STATE 0x080000
/** Link can run, should run, or is running at 10 Mbps. */
#define GXIO_MPIPE_LINK_10M 0x0000000000000001UL
/** Link can run, should run, or is running at 100 Mbps. */
#define GXIO_MPIPE_LINK_100M 0x0000000000000002UL
/** Link can run, should run, or is running at 1 Gbps. */
#define GXIO_MPIPE_LINK_1G 0x0000000000000004UL
/** Link can run, should run, or is running at 10 Gbps. */
#define GXIO_MPIPE_LINK_10G 0x0000000000000008UL
/** Link can run, should run, or is running at 20 Gbps. */
#define GXIO_MPIPE_LINK_20G 0x0000000000000010UL
/** Link can run, should run, or is running at 25 Gbps. */
#define GXIO_MPIPE_LINK_25G 0x0000000000000020UL
/** Link can run, should run, or is running at 50 Gbps. */
#define GXIO_MPIPE_LINK_50G 0x0000000000000040UL
/** Link should run at the highest speed supported by the link and by
* the device connected to the link. Only usable as a value for
* the link's desired state; never returned as a value for the current
* or possible states. */
#define GXIO_MPIPE_LINK_ANYSPEED 0x0000000000000800UL
/** All legal link speeds. This value is provided for use in extracting
* the speed-related subset of the link state flags; it is not intended
* to be set directly as a value for one of the GXIO_MPIPE_LINK_xxx_STATE
* attributes. A link is up or is requested to be up if its current or
* desired state, respectively, ANDed with this value, is nonzero. */
#define GXIO_MPIPE_LINK_SPEED_MASK 0x0000000000000FFFUL
/** Link can run, should run, or is running in MAC loopback mode. This
* loops transmitted packets back to the receiver, inside the Tile
* Processor. */
#define GXIO_MPIPE_LINK_LOOP_MAC 0x0000000000001000UL
/** Link can run, should run, or is running in PHY loopback mode. This
* loops transmitted packets back to the receiver, inside the external
* PHY chip. */
#define GXIO_MPIPE_LINK_LOOP_PHY 0x0000000000002000UL
/** Link can run, should run, or is running in external loopback mode.
* This requires that an external loopback plug be installed on the
* Ethernet port. Note that only some links require that this be
* configured via the gxio_mpipe_link routines; other links can do
* external loopback with the plug and no special configuration. */
#define GXIO_MPIPE_LINK_LOOP_EXT 0x0000000000004000UL
/** All legal loopback types. */
#define GXIO_MPIPE_LINK_LOOP_MASK 0x000000000000F000UL
/** Link can run, should run, or is running in full-duplex mode.
* If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
* specified in a set of desired state flags, both are assumed. */
#define GXIO_MPIPE_LINK_FDX 0x0000000000010000UL
/** Link can run, should run, or is running in half-duplex mode.
* If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
* specified in a set of desired state flags, both are assumed. */
#define GXIO_MPIPE_LINK_HDX 0x0000000000020000UL
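/* A minimal sketch (not part of the original header) of decoding the
 * current link state.  gxio_mpipe_link_get_attr() is referenced by the
 * comments above but not declared here; its signature is an assumption
 * based on the gxio mPIPE API. */
static inline int64_t link_current_speed(gxio_mpipe_link_t *link)
{
  int64_t state;
  if (gxio_mpipe_link_get_attr(link, GXIO_MPIPE_LINK_CURRENT_STATE,
                               &state) != 0)
    return -1;
  /* Zero if the link is down; otherwise exactly one speed bit
   * (e.g. GXIO_MPIPE_LINK_1G) is set. */
  return state & GXIO_MPIPE_LINK_SPEED_MASK;
}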
/** An individual rule. */
typedef struct
{
/** The total size. */
uint16_t size;
/** The priority. */
int16_t priority;
/** The "headroom" in each buffer. */
uint8_t headroom;
/** The "tailroom" in each buffer. */
uint8_t tailroom;
/** The "capacity" of the largest buffer. */
uint16_t capacity;
/** The mask for converting a flow hash into a bucket. */
uint16_t bucket_mask;
/** The offset for converting a flow hash into a bucket. */
uint16_t bucket_first;
/** The buffer stack ids. */
gxio_mpipe_rules_stacks_t stacks;
/** The actual channels. */
uint32_t channel_bits;
/** The number of dmacs. */
uint16_t num_dmacs;
/** The number of vlans. */
uint16_t num_vlans;
/** The actual dmacs and vlans. */
uint8_t dmacs_and_vlans[];
} gxio_mpipe_rules_rule_t;
/** A list of classifier rules. */
typedef struct
{
/** The offset to the end of the current rule. */
uint16_t tail;
/** The offset to the start of the current rule. */
uint16_t head;
/** The actual rules. */
uint8_t rules[4096 - 4];
} gxio_mpipe_rules_list_t;
/** mPIPE statistics structure. These counters include all relevant
* events occurring on all links within the mPIPE shim. */
typedef struct
{
/** Number of ingress packets dropped for any reason. */
uint64_t ingress_drops;
/** Number of ingress packets dropped because a buffer stack was empty. */
uint64_t ingress_drops_no_buf;
/** Number of ingress packets dropped or truncated due to lack of space in
* the iPkt buffer. */
uint64_t ingress_drops_ipkt;
/** Number of ingress packets dropped by the classifier or load balancer. */
uint64_t ingress_drops_cls_lb;
/** Total number of ingress packets. */
uint64_t ingress_packets;
/** Total number of egress packets. */
uint64_t egress_packets;
/** Total number of ingress bytes. */
uint64_t ingress_bytes;
/** Total number of egress bytes. */
uint64_t egress_bytes;
}
gxio_mpipe_stats_t;
#endif /* _SYS_HV_DRV_MPIPE_INTF_H */


@@ -0,0 +1,50 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* @file drv_mshim_intf.h
* Interface definitions for the Linux EDAC memory controller driver.
*/
#ifndef _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
#define _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H
/** Number of memory controllers in the public API. */
#define TILE_MAX_MSHIMS 4
/** Memory info under each memory controller. */
struct mshim_mem_info
{
uint64_t mem_size; /**< Total memory size in bytes. */
uint8_t mem_type; /**< Memory type, DDR2 or DDR3. */
uint8_t mem_ecc; /**< Memory supports ECC. */
};
/**
* DIMM error structure.
* For now, only correctable errors are counted and the mshim doesn't record
* the error PA. The hypervisor panics on uncorrectable errors.
*/
struct mshim_mem_error
{
uint32_t sbe_count; /**< Number of single-bit errors. */
};
/** Read this offset to get the memory info per mshim. */
#define MSHIM_MEM_INFO_OFF 0x100
/** Read this offset to check DIMM error. */
#define MSHIM_MEM_ERROR_OFF 0x200
#endif /* _SYS_HV_INCLUDE_DRV_MSHIM_INTF_H */
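In kernel code these offsets are consumed through hypervisor device reads. A minimal sketch, assuming the hv_dev_pread() call and HV_VirtAddr type from the Tilera hypervisor API and an already-opened device handle (none of which this header defines):

/* Sketch only: hv_dev_pread() is assumed to return the number of
 * bytes read. */
static int read_mshim_info(int hv_devhdl, struct mshim_mem_info *info)
{
  int ret = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)info,
                         sizeof(*info), MSHIM_MEM_INFO_OFF);
  return (ret == (int)sizeof(*info)) ? 0 : -1;
}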


@@ -0,0 +1,38 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* @file drv_pcie_rc_intf.h
* Interface definitions for the PCIE Root Complex.
*/
#ifndef _SYS_HV_DRV_PCIE_RC_INTF_H
#define _SYS_HV_DRV_PCIE_RC_INTF_H
/** File offset for reading the interrupt base number used for PCIE legacy
interrupts and PLX Gen 1 requirement flag */
#define PCIE_RC_CONFIG_MASK_OFF 0
/**
* Structure used for obtaining PCIe config information, read from the PCIE
* subsystem /ctl file at initialization
*/
typedef struct pcie_rc_config
{
int intr; /**< interrupt number used for downcall */
int plx_gen1; /**< flag for PLX Gen 1 configuration */
} pcie_rc_config_t;
#endif /* _SYS_HV_DRV_PCIE_RC_INTF_H */


@@ -0,0 +1,41 @@
/*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* @file drv_srom_intf.h
* Interface definitions for the SPI Flash ROM driver.
*/
#ifndef _SYS_HV_INCLUDE_DRV_SROM_INTF_H
#define _SYS_HV_INCLUDE_DRV_SROM_INTF_H
/** Read this offset to get the total device size. */
#define SROM_TOTAL_SIZE_OFF 0xF0000000
/** Read this offset to get the device sector size. */
#define SROM_SECTOR_SIZE_OFF 0xF0000004
/** Read this offset to get the device page size. */
#define SROM_PAGE_SIZE_OFF 0xF0000008
/** Write this offset to flush any pending writes. */
#define SROM_FLUSH_OFF 0xF1000000
/** Write this offset, plus the byte offset of the start of a sector, to
* erase a sector. Any write data is ignored, but there must be at least
* one byte of write data. Only applies when the driver is in MTD mode.
*/
#define SROM_ERASE_OFF 0xF2000000
#endif /* _SYS_HV_INCLUDE_DRV_SROM_INTF_H */
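The erase offset composes with a sector's starting byte offset. A minimal sketch under the same hypervisor-API assumptions as the other drivers here (hv_dev_pwrite() and an open device handle are not defined in this header), applicable only when the driver is in MTD mode:

/* Sketch only: the write data is ignored, but at least one byte must
 * be supplied for the erase to be triggered. */
static int srom_erase_sector(int hv_devhdl, uint32_t sector_start)
{
  char dummy = 0;
  int ret = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)&dummy,
                          sizeof(dummy), SROM_ERASE_OFF + sector_start);
  return (ret == (int)sizeof(dummy)) ? 0 : -1;
}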


@@ -0,0 +1,199 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* Interface definitions for the trio driver.
*/
#ifndef _SYS_HV_DRV_TRIO_INTF_H
#define _SYS_HV_DRV_TRIO_INTF_H
#include <arch/trio.h>
/** The vendor ID for all Tilera processors. */
#define TILERA_VENDOR_ID 0x1a41
/** The device ID for the Gx36 processor. */
#define TILERA_GX36_DEV_ID 0x0200
/** Device ID for our internal bridge when running as RC. */
#define TILERA_GX36_RC_DEV_ID 0x2000
/** Maximum number of TRIO interfaces. */
#define TILEGX_NUM_TRIO 2
/** Gx36 has max 3 PCIe MACs per TRIO interface. */
#define TILEGX_TRIO_PCIES 3
/** Specify port properties for a PCIe MAC. */
struct pcie_port_property
{
/** If true, the link can be configured in PCIe root complex mode. */
uint8_t allow_rc: 1;
/** If true, the link can be configured in PCIe endpoint mode. */
uint8_t allow_ep: 1;
/** If true, the link can be configured in StreamIO mode. */
uint8_t allow_sio: 1;
/** If true, the link is allowed to support 1-lane operation. Software
* will not consider it an error if the link comes up as a x1 link. */
uint8_t allow_x1: 1;
/** If true, the link is allowed to support 2-lane operation. Software
* will not consider it an error if the link comes up as a x2 link. */
uint8_t allow_x2: 1;
/** If true, the link is allowed to support 4-lane operation. Software
* will not consider it an error if the link comes up as a x4 link. */
uint8_t allow_x4: 1;
/** If true, the link is allowed to support 8-lane operation. Software
* will not consider it an error if the link comes up as a x8 link. */
uint8_t allow_x8: 1;
/** If true, this link is connected to a device which may or may not
* be present. */
uint8_t removable: 1;
};
/** Selectors used when configuring a char stream interrupt. */
typedef enum pcie_stream_intr_config_sel_e
{
/** Interrupt configuration for memory map regions. */
MEM_MAP_SEL,
/** Interrupt configuration for push DMAs. */
PUSH_DMA_SEL,
/** Interrupt configuration for pull DMAs. */
PULL_DMA_SEL,
}
pcie_stream_intr_config_sel_t;
/** The mmap file offset (PA) of the TRIO config region. */
#define HV_TRIO_CONFIG_OFFSET \
((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_CFG << \
TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT)
/** The maximum size of the TRIO config region. */
#define HV_TRIO_CONFIG_SIZE \
(1ULL << TRIO_CFG_REGION_ADDR__REGION_SHIFT)
/** Size of the config region mapped into the client. We can't use
* TRIO_MMIO_ADDRESS_SPACE__OFFSET_WIDTH because it would require the
* kernel to allocate 4GB of VA space from the VMALLOC region, which
* has a total range of only 4GB.
*/
#define HV_TRIO_CONFIG_IOREMAP_SIZE \
((uint64_t) 1 << TRIO_CFG_REGION_ADDR__PROT_SHIFT)
/** The mmap file offset (PA) of a scatter queue region. */
#define HV_TRIO_SQ_OFFSET(queue) \
(((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_SQ << \
TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
((queue) << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT))
/** The maximum size of a scatter queue region. */
#define HV_TRIO_SQ_SIZE \
(1ULL << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT)
/** The "hardware MMIO region" of the first PIO region. */
#define HV_TRIO_FIRST_PIO_REGION 8
/** The mmap file offset (PA) of a PIO region. */
#define HV_TRIO_PIO_OFFSET(region) \
(((unsigned long long)(region) + HV_TRIO_FIRST_PIO_REGION) \
<< TRIO_PIO_REGIONS_ADDR__REGION_SHIFT)
/** The maximum size of a PIO region. */
#define HV_TRIO_PIO_SIZE (1ULL << TRIO_PIO_REGIONS_ADDR__ADDR_WIDTH)
/** The mmap file offset (PA) of a push DMA region. */
#define HV_TRIO_PUSH_DMA_OFFSET(ring) \
(((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PUSH_DMA << \
TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
((ring) << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT))
/** The mmap file offset (PA) of a pull DMA region. */
#define HV_TRIO_PULL_DMA_OFFSET(ring) \
(((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PULL_DMA << \
TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
((ring) << TRIO_PULL_DMA_REGION_ADDR__RING_SEL_SHIFT))
/** The maximum size of a DMA region. */
#define HV_TRIO_DMA_REGION_SIZE \
(1ULL << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT)
/** The mmap file offset (PA) of a Mem-Map interrupt region. */
#define HV_TRIO_MEM_MAP_INTR_OFFSET(map) \
(((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_MEM << \
TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
((map) << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT))
/** The maximum size of a Mem-Map interrupt region. */
#define HV_TRIO_MEM_MAP_INTR_SIZE \
(1ULL << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT)
/** A flag bit indicating a fixed resource allocation. */
#define HV_TRIO_ALLOC_FIXED 0x01
/** TRIO requires that all mappings have 4kB aligned start addresses. */
#define HV_TRIO_PAGE_SHIFT 12
/** TRIO requires that all mappings have 4kB aligned start addresses. */
#define HV_TRIO_PAGE_SIZE (1ull << HV_TRIO_PAGE_SHIFT)
/* Specify all PCIe port properties for a TRIO. */
struct pcie_trio_ports_property
{
struct pcie_port_property ports[TILEGX_TRIO_PCIES];
/** Set if this TRIO belongs to a Gx72 device. */
uint8_t is_gx72;
};
/* Flags indicating traffic class. */
#define HV_TRIO_FLAG_TC_SHIFT 4
#define HV_TRIO_FLAG_TC_RMASK 0xf
#define HV_TRIO_FLAG_TC(N) \
((((N) & HV_TRIO_FLAG_TC_RMASK) + 1) << HV_TRIO_FLAG_TC_SHIFT)
/* Flags indicating virtual functions. */
#define HV_TRIO_FLAG_VFUNC_SHIFT 8
#define HV_TRIO_FLAG_VFUNC_RMASK 0xff
#define HV_TRIO_FLAG_VFUNC(N) \
((((N) & HV_TRIO_FLAG_VFUNC_RMASK) + 1) << HV_TRIO_FLAG_VFUNC_SHIFT)
/* Flag indicating an ordered PIO region. */
#define HV_TRIO_PIO_FLAG_ORDERED (1 << 16)
/* Flags indicating special types of PIO regions. */
#define HV_TRIO_PIO_FLAG_SPACE_SHIFT 17
#define HV_TRIO_PIO_FLAG_SPACE_MASK (0x3 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
#define HV_TRIO_PIO_FLAG_CONFIG_SPACE (0x1 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
#define HV_TRIO_PIO_FLAG_IO_SPACE (0x2 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
#endif /* _SYS_HV_DRV_TRIO_INTF_H */
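Note the "+1" in the HV_TRIO_FLAG_TC() and HV_TRIO_FLAG_VFUNC() encoders above: a zero field means "not specified", so the encoded field is the requested number plus one. A small worked example (the values are illustrative only):

/* Traffic class 2 on virtual function 0:
 *   HV_TRIO_FLAG_TC(2)    == ((2 & 0xf) + 1) << 4  == 0x030
 *   HV_TRIO_FLAG_VFUNC(0) == ((0 & 0xff) + 1) << 8 == 0x100
 * so the combined flags word is 0x130. */
static const unsigned int example_trio_flags =
  HV_TRIO_FLAG_TC(2) | HV_TRIO_FLAG_VFUNC(0);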


@@ -0,0 +1,33 @@
/*
* Copyright 2013 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* Interface definitions for the UART driver.
*/
#ifndef _SYS_HV_DRV_UART_INTF_H
#define _SYS_HV_DRV_UART_INTF_H
#include <arch/uart.h>
/** Number of UART ports supported. */
#define TILEGX_UART_NR 2
/** The mmap file offset (PA) of the UART MMIO region. */
#define HV_UART_MMIO_OFFSET 0
/** The maximum size of the UARTs MMIO region (64K Bytes). */
#define HV_UART_MMIO_SIZE (1UL << 16)
#endif /* _SYS_HV_DRV_UART_INTF_H */


@@ -0,0 +1,39 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* Interface definitions for the USB host driver.
*/
#ifndef _SYS_HV_DRV_USB_HOST_INTF_H
#define _SYS_HV_DRV_USB_HOST_INTF_H
#include <arch/usb_host.h>
/** Offset for the EHCI register MMIO region. */
#define HV_USB_HOST_MMIO_OFFSET_EHCI ((uint64_t) USB_HOST_HCCAPBASE_REG)
/** Offset for the OHCI register MMIO region. */
#define HV_USB_HOST_MMIO_OFFSET_OHCI ((uint64_t) USB_HOST_OHCD_HC_REVISION_REG)
/** Size of the register MMIO region. This turns out to be the same for
* both EHCI and OHCI. */
#define HV_USB_HOST_MMIO_SIZE ((uint64_t) 0x1000)
/** The number of service domains supported by the USB host shim. */
#define HV_USB_HOST_NUM_SVC_DOM 1
#endif /* _SYS_HV_DRV_USB_HOST_INTF_H */


@@ -0,0 +1,300 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* @file drivers/xgbe/impl.h
* Implementation details for the NetIO library.
*/
#ifndef __DRV_XGBE_IMPL_H__
#define __DRV_XGBE_IMPL_H__
#include <hv/netio_errors.h>
#include <hv/netio_intf.h>
#include <hv/drv_xgbe_intf.h>
/** How many groups we have (log2). */
#define LOG2_NUM_GROUPS (12)
/** How many groups we have. */
#define NUM_GROUPS (1 << LOG2_NUM_GROUPS)
/** Number of output requests we'll buffer per tile. */
#define EPP_REQS_PER_TILE (32)
/** Words used in an eDMA command without checksum acceleration. */
#define EDMA_WDS_NO_CSUM 8
/** Words used in an eDMA command with checksum acceleration. */
#define EDMA_WDS_CSUM 10
/** Total available words in the eDMA command FIFO. */
#define EDMA_WDS_TOTAL 128
/*
* FIXME: These definitions are internal and should have underscores!
* NOTE: The actual numeric values here are intentional and allow us to
* optimize the concept "if small ... else if large ... else ...", by
* checking for the low bit being set, and then for non-zero.
* These are used as array indices, so they must have the values (0, 1, 2)
* in some order.
*/
#define SIZE_SMALL (1) /**< Small packet queue. */
#define SIZE_LARGE (2) /**< Large packet queue. */
#define SIZE_JUMBO (0) /**< Jumbo packet queue. */
/** The number of "SIZE_xxx" values. */
#define NETIO_NUM_SIZES 3
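/* A sketch (not part of the original header) of the dispatch pattern
 * those numeric values enable; the function itself is hypothetical. */
static inline const char *size_class_name(unsigned int size)
{
  if (size & 1)   /* SIZE_SMALL == 1: low bit set */
    return "small";
  else if (size)  /* SIZE_LARGE == 2: nonzero, low bit clear */
    return "large";
  else            /* SIZE_JUMBO == 0 */
    return "jumbo";
}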
/*
* Default numbers of packets for IPP drivers. These values are chosen
* such that CIPP1 will not overflow its L2 cache.
*/
/** The default number of small packets. */
#define NETIO_DEFAULT_SMALL_PACKETS 2750
/** The default number of large packets. */
#define NETIO_DEFAULT_LARGE_PACKETS 2500
/** The default number of jumbo packets. */
#define NETIO_DEFAULT_JUMBO_PACKETS 250
/** Log2 of the size of a memory arena. */
#define NETIO_ARENA_SHIFT 24 /* 16 MB */
/** Size of a memory arena. */
#define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT)
/** A queue of packets.
*
* This structure partially defines a queue of packets waiting to be
* processed. The queue as a whole is written to by an interrupt handler and
* read by non-interrupt code; this data structure is what's touched by the
* interrupt handler. The other part of the queue state, the read offset, is
* kept in user space, not in hypervisor space, so it is in a separate data
* structure.
*
* The read offset (__packet_receive_read in the user part of the queue
* structure) points to the next packet to be read. When the read offset is
* equal to the write offset, the queue is empty; therefore the queue must
* contain one more slot than the required maximum queue size.
*
* Here's an example of all 3 state variables and what they mean. All
* pointers move left to right.
*
* @code
*  I   I   V   V   V   V   I   I   I   I
*  0   1   2   3   4   5   6   7   8   9   10
*          ^               ^               ^
*          |               |               |
*          |               |               __last_packet_plus_one
*          |               __packet_write
*          __packet_receive_read
* @endcode
*
* This queue has 10 slots, and thus can hold 9 packets (__last_packet_plus_one
* = 10). The read pointer is at 2, and the write pointer is at 6; thus,
* there are valid, unread packets in slots 2, 3, 4, and 5. The remaining
* slots are invalid (do not contain a packet).
*/
typedef struct {
/** Byte offset of the next notify packet to be written: zero for the first
* packet on the queue, sizeof (netio_pkt_t) for the second packet on the
* queue, etc. */
volatile uint32_t __packet_write;
/** Offset of the packet after the last valid packet (i.e., when any
* pointer is incremented to this value, it wraps back to zero). */
uint32_t __last_packet_plus_one;
}
__netio_packet_queue_t;
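/* A sketch (not part of the original header) of the emptiness test
 * described above; "read_offset" stands in for the user-side
 * __packet_receive_read value, which lives outside this structure. */
static inline int
__netio_packet_queue_empty(const __netio_packet_queue_t *queue,
                           uint32_t read_offset)
{
  return queue->__packet_write == read_offset;
}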
/** A queue of buffers.
*
* This structure partially defines a queue of empty buffers which have been
* obtained via requests to the IPP. (The elements of the queue are packet
* handles, which are transformed into a full netio_pkt_t when the buffer is
* retrieved.) The queue as a whole is written to by an interrupt handler and
* read by non-interrupt code; this data structure is what's touched by the
* interrupt handler. The other parts of the queue state, the read offset and
* requested write offset, are kept in user space, not in hypervisor space, so
* they are in a separate data structure.
*
* The read offset (__buffer_read in the user part of the queue structure)
* points to the next buffer to be read. When the read offset is equal to the
* write offset, the queue is empty; therefore the queue must contain one more
* slot than the required maximum queue size.
*
* The requested write offset (__buffer_requested_write in the user part of
* the queue structure) points to the slot which will hold the next buffer we
* request from the IPP, once we get around to sending such a request. When
* the requested write offset is equal to the write offset, no requests for
* new buffers are outstanding; when the requested write offset is one greater
* than the read offset, no more requests may be sent.
*
* Note that, unlike the packet_queue, the buffer_queue places incoming
* buffers at decreasing addresses. This makes the check for "is it time to
* wrap the buffer pointer" cheaper in the assembly code which receives new
* buffers, and means that the value which defines the queue size,
* __last_buffer, is different than in the packet queue. Also, the offset
* used in the packet_queue is already scaled by the size of a packet; here we
* use unscaled slot indices for the offsets. (These differences are
* historical, and in the future it's possible that the packet_queue will look
* more like this queue.)
*
* Here's an example of all 4 state variables and what they mean. Remember:
* all pointers move right to left.
*
* @code
*  V   V   V   I   I   R   R   V   V   V
*  0   1   2   3   4   5   6   7   8   9
*          ^       ^       ^           ^
*          |       |       |           |
*          |       |       |           __last_buffer
*          |       |       __buffer_write
*          |       __buffer_requested_write
*          __buffer_read
* @endcode
*
* This queue has 10 slots, and thus can hold 9 buffers (__last_buffer = 9).
* The read pointer is at 2, and the write pointer is at 6; thus, there are
* valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write
* pointer is at 4; thus, requests have been made to the IPP for buffers which
* will be placed in slots 6 and 5 when they arrive. Finally, the remaining
* slots are invalid (do not contain a buffer).
*/
typedef struct
{
/** Ordinal number of the next buffer to be written: 0 for the first slot in
* the queue, 1 for the second slot in the queue, etc. */
volatile uint32_t __buffer_write;
/** Ordinal number of the last buffer (i.e., when any pointer is decremented
* below zero, it is reloaded with this value). */
uint32_t __last_buffer;
}
__netio_buffer_queue_t;
/**
* An object for providing Ethernet packets to a process.
*/
typedef struct __netio_queue_impl_t
{
/** The queue of packets waiting to be received. */
__netio_packet_queue_t __packet_receive_queue;
/** The intr bit mask that IDs this device. */
unsigned int __intr_id;
/** Offset to queues of empty buffers, one per size. */
uint32_t __buffer_queue[NETIO_NUM_SIZES];
/** The address of the first EPP tile, or -1 if no EPP. */
/* ISSUE: Actually this is always "0" or "~0". */
uint32_t __epp_location;
/** The queue ID that this queue represents. */
unsigned int __queue_id;
/** Number of acknowledgements received. */
volatile uint32_t __acks_received;
/** Last completion number received for packet_sendv. */
volatile uint32_t __last_completion_rcv;
/** Number of packets allowed to be outstanding. */
uint32_t __max_outstanding;
/** First VA available for packets. */
void* __va_0;
/** First VA in second range available for packets. */
void* __va_1;
/** Padding to align the "__packets" field to the size of a netio_pkt_t. */
uint32_t __padding[3];
/** The packets themselves. */
netio_pkt_t __packets[0];
}
netio_queue_impl_t;
/**
* An object for managing the user end of a NetIO queue.
*/
typedef struct __netio_queue_user_impl_t
{
/** The next incoming packet to be read. */
uint32_t __packet_receive_read;
/** The next empty buffers to be read, one index per size. */
uint8_t __buffer_read[NETIO_NUM_SIZES];
/** Where the empty buffer we next request from the IPP will go, one index
* per size. */
uint8_t __buffer_requested_write[NETIO_NUM_SIZES];
/** PCIe interface flag. */
uint8_t __pcie;
/** Number of packets left to be received before we send a credit update. */
uint32_t __receive_credit_remaining;
/** Value placed in __receive_credit_remaining when it reaches zero. */
uint32_t __receive_credit_interval;
/** First fast I/O routine index. */
uint32_t __fastio_index;
/** Number of acknowledgements expected. */
uint32_t __acks_outstanding;
/** Last completion number requested. */
uint32_t __last_completion_req;
/** File descriptor for driver. */
int __fd;
}
netio_queue_user_impl_t;
#define NETIO_GROUP_CHUNK_SIZE 64 /**< Max # groups in one IPP request */
#define NETIO_BUCKET_CHUNK_SIZE 64 /**< Max # buckets in one IPP request */
/** Internal structure used to convey packet send information to the
* hypervisor. FIXME: Actually, it's not used for that anymore, but
* netio_packet_send() still uses it internally.
*/
typedef struct
{
uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */
uint16_t transfer_size; /**< Size of packet */
uint32_t va; /**< VA of start of packet */
__netio_pkt_handle_t handle; /**< Packet handle */
uint32_t csum0; /**< First checksum word */
uint32_t csum1; /**< Second checksum word */
}
__netio_send_cmd_t;
/** Flags used in two contexts:
* - As the "flags" member in the __netio_send_cmd_t, above; used only
* for netio_pkt_send_{prepare,commit}.
* - As part of the flags passed to the various send packet fast I/O calls.
*/
/** Need acknowledgement on this packet. Note that some code in the
* normal send_pkt fast I/O handler assumes that this is equal to 1. */
#define __NETIO_SEND_FLG_ACK 0x1
/** Do checksum on this packet. (Only used with the __netio_send_cmd_t;
* normal packet sends use a special fast I/O index to denote checksumming,
* and multi-segment sends test the checksum descriptor.) */
#define __NETIO_SEND_FLG_CSUM 0x2
/** Get a completion on this packet. Only used with multi-segment sends. */
#define __NETIO_SEND_FLG_COMPLETION 0x4
/** Position of the number-of-extra-segments value in the flags word.
Only used with multi-segment sends. */
#define __NETIO_SEND_FLG_XSEG_SHIFT 3
/** Width of the number-of-extra-segments value in the flags word. */
#define __NETIO_SEND_FLG_XSEG_WIDTH 2
#endif /* __DRV_XGBE_IMPL_H__ */


@@ -0,0 +1,615 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* @file drv_xgbe_intf.h
* Interface to the hypervisor XGBE driver.
*/
#ifndef __DRV_XGBE_INTF_H__
#define __DRV_XGBE_INTF_H__
/**
* An object for forwarding VAs and PAs to the hypervisor.
* @ingroup types
*
* This allows the supervisor to specify a number of areas of memory to
* store packet buffers.
*/
typedef struct
{
/** The physical address of the memory. */
HV_PhysAddr pa;
/** Page table entry for the memory. This is only used to derive the
* memory's caching mode; the PA bits are ignored. */
HV_PTE pte;
/** The virtual address of the memory. */
HV_VirtAddr va;
/** Size (in bytes) of the memory area. */
int size;
}
netio_ipp_address_t;
/** The various pread/pwrite offsets into the hypervisor-level driver.
* @ingroup types
*/
typedef enum
{
/** Inform the Linux driver of the address of the NetIO arena memory.
* This offset is actually only used to convey information from netio
* to the Linux driver; it never makes it from there to the hypervisor.
* Write-only; takes a uint32_t specifying the VA address. */
NETIO_FIXED_ADDR = 0x5000000000000000ULL,
/** Inform the Linux driver of the size of the NetIO arena memory.
* This offset is actually only used to convey information from netio
* to the Linux driver; it never makes it from there to the hypervisor.
* Write-only; takes a uint32_t specifying the VA size. */
NETIO_FIXED_SIZE = 0x5100000000000000ULL,
/** Register current tile with IPP. Write then read: write, takes a
* netio_input_config_t, read returns a pointer to a netio_queue_impl_t. */
NETIO_IPP_INPUT_REGISTER_OFF = 0x6000000000000000ULL,
/** Unregister current tile from IPP. Write-only, takes a dummy argument. */
NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL,
/** Start packets flowing. Write-only, takes a dummy argument. */
NETIO_IPP_INPUT_INIT_OFF = 0x6200000000000000ULL,
/** Stop packets flowing. Write-only, takes a dummy argument. */
NETIO_IPP_INPUT_UNINIT_OFF = 0x6300000000000000ULL,
/** Configure group (typically we group on VLAN). Write-only: takes an
* array of netio_group_t's, low 24 bits of the offset is the base group
* number times the size of a netio_group_t. */
NETIO_IPP_INPUT_GROUP_CFG_OFF = 0x6400000000000000ULL,
/** Configure bucket. Write-only: takes an array of netio_bucket_t's, low
* 24 bits of the offset is the base bucket number times the size of a
* netio_bucket_t. */
NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL,
/** Get/set a parameter. Read or write: read or write data is the parameter
* value, low 32 bits of the offset is a __netio_getset_offset_t. */
NETIO_IPP_PARAM_OFF = 0x6600000000000000ULL,
/** Get fast I/O index. Read-only; returns a 4-byte base index value. */
NETIO_IPP_GET_FASTIO_OFF = 0x6700000000000000ULL,
/** Configure hijack IP address. Packets with this IPv4 dest address
* go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address
* in some standard form. FIXME: Define the form! */
NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL,
/**
* Offsets beyond this point are reserved for the supervisor (although that
* enforcement must be done by the supervisor driver itself).
*/
NETIO_IPP_USER_MAX_OFF = 0x6FFFFFFFFFFFFFFFULL,
/** Register I/O memory. Write-only, takes a netio_ipp_address_t. */
NETIO_IPP_IOMEM_REGISTER_OFF = 0x7000000000000000ULL,
/** Unregister I/O memory. Write-only, takes a netio_ipp_address_t. */
NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL,
/* Offsets greater than 0x7FFFFFFF can't be used directly from Linux
* userspace code due to limitations in the pread/pwrite syscalls. */
/** Drain LIPP buffers. */
NETIO_IPP_DRAIN_OFF = 0xFA00000000000000ULL,
/** Supply a netio_ipp_address_t to be used as shared memory for the
* LEPP command queue. */
NETIO_EPP_SHM_OFF = 0xFB00000000000000ULL,
/* 0xFC... is currently unused. */
/** Stop IPP/EPP tiles. Write-only, takes a dummy argument. */
NETIO_IPP_STOP_SHIM_OFF = 0xFD00000000000000ULL,
/** Start IPP/EPP tiles. Write-only, takes a dummy argument. */
NETIO_IPP_START_SHIM_OFF = 0xFE00000000000000ULL,
/** Supply packet arena. Write-only, takes an array of
* netio_ipp_address_t values. */
NETIO_IPP_ADDRESS_OFF = 0xFF00000000000000ULL,
} netio_hv_offset_t;
/** Extract the base offset from an offset */
#define NETIO_BASE_OFFSET(off) ((off) & 0xFF00000000000000ULL)
/** Extract the local offset from an offset */
#define NETIO_LOCAL_OFFSET(off) ((off) & 0x00FFFFFFFFFFFFFFULL)
/**
* Get/set offset.
*/
typedef union
{
struct
{
uint64_t addr:48; /**< Class-specific address */
unsigned int class:8; /**< Class (e.g., NETIO_PARAM) */
unsigned int opcode:8; /**< High 8 bits of NETIO_IPP_PARAM_OFF */
}
bits; /**< Bitfields */
uint64_t word; /**< Aggregated value to use as the offset */
}
__netio_getset_offset_t;
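/* A sketch (not part of the original header) of composing the offset
 * word for a parameter access; the class and address arguments are
 * hypothetical placeholders. */
static inline uint64_t __netio_make_param_offset(unsigned int cls,
                                                 uint64_t addr)
{
  __netio_getset_offset_t off;
  off.bits.addr = addr;    /* low 48 bits: class-specific address */
  off.bits.class = cls;    /* class code, e.g. NETIO_PARAM */
  off.bits.opcode = 0x66;  /* high 8 bits of NETIO_IPP_PARAM_OFF */
  return off.word;         /* use as the pread()/pwrite() offset */
}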
/**
* Fast I/O index offsets (must be contiguous).
*/
typedef enum
{
NETIO_FASTIO_ALLOCATE = 0, /**< Get empty packet buffer */
NETIO_FASTIO_FREE_BUFFER = 1, /**< Give buffer back to IPP */
NETIO_FASTIO_RETURN_CREDITS = 2, /**< Give credits to IPP */
NETIO_FASTIO_SEND_PKT_NOCK = 3, /**< Send a packet, no checksum */
NETIO_FASTIO_SEND_PKT_CK = 4, /**< Send a packet, with checksum */
NETIO_FASTIO_SEND_PKT_VEC = 5, /**< Send a vector of packets */
NETIO_FASTIO_SENDV_PKT = 6, /**< Sendv one packet */
NETIO_FASTIO_NUM_INDEX = 7, /**< Total number of fast I/O indices */
} netio_fastio_index_t;
/** 3-word return type for Fast I/O call. */
typedef struct
{
int err; /**< Error code. */
uint32_t val0; /**< Value. Meaning depends upon the specific call. */
uint32_t val1; /**< Value. Meaning depends upon the specific call. */
} netio_fastio_rv3_t;
/** 0-argument fast I/O call */
int __netio_fastio0(uint32_t fastio_index);
/** 1-argument fast I/O call */
int __netio_fastio1(uint32_t fastio_index, uint32_t arg0);
/** 3-argument fast I/O call, 3-word return value */
netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0,
uint32_t arg1, uint32_t arg2);
/** 4-argument fast I/O call */
int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
uint32_t arg2, uint32_t arg3);
/** 6-argument fast I/O call */
int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5);
/** 9-argument fast I/O call */
int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5,
uint32_t arg6, uint32_t arg7, uint32_t arg8);
/** Allocate an empty packet.
* @param fastio_index Fast I/O index.
* @param size Size of the packet to allocate.
*/
#define __netio_fastio_allocate(fastio_index, size) \
__netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size)
/** Free a buffer.
* @param fastio_index Fast I/O index.
* @param handle Handle for the packet to free.
*/
#define __netio_fastio_free_buffer(fastio_index, handle) \
__netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle)
/** Increment our receive credits.
* @param fastio_index Fast I/O index.
* @param credits Number of credits to add.
*/
#define __netio_fastio_return_credits(fastio_index, credits) \
__netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits)
/** Send packet, no checksum.
* @param fastio_index Fast I/O index.
* @param ackflag Nonzero if we want an ack.
* @param size Size of the packet.
* @param va Virtual address of start of packet.
* @param handle Packet handle.
*/
#define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \
__netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \
size, va, handle)
/** Send packet, calculate checksum.
* @param fastio_index Fast I/O index.
* @param ackflag Nonzero if we want an ack.
* @param size Size of the packet.
* @param va Virtual address of start of packet.
* @param handle Packet handle.
* @param csum0 Shim checksum header.
* @param csum1 Checksum seed.
*/
#define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \
csum0, csum1) \
__netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \
size, va, handle, csum0, csum1)
/** Format for the "csum0" argument to the __netio_fastio_send routines
* and LEPP. Note that this is currently exactly identical to the
* ShimProtocolOffloadHeader.
*/
typedef union
{
struct
{
unsigned int start_byte:7; /**< The first byte to be checksummed */
unsigned int count:14; /**< Number of bytes to be checksummed. */
unsigned int destination_byte:7; /**< The byte to write the checksum to. */
unsigned int reserved:4; /**< Reserved. */
} bits; /**< Decomposed method of access. */
unsigned int word; /**< To send out the IDN. */
} __netio_checksum_header_t;
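/* A sketch (not part of the original header) of filling in a "csum0"
 * descriptor; the byte positions are illustrative values only. */
static inline unsigned int __netio_make_csum0(void)
{
  __netio_checksum_header_t csum0;
  csum0.bits.start_byte = 34;        /* first byte to be checksummed */
  csum0.bits.count = 20;             /* number of bytes to checksum */
  csum0.bits.destination_byte = 50;  /* where the result is stored */
  csum0.bits.reserved = 0;
  return csum0.word;                 /* passed as "csum0" above */
}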
/** Sendv packet with 1 or 2 segments.
* @param fastio_index Fast I/O index.
* @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
* 1 in next 2 bits; expected checksum in high 16 bits.
* @param confno Confirmation number to request, if notify flag set.
* @param csum0 Checksum descriptor; if zero, no checksum.
* @param va_F Virtual address of first segment.
* @param va_L Virtual address of last segment, if 2 segments.
* @param len_F_L Length of first segment in low 16 bits; length of last
* segment, if 2 segments, in high 16 bits.
*/
#define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \
va_F, va_L, len_F_L) \
__netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
csum0, va_F, va_L, len_F_L)
/** Send packet on PCIe interface.
* @param fastio_index Fast I/O index.
* @param flags Ack/csum/notify flags in low 3 bits.
* @param confno Confirmation number to request, if notify flag set.
* @param csum0 Checksum descriptor; Hard wired 0, not needed for PCIe.
* @param va_F Virtual address of the packet buffer.
* @param va_L Virtual address of last segment, if 2 segments. Hard wired 0.
* @param len_F_L Length of the packet buffer in low 16 bits.
*/
#define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \
va_F, va_L, len_F_L) \
__netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \
csum0, va_F, va_L, len_F_L)
/** Sendv packet with 3 or 4 segments.
* @param fastio_index Fast I/O index.
* @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
* 1 in next 2 bits; expected checksum in high 16 bits.
* @param confno Confirmation number to request, if notify flag set.
* @param csum0 Checksum descriptor; if zero, no checksum.
* @param va_F Virtual address of first segment.
* @param va_L Virtual address of last segment (third segment if 3 segments,
* fourth segment if 4 segments).
* @param len_F_L Length of first segment in low 16 bits; length of last
* segment in high 16 bits.
* @param va_M0 Virtual address of "middle 0" segment; this segment is sent
* second when there are three segments, and third if there are four.
* @param va_M1 Virtual address of "middle 1" segment; this segment is sent
* second when there are four segments.
* @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle
* 1 segment, if 4 segments, in high 16 bits.
*/
#define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \
va_L, len_F_L, va_M0, va_M1, len_M0_M1) \
__netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1)
/** Send vector of packets.
* @param fastio_index Fast I/O index.
* @param seqno Number of packets transmitted so far on this interface;
* used to decide which packets should be acknowledged.
* @param nentries Number of entries in vector.
* @param va Virtual address of start of vector entry array.
* @return 3-word netio_fastio_rv3_t structure. The structure's err member
* is an error code, or zero if no error. The val0 member is the
* updated value of seqno; it has been incremented by 1 for each
* packet sent. That increment may be less than nentries if an
* error occurred, or if some of the entries in the vector contain
* handles equal to NETIO_PKT_HANDLE_NONE. The val1 member is the
* updated value of nentries; it has been decremented by 1 for each
* vector entry processed. Again, that decrement may be less than
* nentries (leaving the returned value positive) if an error
* occurred.
*/
#define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \
__netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \
nentries, va)
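/*
 * Editorial usage sketch, assuming a netio_fastio_rv3_t with the err,
 * val0 and val1 members described above; the wrapper itself is
 * hypothetical, not part of the original API.
 */
static inline int
__netio_example_send_vec_once(int fastio_index, unsigned int *seqno,
                              unsigned int *nentries, void *va)
{
  netio_fastio_rv3_t rv =
    __netio_fastio_send_pkt_vec(fastio_index, *seqno, *nentries, va);
  *seqno = rv.val0;    /* incremented once per packet sent */
  *nentries = rv.val1; /* decremented once per entry processed */
  return rv.err;       /* zero if no error occurred */
}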
/** An egress DMA command for LEPP. */
typedef struct
{
/** Is this a TSO transfer?
*
* NOTE: This field is always 0, to distinguish it from
* lepp_tso_cmd_t. It must come first!
*/
uint8_t tso : 1;
/** Unused padding bits. */
uint8_t _unused : 3;
/** Should this packet be sent directly from caches instead of DRAM,
* using hash-for-home to locate the packet data?
*/
uint8_t hash_for_home : 1;
/** Should we compute a checksum? */
uint8_t compute_checksum : 1;
/** Is this the final buffer for this packet?
*
* A single packet can be split over several input buffers (a "gather"
* operation). This flag indicates that this is the last buffer
* in a packet.
*/
uint8_t end_of_packet : 1;
/** Should LEPP advance 'comp_busy' when this DMA is fully finished? */
uint8_t send_completion : 1;
/** High bits of Client Physical Address of the start of the buffer
* to be egressed.
*
* NOTE: Only 6 bits are actually needed here, as CPAs are
* currently 38 bits. So two bits could be scavenged from this.
*/
uint8_t cpa_hi;
/** The number of bytes to be egressed. */
uint16_t length;
/** Low 32 bits of Client Physical Address of the start of the buffer
* to be egressed.
*/
uint32_t cpa_lo;
/** Checksum information (only used if 'compute_checksum'). */
__netio_checksum_header_t checksum_data;
} lepp_cmd_t;
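/*
 * Editorial sketch (not from the original source): populating a plain
 * single-buffer egress command.  The flag choices are illustrative.
 */
static inline lepp_cmd_t
__lepp_example_make_cmd(unsigned long long cpa, uint16_t len)
{
  lepp_cmd_t cmd = { 0 };
  cmd.tso = 0;             /* distinguishes this from lepp_tso_cmd_t */
  cmd.end_of_packet = 1;   /* single buffer, so it is the last buffer */
  cmd.send_completion = 1; /* ask LEPP to advance comp_busy when done */
  cmd.cpa_hi = (uint8_t)(cpa >> 32); /* high bits of the 38-bit CPA */
  cmd.cpa_lo = (uint32_t)cpa;        /* low 32 bits of the CPA */
  cmd.length = len;
  return cmd;
}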
/** A chunk of physical memory for a TSO egress. */
typedef struct
{
/** The low bits of the CPA. */
uint32_t cpa_lo;
/** The high bits of the CPA. */
uint16_t cpa_hi : 15;
/** Should this packet be sent directly from caches instead of DRAM,
* using hash-for-home to locate the packet data?
*/
uint16_t hash_for_home : 1;
/** The length in bytes. */
uint16_t length;
} lepp_frag_t;
/** An LEPP command that handles TSO. */
typedef struct
{
/** Is this a TSO transfer?
*
* NOTE: This field is always 1, to distinguish it from
* lepp_cmd_t. It must come first!
*/
uint8_t tso : 1;
/** Unused padding bits. */
uint8_t _unused : 7;
/** Size of the header[] array in bytes. It must be in the range
* [40, 127], which are the smallest header for a TCP packet over
* Ethernet and the maximum possible prepend size supported by
* hardware, respectively. Note that the array storage must be
* padded out to a multiple of four bytes so that the following
* LEPP command is aligned properly.
*/
uint8_t header_size;
/** Byte offset of the IP header in header[]. */
uint8_t ip_offset;
/** Byte offset of the TCP header in header[]. */
uint8_t tcp_offset;
/** The number of bytes to use for the payload of each packet,
* except of course the last one, which may not have enough bytes.
* This means that each Ethernet packet except the last will have a
* size of header_size + payload_size.
*/
uint16_t payload_size;
/** The length of the 'frags' array that follows this struct. */
uint16_t num_frags;
/** The actual frags. */
lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */];
/*
* The packet header template logically follows frags[],
* but you can't declare that in C.
*
* uint32_t header[header_size_in_words_rounded_up];
*/
} lepp_tso_cmd_t;
/** An LEPP completion ring entry. */
typedef void* lepp_comp_t;
/** Maximum number of frags for one TSO command. This is adapted from
 * Linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
* our page size of exactly 65536. We add one for a "body" fragment.
*/
#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
/** Total number of bytes needed for an lepp_tso_cmd_t. */
#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
(sizeof(lepp_tso_cmd_t) + \
(num_frags) * sizeof(lepp_frag_t) + \
(((header_size) + 3) & -4))
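/*
 * Worked example (editorial note): for num_frags == 2 and a 54-byte
 * header, this evaluates to
 *   sizeof(lepp_tso_cmd_t) + 2 * sizeof(lepp_frag_t) + 56
 * since (54 + 3) & -4 rounds the header storage up to 56 bytes.
 */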
/** The size of the lepp "cmd" queue. */
#define LEPP_CMD_QUEUE_BYTES \
(((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \
(sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t))
/** The largest possible command that can go in lepp_queue_t::cmds[]. */
#define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128)
/** The largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive).
*/
#define LEPP_CMD_LIMIT \
(LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE)
/** The maximum number of completions in an LEPP queue. */
#define LEPP_COMP_QUEUE_SIZE \
((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t))
/** Increment an index modulo the queue size. */
#define LEPP_QINC(var) \
(var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1))
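/*
 * Editorial note: __insn_mnz(a, b) yields b when a is nonzero and 0
 * otherwise, so the macro above is a branch-free form of
 *   var = (var == LEPP_COMP_QUEUE_SIZE - 1) ? 0 : var + 1;
 */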
/** A queue used to convey egress commands from the client to LEPP. */
typedef struct
{
/** Index of first completion not yet processed by user code.
* If this is equal to comp_busy, there are no such completions.
*
* NOTE: This is only read/written by the user.
*/
unsigned int comp_head;
/** Index of first completion record not yet completed.
* If this is equal to comp_tail, there are no such completions.
   * This index gets advanced (modulo LEPP_COMP_QUEUE_SIZE) whenever
* a command with the 'completion' bit set is finished.
*
* NOTE: This is only written by LEPP, only read by the user.
*/
volatile unsigned int comp_busy;
/** Index of the first empty slot in the completion ring.
* Entries from this up to but not including comp_head (in ring order)
* can be filled in with completion data.
*
* NOTE: This is only read/written by the user.
*/
unsigned int comp_tail;
/** Byte index of first command enqueued for LEPP but not yet processed.
*
* This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
*
* NOTE: LEPP advances this counter as soon as it no longer needs
* the cmds[] storage for this entry, but the transfer is not actually
* complete (i.e. the buffer pointed to by the command is no longer
* needed) until comp_busy advances.
*
* If this is equal to cmd_tail, the ring is empty.
*
* NOTE: This is only written by LEPP, only read by the user.
*/
volatile unsigned int cmd_head;
/** Byte index of first empty slot in the command ring. This field can
* be incremented up to but not equal to cmd_head (because that would
* mean the ring is empty).
*
* This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
*
* NOTE: This is read/written by the user, only read by LEPP.
*/
volatile unsigned int cmd_tail;
/** A ring of variable-sized egress DMA commands.
*
* NOTE: Only written by the user, only read by LEPP.
*/
char cmds[LEPP_CMD_QUEUE_BYTES]
__attribute__((aligned(CHIP_L2_LINE_SIZE())));
/** A ring of user completion data.
* NOTE: Only read/written by the user.
*/
lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE]
__attribute__((aligned(CHIP_L2_LINE_SIZE())));
} lepp_queue_t;
/** An internal helper function for determining the number of entries
* available in a ring buffer, given that there is one sentinel.
*/
static inline unsigned int
_lepp_num_free_slots(unsigned int head, unsigned int tail)
{
/*
* One entry is reserved for use as a sentinel, to distinguish
* "empty" from "full". So we compute
   * (head - tail - 1) % LEPP_COMP_QUEUE_SIZE, but without using a slow
   * % operation.
*/
return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0);
}
/** Returns how many new comp entries can be enqueued. */
static inline unsigned int
lepp_num_free_comp_slots(const lepp_queue_t* q)
{
return _lepp_num_free_slots(q->comp_head, q->comp_tail);
}
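/** Returns (v1 - v2) modulo LEPP_COMP_QUEUE_SIZE (editorial note: the
 * arithmetic right shift propagates the sign bit, so the queue size is
 * added back exactly when the raw difference is negative).
 */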
static inline int
lepp_qsub(int v1, int v2)
{
int delta = v1 - v2;
return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE);
}
/** FIXME: Check this from Linux, via a new "pwrite()" call. */
#define LIPP_VERSION 1
/** We use exactly two bytes of alignment padding. */
#define LIPP_PACKET_PADDING 2
/** The minimum size of a "small" buffer (including the padding). */
#define LIPP_SMALL_PACKET_SIZE 128
/*
 * NOTE: The following two values should total less than roughly
* 13582, to keep the total size used for "lipp_state_t" below 64K.
*/
/** The maximum number of "small" buffers.
* This is enough for 53 network cpus with 128 credits. Note that
* if these are exhausted, we will fall back to using large buffers.
*/
#define LIPP_SMALL_BUFFERS 6785
/** The maximum number of "large" buffers.
* This is enough for 53 network cpus with 128 credits.
*/
#define LIPP_LARGE_BUFFERS 6785
#endif /* __DRV_XGBE_INTF_H__ */

File diff suppressed because it is too large


@@ -0,0 +1,714 @@
/*
* Copyright 2012 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#ifndef _HV_IORPC_H_
#define _HV_IORPC_H_
/**
*
* Error codes and struct definitions for the IO RPC library.
*
* The hypervisor's IO RPC component provides a convenient way for
* driver authors to proxy system calls between user space, linux, and
* the hypervisor driver. The core of the system is a set of Python
* files that take ".idl" files as input and generates the following
* source code:
*
* - _rpc_call() routines for use in userspace IO libraries. These
* routines take an argument list specified in the .idl file, pack the
* arguments in to a buffer, and read or write that buffer via the
* Linux iorpc driver.
*
* - dispatch_read() and dispatch_write() routines that hypervisor
* drivers can use to implement most of their dev_pread() and
* dev_pwrite() methods. These routines decode the incoming parameter
* blob, permission check and translate parameters where appropriate,
* and then invoke a callback routine for whichever RPC call has
* arrived. The driver simply implements the set of callback
* routines.
*
* The IO RPC system also includes the Linux 'iorpc' driver, which
* proxies calls between the userspace library and the hypervisor
* driver. The Linux driver is almost entirely device agnostic; it
* watches for special flags indicating cases where a memory buffer
* address might need to be translated, etc. As a result, driver
* writers can avoid many of the problem cases related to registering
* hardware resources like memory pages or interrupts. However, the
* drivers must be careful to obey the conventions documented below in
* order to work properly with the generic Linux iorpc driver.
*
* @section iorpc_domains Service Domains
*
* All iorpc-based drivers must support a notion of service domains.
 * A service domain is essentially an application context: state
 * describing the resources that are allocated to a particular
 * application, which it may access and which (perhaps) other
 * applications may not. Drivers can support any number of service
 * domains they choose. In some cases the design is limited by the
 * number of service
* domains supported by the IO hardware; in other cases the service
* domains are a purely software concept and the driver chooses a
* maximum number of domains based on how much state memory it is
* willing to preallocate.
*
* For example, the mPIPE driver only supports as many service domains
* as are supported by the mPIPE hardware. This limitation is
* required because the hardware implements its own MMIO protection
* scheme to allow large MMIO mappings while still protecting small
* register ranges within the page that should only be accessed by the
* hypervisor.
*
* In contrast, drivers with no hardware service domain limitations
* (for instance the TRIO shim) can implement an arbitrary number of
* service domains. In these cases, each service domain is limited to
* a carefully restricted set of legal MMIO addresses if necessary to
* keep one application from corrupting another application's state.
*
* @section iorpc_conventions System Call Conventions
*
* The driver's open routine is responsible for allocating a new
* service domain for each hv_dev_open() call. By convention, the
* return value from open() should be the service domain number on
* success, or GXIO_ERR_NO_SVC_DOM if no more service domains are
* available.
*
* The implementations of hv_dev_pread() and hv_dev_pwrite() are
* responsible for validating the devhdl value passed up by the
* client. Since the device handle returned by hv_dev_open() should
* embed the positive service domain number, drivers should make sure
* that DRV_HDL2BITS(devhdl) is a legal service domain. If the client
* passes an illegal service domain number, the routine should return
* GXIO_ERR_INVAL_SVC_DOM. Once the service domain number has been
* validated, the driver can copy to/from the client buffer and call
* the dispatch_read() or dispatch_write() methods created by the RPC
* generator.
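 *
 * For instance (editorial sketch; the helper names here are
 * hypothetical, but DRV_HDL2BITS() and the error code are as
 * described above):
 *
 *   int my_dev_pread(int devhdl, void *buf, size_t len, uint64_t off)
 *   {
 *     int svc_dom = DRV_HDL2BITS(devhdl);
 *     if (!my_svc_dom_is_valid(svc_dom))
 *       return GXIO_ERR_INVAL_SVC_DOM;
 *     return my_dispatch_read(svc_dom, buf, len, off);
 *   }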
*
* The hv_dev_close() implementation should reset all service domain
* state and put the service domain back on a free list for
* reallocation by a future application. In most cases, this will
* require executing a hardware reset or drain flow and denying any
* MMIO regions that were created for the service domain.
*
* @section iorpc_data Special Data Types
*
* The .idl file syntax allows the creation of syscalls with special
* parameters that require permission checks or translations as part
* of the system call path. Because of limitations in the code
* generator, APIs are generally limited to just one of these special
* parameters per system call, and they are sometimes required to be
* the first or last parameter to the call. Special parameters
* include:
*
* @subsection iorpc_mem_buffer MEM_BUFFER
*
* The MEM_BUFFER() datatype allows user space to "register" memory
* buffers with a device. Registering memory accomplishes two tasks:
* Linux keeps track of all buffers that might be modified by a
* hardware device, and the hardware device drivers bind registered
* buffers to particular hardware resources like ingress NotifRings.
* The MEM_BUFFER() idl syntax can take extra flags like ALIGN_64KB,
* ALIGN_SELF_SIZE, and FLAGS indicating that memory buffers must have
* certain alignment or that the user should be able to pass a "memory
* flags" word specifying attributes like nt_hint or IO cache pinning.
* The parser will accept multiple MEM_BUFFER() flags.
*
* Implementations must obey the following conventions when
* registering memory buffers via the iorpc flow. These rules are a
* result of the Linux driver implementation, which needs to keep
* track of how many times a particular page has been registered with
* the hardware so that it can release the page when all those
* registrations are cleared.
*
* - Memory registrations that refer to a resource which has already
* been bound must return GXIO_ERR_ALREADY_INIT. Thus, it is an
* error to register memory twice without resetting (i.e. closing) the
* resource in between. This convention keeps the Linux driver from
* having to track which particular devices a page is bound to.
*
* - At present, a memory registration is only cleared when the
* service domain is reset. In this case, the Linux driver simply
* closes the HV device file handle and then decrements the reference
* counts of all pages that were previously registered with the
* device.
*
* - In the future, we may add a mechanism for unregistering memory.
* One possible implementation would require that the user specify
* which buffer is currently registered. The HV would then verify
* that that page was actually the one currently mapped and return
* success or failure to Linux, which would then only decrement the
* page reference count if the addresses were mapped. Another scheme
* might allow Linux to pass a token to the HV to be returned when the
* resource is unmapped.
*
* @subsection iorpc_interrupt INTERRUPT
*
* The INTERRUPT .idl datatype allows the client to bind hardware
* interrupts to a particular combination of IPI parameters - CPU, IPI
* PL, and event bit number. This data is passed via a special
* datatype so that the Linux driver can validate the CPU and PL and
* the HV generic iorpc code can translate client CPUs to real CPUs.
*
* @subsection iorpc_pollfd_setup POLLFD_SETUP
*
* The POLLFD_SETUP .idl datatype allows the client to set up hardware
* interrupt bindings which are received by Linux but which are made
* visible to user processes as state transitions on a file descriptor;
* this allows user processes to use Linux primitives, such as poll(), to
* await particular hardware events. This data is passed via a special
* datatype so that the Linux driver may recognize the pollable file
* descriptor and translate it to a set of interrupt target information,
* and so that the HV generic iorpc code can translate client CPUs to real
* CPUs.
*
* @subsection iorpc_pollfd POLLFD
*
* The POLLFD .idl datatype allows manipulation of hardware interrupt
* bindings set up via the POLLFD_SETUP datatype; common operations are
* resetting the state of the requested interrupt events, and unbinding any
* bound interrupts. This data is passed via a special datatype so that
* the Linux driver may recognize the pollable file descriptor and
* translate it to an interrupt identifier previously supplied by the
* hypervisor as the result of an earlier pollfd_setup operation.
*
* @subsection iorpc_blob BLOB
*
* The BLOB .idl datatype allows the client to write an arbitrary
* length string of bytes up to the hypervisor driver. This can be
* useful for passing up large, arbitrarily structured data like
* classifier programs. The iorpc stack takes care of validating the
* buffer VA and CPA as the data passes up to the hypervisor. Unlike
* MEM_BUFFER(), the buffer is not registered - Linux does not bump
* page refcounts and the HV driver should not reuse the buffer once
* the system call is complete.
*
* @section iorpc_translation Translating User Space Calls
*
* The ::iorpc_offset structure describes the formatting of the offset
* that is passed to pread() or pwrite() as part of the generated RPC code.
* When the user calls up to Linux, the rpc code fills in all the fields of
 * the offset, including a 16-bit opcode, a 16-bit format indicator, and 32
* bits of user-specified "sub-offset". The opcode indicates which syscall
* is being requested. The format indicates whether there is a "prefix
* struct" at the start of the memory buffer passed to pwrite(), and if so
* what data is in that prefix struct. These prefix structs are used to
* implement special datatypes like MEM_BUFFER() and INTERRUPT - we arrange
* to put data that needs translation and permission checks at the start of
* the buffer so that the Linux driver and generic portions of the HV iorpc
* code can easily access the data. The 32 bits of user-specified
* "sub-offset" are most useful for pread() calls where the user needs to
* also pass in a few bits indicating which register to read, etc.
*
* The Linux iorpc driver watches for system calls that contain prefix
* structs so that it can translate parameters and bump reference
* counts as appropriate. It does not (currently) have any knowledge
* of the per-device opcodes - it doesn't care what operation you're
* doing to mPIPE, so long as it can do all the generic book-keeping.
* The hv/iorpc.h header file defines all of the generic encoding bits
* needed to translate iorpc calls without knowing which particular
* opcode is being issued.
*
* @section iorpc_globals Global iorpc Calls
*
* Implementing mmap() required adding some special iorpc syscalls
* that are only called by the Linux driver, never by userspace.
* These include get_mmio_base() and check_mmio_offset(). These
* routines are described in globals.idl and must be included in every
* iorpc driver. By providing these routines in every driver, Linux's
* mmap implementation can easily get the PTE bits it needs and
* validate the PA offset without needing to know the per-device
* opcodes to perform those tasks.
*
* @section iorpc_kernel Supporting gxio APIs in the Kernel
*
* The iorpc code generator also supports generation of kernel code
* implementing the gxio APIs. This capability is currently used by
* the mPIPE network driver, and will likely be used by the TRIO root
* complex and endpoint drivers and perhaps an in-kernel crypto
* driver. Each driver that wants to instantiate iorpc calls in the
 * kernel needs to generate a kernel version of the generated rpc code
* and (probably) copy any related gxio source files into the kernel.
* The mPIPE driver provides a good example of this pattern.
*/
#ifdef __KERNEL__
#include <linux/stddef.h>
#else
#include <stddef.h>
#endif
#if defined(__HV__)
#include <hv/hypervisor.h>
#elif defined(__KERNEL__)
#include <hv/hypervisor.h>
#include <linux/types.h>
#else
#include <stdint.h>
#endif
/** Code indicating translation services required within the RPC path.
* These indicate whether there is a translatable struct at the start
* of the RPC buffer and what information that struct contains.
*/
enum iorpc_format_e
{
/** No translation required, no prefix struct. */
IORPC_FORMAT_NONE,
/** No translation required, no prefix struct, no access to this
* operation from user space. */
IORPC_FORMAT_NONE_NOUSER,
/** Prefix struct contains user VA and size. */
IORPC_FORMAT_USER_MEM,
/** Prefix struct contains CPA, size, and homing bits. */
IORPC_FORMAT_KERNEL_MEM,
/** Prefix struct contains interrupt. */
IORPC_FORMAT_KERNEL_INTERRUPT,
/** Prefix struct contains user-level interrupt. */
IORPC_FORMAT_USER_INTERRUPT,
/** Prefix struct contains pollfd_setup (interrupt information). */
IORPC_FORMAT_KERNEL_POLLFD_SETUP,
/** Prefix struct contains user-level pollfd_setup (file descriptor). */
IORPC_FORMAT_USER_POLLFD_SETUP,
/** Prefix struct contains pollfd (interrupt cookie). */
IORPC_FORMAT_KERNEL_POLLFD,
/** Prefix struct contains user-level pollfd (file descriptor). */
IORPC_FORMAT_USER_POLLFD,
};
/** Generate an opcode given format and code. */
#define IORPC_OPCODE(FORMAT, CODE) (((FORMAT) << 16) | (CODE))
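/*
 * Editorial example: a hypothetical driver RPC with code 0x17 whose
 * pwrite() buffer begins with a kernel-memory prefix struct could
 * define its opcode as
 *
 *   #define MY_DRIVER_OP_REG_MEM IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x17)
 *
 * (the name and code number are illustrative only).
 */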
/** The offset passed through the read() and write() system calls
combines an opcode with 32 bits of user-specified offset. */
union iorpc_offset
{
#ifndef __BIG_ENDIAN__
uint64_t offset; /**< All bits. */
struct
{
uint16_t code; /**< RPC code. */
uint16_t format; /**< iorpc_format_e */
uint32_t sub_offset; /**< caller-specified offset. */
};
uint32_t opcode; /**< Opcode combines code & format. */
#else
uint64_t offset; /**< All bits. */
struct
{
uint32_t sub_offset; /**< caller-specified offset. */
uint16_t format; /**< iorpc_format_e */
uint16_t code; /**< RPC code. */
};
struct
{
uint32_t padding;
uint32_t opcode; /**< Opcode combines code & format. */
};
#endif
};
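/*
 * Editorial sketch: unpacking the pread()/pwrite() offset into its
 * parts, as a dispatch routine might; the function itself is
 * hypothetical, but the field layout is the union defined above.
 */
static inline void
iorpc_example_decode_offset(uint64_t offset, uint16_t *code,
                            uint16_t *format, uint32_t *sub_offset)
{
  union iorpc_offset o;
  o.offset = offset;
  *code = o.code;             /* which syscall is being requested */
  *format = o.format;         /* iorpc_format_e prefix indicator */
  *sub_offset = o.sub_offset; /* caller-specified 32-bit offset */
}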
/** Homing and cache hinting bits that can be used by IO devices. */
struct iorpc_mem_attr
{
unsigned int lotar_x:4; /**< lotar X bits (or Gx page_mask). */
unsigned int lotar_y:4; /**< lotar Y bits (or Gx page_offset). */
unsigned int hfh:1; /**< Uses hash-for-home. */
unsigned int nt_hint:1; /**< Non-temporal hint. */
unsigned int io_pin:1; /**< Only fill 'IO' cache ways. */
};
/** Set the nt_hint bit. */
#define IORPC_MEM_BUFFER_FLAG_NT_HINT (1 << 0)
/** Set the IO pin bit. */
#define IORPC_MEM_BUFFER_FLAG_IO_PIN (1 << 1)
/** A structure used to describe memory registration. Different
protection levels describe memory differently, so this union
contains all the different possible descriptions. As a request
moves up the call chain, each layer translates from one
description format to the next. In particular, the Linux iorpc
driver translates user VAs into CPAs and homing parameters. */
union iorpc_mem_buffer
{
struct
{
uint64_t va; /**< User virtual address. */
uint64_t size; /**< Buffer size. */
unsigned int flags; /**< nt_hint, IO pin. */
}
user; /**< Buffer as described by user apps. */
struct
{
unsigned long long cpa; /**< Client physical address. */
#if defined(__KERNEL__) || defined(__HV__)
size_t size; /**< Buffer size. */
HV_PTE pte; /**< PTE describing memory homing. */
#else
uint64_t size;
uint64_t pte;
#endif
unsigned int flags; /**< nt_hint, IO pin. */
}
kernel; /**< Buffer as described by kernel. */
struct
{
unsigned long long pa; /**< Physical address. */
size_t size; /**< Buffer size. */
struct iorpc_mem_attr attr; /**< Homing and locality hint bits. */
}
hv; /**< Buffer parameters for HV driver. */
};
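/*
 * Editorial sketch: how a userspace library might fill in the "user"
 * view of this union before passing it up for registration.  The flag
 * choice is illustrative only.
 */
static inline union iorpc_mem_buffer
iorpc_example_user_buffer(uint64_t va, uint64_t size)
{
  union iorpc_mem_buffer buf;
  buf.user.va = va;       /* user VA; Linux translates this to a CPA */
  buf.user.size = size;
  buf.user.flags = IORPC_MEM_BUFFER_FLAG_NT_HINT;
  return buf;
}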
/** A structure used to describe interrupts. The format differs slightly
* for user and kernel interrupts. As with the mem_buffer_t, translation
* between the formats is done at each level. */
union iorpc_interrupt
{
struct
{
int cpu; /**< CPU. */
int event; /**< evt_num */
}
user; /**< Interrupt as described by user applications. */
struct
{
int x; /**< X coord. */
int y; /**< Y coord. */
int ipi; /**< int_num */
int event; /**< evt_num */
}
kernel; /**< Interrupt as described by the kernel. */
};
/** A structure used to describe interrupts used with poll(). The format
* differs significantly for requests from user to kernel, and kernel to
* hypervisor. As with the mem_buffer_t, translation between the formats
* is done at each level. */
union iorpc_pollfd_setup
{
struct
{
int fd; /**< Pollable file descriptor. */
}
user; /**< pollfd_setup as described by user applications. */
struct
{
int x; /**< X coord. */
int y; /**< Y coord. */
int ipi; /**< int_num */
int event; /**< evt_num */
}
kernel; /**< pollfd_setup as described by the kernel. */
};
/** A structure used to describe previously set up interrupts used with
* poll(). The format differs significantly for requests from user to
* kernel, and kernel to hypervisor. As with the mem_buffer_t, translation
* between the formats is done at each level. */
union iorpc_pollfd
{
struct
{
int fd; /**< Pollable file descriptor. */
}
user; /**< pollfd as described by user applications. */
struct
{
int cookie; /**< hv cookie returned by the pollfd_setup operation. */
}
kernel; /**< pollfd as described by the kernel. */
};
/** The various iorpc devices use error codes from -1100 to -1299.
*
* This range is distinct from netio (-700 to -799), the hypervisor
* (-800 to -899), tilepci (-900 to -999), ilib (-1000 to -1099),
* gxcr (-1300 to -1399) and gxpci (-1400 to -1499).
*/
enum gxio_err_e {
/** Largest iorpc error number. */
GXIO_ERR_MAX = -1101,
/********************************************************/
/* Generic Error Codes */
/********************************************************/
/** Bad RPC opcode - possible version incompatibility. */
GXIO_ERR_OPCODE = -1101,
/** Invalid parameter. */
GXIO_ERR_INVAL = -1102,
/** Memory buffer did not meet alignment requirements. */
GXIO_ERR_ALIGNMENT = -1103,
/** Memory buffers must be coherent and cacheable. */
GXIO_ERR_COHERENCE = -1104,
/** Resource already initialized. */
GXIO_ERR_ALREADY_INIT = -1105,
/** No service domains available. */
GXIO_ERR_NO_SVC_DOM = -1106,
/** Illegal service domain number. */
GXIO_ERR_INVAL_SVC_DOM = -1107,
/** Illegal MMIO address. */
GXIO_ERR_MMIO_ADDRESS = -1108,
/** Illegal interrupt binding. */
GXIO_ERR_INTERRUPT = -1109,
/** Unreasonable client memory. */
GXIO_ERR_CLIENT_MEMORY = -1110,
/** No more IOTLB entries. */
GXIO_ERR_IOTLB_ENTRY = -1111,
/** Invalid memory size. */
GXIO_ERR_INVAL_MEMORY_SIZE = -1112,
/** Unsupported operation. */
GXIO_ERR_UNSUPPORTED_OP = -1113,
/** Insufficient DMA credits. */
GXIO_ERR_DMA_CREDITS = -1114,
/** Operation timed out. */
GXIO_ERR_TIMEOUT = -1115,
/** No such device or object. */
GXIO_ERR_NO_DEVICE = -1116,
/** Device or resource busy. */
GXIO_ERR_BUSY = -1117,
/** I/O error. */
GXIO_ERR_IO = -1118,
/** Permissions error. */
GXIO_ERR_PERM = -1119,
/********************************************************/
/* Test Device Error Codes */
/********************************************************/
/** Illegal register number. */
GXIO_TEST_ERR_REG_NUMBER = -1120,
/** Illegal buffer slot. */
GXIO_TEST_ERR_BUFFER_SLOT = -1121,
/********************************************************/
/* MPIPE Error Codes */
/********************************************************/
/** Invalid buffer size. */
GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE = -1131,
/** Cannot allocate buffer stack. */
GXIO_MPIPE_ERR_NO_BUFFER_STACK = -1140,
/** Invalid buffer stack number. */
GXIO_MPIPE_ERR_BAD_BUFFER_STACK = -1141,
/** Cannot allocate NotifRing. */
GXIO_MPIPE_ERR_NO_NOTIF_RING = -1142,
/** Invalid NotifRing number. */
GXIO_MPIPE_ERR_BAD_NOTIF_RING = -1143,
/** Cannot allocate NotifGroup. */
GXIO_MPIPE_ERR_NO_NOTIF_GROUP = -1144,
/** Invalid NotifGroup number. */
GXIO_MPIPE_ERR_BAD_NOTIF_GROUP = -1145,
/** Cannot allocate bucket. */
GXIO_MPIPE_ERR_NO_BUCKET = -1146,
/** Invalid bucket number. */
GXIO_MPIPE_ERR_BAD_BUCKET = -1147,
/** Cannot allocate eDMA ring. */
GXIO_MPIPE_ERR_NO_EDMA_RING = -1148,
/** Invalid eDMA ring number. */
GXIO_MPIPE_ERR_BAD_EDMA_RING = -1149,
/** Invalid channel number. */
GXIO_MPIPE_ERR_BAD_CHANNEL = -1150,
/** Bad configuration. */
GXIO_MPIPE_ERR_BAD_CONFIG = -1151,
/** Empty iqueue. */
GXIO_MPIPE_ERR_IQUEUE_EMPTY = -1152,
/** Empty rules. */
GXIO_MPIPE_ERR_RULES_EMPTY = -1160,
/** Full rules. */
GXIO_MPIPE_ERR_RULES_FULL = -1161,
/** Corrupt rules. */
GXIO_MPIPE_ERR_RULES_CORRUPT = -1162,
/** Invalid rules. */
GXIO_MPIPE_ERR_RULES_INVALID = -1163,
/** Classifier is too big. */
GXIO_MPIPE_ERR_CLASSIFIER_TOO_BIG = -1170,
/** Classifier is too complex. */
GXIO_MPIPE_ERR_CLASSIFIER_TOO_COMPLEX = -1171,
/** Classifier has bad header. */
GXIO_MPIPE_ERR_CLASSIFIER_BAD_HEADER = -1172,
/** Classifier has bad contents. */
GXIO_MPIPE_ERR_CLASSIFIER_BAD_CONTENTS = -1173,
/** Classifier encountered invalid symbol. */
GXIO_MPIPE_ERR_CLASSIFIER_INVAL_SYMBOL = -1174,
/** Classifier encountered invalid bounds. */
GXIO_MPIPE_ERR_CLASSIFIER_INVAL_BOUNDS = -1175,
/** Classifier encountered invalid relocation. */
GXIO_MPIPE_ERR_CLASSIFIER_INVAL_RELOCATION = -1176,
/** Classifier encountered undefined symbol. */
GXIO_MPIPE_ERR_CLASSIFIER_UNDEF_SYMBOL = -1177,
/********************************************************/
/* TRIO Error Codes */
/********************************************************/
/** Cannot allocate memory map region. */
GXIO_TRIO_ERR_NO_MEMORY_MAP = -1180,
/** Invalid memory map region number. */
GXIO_TRIO_ERR_BAD_MEMORY_MAP = -1181,
/** Cannot allocate scatter queue. */
GXIO_TRIO_ERR_NO_SCATTER_QUEUE = -1182,
/** Invalid scatter queue number. */
GXIO_TRIO_ERR_BAD_SCATTER_QUEUE = -1183,
/** Cannot allocate push DMA ring. */
GXIO_TRIO_ERR_NO_PUSH_DMA_RING = -1184,
/** Invalid push DMA ring index. */
GXIO_TRIO_ERR_BAD_PUSH_DMA_RING = -1185,
/** Cannot allocate pull DMA ring. */
GXIO_TRIO_ERR_NO_PULL_DMA_RING = -1186,
/** Invalid pull DMA ring index. */
GXIO_TRIO_ERR_BAD_PULL_DMA_RING = -1187,
/** Cannot allocate PIO region. */
GXIO_TRIO_ERR_NO_PIO = -1188,
/** Invalid PIO region index. */
GXIO_TRIO_ERR_BAD_PIO = -1189,
/** Cannot allocate ASID. */
GXIO_TRIO_ERR_NO_ASID = -1190,
/** Invalid ASID. */
GXIO_TRIO_ERR_BAD_ASID = -1191,
/********************************************************/
/* MICA Error Codes */
/********************************************************/
/** No such accelerator type. */
GXIO_MICA_ERR_BAD_ACCEL_TYPE = -1220,
/** Cannot allocate context. */
GXIO_MICA_ERR_NO_CONTEXT = -1221,
/** PKA command queue is full, can't add another command. */
GXIO_MICA_ERR_PKA_CMD_QUEUE_FULL = -1222,
/** PKA result queue is empty, can't get a result from the queue. */
GXIO_MICA_ERR_PKA_RESULT_QUEUE_EMPTY = -1223,
/********************************************************/
/* GPIO Error Codes */
/********************************************************/
/** Pin not available. Either the physical pin does not exist, or
* it is reserved by the hypervisor for system usage. */
GXIO_GPIO_ERR_PIN_UNAVAILABLE = -1240,
/** Pin busy. The pin exists, and is available for use via GXIO, but
* it has been attached by some other process or driver. */
GXIO_GPIO_ERR_PIN_BUSY = -1241,
/** Cannot access unattached pin. One or more of the pins being
* manipulated by this call are not attached to the requesting
* context. */
GXIO_GPIO_ERR_PIN_UNATTACHED = -1242,
/** Invalid I/O mode for pin. The wiring of the pin in the system
* is such that the I/O mode or electrical control parameters
* requested could cause damage. */
GXIO_GPIO_ERR_PIN_INVALID_MODE = -1243,
/** Smallest iorpc error number. */
GXIO_ERR_MIN = -1299
};
#endif /* !_HV_IORPC_H_ */


@@ -0,0 +1,122 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* Error codes returned from NetIO routines.
*/
#ifndef __NETIO_ERRORS_H__
#define __NETIO_ERRORS_H__
/**
* @addtogroup error
*
* @brief The error codes returned by NetIO functions.
*
* NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and
* a negative value if an error occurs.
*
 * In cases where a NetIO function failed due to an error reported by
* system libraries, the error code will be the negation of the
* system errno at the time of failure. The @ref netio_strerror()
* function will deliver error strings for both NetIO and system error
* codes.
*
* @{
*/
/** The set of all NetIO errors. */
typedef enum
{
/** Operation successfully completed. */
NETIO_NO_ERROR = 0,
/** A packet was successfully retrieved from an input queue. */
NETIO_PKT = 0,
/** Largest NetIO error number. */
NETIO_ERR_MAX = -701,
/** The tile is not registered with the IPP. */
NETIO_NOT_REGISTERED = -701,
/** No packet was available to retrieve from the input queue. */
NETIO_NOPKT = -702,
/** The requested function is not implemented. */
NETIO_NOT_IMPLEMENTED = -703,
/** On a registration operation, the target queue already has the maximum
* number of tiles registered for it, and no more may be added. On a
* packet send operation, the output queue is full and nothing more can
* be queued until some of the queued packets are actually transmitted. */
NETIO_QUEUE_FULL = -704,
/** The calling process or thread is not bound to exactly one CPU. */
NETIO_BAD_AFFINITY = -705,
/** Cannot allocate memory on requested controllers. */
NETIO_CANNOT_HOME = -706,
/** On a registration operation, the IPP specified is not configured
* to support the options requested; for instance, the application
* wants a specific type of tagged headers which the configured IPP
* doesn't support. Or, the supplied configuration information is
* not self-consistent, or is out of range; for instance, specifying
* both NETIO_RECV and NETIO_NO_RECV, or asking for more than
* NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket
* configure operation, the number of items, or the base item, was
* out of range.
*/
NETIO_BAD_CONFIG = -707,
/** Too many tiles have registered to transmit packets. */
NETIO_TOOMANY_XMIT = -708,
/** Packet transmission was attempted on a queue which was registered
with transmit disabled. */
NETIO_UNREG_XMIT = -709,
/** This tile is already registered with the IPP. */
NETIO_ALREADY_REGISTERED = -710,
/** The Ethernet link is down. The application should try again later. */
NETIO_LINK_DOWN = -711,
/** An invalid memory buffer has been specified. This may be an unmapped
* virtual address, or one which does not meet alignment requirements.
* For netio_input_register(), this error may be returned when multiple
* processes specify different memory regions to be used for NetIO
* buffers. That can happen if these processes specify explicit memory
* regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init()
* has not been called by a common ancestor of the processes.
*/
NETIO_FAULT = -712,
/** Cannot combine user-managed shared memory and cache coherence. */
NETIO_BAD_CACHE_CONFIG = -713,
/** Smallest NetIO error number. */
NETIO_ERR_MIN = -713,
#ifndef __DOXYGEN__
/** Used internally to mean that no response is needed; never returned to
* an application. */
NETIO_NO_RESPONSE = 1
#endif
} netio_error_t;
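/*
 * Editorial usage sketch; netio_input_register() and netio_strerror()
 * are referenced above but not declared in this header, so their
 * signatures here are assumptions:
 *
 *   netio_error_t err = netio_input_register(&queue, &config);
 *   if (err != NETIO_NO_ERROR)
 *     printf("NetIO error: %s\n", netio_strerror(err));
 */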
/** @} */
#endif /* __NETIO_ERRORS_H__ */

File diff suppressed because it is too large


@@ -0,0 +1,42 @@
/*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
/**
* @file syscall.h
* Indices for the hypervisor system calls that are intended to be called
* directly, rather than only through hypervisor-generated "glue" code.
*/
#ifndef _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H
#define _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H
/** Fast syscall flag bit location. When this bit is set, the hypervisor
* handles the syscall specially.
*/
#define HV_SYS_FAST_SHIFT 14
/** Fast syscall flag bit mask. */
#define HV_SYS_FAST_MASK (1 << HV_SYS_FAST_SHIFT)
/** Bit location for flagging fast syscalls that can be called from PL0. */
#define HV_SYS_FAST_PL0_SHIFT 13
/** Fast syscall allowing PL0 bit mask. */
#define HV_SYS_FAST_PL0_MASK (1 << HV_SYS_FAST_PL0_SHIFT)
/** Perform an MF that waits for all victims to reach DRAM. */
#define HV_SYS_fence_incoherent (51 | HV_SYS_FAST_MASK \
| HV_SYS_FAST_PL0_MASK)
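/*
 * Worked example (editorial note): with the shift values above,
 * HV_SYS_fence_incoherent == 51 | (1 << 14) | (1 << 13) == 0x6033.
 */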
#endif /* !_SYS_HV_INCLUDE_SYSCALL_PUBLIC_H */