Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

drivers/thunderbolt/Kconfig Normal file

@@ -0,0 +1,13 @@
menuconfig THUNDERBOLT
tristate "Thunderbolt support for Apple devices"
depends on PCI
select CRC32
help
Cactus Ridge Thunderbolt Controller driver
This driver is required if you want to hotplug Thunderbolt devices on
Apple hardware.
Device chaining is currently not supported.
To compile this driver as a module, choose M here. The module will be
called thunderbolt.

drivers/thunderbolt/Makefile Normal file

@@ -0,0 +1,3 @@
obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o

drivers/thunderbolt/cap.c Normal file

@@ -0,0 +1,116 @@
/*
* Thunderbolt Cactus Ridge driver - capabilities lookup
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include "tb.h"
struct tb_cap_any {
union {
struct tb_cap_basic basic;
struct tb_cap_extended_short extended_short;
struct tb_cap_extended_long extended_long;
};
} __packed;
static bool tb_cap_is_basic(struct tb_cap_any *cap)
{
/* basic.cap is u8. This checks only the lower 8 bits of cap. */
return cap->basic.cap != 5;
}
static bool tb_cap_is_long(struct tb_cap_any *cap)
{
return !tb_cap_is_basic(cap)
&& cap->extended_short.next == 0
&& cap->extended_short.length == 0;
}
static enum tb_cap tb_cap(struct tb_cap_any *cap)
{
if (tb_cap_is_basic(cap))
return cap->basic.cap;
else
/* extended_short/long have cap at the same offset. */
return cap->extended_short.cap;
}
static u32 tb_cap_next(struct tb_cap_any *cap, u32 offset)
{
int next;
if (offset == 1) {
/*
* The first pointer is part of the switch header and always
* a simple pointer.
*/
next = cap->basic.next;
} else {
/*
* Somehow Intel decided to use 3 different types of capability
* headers. It is not like anyone could have predicted that
* single byte offsets are not enough...
*/
if (tb_cap_is_basic(cap))
next = cap->basic.next;
else if (!tb_cap_is_long(cap))
next = cap->extended_short.next;
else
next = cap->extended_long.next;
}
/*
* "Hey, we could terminate some capability lists with a null offset
* and others with a pointer to the last element." - "Great idea!"
*/
if (next == offset)
return 0;
return next;
}
/**
* tb_find_cap() - find a capability
*
* Return: Returns a positive offset if the capability was found and 0 if not.
* Returns an error code on failure.
*/
int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap)
{
u32 offset = 1;
struct tb_cap_any header;
int res;
int retries = 10;
while (retries--) {
res = tb_port_read(port, &header, space, offset, 1);
if (res) {
/* Intel needs some help with linked lists. */
if (space == TB_CFG_PORT && offset == 0xa
&& port->config.type == TB_TYPE_DP_HDMI_OUT) {
offset = 0x39;
continue;
}
return res;
}
if (offset != 1) {
if (tb_cap(&header) == cap)
return offset;
if (tb_cap_is_long(&header)) {
/* tb_cap_extended_long is 2 dwords */
res = tb_port_read(port, &header, space,
offset, 2);
if (res)
return res;
}
}
offset = tb_cap_next(&header, offset);
if (!offset)
return 0;
}
tb_port_WARN(port,
"ran out of retries while looking for cap %#x in config space %d, last offset: %#x\n",
cap, space, offset);
return -EIO;
}

drivers/thunderbolt/ctl.c Normal file

@@ -0,0 +1,731 @@
/*
* Thunderbolt Cactus Ridge driver - control channel and configuration commands
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
#include "ctl.h"
struct ctl_pkg {
struct tb_ctl *ctl;
void *buffer;
struct ring_frame frame;
};
#define TB_CTL_RX_PKG_COUNT 10
/**
* struct tb_ctl - thunderbolt control channel
*/
struct tb_ctl {
struct tb_nhi *nhi;
struct tb_ring *tx;
struct tb_ring *rx;
struct dma_pool *frame_pool;
struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16);
struct completion response_ready;
hotplug_cb callback;
void *callback_data;
};
#define tb_ctl_WARN(ctl, format, arg...) \
dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
#define tb_ctl_err(ctl, format, arg...) \
dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
#define tb_ctl_warn(ctl, format, arg...) \
dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
#define tb_ctl_info(ctl, format, arg...) \
dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
/* configuration packets definitions */
enum tb_cfg_pkg_type {
TB_CFG_PKG_READ = 1,
TB_CFG_PKG_WRITE = 2,
TB_CFG_PKG_ERROR = 3,
TB_CFG_PKG_NOTIFY_ACK = 4,
TB_CFG_PKG_EVENT = 5,
TB_CFG_PKG_XDOMAIN_REQ = 6,
TB_CFG_PKG_XDOMAIN_RESP = 7,
TB_CFG_PKG_OVERRIDE = 8,
TB_CFG_PKG_RESET = 9,
TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
};
/* common header */
struct tb_cfg_header {
u32 route_hi:22;
u32 unknown:10; /* highest order bit is set on replies */
u32 route_lo;
} __packed;
/* additional header for read/write packets */
struct tb_cfg_address {
u32 offset:13; /* in dwords */
u32 length:6; /* in dwords */
u32 port:6;
enum tb_cfg_space space:2;
u32 seq:2; /* sequence number */
u32 zero:3;
} __packed;
/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
struct cfg_read_pkg {
struct tb_cfg_header header;
struct tb_cfg_address addr;
} __packed;
/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
struct cfg_write_pkg {
struct tb_cfg_header header;
struct tb_cfg_address addr;
u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
} __packed;
/* TB_CFG_PKG_ERROR */
struct cfg_error_pkg {
struct tb_cfg_header header;
enum tb_cfg_error error:4;
u32 zero1:4;
u32 port:6;
u32 zero2:2; /* Both should be zero, yet they are separate fields. */
u32 zero3:16;
} __packed;
/* TB_CFG_PKG_EVENT */
struct cfg_event_pkg {
struct tb_cfg_header header;
u32 port:6;
u32 zero:25;
bool unplug:1;
} __packed;
/* TB_CFG_PKG_RESET */
struct cfg_reset_pkg {
struct tb_cfg_header header;
} __packed;
/* TB_CFG_PKG_PREPARE_TO_SLEEP */
struct cfg_pts_pkg {
struct tb_cfg_header header;
u32 data;
} __packed;
/* utility functions */
static u64 get_route(struct tb_cfg_header header)
{
return (u64) header.route_hi << 32 | header.route_lo;
}
static struct tb_cfg_header make_header(u64 route)
{
struct tb_cfg_header header = {
.route_hi = route >> 32,
.route_lo = route,
};
/* check for overflow, route_hi is not 32 bits! */
WARN_ON(get_route(header) != route);
return header;
}
static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
u64 route)
{
struct tb_cfg_header *header = pkg->buffer;
/* check frame, TODO: frame flags */
if (WARN(len != pkg->frame.size,
"wrong framesize (expected %#x, got %#x)\n",
len, pkg->frame.size))
return -EIO;
if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
type, pkg->frame.eof))
return -EIO;
if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
pkg->frame.sof))
return -EIO;
/* check header */
if (WARN(header->unknown != 1 << 9,
"header->unknown is %#x\n", header->unknown))
return -EIO;
if (WARN(route != get_route(*header),
"wrong route (expected %llx, got %llx)",
route, get_route(*header)))
return -EIO;
return 0;
}
static int check_config_address(struct tb_cfg_address addr,
enum tb_cfg_space space, u32 offset,
u32 length)
{
if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
return -EIO;
if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
space, addr.space))
return -EIO;
if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
offset, addr.offset))
return -EIO;
if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
length, addr.length))
return -EIO;
if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
return -EIO;
/*
* We cannot check addr->port as it is set to the upstream port of the
* sender.
*/
return 0;
}
static struct tb_cfg_result decode_error(struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
struct tb_cfg_result res = { 0 };
res.response_route = get_route(pkg->header);
res.response_port = 0;
res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
get_route(pkg->header));
if (res.err)
return res;
WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
res.err = 1;
res.tb_error = pkg->error;
res.response_port = pkg->port;
return res;
}
static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
enum tb_cfg_pkg_type type, u64 route)
{
struct tb_cfg_header *header = pkg->buffer;
struct tb_cfg_result res = { 0 };
if (pkg->frame.eof == TB_CFG_PKG_ERROR)
return decode_error(pkg);
res.response_port = 0; /* will be updated later for cfg_read/write */
res.response_route = get_route(*header);
res.err = check_header(pkg, len, type, route);
return res;
}
static void tb_cfg_print_error(struct tb_ctl *ctl,
const struct tb_cfg_result *res)
{
WARN_ON(res->err != 1);
switch (res->tb_error) {
case TB_CFG_ERROR_PORT_NOT_CONNECTED:
/* Port is not connected. This can happen during surprise
* removal. Do not warn. */
return;
case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
/*
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
tb_ctl_WARN(ctl,
"CFG_ERROR(%llx:%x): Invalid config space of offset\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
* - The route contains a non-existent port.
* - The route contains a non-PHY port (e.g. PCIe).
* - The port in cfg_read/cfg_write does not exist.
*/
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_LOOP:
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
res->response_route, res->response_port);
return;
default:
/* 5,6,7,9 and 11 are also valid error codes */
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
res->response_route, res->response_port);
return;
}
}
static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
{
int i;
for (i = 0; i < len; i++)
dst[i] = cpu_to_be32(src[i]);
}
static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
{
int i;
for (i = 0; i < len; i++)
dst[i] = be32_to_cpu(src[i]);
}
static __be32 tb_crc(void *data, size_t len)
{
return cpu_to_be32(~__crc32c_le(~0, data, len));
}
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
if (pkg) {
dma_pool_free(pkg->ctl->frame_pool,
pkg->buffer, pkg->frame.buffer_phy);
kfree(pkg);
}
}
static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
if (!pkg)
return NULL;
pkg->ctl = ctl;
pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
&pkg->frame.buffer_phy);
if (!pkg->buffer) {
kfree(pkg);
return NULL;
}
return pkg;
}
/* RX/TX handling */
static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
tb_ctl_pkg_free(pkg);
}
/**
* tb_ctl_tx() - transmit a packet on the control channel
*
* len must be a multiple of four.
*
* Return: Returns 0 on success or an error code on failure.
*/
static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
enum tb_cfg_pkg_type type)
{
int res;
struct ctl_pkg *pkg;
if (len % 4 != 0) { /* required for le->be conversion */
tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
return -EINVAL;
}
if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
len, TB_FRAME_SIZE - 4);
return -EINVAL;
}
pkg = tb_ctl_pkg_alloc(ctl);
if (!pkg)
return -ENOMEM;
pkg->frame.callback = tb_ctl_tx_callback;
pkg->frame.size = len + 4;
pkg->frame.sof = type;
pkg->frame.eof = type;
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
res = ring_tx(ctl->tx, &pkg->frame);
if (res) /* ring is stopped */
tb_ctl_pkg_free(pkg);
return res;
}
/**
* tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback
*/
static void tb_ctl_handle_plug_event(struct tb_ctl *ctl,
struct ctl_pkg *response)
{
struct cfg_event_pkg *pkg = response->buffer;
u64 route = get_route(pkg->header);
if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) {
tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n");
return;
}
if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT))
tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n",
route, pkg->port);
WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero);
ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug);
}
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
ring_rx(pkg->ctl->rx, &pkg->frame); /*
* We ignore failures during stop.
* All rx packets are referenced
* from ctl->rx_packets, so we do
* not loose them.
*/
}
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
if (canceled)
return; /*
* ring is stopped, packet is referenced from
* ctl->rx_packets.
*/
if (frame->size < 4 || frame->size % 4 != 0) {
tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
frame->size);
goto rx;
}
frame->size -= 4; /* remove checksum */
if (*(__be32 *) (pkg->buffer + frame->size)
!= tb_crc(pkg->buffer, frame->size)) {
tb_ctl_err(pkg->ctl,
"RX: checksum mismatch, dropping packet\n");
goto rx;
}
be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
if (frame->eof == TB_CFG_PKG_EVENT) {
tb_ctl_handle_plug_event(pkg->ctl, pkg);
goto rx;
}
if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) {
tb_ctl_err(pkg->ctl, "RX: fifo is full\n");
goto rx;
}
complete(&pkg->ctl->response_ready);
return;
rx:
tb_ctl_rx_submit(pkg);
}
/**
* tb_ctl_rx() - receive a packet from the control channel
*/
static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
size_t length, int timeout_msec,
u64 route, enum tb_cfg_pkg_type type)
{
struct tb_cfg_result res;
struct ctl_pkg *pkg;
if (!wait_for_completion_timeout(&ctl->response_ready,
msecs_to_jiffies(timeout_msec))) {
tb_ctl_WARN(ctl, "RX: timeout\n");
return (struct tb_cfg_result) { .err = -ETIMEDOUT };
}
if (!kfifo_get(&ctl->response_fifo, &pkg)) {
tb_ctl_WARN(ctl, "empty kfifo\n");
return (struct tb_cfg_result) { .err = -EIO };
}
res = parse_header(pkg, length, type, route);
if (!res.err)
memcpy(buffer, pkg->buffer, length);
tb_ctl_rx_submit(pkg);
return res;
}
/* public interface, alloc/start/stop/free */
/**
* tb_ctl_alloc() - allocate a control channel
*
* cb will be invoked once for every hot plug event.
*
* Return: Returns a pointer on success or NULL on failure.
*/
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
ctl->nhi = nhi;
ctl->callback = cb;
ctl->callback_data = cb_data;
init_completion(&ctl->response_ready);
INIT_KFIFO(ctl->response_fifo);
ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
TB_FRAME_SIZE, 4, 0);
if (!ctl->frame_pool)
goto err;
ctl->tx = ring_alloc_tx(nhi, 0, 10);
if (!ctl->tx)
goto err;
ctl->rx = ring_alloc_rx(nhi, 0, 10);
if (!ctl->rx)
goto err;
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
if (!ctl->rx_packets[i])
goto err;
ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
}
tb_ctl_info(ctl, "control channel created\n");
return ctl;
err:
tb_ctl_free(ctl);
return NULL;
}
/**
* tb_ctl_free() - free a control channel
*
* Must be called after tb_ctl_stop.
*
* Must NOT be called from ctl->callback.
*/
void tb_ctl_free(struct tb_ctl *ctl)
{
int i;
if (ctl->rx)
ring_free(ctl->rx);
if (ctl->tx)
ring_free(ctl->tx);
/* free RX packets */
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
tb_ctl_pkg_free(ctl->rx_packets[i]);
if (ctl->frame_pool)
dma_pool_destroy(ctl->frame_pool);
kfree(ctl);
}
/**
* tb_ctl_start() - start/resume the control channel
*/
void tb_ctl_start(struct tb_ctl *ctl)
{
int i;
tb_ctl_info(ctl, "control channel starting...\n");
ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
ring_start(ctl->rx);
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
tb_ctl_rx_submit(ctl->rx_packets[i]);
}
/**
* tb_ctl_stop() - pause the control channel
*
* All invocations of ctl->callback will have finished after this method
* returns.
*
* Must NOT be called from ctl->callback.
*/
void tb_ctl_stop(struct tb_ctl *ctl)
{
ring_stop(ctl->rx);
ring_stop(ctl->tx);
if (!kfifo_is_empty(&ctl->response_fifo))
tb_ctl_WARN(ctl, "dangling response in response_fifo\n");
kfifo_reset(&ctl->response_fifo);
tb_ctl_info(ctl, "control channel stopped\n");
}
/* public interface, commands */
/**
* tb_cfg_error() - send error packet
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
enum tb_cfg_error error)
{
struct cfg_error_pkg pkg = {
.header = make_header(route),
.port = port,
.error = error,
};
tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
/**
* tb_cfg_reset() - send a reset packet and wait for a response
*
* If the switch at route is incorrectly configured then we will not receive a
* reply (even though the switch will reset). The caller should check for
* -ETIMEDOUT and attempt to reconfigure the switch.
*/
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec)
{
int err;
struct cfg_reset_pkg request = { .header = make_header(route) };
struct tb_cfg_header reply;
err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET);
if (err)
return (struct tb_cfg_result) { .err = err };
return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
TB_CFG_PKG_RESET);
}
/**
* tb_cfg_read_raw() - read from config space into buffer
*
* Offset and length are in dwords.
*/
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
u32 offset, u32 length, int timeout_msec)
{
struct tb_cfg_result res = { 0 };
struct cfg_read_pkg request = {
.header = make_header(route),
.addr = {
.port = port,
.space = space,
.offset = offset,
.length = length,
},
};
struct cfg_write_pkg reply;
res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ);
if (res.err)
return res;
res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
TB_CFG_PKG_READ);
if (res.err)
return res;
res.response_port = reply.addr.port;
res.err = check_config_address(reply.addr, space, offset, length);
if (!res.err)
memcpy(buffer, &reply.data, 4 * length);
return res;
}
/**
* tb_cfg_write_raw() - write from buffer into config space
*
* Offset and length are in dwords.
*/
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
u32 offset, u32 length, int timeout_msec)
{
struct tb_cfg_result res = { 0 };
struct cfg_write_pkg request = {
.header = make_header(route),
.addr = {
.port = port,
.space = space,
.offset = offset,
.length = length,
},
};
struct cfg_read_pkg reply;
memcpy(&request.data, buffer, length * 4);
res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE);
if (res.err)
return res;
res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
TB_CFG_PKG_WRITE);
if (res.err)
return res;
res.response_port = reply.addr.port;
res.err = check_config_address(reply.addr, space, offset, length);
return res;
}
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
if (res.err == 1) {
tb_cfg_print_error(ctl, &res);
return -EIO;
}
WARN(res.err, "tb_cfg_read: %d\n", res.err);
return res.err;
}
int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
if (res.err == 1) {
tb_cfg_print_error(ctl, &res);
return -EIO;
}
WARN(res.err, "tb_cfg_write: %d\n", res.err);
return res.err;
}
/**
* tb_cfg_get_upstream_port() - get upstream port number of switch at route
*
* Reads the first dword from the switch's TB_CFG_SWITCH config area and
* returns the port number from which the reply originated.
*
* Return: Returns the upstream port number on success or an error code on
* failure.
*/
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
u32 dummy;
struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
TB_CFG_SWITCH, 0, 1,
TB_CFG_DEFAULT_TIMEOUT);
if (res.err == 1)
return -EIO;
if (res.err)
return res.err;
return res.response_port;
}

drivers/thunderbolt/ctl.h Normal file

@@ -0,0 +1,75 @@
/*
* Thunderbolt Cactus Ridge driver - control channel and configuration commands
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#ifndef _TB_CFG
#define _TB_CFG
#include "nhi.h"
/* control channel */
struct tb_ctl;
typedef void (*hotplug_cb)(void *data, u64 route, u8 port, bool unplug);
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
/* configuration commands */
#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
enum tb_cfg_space {
TB_CFG_HOPS = 0,
TB_CFG_PORT = 1,
TB_CFG_SWITCH = 2,
TB_CFG_COUNTERS = 3,
};
enum tb_cfg_error {
TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
TB_CFG_ERROR_NO_SUCH_PORT = 4,
TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* sent as reply to TB_CFG_PKG_EVENT */
TB_CFG_ERROR_LOOP = 8,
};
struct tb_cfg_result {
u64 response_route;
u32 response_port; /*
* If err = 1 then this is the port that sent the
* error.
* If err = 0 and if this was a cfg_read/write then
* this is the upstream port of the responding
* switch.
* Otherwise the field is set to zero.
*/
int err; /* negative errors, 0 for success, 1 for tb errors */
enum tb_cfg_error tb_error; /* valid if err == 1 */
};
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
enum tb_cfg_error error);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
int timeout_msec);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port,
enum tb_cfg_space space, u32 offset,
u32 length, int timeout_msec);
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port,
enum tb_cfg_space space, u32 offset,
u32 length, int timeout_msec);
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
#endif

drivers/thunderbolt/eeprom.c Normal file

@@ -0,0 +1,449 @@
/*
* Thunderbolt Cactus Ridge driver - eeprom access
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/crc32.h>
#include <linux/slab.h>
#include "tb.h"
/**
* tb_eeprom_ctl_write() - write control word
*/
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}
/**
* tb_eeprom_ctl_read() - read control word
*/
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}
enum tb_eeprom_transfer {
TB_EEPROM_IN,
TB_EEPROM_OUT,
};
/**
* tb_eeprom_active - enable rom access
*
* WARNING: Always disable access after usage. Otherwise the controller will
* fail to reprobe.
*/
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
struct tb_eeprom_ctl ctl;
int res = tb_eeprom_ctl_read(sw, &ctl);
if (res)
return res;
if (enable) {
ctl.access_high = 1;
res = tb_eeprom_ctl_write(sw, &ctl);
if (res)
return res;
ctl.access_low = 0;
return tb_eeprom_ctl_write(sw, &ctl);
} else {
ctl.access_low = 1;
res = tb_eeprom_ctl_write(sw, &ctl);
if (res)
return res;
ctl.access_high = 0;
return tb_eeprom_ctl_write(sw, &ctl);
}
}
/**
* tb_eeprom_transfer - transfer one bit
*
* If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
* If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
*/
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
enum tb_eeprom_transfer direction)
{
int res;
if (direction == TB_EEPROM_OUT) {
res = tb_eeprom_ctl_write(sw, ctl);
if (res)
return res;
}
ctl->clock = 1;
res = tb_eeprom_ctl_write(sw, ctl);
if (res)
return res;
if (direction == TB_EEPROM_IN) {
res = tb_eeprom_ctl_read(sw, ctl);
if (res)
return res;
}
ctl->clock = 0;
return tb_eeprom_ctl_write(sw, ctl);
}
/**
* tb_eeprom_out - write one byte to the bus
*/
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
struct tb_eeprom_ctl ctl;
int i;
int res = tb_eeprom_ctl_read(sw, &ctl);
if (res)
return res;
for (i = 0; i < 8; i++) {
ctl.data_out = val & 0x80;
res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
if (res)
return res;
val <<= 1;
}
return 0;
}
/**
* tb_eeprom_in - read one byte from the bus
*/
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
struct tb_eeprom_ctl ctl;
int i;
int res = tb_eeprom_ctl_read(sw, &ctl);
if (res)
return res;
*val = 0;
for (i = 0; i < 8; i++) {
*val <<= 1;
res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
if (res)
return res;
*val |= ctl.data_in;
}
return 0;
}
/**
* tb_eeprom_read_n - read count bytes from offset into val
*/
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
size_t count)
{
int i, res;
res = tb_eeprom_active(sw, true);
if (res)
return res;
res = tb_eeprom_out(sw, 3);
if (res)
return res;
res = tb_eeprom_out(sw, offset >> 8);
if (res)
return res;
res = tb_eeprom_out(sw, offset);
if (res)
return res;
for (i = 0; i < count; i++) {
res = tb_eeprom_in(sw, val + i);
if (res)
return res;
}
return tb_eeprom_active(sw, false);
}
static u8 tb_crc8(u8 *data, int len)
{
int i, j;
u8 val = 0xff;
for (i = 0; i < len; i++) {
val ^= data[i];
for (j = 0; j < 8; j++)
val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
}
return val;
}
static u32 tb_crc32(void *data, size_t len)
{
return ~__crc32c_le(~0, data, len);
}
#define TB_DROM_DATA_START 13
struct tb_drom_header {
/* BYTE 0 */
u8 uid_crc8; /* checksum for uid */
/* BYTES 1-8 */
u64 uid;
/* BYTES 9-12 */
u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
/* BYTE 13 */
u8 device_rom_revision; /* should be <= 1 */
u16 data_len:10;
u8 __unknown1:6;
/* BYTES 16-21 */
u16 vendor_id;
u16 model_id;
u8 model_rev;
u8 eeprom_rev;
} __packed;
enum tb_drom_entry_type {
/* force unsigned to prevent "one-bit signed bitfield" warning */
TB_DROM_ENTRY_GENERIC = 0U,
TB_DROM_ENTRY_PORT,
};
struct tb_drom_entry_header {
u8 len;
u8 index:6;
bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
enum tb_drom_entry_type type:1;
} __packed;
struct tb_drom_entry_port {
/* BYTES 0-1 */
struct tb_drom_entry_header header;
/* BYTE 2 */
u8 dual_link_port_rid:4;
u8 link_nr:1;
u8 unknown1:2;
bool has_dual_link_port:1;
/* BYTE 3 */
u8 dual_link_port_nr:6;
u8 unknown2:2;
/* BYTES 4 - 5 TODO decode */
u8 micro2:4;
u8 micro1:4;
u8 micro3;
/* BYTES 5-6, TODO: verify (find hardware that has these set) */
u8 peer_port_rid:4;
u8 unknown3:3;
bool has_peer_port:1;
u8 peer_port_nr:6;
u8 unknown4:2;
} __packed;
/**
* tb_eeprom_get_drom_offset - get drom offset within eeprom
*/
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
struct tb_cap_plug_events cap;
int res;
if (!sw->cap_plug_events) {
tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
return -ENOSYS;
}
res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
sizeof(cap) / 4);
if (res)
return res;
if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
tb_sw_warn(sw, "no NVM\n");
return -ENOSYS;
}
if (cap.drom_offset > 0xffff) {
tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
cap.drom_offset);
return -ENXIO;
}
*offset = cap.drom_offset;
return 0;
}
/**
* tb_drom_read_uid_only - read uid directly from drom
*
* Does not use the cached copy in sw->drom. Used during resume to check switch
* identity.
*/
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
u8 data[9];
u16 drom_offset;
u8 crc;
int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
if (res)
return res;
/* read uid */
res = tb_eeprom_read_n(sw, drom_offset, data, 9);
if (res)
return res;
crc = tb_crc8(data + 1, 8);
if (crc != data[0]) {
tb_sw_warn(sw, "uid crc8 missmatch (expected: %#x, got: %#x)\n",
data[0], crc);
return -EIO;
}
*uid = *(u64 *)(data+1);
return 0;
}
static void tb_drom_parse_port_entry(struct tb_port *port,
struct tb_drom_entry_port *entry)
{
port->link_nr = entry->link_nr;
if (entry->has_dual_link_port)
port->dual_link_port =
&port->sw->ports[entry->dual_link_port_nr];
}
static int tb_drom_parse_entry(struct tb_switch *sw,
struct tb_drom_entry_header *header)
{
struct tb_port *port;
int res;
enum tb_port_type type;
if (header->type != TB_DROM_ENTRY_PORT)
return 0;
port = &sw->ports[header->index];
port->disabled = header->port_disabled;
if (port->disabled)
return 0;
res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
if (res)
return res;
type &= 0xffffff;
if (type == TB_TYPE_PORT) {
struct tb_drom_entry_port *entry = (void *) header;
if (header->len != sizeof(*entry)) {
tb_sw_warn(sw,
"port entry has size %#x (expected %#zx)\n",
header->len, sizeof(struct tb_drom_entry_port));
return -EIO;
}
tb_drom_parse_port_entry(port, entry);
}
return 0;
}
/**
* tb_drom_parse_entries - parse the linked list of drom entries
*
* Drom must have been copied to sw->drom.
*/
static int tb_drom_parse_entries(struct tb_switch *sw)
{
struct tb_drom_header *header = (void *) sw->drom;
u16 pos = sizeof(*header);
u16 drom_size = header->data_len + TB_DROM_DATA_START;
while (pos < drom_size) {
struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
if (pos + 1 == drom_size || pos + entry->len > drom_size
|| !entry->len) {
tb_sw_warn(sw, "drom buffer overrun, aborting\n");
return -EIO;
}
tb_drom_parse_entry(sw, entry);
pos += entry->len;
}
return 0;
}
/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
int tb_drom_read(struct tb_switch *sw)
{
u16 drom_offset;
u16 size;
u32 crc;
struct tb_drom_header *header;
int res;
if (sw->drom)
return 0;
if (tb_route(sw) == 0) {
/*
* The root switch contains only a dummy drom (header only,
* no entries). Hardcode the configuration here.
*/
tb_drom_read_uid_only(sw, &sw->uid);
sw->ports[1].link_nr = 0;
sw->ports[2].link_nr = 1;
sw->ports[1].dual_link_port = &sw->ports[2];
sw->ports[2].dual_link_port = &sw->ports[1];
sw->ports[3].link_nr = 0;
sw->ports[4].link_nr = 1;
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].dual_link_port = &sw->ports[3];
return 0;
}
res = tb_eeprom_get_drom_offset(sw, &drom_offset);
if (res)
return res;
res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
if (res)
return res;
size &= 0x3ff;
size += TB_DROM_DATA_START;
tb_sw_info(sw, "reading drom (length: %#x)\n", size);
if (size < sizeof(*header)) {
tb_sw_warn(sw, "drom too small, aborting\n");
return -EIO;
}
sw->drom = kzalloc(size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
if (res)
goto err;
header = (void *) sw->drom;
if (header->data_len + TB_DROM_DATA_START != size) {
tb_sw_warn(sw, "drom size mismatch, aborting\n");
goto err;
}
crc = tb_crc8((u8 *) &header->uid, 8);
if (crc != header->uid_crc8) {
tb_sw_warn(sw,
"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
header->uid_crc8, crc);
goto err;
}
sw->uid = header->uid;
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"drom data crc32 mismatch (expected: %#x, got: %#x), aborting\n",
header->data_crc32, crc);
goto err;
}
if (header->device_rom_revision > 1)
tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
header->device_rom_revision);
return tb_drom_parse_entries(sw);
err:
kfree(sw->drom);
return -EIO;
}

drivers/thunderbolt/nhi.c Normal file

@@ -0,0 +1,675 @@
/*
* Thunderbolt Cactus Ridge driver - NHI driver
*
* The NHI (native host interface) is the pci device that allows us to send and
* receive frames from the thunderbolt bus.
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
static int ring_interrupt_index(struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
bit += ring->nhi->hop_count;
return bit;
}
/**
* ring_interrupt_active() - activate/deactivate interrupts for a single ring
*
* ring->nhi->lock must be held.
*/
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
int bit = ring_interrupt_index(ring) & 31;
int mask = 1 << bit;
u32 old, new;
old = ioread32(ring->nhi->iobase + reg);
if (active)
new = old | mask;
else
new = old & ~mask;
dev_info(&ring->nhi->pdev->dev,
"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
active ? "enabling" : "disabling", reg, bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
"interrupt for %s %d is already %s\n",
RING_TYPE(ring), ring->hop,
active ? "enabled" : "disabled");
iowrite32(new, ring->nhi->iobase + reg);
}
/**
* nhi_disable_interrupts() - disable interrupts for all rings
*
* Use only during init and shutdown.
*/
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
int i = 0;
/* disable interrupts */
for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
/* clear interrupt status bits */
for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}
/* ring helper methods */
static void __iomem *ring_desc_base(struct tb_ring *ring)
{
void __iomem *io = ring->nhi->iobase;
io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
io += ring->hop * 16;
return io;
}
static void __iomem *ring_options_base(struct tb_ring *ring)
{
void __iomem *io = ring->nhi->iobase;
io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
io += ring->hop * 32;
return io;
}
static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
iowrite16(value, ring_desc_base(ring) + offset);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
iowrite32(value, ring_desc_base(ring) + offset);
}
static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
iowrite32(value, ring_desc_base(ring) + offset);
iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}
static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
iowrite32(value, ring_options_base(ring) + offset);
}
static bool ring_full(struct tb_ring *ring)
{
return ((ring->head + 1) % ring->size) == ring->tail;
}
static bool ring_empty(struct tb_ring *ring)
{
return ring->head == ring->tail;
}
/**
* ring_write_descriptors() - post frames from ring->queue to the controller
*
* ring->lock is held.
*/
static void ring_write_descriptors(struct tb_ring *ring)
{
struct ring_frame *frame, *n;
struct ring_desc *descriptor;
list_for_each_entry_safe(frame, n, &ring->queue, list) {
if (ring_full(ring))
break;
list_move_tail(&frame->list, &ring->in_flight);
descriptor = &ring->descriptors[ring->head];
descriptor->phys = frame->buffer_phy;
descriptor->time = 0;
descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
if (ring->is_tx) {
descriptor->length = frame->size;
descriptor->eof = frame->eof;
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
}
}
/**
* ring_work() - progress completed frames
*
* If the ring is shutting down then all frames are marked as canceled and
* their callbacks are invoked.
*
* Otherwise we collect all completed frames from the ring buffer, write new
* frames to the ring buffer and invoke the callbacks for the completed frames.
*/
static void ring_work(struct work_struct *work)
{
struct tb_ring *ring = container_of(work, typeof(*ring), work);
struct ring_frame *frame;
bool canceled = false;
LIST_HEAD(done);
mutex_lock(&ring->lock);
if (!ring->running) {
/* Move all frames to done and mark them as canceled. */
list_splice_tail_init(&ring->in_flight, &done);
list_splice_tail_init(&ring->queue, &done);
canceled = true;
goto invoke_callback;
}
while (!ring_empty(ring)) {
if (!(ring->descriptors[ring->tail].flags
& RING_DESC_COMPLETED))
break;
frame = list_first_entry(&ring->in_flight, typeof(*frame),
list);
list_move_tail(&frame->list, &done);
if (!ring->is_tx) {
frame->size = ring->descriptors[ring->tail].length;
frame->eof = ring->descriptors[ring->tail].eof;
frame->sof = ring->descriptors[ring->tail].sof;
frame->flags = ring->descriptors[ring->tail].flags;
if (frame->sof != 0)
dev_WARN(&ring->nhi->pdev->dev,
"%s %d got unexpected SOF: %#x\n",
RING_TYPE(ring), ring->hop,
frame->sof);
/*
* known flags:
* raw not enabled, interrupt not set: 0x2=0010
* raw enabled: 0xa=1010
* raw not enabled: 0xb=1011
* partial frame (>MAX_FRAME_SIZE): 0xe=1110
*/
if (frame->flags != 0xa)
dev_WARN(&ring->nhi->pdev->dev,
"%s %d got unexpected flags: %#x\n",
RING_TYPE(ring), ring->hop,
frame->flags);
}
ring->tail = (ring->tail + 1) % ring->size;
}
ring_write_descriptors(ring);
invoke_callback:
mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
while (!list_empty(&done)) {
frame = list_first_entry(&done, typeof(*frame), list);
/*
* The callback may reenqueue or delete the frame.
* Do not hold on to it.
*/
list_del_init(&frame->list);
frame->callback(ring, frame, canceled);
}
}
int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
int ret = 0;
mutex_lock(&ring->lock);
if (ring->running) {
list_add_tail(&frame->list, &ring->queue);
ring_write_descriptors(ring);
} else {
ret = -ESHUTDOWN;
}
mutex_unlock(&ring->lock);
return ret;
}
static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
bool transmit)
{
struct tb_ring *ring = NULL;
dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
transmit ? "TX" : "RX", hop, size);
mutex_lock(&nhi->lock);
if (hop >= nhi->hop_count) {
dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
goto err;
}
if (transmit && nhi->tx_rings[hop]) {
dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
goto err;
} else if (!transmit && nhi->rx_rings[hop]) {
dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
goto err;
}
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto err;
mutex_init(&ring->lock);
INIT_LIST_HEAD(&ring->queue);
INIT_LIST_HEAD(&ring->in_flight);
INIT_WORK(&ring->work, ring_work);
ring->nhi = nhi;
ring->hop = hop;
ring->is_tx = transmit;
ring->size = size;
ring->head = 0;
ring->tail = 0;
ring->running = false;
ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
size * sizeof(*ring->descriptors),
&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
if (!ring->descriptors)
goto err;
if (transmit)
nhi->tx_rings[hop] = ring;
else
nhi->rx_rings[hop] = ring;
mutex_unlock(&nhi->lock);
return ring;
err:
if (ring)
mutex_destroy(&ring->lock);
kfree(ring);
mutex_unlock(&nhi->lock);
return NULL;
}
struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size)
{
return ring_alloc(nhi, hop, size, true);
}
struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size)
{
return ring_alloc(nhi, hop, size, false);
}
/**
* ring_start() - enable a ring
*
* Must not be invoked in parallel with ring_stop().
*/
void ring_start(struct tb_ring *ring)
{
mutex_lock(&ring->nhi->lock);
mutex_lock(&ring->lock);
if (ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
goto err;
}
dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
RING_TYPE(ring), ring->hop);
ring_iowrite64desc(ring, ring->descriptors_dma, 0);
if (ring->is_tx) {
ring_iowrite32desc(ring, ring->size, 12);
ring_iowrite32options(ring, 0, 4); /* time related? */
ring_iowrite32options(ring,
RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
} else {
ring_iowrite32desc(ring,
(TB_FRAME_SIZE << 16) | ring->size, 12);
ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
ring_iowrite32options(ring,
RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
}
ring_interrupt_active(ring, true);
ring->running = true;
err:
mutex_unlock(&ring->lock);
mutex_unlock(&ring->nhi->lock);
}
/**
* ring_stop() - shutdown a ring
*
* Must not be invoked from a callback.
*
* This method will disable the ring. Further calls to ring_tx/ring_rx will
* return -ESHUTDOWN until ring_stop has been called.
*
* All enqueued frames will be canceled and their callbacks will be executed
* with frame->canceled set to true (on the callback thread). This method
* returns only after all callback invocations have finished.
*/
void ring_stop(struct tb_ring *ring)
{
mutex_lock(&ring->nhi->lock);
mutex_lock(&ring->lock);
dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
RING_TYPE(ring), ring->hop);
if (!ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
RING_TYPE(ring), ring->hop);
goto err;
}
ring_interrupt_active(ring, false);
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
ring->running = false;
err:
mutex_unlock(&ring->lock);
mutex_unlock(&ring->nhi->lock);
/*
* schedule ring->work to invoke callbacks on all remaining frames.
*/
schedule_work(&ring->work);
flush_work(&ring->work);
}
/*
* ring_free() - free ring
*
* When this method returns all invocations of ring->callback will have
* finished.
*
* Ring must be stopped.
*
* Must NOT be called from ring_frame->callback!
*/
void ring_free(struct tb_ring *ring)
{
mutex_lock(&ring->nhi->lock);
/*
* Dissociate the ring from the NHI. This also ensures that
* nhi_interrupt_work cannot reschedule ring->work.
*/
if (ring->is_tx)
ring->nhi->tx_rings[ring->hop] = NULL;
else
ring->nhi->rx_rings[ring->hop] = NULL;
if (ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
RING_TYPE(ring), ring->hop);
}
dma_free_coherent(&ring->nhi->pdev->dev,
ring->size * sizeof(*ring->descriptors),
ring->descriptors, ring->descriptors_dma);
ring->descriptors = NULL;
ring->descriptors_dma = 0;
dev_info(&ring->nhi->pdev->dev,
"freeing %s %d\n",
RING_TYPE(ring),
ring->hop);
mutex_unlock(&ring->nhi->lock);
/*
* ring->work can no longer be scheduled (it is scheduled only by
* nhi_interrupt_work and ring_stop). Wait for it to finish before
* freeing the ring.
*/
flush_work(&ring->work);
mutex_destroy(&ring->lock);
kfree(ring);
}
static void nhi_interrupt_work(struct work_struct *work)
{
struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
int value = 0; /* Suppress uninitialized usage warning. */
int bit;
int hop = -1;
int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
struct tb_ring *ring;
mutex_lock(&nhi->lock);
/*
* Starting at REG_RING_NOTIFY_BASE there are three status bitfields
* (TX, RX, RX overflow). We iterate over the bits and read new
* dwords as required. The registers are cleared on read.
*/
for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
if (bit % 32 == 0)
value = ioread32(nhi->iobase
+ REG_RING_NOTIFY_BASE
+ 4 * (bit / 32));
if (++hop == nhi->hop_count) {
hop = 0;
type++;
}
if ((value & (1 << (bit % 32))) == 0)
continue;
if (type == 2) {
dev_warn(&nhi->pdev->dev,
"RX overflow for ring %d\n",
hop);
continue;
}
if (type == 0)
ring = nhi->tx_rings[hop];
else
ring = nhi->rx_rings[hop];
if (ring == NULL) {
dev_warn(&nhi->pdev->dev,
"got interrupt for inactive %s ring %d\n",
type ? "RX" : "TX",
hop);
continue;
}
/* we do not check ring->running, this is done in ring->work */
schedule_work(&ring->work);
}
mutex_unlock(&nhi->lock);
}
static irqreturn_t nhi_msi(int irq, void *data)
{
struct tb_nhi *nhi = data;
schedule_work(&nhi->interrupt_work);
return IRQ_HANDLED;
}
static int nhi_suspend_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
thunderbolt_suspend(tb);
return 0;
}
static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
thunderbolt_resume(tb);
return 0;
}
static void nhi_shutdown(struct tb_nhi *nhi)
{
int i;
dev_info(&nhi->pdev->dev, "shutdown\n");
for (i = 0; i < nhi->hop_count; i++) {
if (nhi->tx_rings[i])
dev_WARN(&nhi->pdev->dev,
"TX ring %d is still active\n", i);
if (nhi->rx_rings[i])
dev_WARN(&nhi->pdev->dev,
"RX ring %d is still active\n", i);
}
nhi_disable_interrupts(nhi);
/*
* We have to release the irq before calling flush_work. Otherwise an
* already executing IRQ handler could call schedule_work again.
*/
devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
flush_work(&nhi->interrupt_work);
mutex_destroy(&nhi->lock);
}
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct tb_nhi *nhi;
struct tb *tb;
int res;
res = pcim_enable_device(pdev);
if (res) {
dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
return res;
}
res = pci_enable_msi(pdev);
if (res) {
dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
return res;
}
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
if (res) {
dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
return res;
}
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
return -ENOMEM;
nhi->pdev = pdev;
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
if (nhi->hop_count != 12)
dev_warn(&pdev->dev, "unexpected hop count: %d\n",
nhi->hop_count);
INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->rx_rings), GFP_KERNEL);
if (!nhi->tx_rings || !nhi->rx_rings)
return -ENOMEM;
nhi_disable_interrupts(nhi); /* In case someone left them on. */
res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi,
IRQF_NO_SUSPEND, /* must work during _noirq */
"thunderbolt", nhi);
if (res) {
dev_err(&pdev->dev, "request_irq failed, aborting\n");
return res;
}
mutex_init(&nhi->lock);
pci_set_master(pdev);
/* magic value - clock related? */
iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
tb = thunderbolt_alloc_and_start(nhi);
if (!tb) {
/*
* At this point the RX/TX rings might already have been
* activated. Do a proper shutdown.
*/
nhi_shutdown(nhi);
return -EIO;
}
pci_set_drvdata(pdev, tb);
return 0;
}
static void nhi_remove(struct pci_dev *pdev)
{
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
thunderbolt_shutdown_and_free(tb);
nhi_shutdown(nhi);
}
/*
* The tunneled pci bridges are siblings of ours. Use resume_noirq to reenable
* the tunnels asap. A corresponding pci quirk blocks the downstream bridges'
* resume_noirq until we are done.
*/
static const struct dev_pm_ops nhi_pm_ops = {
.suspend_noirq = nhi_suspend_noirq,
.resume_noirq = nhi_resume_noirq,
.freeze_noirq = nhi_suspend_noirq, /*
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
.restore_noirq = nhi_resume_noirq,
};
static struct pci_device_id nhi_ids[] = {
/*
* We have to specify class, the TB bridges use the same device and
* vendor (sub)id.
*/
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
.subvendor = 0x2222, .subdevice = 0x1111,
},
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
.subvendor = 0x2222, .subdevice = 0x1111,
},
{ 0,}
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
.name = "thunderbolt",
.id_table = nhi_ids,
.probe = nhi_probe,
.remove = nhi_remove,
.driver.pm = &nhi_pm_ops,
};
static int __init nhi_init(void)
{
if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
return -ENOSYS;
return pci_register_driver(&nhi_driver);
}
static void __exit nhi_unload(void)
{
pci_unregister_driver(&nhi_driver);
}
module_init(nhi_init);
module_exit(nhi_unload);

drivers/thunderbolt/nhi.h Normal file

@@ -0,0 +1,114 @@
/*
* Thunderbolt Cactus Ridge driver - NHI driver
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#ifndef DSL3510_H_
#define DSL3510_H_
#include <linux/mutex.h>
#include <linux/workqueue.h>
/**
* struct tb_nhi - thunderbolt native host interface
*/
struct tb_nhi {
struct mutex lock; /*
* Must be held during ring creation/destruction.
* Is acquired by interrupt_work when dispatching
* interrupts to individual rings.
**/
struct pci_dev *pdev;
void __iomem *iobase;
struct tb_ring **tx_rings;
struct tb_ring **rx_rings;
struct work_struct interrupt_work;
u32 hop_count; /* Number of rings (end point hops) supported by NHI. */
};
/**
* struct tb_ring - thunderbolt TX or RX ring associated with a NHI
*/
struct tb_ring {
struct mutex lock; /* must be acquired after nhi->lock */
struct tb_nhi *nhi;
int size;
int hop;
int head; /* write next descriptor here */
int tail; /* complete next descriptor here */
struct ring_desc *descriptors;
dma_addr_t descriptors_dma;
struct list_head queue;
struct list_head in_flight;
struct work_struct work;
bool is_tx:1; /* rx otherwise */
bool running:1;
};
struct ring_frame;
typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled);
/**
* struct ring_frame - for use with ring_rx/ring_tx
*/
struct ring_frame {
dma_addr_t buffer_phy;
ring_cb callback;
struct list_head list;
u32 size:12; /* TX: in, RX: out */
u32 flags:12; /* RX: out */
u32 eof:4; /* TX: in, RX: out */
u32 sof:4; /* TX: in, RX: out */
};
#define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */
struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size);
struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size);
void ring_start(struct tb_ring *ring);
void ring_stop(struct tb_ring *ring);
void ring_free(struct tb_ring *ring);
int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
/**
* ring_rx() - enqueue a frame on an RX ring
*
* frame->buffer, frame->buffer_phy and frame->callback have to be set. The
* buffer must contain at least TB_FRAME_SIZE bytes.
*
* frame->callback will be invoked with frame->size, frame->flags, frame->eof,
* frame->sof set once the frame has been received.
*
* If ring_stop is called after the packet has been enqueued frame->callback
* will be called with canceled set to true.
*
* Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise.
*/
static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
WARN_ON(ring->is_tx);
return __ring_enqueue(ring, frame);
}
/**
* ring_tx() - enqueue a frame on a TX ring
*
* frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof
* and frame->sof have to be set.
*
* frame->callback will be invoked once the frame has been transmitted.
*
* If ring_stop is called after the packet has been enqueued frame->callback
* will be called with canceled set to true.
*
* Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise.
*/
static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
WARN_ON(!ring->is_tx);
return __ring_enqueue(ring, frame);
}
#endif

drivers/thunderbolt/nhi_regs.h Normal file

@@ -0,0 +1,101 @@
/*
* Thunderbolt Cactus Ridge driver - NHI registers
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#ifndef DSL3510_REGS_H_
#define DSL3510_REGS_H_
#include <linux/types.h>
enum ring_flags {
RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */
RING_FLAG_E2E_FLOW_CONTROL = 1 << 28,
RING_FLAG_PCI_NO_SNOOP = 1 << 29,
RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */
RING_FLAG_ENABLE = 1 << 31,
};
enum ring_desc_flags {
RING_DESC_ISOCH = 0x1, /* TX only? */
RING_DESC_COMPLETED = 0x2, /* set by NHI */
RING_DESC_POSTED = 0x4, /* always set this */
RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */
};
/**
* struct ring_desc - TX/RX ring entry
*
* For TX set length/eof/sof.
* For RX length/eof/sof are set by the NHI.
*/
struct ring_desc {
u64 phys;
u32 length:12;
u32 eof:4;
u32 sof:4;
enum ring_desc_flags flags:12;
u32 time; /* write zero */
} __packed;
/* NHI registers in bar 0 */
/*
* 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 00: physical pointer to an array of struct ring_desc
* 08: ring tail (set by NHI)
* 10: ring head (index of first non-posted descriptor)
* 12: descriptor count
*/
#define REG_TX_RING_BASE 0x00000
/*
* 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 00: physical pointer to an array of struct ring_desc
* 08: ring head (index of first non-posted descriptor)
* 10: ring tail (set by NHI)
* 12: descriptor count
* 14: max frame sizes (anything larger than 0x100 has no effect)
*/
#define REG_RX_RING_BASE 0x08000
/*
* 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 00: enum ring_flags
* 04: isoch time stamp ?? (write 0)
* ..: unknown
*/
#define REG_TX_OPTIONS_BASE 0x19800
/*
* 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
* 00: enum ring_flags
* If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to
* the corresponding TX hop id.
* 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
* ..: unknown
*/
#define REG_RX_OPTIONS_BASE 0x29800
/*
* three bitfields: tx, rx, rx overflow
* Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
* cleared on read. New interrupts are fired only after ALL registers have been
* read (even those containing only disabled rings).
*/
#define REG_RING_NOTIFY_BASE 0x37800
#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
/*
* two bitfields: rx, tx
* Both bitfields contain one bit for every hop (REG_HOP_COUNT). To
* enable/disable interrupts set/clear the corresponding bits.
*/
#define REG_RING_INTERRUPT_BASE 0x38200
#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
/* The last 11 bits contain the number of hops supported by the NHI port. */
#define REG_HOP_COUNT 0x39640
#endif

drivers/thunderbolt/path.c Normal file

@@ -0,0 +1,234 @@
/*
* Thunderbolt Cactus Ridge driver - path/tunnel functionality
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include "tb.h"
static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
{
tb_port_info(port, " Hop through port %d to hop %d (%s)\n",
hop->out_port, hop->next_hop,
hop->enable ? "enabled" : "disabled");
tb_port_info(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
hop->weight, hop->priority,
hop->initial_credits, hop->drop_packages);
tb_port_info(port, " Counter enabled: %d Counter index: %d\n",
hop->counter_enable, hop->counter);
tb_port_info(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
hop->ingress_fc, hop->egress_fc,
hop->ingress_shared_buffer, hop->egress_shared_buffer);
tb_port_info(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
hop->unknown1, hop->unknown2, hop->unknown3);
}
/**
* tb_path_alloc() - allocate a thunderbolt path
*
* Return: Returns a tb_path on success or NULL on failure.
*/
struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
{
struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL);
if (!path)
return NULL;
path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
if (!path->hops) {
kfree(path);
return NULL;
}
path->tb = tb;
path->path_length = num_hops;
return path;
}
/**
* tb_path_free() - free a deactivated path
*/
void tb_path_free(struct tb_path *path)
{
if (path->activated) {
tb_WARN(path->tb, "trying to free an activated path\n")
return;
}
kfree(path->hops);
kfree(path);
}
static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
{
int i, res;
for (i = first_hop; i < path->path_length; i++) {
res = tb_port_add_nfc_credits(path->hops[i].in_port,
-path->nfc_credits);
if (res)
tb_port_warn(path->hops[i].in_port,
"nfc credits deallocation failed for hop %d\n",
i);
}
}
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
struct tb_regs_hop hop = { };
for (i = first_hop; i < path->path_length; i++) {
res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
2 * path->hops[i].in_hop_index, 2);
if (res)
tb_port_warn(path->hops[i].in_port,
"hop deactivation failed for hop %d, index %d\n",
i, path->hops[i].in_hop_index);
}
}
void tb_path_deactivate(struct tb_path *path)
{
if (!path->activated) {
tb_WARN(path->tb, "trying to deactivate an inactive path\n");
return;
}
tb_info(path->tb,
"deactivating path from %llx:%x to %llx:%x\n",
tb_route(path->hops[0].in_port->sw),
path->hops[0].in_port->port,
tb_route(path->hops[path->path_length - 1].out_port->sw),
path->hops[path->path_length - 1].out_port->port);
__tb_path_deactivate_hops(path, 0);
__tb_path_deallocate_nfc(path, 0);
path->activated = false;
}
/**
* tb_path_activate() - activate a path
*
* Activate a path starting with the last hop and iterating backwards. The
* caller must fill path->hops before calling tb_path_activate().
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_path_activate(struct tb_path *path)
{
int i, res;
enum tb_path_port out_mask, in_mask;
if (path->activated) {
tb_WARN(path->tb, "trying to activate already activated path\n");
return -EINVAL;
}
tb_info(path->tb,
"activating path from %llx:%x to %llx:%x\n",
tb_route(path->hops[0].in_port->sw),
path->hops[0].in_port->port,
tb_route(path->hops[path->path_length - 1].out_port->sw),
path->hops[path->path_length - 1].out_port->port);
/* Clear counters. */
for (i = path->path_length - 1; i >= 0; i--) {
if (path->hops[i].in_counter_index == -1)
continue;
res = tb_port_clear_counter(path->hops[i].in_port,
path->hops[i].in_counter_index);
if (res)
goto err;
}
/* Add non flow controlled credits. */
for (i = path->path_length - 1; i >= 0; i--) {
res = tb_port_add_nfc_credits(path->hops[i].in_port,
path->nfc_credits);
if (res) {
__tb_path_deallocate_nfc(path, i);
goto err;
}
}
/* Activate hops. */
for (i = path->path_length - 1; i >= 0; i--) {
struct tb_regs_hop hop = { 0 };
/*
* We do not (currently) tear down paths set up by the firmware.
* If a firmware device is unplugged and plugged in again then
* it can happen that we reuse some of the hops from the (now
* defunct) firmware path. This causes the hotplug operation to
* fail (the pci device does not show up). Clearing the hop
* before overwriting it fixes the problem.
*
* Should be removed once we discover and tear down firmware
* paths.
*/
res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
2 * path->hops[i].in_hop_index, 2);
if (res) {
__tb_path_deactivate_hops(path, i);
__tb_path_deallocate_nfc(path, 0);
goto err;
}
/* dword 0 */
hop.next_hop = path->hops[i].next_hop_index;
hop.out_port = path->hops[i].out_port->port;
/* TODO: figure out why these are good values */
hop.initial_credits = (i == path->path_length - 1) ? 16 : 7;
hop.unknown1 = 0;
hop.enable = 1;
/* dword 1 */
out_mask = (i == path->path_length - 1) ?
TB_PATH_DESTINATION : TB_PATH_INTERNAL;
in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
hop.weight = path->weight;
hop.unknown2 = 0;
hop.priority = path->priority;
hop.drop_packages = path->drop_packages;
hop.counter = path->hops[i].in_counter_index;
hop.counter_enable = path->hops[i].in_counter_index != -1;
hop.ingress_fc = path->ingress_fc_enable & in_mask;
hop.egress_fc = path->egress_fc_enable & out_mask;
hop.ingress_shared_buffer = path->ingress_shared_buffer
& in_mask;
hop.egress_shared_buffer = path->egress_shared_buffer
& out_mask;
hop.unknown3 = 0;
tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d\n",
i, path->hops[i].in_hop_index);
tb_dump_hop(path->hops[i].in_port, &hop);
res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
2 * path->hops[i].in_hop_index, 2);
if (res) {
__tb_path_deactivate_hops(path, i);
__tb_path_deallocate_nfc(path, 0);
goto err;
}
}
path->activated = true;
tb_info(path->tb, "path activation complete\n");
return 0;
err:
tb_WARN(path->tb, "path activation failed\n");
return res;
}
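/*
 * Typical usage, mirroring tb_pci_alloc()/tb_pci_restart() in
 * tunnel_pci.c (sketch only, with error handling abbreviated and the
 * port variables assumed): allocate the path, fill in every hop, then
 * activate it:
 *
 * struct tb_path *path = tb_path_alloc(tb, 2);
 * if (!path)
 *	return -ENOMEM;
 * path->hops[0].in_port = down;
 * path->hops[0].in_hop_index = 8;
 * path->hops[0].in_counter_index = -1;
 * path->hops[0].out_port = out;
 * path->hops[0].next_hop_index = 8;
 * (fill hops[1] the same way, then:)
 * if (tb_path_activate(path))
 *	tb_path_free(path);
 */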
/**
* tb_path_is_invalid() - check whether any ports on the path are invalid
*
* Return: Returns true if the path is invalid, false otherwise.
*/
bool tb_path_is_invalid(struct tb_path *path)
{
int i = 0;
for (i = 0; i < path->path_length; i++) {
if (path->hops[i].in_port->sw->is_unplugged)
return true;
if (path->hops[i].out_port->sw->is_unplugged)
return true;
}
return false;
}

507
drivers/thunderbolt/switch.c Normal file
View file

@@ -0,0 +1,507 @@
/*
* Thunderbolt Cactus Ridge driver - switch/port utility functions
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "tb.h"
/* port utility functions */
static const char *tb_port_type(struct tb_regs_port_header *port)
{
switch (port->type >> 16) {
case 0:
switch ((u8) port->type) {
case 0:
return "Inactive";
case 1:
return "Port";
case 2:
return "NHI";
default:
return "unknown";
}
case 0x2:
return "Ethernet";
case 0x8:
return "SATA";
case 0xe:
return "DP/HDMI";
case 0x10:
return "PCIe";
case 0x20:
return "USB";
default:
return "unknown";
}
}
static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
tb_info(tb,
" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
port->port_number, port->vendor_id, port->device_id,
port->revision, port->thunderbolt_version, tb_port_type(port),
port->type);
tb_info(tb, " Max hop id (in/out): %d/%d\n",
port->max_in_hop_id, port->max_out_hop_id);
tb_info(tb, " Max counters: %d\n", port->max_counters);
tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits);
}
/**
* tb_port_state() - get connectedness state of a port
*
* The port must have a TB_CAP_PHY (i.e. it should be a real port).
*
* Return: Returns an enum tb_port_state on success or an error code on failure.
*/
static int tb_port_state(struct tb_port *port)
{
struct tb_cap_phy phy;
int res;
if (port->cap_phy == 0) {
tb_port_WARN(port, "does not have a PHY\n");
return -EINVAL;
}
res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
if (res)
return res;
return phy.state;
}
/**
* tb_wait_for_port() - wait for a port to become ready
*
* Wait up to 1 second for a port to reach state TB_PORT_UP. If
* wait_if_unplugged is set then we also wait if the port is in state
* TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
* switch resume). Otherwise we only wait if a device is registered but the link
* has not yet been established.
*
* Return: Returns an error code on failure. Returns 0 if the port is not
* connected or failed to reach state TB_PORT_UP within one second. Returns 1
* if the port is connected and in state TB_PORT_UP.
*/
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
int retries = 10;
int state;
if (!port->cap_phy) {
tb_port_WARN(port, "does not have PHY\n");
return -EINVAL;
}
if (tb_is_upstream_port(port)) {
tb_port_WARN(port, "is the upstream port\n");
return -EINVAL;
}
while (retries--) {
state = tb_port_state(port);
if (state < 0)
return state;
if (state == TB_PORT_DISABLED) {
tb_port_info(port, "is disabled (state: 0)\n");
return 0;
}
if (state == TB_PORT_UNPLUGGED) {
if (wait_if_unplugged) {
/* used during resume */
tb_port_info(port,
"is unplugged (state: 7), retrying...\n");
msleep(100);
continue;
}
tb_port_info(port, "is unplugged (state: 7)\n");
return 0;
}
if (state == TB_PORT_UP) {
tb_port_info(port,
"is connected, link is up (state: 2)\n");
return 1;
}
/*
* After plug-in the state is TB_PORT_CONNECTING. Give it some
* time.
*/
tb_port_info(port,
"is connected, link is not up (state: %d), retrying...\n",
state);
msleep(100);
}
tb_port_warn(port,
"failed to reach state TB_PORT_UP. Ignoring port...\n");
return 0;
}
/**
* tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
*
* Change the number of NFC credits allocated to @port by @credits. To remove
* NFC credits pass a negative amount of credits.
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
if (credits == 0)
return 0;
tb_port_info(port,
"adding %#x NFC credits (%#x -> %#x)",
credits,
port->config.nfc_credits,
port->config.nfc_credits + credits);
port->config.nfc_credits += credits;
return tb_port_write(port, &port->config.nfc_credits,
TB_CFG_PORT, 4, 1);
}
/**
* tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_port_clear_counter(struct tb_port *port, int counter)
{
u32 zero[3] = { 0, 0, 0 };
tb_port_info(port, "clearing counter %d\n", counter);
return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}
/**
* tb_init_port() - initialize a port
*
* This is a helper method for tb_switch_alloc. Does not check or initialize
* any downstream switches.
*
* Return: Returns 0 on success or an error code on failure.
*/
static int tb_init_port(struct tb_port *port)
{
int res;
int cap;
res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
if (res)
return res;
/* Port 0 is the switch itself and has no PHY. */
if (port->config.type == TB_TYPE_PORT && port->port != 0) {
cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY);
if (cap > 0)
port->cap_phy = cap;
else
tb_port_WARN(port, "non switch port without a PHY\n");
}
tb_dump_port(port->sw->tb, &port->config);
/* TODO: Read dual link port, DP port and more from EEPROM. */
return 0;
}
/* switch utility functions */
static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
tb_info(tb,
" Switch: %x:%x (Revision: %d, TB Version: %d)\n",
sw->vendor_id, sw->device_id, sw->revision,
sw->thunderbolt_version);
tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);
tb_info(tb, " Config:\n");
tb_info(tb,
" Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
sw->upstream_port_number, sw->depth,
(((u64) sw->route_hi) << 32) | sw->route_lo,
sw->enabled, sw->plug_events_delay);
tb_info(tb,
" unknown1: %#x unknown4: %#x\n",
sw->__unknown1, sw->__unknown4);
}
/**
* tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_switch_reset(struct tb *tb, u64 route)
{
struct tb_cfg_result res;
struct tb_regs_switch_header header = {
.route_hi = route >> 32,
.route_lo = route,
.enabled = true,
};
tb_info(tb, "resetting switch at %llx\n", route);
res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
0, 2, 2, 2);
if (res.err)
return res.err;
res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT);
if (res.err > 0)
return -EIO;
return res.err;
}
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route)
{
u8 next_port = route; /*
* Routes use a stride of 8 bits,
* even though a port index has 6 bits at most.
*/
if (route == 0)
return sw;
if (next_port > sw->config.max_port_number)
return NULL;
if (tb_is_upstream_port(&sw->ports[next_port]))
return NULL;
if (!sw->ports[next_port].remote)
return NULL;
return get_switch_at_route(sw->ports[next_port].remote->sw,
route >> TB_ROUTE_SHIFT);
}
/**
* tb_plug_events_active() - enable/disable plug events on a switch
*
* Also configures a sane plug_events_delay of 255ms.
*
* Return: Returns 0 on success or an error code on failure.
*/
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
u32 data;
int res;
sw->config.plug_events_delay = 0xff;
res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
if (res)
return res;
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res)
return res;
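/*
 * Bits 2:6 of this dword form the plug_events mask (cf. struct
 * tb_cap_plug_events in tb_regs.h), hence the 0x7c and
 * 0xFFFFFF83 (= ~0x7c) constants below.
 */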
if (active) {
data = data & 0xFFFFFF83;
switch (sw->config.device_id) {
case 0x1513:
case 0x151a:
case 0x1549:
break;
default:
data |= 4;
}
} else {
data = data | 0x7c;
}
return tb_sw_write(sw, &data, TB_CFG_SWITCH,
sw->cap_plug_events + 1, 1);
}
/**
* tb_switch_free() - free a tb_switch and all downstream switches
*/
void tb_switch_free(struct tb_switch *sw)
{
int i;
/* port 0 is the switch itself and never has a remote */
for (i = 1; i <= sw->config.max_port_number; i++) {
if (tb_is_upstream_port(&sw->ports[i]))
continue;
if (sw->ports[i].remote)
tb_switch_free(sw->ports[i].remote->sw);
sw->ports[i].remote = NULL;
}
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
kfree(sw->ports);
kfree(sw->drom);
kfree(sw);
}
/**
* tb_switch_alloc() - allocate and initialize a switch
*
* Return: Returns the allocated switch on success or NULL on failure.
*/
struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
{
int i;
int cap;
struct tb_switch *sw;
int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
if (upstream_port < 0)
return NULL;
sw = kzalloc(sizeof(*sw), GFP_KERNEL);
if (!sw)
return NULL;
sw->tb = tb;
if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5))
goto err;
tb_info(tb,
"initializing Switch at %#llx (depth: %d, up port: %d)\n",
route, tb_route_length(route), upstream_port);
tb_info(tb, "old switch config:\n");
tb_dump_switch(tb, &sw->config);
/* configure switch */
sw->config.upstream_port_number = upstream_port;
sw->config.depth = tb_route_length(route);
sw->config.route_lo = route;
sw->config.route_hi = route >> 32;
sw->config.enabled = 1;
/* from here on we may use the tb_sw_* functions & macros */
if (sw->config.vendor_id != 0x8086)
tb_sw_warn(sw, "unknown switch vendor id %#x\n",
sw->config.vendor_id);
if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549)
tb_sw_warn(sw, "unsupported switch device id %#x\n",
sw->config.device_id);
/* upload configuration */
if (tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3))
goto err;
/* initialize ports */
sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
GFP_KERNEL);
if (!sw->ports)
goto err;
for (i = 0; i <= sw->config.max_port_number; i++) {
/* minimum setup for tb_find_cap and tb_drom_read to work */
sw->ports[i].sw = sw;
sw->ports[i].port = i;
}
cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);
if (cap < 0) {
tb_sw_warn(sw, "cannot find TB_CAP_PLUG_EVENTS aborting\n");
goto err;
}
sw->cap_plug_events = cap;
/* read drom */
if (tb_drom_read(sw))
tb_sw_warn(sw, "tb_eeprom_read_rom failed, continuing\n");
tb_sw_info(sw, "uid: %#llx\n", sw->uid);
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
tb_port_info(&sw->ports[i], "disabled by eeprom\n");
continue;
}
if (tb_init_port(&sw->ports[i]))
goto err;
}
/* TODO: I2C, IECS, link controller */
if (tb_plug_events_active(sw, true))
goto err;
return sw;
err:
kfree(sw->ports);
kfree(sw->drom);
kfree(sw);
return NULL;
}
/**
* tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
{
int i;
if (sw == sw->tb->root_switch) {
tb_sw_WARN(sw, "cannot unplug root switch\n");
return;
}
if (sw->is_unplugged) {
tb_sw_WARN(sw, "is_unplugged already set\n");
return;
}
sw->is_unplugged = true;
for (i = 0; i <= sw->config.max_port_number; i++) {
if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
tb_sw_set_unplugged(sw->ports[i].remote->sw);
}
}
int tb_switch_resume(struct tb_switch *sw)
{
int i, err;
u64 uid;
tb_sw_info(sw, "resuming switch\n");
err = tb_drom_read_uid_only(sw, &uid);
if (err) {
tb_sw_warn(sw, "uid read failed\n");
return err;
}
if (sw->uid != uid) {
tb_sw_info(sw,
"changed while suspended (uid %#llx -> %#llx)\n",
sw->uid, uid);
return -ENODEV;
}
/* upload configuration */
err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3);
if (err)
return err;
err = tb_plug_events_active(sw, true);
if (err)
return err;
/* check for surviving downstream switches */
for (i = 1; i <= sw->config.max_port_number; i++) {
struct tb_port *port = &sw->ports[i];
if (tb_is_upstream_port(port))
continue;
if (!port->remote)
continue;
if (tb_wait_for_port(port, true) <= 0
|| tb_switch_resume(port->remote->sw)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
}
}
return 0;
}
void tb_switch_suspend(struct tb_switch *sw)
{
int i, err;
err = tb_plug_events_active(sw, false);
if (err)
return;
for (i = 1; i <= sw->config.max_port_number; i++) {
if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
tb_switch_suspend(sw->ports[i].remote->sw);
}
/*
* TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any
* effect?
*/
}

436
drivers/thunderbolt/tb.c Normal file
View file

@@ -0,0 +1,436 @@
/*
* Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include "tb.h"
#include "tb_regs.h"
#include "tunnel_pci.h"
/* enumeration & hot plug handling */
static void tb_scan_port(struct tb_port *port);
/**
* tb_scan_switch() - scan for and initialize downstream switches
*/
static void tb_scan_switch(struct tb_switch *sw)
{
int i;
for (i = 1; i <= sw->config.max_port_number; i++)
tb_scan_port(&sw->ports[i]);
}
/**
* tb_scan_port() - check for and initialize switches below port
*/
static void tb_scan_port(struct tb_port *port)
{
struct tb_switch *sw;
if (tb_is_upstream_port(port))
return;
if (port->config.type != TB_TYPE_PORT)
return;
if (port->dual_link_port && port->link_nr)
return; /*
* Downstream switch is reachable through two ports.
* Only scan on the primary port (link_nr == 0).
*/
if (tb_wait_for_port(port, false) <= 0)
return;
if (port->remote) {
tb_port_WARN(port, "port already has a remote!\n");
return;
}
sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port));
if (!sw)
return;
port->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = port;
tb_scan_switch(sw);
}
/**
* tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
*/
static void tb_free_invalid_tunnels(struct tb *tb)
{
struct tb_pci_tunnel *tunnel;
struct tb_pci_tunnel *n;
list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
if (tb_pci_is_invalid(tunnel)) {
tb_pci_deactivate(tunnel);
tb_pci_free(tunnel);
}
}
}
/**
* tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
*/
static void tb_free_unplugged_children(struct tb_switch *sw)
{
int i;
for (i = 1; i <= sw->config.max_port_number; i++) {
struct tb_port *port = &sw->ports[i];
if (tb_is_upstream_port(port))
continue;
if (!port->remote)
continue;
if (port->remote->sw->is_unplugged) {
tb_switch_free(port->remote->sw);
port->remote = NULL;
} else {
tb_free_unplugged_children(port->remote->sw);
}
}
}
/**
* tb_find_pci_up_port() - return the first PCIe up port on @sw or NULL
*/
static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw)
{
int i;
for (i = 1; i <= sw->config.max_port_number; i++)
if (sw->ports[i].config.type == TB_TYPE_PCIE_UP)
return &sw->ports[i];
return NULL;
}
/**
* tb_find_unused_down_port() - return the first inactive PCIe down port on @sw
*/
static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw)
{
int i;
int cap;
int res;
int data;
for (i = 1; i <= sw->config.max_port_number; i++) {
if (tb_is_upstream_port(&sw->ports[i]))
continue;
if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN)
continue;
cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE);
if (cap <= 0)
continue;
res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1);
if (res < 0)
continue;
if (data & 0x80000000)
continue;
return &sw->ports[i];
}
return NULL;
}
/**
* tb_activate_pcie_devices() - scan for and activate PCIe devices
*
* This method is somewhat ad hoc. For now it only supports one device
* per port and only devices at depth 1.
*/
static void tb_activate_pcie_devices(struct tb *tb)
{
int i;
int cap;
u32 data;
struct tb_switch *sw;
struct tb_port *up_port;
struct tb_port *down_port;
struct tb_pci_tunnel *tunnel;
/* scan for pcie devices at depth 1 */
for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
if (tb_is_upstream_port(&tb->root_switch->ports[i]))
continue;
if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT)
continue;
if (!tb->root_switch->ports[i].remote)
continue;
sw = tb->root_switch->ports[i].remote->sw;
up_port = tb_find_pci_up_port(sw);
if (!up_port) {
tb_sw_info(sw, "no PCIe devices found, aborting\n");
continue;
}
/* check whether port is already activated */
cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE);
if (cap <= 0)
continue;
if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1))
continue;
if (data & 0x80000000) {
tb_port_info(up_port,
"PCIe port already activated, aborting\n");
continue;
}
down_port = tb_find_unused_down_port(tb->root_switch);
if (!down_port) {
tb_port_info(up_port,
"All PCIe down ports are occupied, aborting\n");
continue;
}
tunnel = tb_pci_alloc(tb, up_port, down_port);
if (!tunnel) {
tb_port_info(up_port,
"PCIe tunnel allocation failed, aborting\n");
continue;
}
if (tb_pci_activate(tunnel)) {
tb_port_info(up_port,
"PCIe tunnel activation failed, aborting\n");
tb_pci_free(tunnel);
}
}
}
/* hotplug handling */
struct tb_hotplug_event {
struct work_struct work;
struct tb *tb;
u64 route;
u8 port;
bool unplug;
};
/**
* tb_handle_hotplug() - handle hotplug event
*
* Executes on tb->wq.
*/
static void tb_handle_hotplug(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
struct tb *tb = ev->tb;
struct tb_switch *sw;
struct tb_port *port;
mutex_lock(&tb->lock);
if (!tb->hotplug_active)
goto out; /* during init, suspend or shutdown */
sw = get_switch_at_route(tb->root_switch, ev->route);
if (!sw) {
tb_warn(tb,
"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
goto out;
}
if (ev->port > sw->config.max_port_number) {
tb_warn(tb,
"hotplug event from non existent port %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
goto out;
}
port = &sw->ports[ev->port];
if (tb_is_upstream_port(port)) {
tb_warn(tb,
"hotplug event for upstream port %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
goto out;
}
if (ev->unplug) {
if (port->remote) {
tb_port_info(port, "unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_switch_free(port->remote->sw);
port->remote = NULL;
} else {
tb_port_info(port,
"got unplug event for disconnected port, ignoring\n");
}
} else if (port->remote) {
tb_port_info(port,
"got plug event for connected port, ignoring\n");
} else {
tb_port_info(port, "hotplug: scanning\n");
tb_scan_port(port);
if (!port->remote) {
tb_port_info(port, "hotplug: no switch found\n");
} else if (port->remote->sw->config.depth > 1) {
tb_sw_warn(port->remote->sw,
"hotplug: chaining not supported\n");
} else {
tb_sw_info(port->remote->sw,
"hotplug: activating pcie devices\n");
tb_activate_pcie_devices(tb);
}
}
out:
mutex_unlock(&tb->lock);
kfree(ev);
}
/**
* tb_schedule_hotplug_handler() - callback function for the control channel
*
* Delegates to tb_handle_hotplug.
*/
static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
bool unplug)
{
struct tb *tb = data;
struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return;
INIT_WORK(&ev->work, tb_handle_hotplug);
ev->tb = tb;
ev->route = route;
ev->port = port;
ev->unplug = unplug;
queue_work(tb->wq, &ev->work);
}
/**
* thunderbolt_shutdown_and_free() - shutdown everything
*
* Free all switches and the config channel.
*
* Used in the error path of thunderbolt_alloc_and_start.
*/
void thunderbolt_shutdown_and_free(struct tb *tb)
{
struct tb_pci_tunnel *tunnel;
struct tb_pci_tunnel *n;
mutex_lock(&tb->lock);
/* tunnels are only present after everything has been initialized */
list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
tb_pci_deactivate(tunnel);
tb_pci_free(tunnel);
}
if (tb->root_switch)
tb_switch_free(tb->root_switch);
tb->root_switch = NULL;
if (tb->ctl) {
tb_ctl_stop(tb->ctl);
tb_ctl_free(tb->ctl);
}
tb->ctl = NULL;
tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
/* allow tb_handle_hotplug to acquire the lock */
mutex_unlock(&tb->lock);
if (tb->wq) {
flush_workqueue(tb->wq);
destroy_workqueue(tb->wq);
tb->wq = NULL;
}
mutex_destroy(&tb->lock);
kfree(tb);
}
/**
* thunderbolt_alloc_and_start() - setup the thunderbolt bus
*
* Allocates a tb_cfg control channel, initializes the root switch, enables
* plug events and activates pci devices.
*
* Return: Returns NULL on error.
*/
struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
{
struct tb *tb;
BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
tb = kzalloc(sizeof(*tb), GFP_KERNEL);
if (!tb)
return NULL;
tb->nhi = nhi;
mutex_init(&tb->lock);
mutex_lock(&tb->lock);
INIT_LIST_HEAD(&tb->tunnel_list);
tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
if (!tb->wq)
goto err_locked;
tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
if (!tb->ctl)
goto err_locked;
/*
* tb_schedule_hotplug_handler may be called as soon as the config
* channel is started. That's why we have to hold the lock here.
*/
tb_ctl_start(tb->ctl);
tb->root_switch = tb_switch_alloc(tb, 0);
if (!tb->root_switch)
goto err_locked;
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
tb_activate_pcie_devices(tb);
/* Allow tb_handle_hotplug to progress events */
tb->hotplug_active = true;
mutex_unlock(&tb->lock);
return tb;
err_locked:
mutex_unlock(&tb->lock);
thunderbolt_shutdown_and_free(tb);
return NULL;
}
void thunderbolt_suspend(struct tb *tb)
{
tb_info(tb, "suspending...\n");
mutex_lock(&tb->lock);
tb_switch_suspend(tb->root_switch);
tb_ctl_stop(tb->ctl);
tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
mutex_unlock(&tb->lock);
tb_info(tb, "suspend finished\n");
}
void thunderbolt_resume(struct tb *tb)
{
struct tb_pci_tunnel *tunnel, *n;
tb_info(tb, "resuming...\n");
mutex_lock(&tb->lock);
tb_ctl_start(tb->ctl);
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb, 0);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
tb_pci_restart(tunnel);
if (!list_empty(&tb->tunnel_list)) {
/*
* the pcie links need some time to get going.
* 100ms works for me...
*/
tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
}
/* Allow tb_handle_hotplug to progress events */
tb->hotplug_active = true;
mutex_unlock(&tb->lock);
tb_info(tb, "resume finished\n");
}

271
drivers/thunderbolt/tb.h Normal file
View file

@@ -0,0 +1,271 @@
/*
* Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#ifndef TB_H_
#define TB_H_
#include <linux/pci.h>
#include "tb_regs.h"
#include "ctl.h"
/**
* struct tb_switch - a thunderbolt switch
*/
struct tb_switch {
struct tb_regs_switch_header config;
struct tb_port *ports;
struct tb *tb;
u64 uid;
int cap_plug_events; /* offset, zero if not found */
bool is_unplugged; /* unplugged, will go away */
u8 *drom;
};
/**
* struct tb_port - a thunderbolt port, part of a tb_switch
*/
struct tb_port {
struct tb_regs_port_header config;
struct tb_switch *sw;
struct tb_port *remote; /* remote port, NULL if not connected */
int cap_phy; /* offset, zero if not found */
u8 port; /* port number on switch */
bool disabled; /* disabled by eeprom */
struct tb_port *dual_link_port;
u8 link_nr:1;
};
/**
* struct tb_path_hop - routing information for a tb_path
*
* Hop configuration is always done on the IN port of a switch.
* in_port and out_port have to be on the same switch. Packets arriving on
* in_port with "hop" = in_hop_index will be routed through out_port. The
* next hop to take (on out_port->remote) is determined by next_hop_index.
*
* in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
* port.
*/
struct tb_path_hop {
struct tb_port *in_port;
struct tb_port *out_port;
int in_hop_index;
int in_counter_index; /* write -1 to disable counters for this hop. */
int next_hop_index;
};
/**
* enum tb_path_port - path options mask
*/
enum tb_path_port {
TB_PATH_NONE = 0,
TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */
TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */
TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */
TB_PATH_ALL = 7,
};
/**
* struct tb_path - a unidirectional path between two ports
*
* A path consists of a number of hops (see tb_path_hop). To establish a PCIe
* tunnel two paths have to be created between the two PCIe ports.
*
*/
struct tb_path {
struct tb *tb;
int nfc_credits; /* non flow controlled credits */
enum tb_path_port ingress_shared_buffer;
enum tb_path_port egress_shared_buffer;
enum tb_path_port ingress_fc_enable;
enum tb_path_port egress_fc_enable;
int priority:3;
int weight:4;
bool drop_packages;
bool activated;
struct tb_path_hop *hops;
int path_length; /* number of hops */
};
/**
* struct tb - main thunderbolt bus structure
*/
struct tb {
struct mutex lock; /*
* Big lock. Must be held when accessing cfg or
* any struct tb_switch / struct tb_port.
*/
struct tb_nhi *nhi;
struct tb_ctl *ctl;
struct workqueue_struct *wq; /* ordered workqueue for plug events */
struct tb_switch *root_switch;
struct list_head tunnel_list; /* list of active PCIe tunnels */
bool hotplug_active; /*
* tb_handle_hotplug will stop progressing plug
* events and exit if this is not set (it needs to
* acquire the lock one more time). Used to drain
* wq after cfg has been paused.
*/
};
/* helper functions & macros */
/**
* tb_upstream_port() - return the upstream port of a switch
*
* Every switch has an upstream port (for the root switch it is the NHI).
*
* During switch alloc/init tb_upstream_port()->remote may be NULL, even for
* non root switches (on the NHI port remote is always NULL).
*
* Return: Returns the upstream port of the switch.
*/
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
{
return &sw->ports[sw->config.upstream_port_number];
}
static inline u64 tb_route(struct tb_switch *sw)
{
return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
}
static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
return tb_cfg_read(sw->tb->ctl,
buffer,
tb_route(sw),
0,
space,
offset,
length);
}
static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
return tb_cfg_write(sw->tb->ctl,
buffer,
tb_route(sw),
0,
space,
offset,
length);
}
static inline int tb_port_read(struct tb_port *port, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
return tb_cfg_read(port->sw->tb->ctl,
buffer,
tb_route(port->sw),
port->port,
space,
offset,
length);
}
static inline int tb_port_write(struct tb_port *port, void *buffer,
enum tb_cfg_space space, u32 offset, u32 length)
{
return tb_cfg_write(port->sw->tb->ctl,
buffer,
tb_route(port->sw),
port->port,
space,
offset,
length);
}
#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define __TB_SW_PRINT(level, sw, fmt, arg...) \
do { \
struct tb_switch *__sw = (sw); \
level(__sw->tb, "%llx: " fmt, \
tb_route(__sw), ## arg); \
} while (0)
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
do { \
struct tb_port *__port = (_port); \
level(__port->sw->tb, "%llx:%x: " fmt, \
tb_route(__port->sw), __port->port, ## arg); \
} while (0)
#define tb_port_WARN(port, fmt, arg...) \
__TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
#define tb_port_warn(port, fmt, arg...) \
__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
void thunderbolt_shutdown_and_free(struct tb *tb);
void thunderbolt_suspend(struct tb *tb);
void thunderbolt_resume(struct tb *tb);
struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
void tb_switch_free(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap);
struct tb_path *tb_path_alloc(struct tb *tb, int num_hops);
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
bool tb_path_is_invalid(struct tb_path *path);
int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
static inline int tb_route_length(u64 route)
{
return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}
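/*
 * Example: route 0x0102 occupies two 8-bit port entries, so
 * fls64(0x0102) == 9 and tb_route_length() returns (9 + 7) / 8 == 2.
 */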
static inline bool tb_is_upstream_port(struct tb_port *port)
{
return port == tb_upstream_port(port->sw);
}
/**
* tb_downstream_route() - get route to downstream switch
*
* Port must not be the upstream port (otherwise a loop is created).
*
* Return: Returns a route to the switch behind @port.
*/
static inline u64 tb_downstream_route(struct tb_port *port)
{
return tb_route(port->sw)
| ((u64) port->port << (port->sw->config.depth * 8));
}
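/*
 * Example: for port 3 of a depth-1 switch reached via route 0x02, the
 * downstream route is 0x02 | (3 << 8) == 0x0302.
 */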
#endif

213
drivers/thunderbolt/tb_regs.h Normal file
View file

@@ -0,0 +1,213 @@
/*
* Thunderbolt Cactus Ridge driver - Port/Switch config area registers
*
* Every thunderbolt device consists (logically) of a switch with multiple
* ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
* COUNTERS) which are used to configure the device.
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#ifndef _TB_REGS
#define _TB_REGS
#include <linux/types.h>
#define TB_ROUTE_SHIFT 8 /* number of bits in a port entry of a route */
/*
* TODO: should be 63? But we do not know how to receive frames larger than 256
* bytes at the frame level. (header + checksum = 16, 60*4 = 240)
*/
#define TB_MAX_CONFIG_RW_LENGTH 60
enum tb_cap {
TB_CAP_PHY = 0x0001,
TB_CAP_TIME1 = 0x0003,
TB_CAP_PCIE = 0x0004,
TB_CAP_I2C = 0x0005,
TB_CAP_PLUG_EVENTS = 0x0105, /* also EEPROM */
TB_CAP_TIME2 = 0x0305,
TB_CAP_IECS = 0x0405,
TB_CAP_LINK_CONTROLLER = 0x0605, /* also IECS */
};
enum tb_port_state {
TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */
TB_PORT_CONNECTING = 1, /* retry */
TB_PORT_UP = 2,
TB_PORT_UNPLUGGED = 7,
};
/* capability headers */
struct tb_cap_basic {
u8 next;
/* enum tb_cap cap:8; prevent "narrower than values of its type" */
u8 cap; /* if cap == 0x05 then we have an extended capability */
} __packed;
struct tb_cap_extended_short {
u8 next; /* if next and length are zero then we have a long cap */
enum tb_cap cap:16;
u8 length;
} __packed;
struct tb_cap_extended_long {
u8 zero1;
enum tb_cap cap:16;
u8 zero2;
u16 next;
u16 length;
} __packed;
/* capabilities */
struct tb_cap_link_controller {
struct tb_cap_extended_long cap_header;
u32 count:4; /* number of link controllers */
u32 unknown1:4;
u32 base_offset:8; /*
* offset (into this capability) of the configuration
* area of the first link controller
*/
u32 length:12; /* link controller configuration area length */
u32 unknown2:4; /* TODO check that length is correct */
} __packed;
struct tb_cap_phy {
struct tb_cap_basic cap_header;
u32 unknown1:16;
u32 unknown2:14;
bool disable:1;
u32 unknown3:11;
enum tb_port_state state:4;
u32 unknown4:2;
} __packed;
struct tb_eeprom_ctl {
bool clock:1; /* send pulse to transfer one bit */
bool access_low:1; /* set to 0 before access */
bool data_out:1; /* to eeprom */
bool data_in:1; /* from eeprom */
bool access_high:1; /* set to 1 before access */
bool not_present:1; /* should be 0 */
bool unknown1:1;
bool present:1; /* should be 1 */
u32 unknown2:24;
} __packed;
struct tb_cap_plug_events {
struct tb_cap_extended_short cap_header;
u32 __unknown1:2;
u32 plug_events:5;
u32 __unknown2:25;
u32 __unknown3;
u32 __unknown4;
struct tb_eeprom_ctl eeprom_ctl;
u32 __unknown5[7];
u32 drom_offset; /* 32 bit register, but eeprom addresses are 16 bit */
} __packed;
/* device headers */
/* Present on port 0 in TB_CFG_SWITCH at address zero. */
struct tb_regs_switch_header {
/* DWORD 0 */
u16 vendor_id;
u16 device_id;
/* DWORD 1 */
u32 first_cap_offset:8;
u32 upstream_port_number:6;
u32 max_port_number:6;
u32 depth:3;
u32 __unknown1:1;
u32 revision:8;
/* DWORD 2 */
u32 route_lo;
/* DWORD 3 */
u32 route_hi:31;
bool enabled:1;
/* DWORD 4 */
u32 plug_events_delay:8; /*
* RW, pause between plug events in
* milliseconds. Writing 0x00 is interpreted
* as 255ms.
*/
u32 __unknown4:16;
u32 thunderbolt_version:8;
} __packed;
enum tb_port_type {
TB_TYPE_INACTIVE = 0x000000,
TB_TYPE_PORT = 0x000001,
TB_TYPE_NHI = 0x000002,
/* TB_TYPE_ETHERNET = 0x020000, lower order bits are not known */
/* TB_TYPE_SATA = 0x080000, lower order bits are not known */
TB_TYPE_DP_HDMI_IN = 0x0e0101,
TB_TYPE_DP_HDMI_OUT = 0x0e0102,
TB_TYPE_PCIE_DOWN = 0x100101,
TB_TYPE_PCIE_UP = 0x100102,
/* TB_TYPE_USB = 0x200000, lower order bits are not known */
};
/* Present on every port in TB_CFG_PORT at address zero. */
struct tb_regs_port_header {
/* DWORD 0 */
u16 vendor_id;
u16 device_id;
/* DWORD 1 */
u32 first_cap_offset:8;
u32 max_counters:11;
u32 __unknown1:5;
u32 revision:8;
/* DWORD 2 */
enum tb_port_type type:24;
u32 thunderbolt_version:8;
/* DWORD 3 */
u32 __unknown2:20;
u32 port_number:6;
u32 __unknown3:6;
/* DWORD 4 */
u32 nfc_credits;
/* DWORD 5 */
u32 max_in_hop_id:11;
u32 max_out_hop_id:11;
u32 __unknown4:10;
/* DWORD 6 */
u32 __unknown5;
/* DWORD 7 */
u32 __unknown6;
} __packed;
/* Hop register from TB_CFG_HOPS. 8 bytes per entry. */
struct tb_regs_hop {
/* DWORD 0 */
u32 next_hop:11; /*
* hop to take after sending the packet through
* out_port (on the incoming port of the next switch)
*/
u32 out_port:6; /* next port of the path (on the same switch) */
u32 initial_credits:8;
u32 unknown1:6; /* set to zero */
bool enable:1;
/* DWORD 1 */
u32 weight:4;
u32 unknown2:4; /* set to zero */
u32 priority:3;
bool drop_packages:1;
u32 counter:11; /* index into TB_CFG_COUNTERS on this port */
bool counter_enable:1;
bool ingress_fc:1;
bool egress_fc:1;
bool ingress_shared_buffer:1;
bool egress_shared_buffer:1;
u32 unknown3:4; /* set to zero */
} __packed;
#endif

232
drivers/thunderbolt/tunnel_pci.c Normal file
View file

@@ -0,0 +1,232 @@
/*
* Thunderbolt Cactus Ridge driver - PCIe tunnel
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#include <linux/slab.h>
#include <linux/list.h>
#include "tunnel_pci.h"
#include "tb.h"
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
struct tb_pci_tunnel *__tunnel = (tunnel); \
level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \
tb_route(__tunnel->down_port->sw), \
__tunnel->down_port->port, \
tb_route(__tunnel->up_port->sw), \
__tunnel->up_port->port, \
## arg); \
} while (0)
#define tb_tunnel_WARN(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
static void tb_pci_init_path(struct tb_path *path)
{
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 3;
path->weight = 1;
path->drop_packages = 0;
path->nfc_credits = 0;
}
/**
* tb_pci_alloc() - allocate a pci tunnel
*
* Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
* TB_TYPE_PCIE_DOWN.
*
* Currently only paths consisting of two hops are supported (that is the
* ports must be on "adjacent" switches).
*
* The paths are hard-coded to use hop 8 (the only working hop id available on
* my thunderbolt devices). Therefore at most ONE path per device may be
* activated.
*
* Return: Returns a tb_pci_tunnel on success or NULL on failure.
*/
struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
struct tb_port *down)
{
struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
if (!tunnel)
goto err;
tunnel->tb = tb;
tunnel->down_port = down;
tunnel->up_port = up;
INIT_LIST_HEAD(&tunnel->list);
tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2);
if (!tunnel->path_to_up)
goto err;
tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2);
if (!tunnel->path_to_down)
goto err;
tb_pci_init_path(tunnel->path_to_up);
tb_pci_init_path(tunnel->path_to_down);
tunnel->path_to_up->hops[0].in_port = down;
tunnel->path_to_up->hops[0].in_hop_index = 8;
tunnel->path_to_up->hops[0].in_counter_index = -1;
tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote;
tunnel->path_to_up->hops[0].next_hop_index = 8;
tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw);
tunnel->path_to_up->hops[1].in_hop_index = 8;
tunnel->path_to_up->hops[1].in_counter_index = -1;
tunnel->path_to_up->hops[1].out_port = up;
tunnel->path_to_up->hops[1].next_hop_index = 8;
tunnel->path_to_down->hops[0].in_port = up;
tunnel->path_to_down->hops[0].in_hop_index = 8;
tunnel->path_to_down->hops[0].in_counter_index = -1;
tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw);
tunnel->path_to_down->hops[0].next_hop_index = 8;
tunnel->path_to_down->hops[1].in_port =
tb_upstream_port(up->sw)->remote;
tunnel->path_to_down->hops[1].in_hop_index = 8;
tunnel->path_to_down->hops[1].in_counter_index = -1;
tunnel->path_to_down->hops[1].out_port = down;
tunnel->path_to_down->hops[1].next_hop_index = 8;
return tunnel;
err:
if (tunnel) {
if (tunnel->path_to_down)
tb_path_free(tunnel->path_to_down);
if (tunnel->path_to_up)
tb_path_free(tunnel->path_to_up);
kfree(tunnel);
}
return NULL;
}
/**
* tb_pci_free() - free a tunnel
*
* The tunnel must have been deactivated.
*/
void tb_pci_free(struct tb_pci_tunnel *tunnel)
{
if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n");
return;
}
tb_path_free(tunnel->path_to_up);
tb_path_free(tunnel->path_to_down);
kfree(tunnel);
}
/**
* tb_pci_is_invalid() - check whether an activated tunnel is still valid
*/
bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel)
{
WARN_ON(!tunnel->path_to_up->activated);
WARN_ON(!tunnel->path_to_down->activated);
return tb_path_is_invalid(tunnel->path_to_up)
|| tb_path_is_invalid(tunnel->path_to_down);
}
/**
* tb_pci_port_active() - activate/deactivate PCI capability
*
* Return: Returns 0 on success or an error code on failure.
*/
static int tb_pci_port_active(struct tb_port *port, bool active)
{
u32 word = active ? 0x80000000 : 0x0;
int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE);
if (cap <= 0) {
tb_port_warn(port, "TB_CAP_PCIE not found: %d\n", cap);
return cap ? cap : -ENXIO;
}
return tb_port_write(port, &word, TB_CFG_PORT, cap, 1);
}
/**
* tb_pci_restart() - activate a tunnel after a hardware reset
*/
int tb_pci_restart(struct tb_pci_tunnel *tunnel)
{
int res;
tunnel->path_to_up->activated = false;
tunnel->path_to_down->activated = false;
tb_tunnel_info(tunnel, "activating\n");
res = tb_path_activate(tunnel->path_to_up);
if (res)
goto err;
res = tb_path_activate(tunnel->path_to_down);
if (res)
goto err;
res = tb_pci_port_active(tunnel->down_port, true);
if (res)
goto err;
res = tb_pci_port_active(tunnel->up_port, true);
if (res)
goto err;
return 0;
err:
tb_tunnel_warn(tunnel, "activation failed\n");
tb_pci_deactivate(tunnel);
return res;
}
/**
* tb_pci_activate() - activate a tunnel
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_pci_activate(struct tb_pci_tunnel *tunnel)
{
int res;
if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
tb_tunnel_WARN(tunnel,
"trying to activate an already activated tunnel\n");
return -EINVAL;
}
res = tb_pci_restart(tunnel);
if (res)
return res;
list_add(&tunnel->list, &tunnel->tb->tunnel_list);
return 0;
}
/**
* tb_pci_deactivate() - deactivate a tunnel
*/
void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
{
tb_tunnel_info(tunnel, "deactivating\n");
/*
* TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up
* port. Seems to have no effect?
*/
tb_pci_port_active(tunnel->up_port, false);
tb_pci_port_active(tunnel->down_port, false);
if (tunnel->path_to_down->activated)
tb_path_deactivate(tunnel->path_to_down);
if (tunnel->path_to_up->activated)
tb_path_deactivate(tunnel->path_to_up);
list_del_init(&tunnel->list);
}

30
drivers/thunderbolt/tunnel_pci.h Normal file
View file

@@ -0,0 +1,30 @@
/*
* Thunderbolt Cactus Ridge driver - PCIe tunnel
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
*/
#ifndef TB_PCI_H_
#define TB_PCI_H_
#include "tb.h"
struct tb_pci_tunnel {
struct tb *tb;
struct tb_port *up_port;
struct tb_port *down_port;
struct tb_path *path_to_up;
struct tb_path *path_to_down;
struct list_head list;
};
struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up,
struct tb_port *down);
void tb_pci_free(struct tb_pci_tunnel *tunnel);
int tb_pci_activate(struct tb_pci_tunnel *tunnel);
int tb_pci_restart(struct tb_pci_tunnel *tunnel);
void tb_pci_deactivate(struct tb_pci_tunnel *tunnel);
bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel);
#endif