mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 09:08:05 -04:00
Fixed MTP to work with TWRP
This commit is contained in: commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
56
drivers/net/caif/Kconfig
Normal file
@@ -0,0 +1,56 @@
#
# CAIF physical drivers
#

comment "CAIF transport drivers"

config CAIF_TTY
        tristate "CAIF TTY transport driver"
        depends on CAIF && TTY
        default n
        ---help---
          The CAIF TTY transport driver is a Line Discipline (ldisc)
          identified as N_CAIF. When this ldisc is opened from user space
          it will redirect the TTY's traffic into the CAIF stack.

config CAIF_SPI_SLAVE
        tristate "CAIF SPI transport driver for slave interface"
        depends on CAIF && HAS_DMA
        default n
        ---help---
          The CAIF link layer SPI protocol driver for the slave SPI
          interface. This driver implements a platform driver to
          accommodate a platform-specific SPI device. A sample CAIF SPI
          platform device is provided in
          Documentation/networking/caif/spi_porting.txt

config CAIF_SPI_SYNC
        bool "Next command and length in start of frame"
        depends on CAIF_SPI_SLAVE
        default n
        ---help---
          Putting the next command and length at the start of the frame
          can help to synchronize to the next transfer in case of
          over- or under-runs. This option also needs to be enabled on
          the modem.

config CAIF_HSI
        tristate "CAIF HSI transport driver"
        depends on CAIF
        default n
        ---help---
          The CAIF low-level driver for CAIF over HSI.
          Be aware that if you enable this then you also need to
          enable a low-level HSI driver.

config CAIF_VIRTIO
        tristate "CAIF virtio transport driver"
        depends on CAIF && HAS_DMA
        select VHOST_RING
        select VIRTIO
        select GENERIC_ALLOCATOR
        default n
        ---help---
          The CAIF driver for CAIF over Virtio.

if CAIF_VIRTIO
source "drivers/vhost/Kconfig"
endif
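
Note: the CAIF_TTY help above says the line discipline redirects a TTY's
traffic into the CAIF stack once it is opened from user space. A minimal
user-space sketch of that attach step follows; it assumes the mainline
ldisc number (N_CAIF is 20 in <linux/tty.h>) and uses /dev/ttyS1 as a
placeholder for whichever UART is actually wired to the modem. The
driver's ldisc_open() requires CAP_SYS_ADMIN or CAP_SYS_TTY_CONFIG, so
this must run privileged.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef N_CAIF
#define N_CAIF 20       /* ldisc number from mainline <linux/tty.h> */
#endif

int main(void)
{
        int ldisc = N_CAIF;
        /* Placeholder path; use the UART actually wired to the modem. */
        int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* TIOCSETD triggers ldisc_open() in caif_serial.c below, which
         * registers a "cf<ttyname>" CAIF network device.
         */
        if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
                perror("TIOCSETD");
                return 1;
        }
        pause();        /* keep the fd open; closing it detaches the ldisc */
        return 0;
}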
14  drivers/net/caif/Makefile  Normal file
@@ -0,0 +1,14 @@
ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG

# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o

# SPI slave physical interfaces module
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o

# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o

# Virtio interface
obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
1482  drivers/net/caif/caif_hsi.c  Normal file
File diff suppressed because it is too large
475  drivers/net/caif/caif_serial.c  Normal file
@@ -0,0 +1,475 @@
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);

#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1 /* Bit 1 = 0x02 */
#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096

static DEFINE_SPINLOCK(ser_lock);
static LIST_HEAD(ser_list);
static LIST_HEAD(ser_release_list);

static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");

static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");

static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");

static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, S_IRUGO);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");

static struct dentry *debugfsdir;

static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);

struct ser_device {
        struct caif_dev_common common;
        struct list_head node;
        struct net_device *dev;
        struct sk_buff_head head;
        struct tty_struct *tty;
        bool tx_started;
        unsigned long state;
        char *tty_name;
#ifdef CONFIG_DEBUG_FS
        struct dentry *debugfs_tty_dir;
        struct debugfs_blob_wrapper tx_blob;
        struct debugfs_blob_wrapper rx_blob;
        u8 rx_data[128];
        u8 tx_data[128];
        u8 tty_status;
#endif
};

static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);
#ifdef CONFIG_DEBUG_FS
static inline void update_tty_status(struct ser_device *ser)
{
        ser->tty_status =
                ser->tty->stopped << 5 |
                ser->tty->flow_stopped << 3 |
                ser->tty->packet << 2 |
                ser->tty->port->low_latency << 1;
}

static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
        ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);
        if (!IS_ERR(ser->debugfs_tty_dir)) {
                debugfs_create_blob("last_tx_msg", S_IRUSR,
                                    ser->debugfs_tty_dir, &ser->tx_blob);

                debugfs_create_blob("last_rx_msg", S_IRUSR,
                                    ser->debugfs_tty_dir, &ser->rx_blob);

                debugfs_create_x32("ser_state", S_IRUSR,
                                   ser->debugfs_tty_dir, (u32 *)&ser->state);

                debugfs_create_x8("tty_status", S_IRUSR,
                                  ser->debugfs_tty_dir, &ser->tty_status);
        }
        ser->tx_blob.data = ser->tx_data;
        ser->tx_blob.size = 0;
        ser->rx_blob.data = ser->rx_data;
        ser->rx_blob.size = 0;
}

static inline void debugfs_deinit(struct ser_device *ser)
{
        debugfs_remove_recursive(ser->debugfs_tty_dir);
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
        if (size > sizeof(ser->rx_data))
                size = sizeof(ser->rx_data);
        memcpy(ser->rx_data, data, size);
        ser->rx_blob.data = ser->rx_data;
        ser->rx_blob.size = size;
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
        if (size > sizeof(ser->tx_data))
                size = sizeof(ser->tx_data);
        memcpy(ser->tx_data, data, size);
        ser->tx_blob.data = ser->tx_data;
        ser->tx_blob.size = size;
}
#else
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}

static inline void debugfs_deinit(struct ser_device *ser)
{
}

static inline void update_tty_status(struct ser_device *ser)
{
}

static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}

static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}
#endif

static void ldisc_receive(struct tty_struct *tty, const u8 *data,
                          char *flags, int count)
{
        struct sk_buff *skb = NULL;
        struct ser_device *ser;
        int ret;
        u8 *p;

        ser = tty->disc_data;

        /*
         * NOTE: flags may contain information about break or overrun.
         * This is not yet handled.
         */

        /*
         * Workaround for garbage at start of transmission,
         * only enable if STX handling is not enabled.
         */
        if (!ser->common.use_stx && !ser->tx_started) {
                dev_info(&ser->dev->dev,
                         "Bytes received before initial transmission - bytes discarded.\n");
                return;
        }

        BUG_ON(ser->dev == NULL);

        /* Get a suitable caif packet and copy in data. */
        skb = netdev_alloc_skb(ser->dev, count + 1);
        if (skb == NULL)
                return;
        p = skb_put(skb, count);
        memcpy(p, data, count);

        skb->protocol = htons(ETH_P_CAIF);
        skb_reset_mac_header(skb);
        debugfs_rx(ser, data, count);
        /* Push received packet up the stack. */
        ret = netif_rx_ni(skb);
        if (!ret) {
                ser->dev->stats.rx_packets++;
                ser->dev->stats.rx_bytes += count;
        } else
                ++ser->dev->stats.rx_dropped;
        update_tty_status(ser);
}

static int handle_tx(struct ser_device *ser)
{
        struct tty_struct *tty;
        struct sk_buff *skb;
        int tty_wr, len, room;

        tty = ser->tty;
        ser->tx_started = true;

        /* Enter critical section */
        if (test_and_set_bit(CAIF_SENDING, &ser->state))
                return 0;

        /* skb_peek is safe because handle_tx is called after skb_queue_tail */
        while ((skb = skb_peek(&ser->head)) != NULL) {

                /* Make sure you don't write too much */
                len = skb->len;
                room = tty_write_room(tty);
                if (!room)
                        break;
                if (room > ser_write_chunk)
                        room = ser_write_chunk;
                if (len > room)
                        len = room;

                /* Write to tty or loopback */
                if (!ser_loop) {
                        tty_wr = tty->ops->write(tty, skb->data, len);
                        update_tty_status(ser);
                } else {
                        tty_wr = len;
                        ldisc_receive(tty, skb->data, NULL, len);
                }
                ser->dev->stats.tx_packets++;
                ser->dev->stats.tx_bytes += tty_wr;

                /* Error on TTY ?! */
                if (tty_wr < 0)
                        goto error;
                /* Reduce buffer written, and discard if empty */
                skb_pull(skb, tty_wr);
                if (skb->len == 0) {
                        struct sk_buff *tmp = skb_dequeue(&ser->head);
                        WARN_ON(tmp != skb);
                        if (in_interrupt())
                                dev_kfree_skb_irq(skb);
                        else
                                kfree_skb(skb);
                }
        }
        /* Send flow on when the queue has drained below the low watermark */
        if (ser->head.qlen <= SEND_QUEUE_LOW &&
            test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
                ser->common.flowctrl(ser->dev, ON);
        clear_bit(CAIF_SENDING, &ser->state);
        return 0;
error:
        clear_bit(CAIF_SENDING, &ser->state);
        return tty_wr;
}

static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ser_device *ser;

        BUG_ON(dev == NULL);
        ser = netdev_priv(dev);

        /* Send flow off once, on high water mark */
        if (ser->head.qlen > SEND_QUEUE_HIGH &&
            !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
            ser->common.flowctrl != NULL)
                ser->common.flowctrl(ser->dev, OFF);

        skb_queue_tail(&ser->head, skb);
        return handle_tx(ser);
}

static void ldisc_tx_wakeup(struct tty_struct *tty)
{
        struct ser_device *ser;

        ser = tty->disc_data;
        BUG_ON(ser == NULL);
        WARN_ON(ser->tty != tty);
        handle_tx(ser);
}

static void ser_release(struct work_struct *work)
{
        struct list_head list;
        struct ser_device *ser, *tmp;

        spin_lock(&ser_lock);
        list_replace_init(&ser_release_list, &list);
        spin_unlock(&ser_lock);

        if (!list_empty(&list)) {
                rtnl_lock();
                list_for_each_entry_safe(ser, tmp, &list, node) {
                        dev_close(ser->dev);
                        unregister_netdevice(ser->dev);
                        debugfs_deinit(ser);
                }
                rtnl_unlock();
        }
}

static DECLARE_WORK(ser_release_work, ser_release);

static int ldisc_open(struct tty_struct *tty)
{
        struct ser_device *ser;
        struct net_device *dev;
        char name[64];
        int result;

        /* No write no play */
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
                return -EPERM;

        /* release devices to avoid name collision */
        ser_release(NULL);

        result = snprintf(name, sizeof(name), "cf%s", tty->name);
        if (result >= IFNAMSIZ)
                return -EINVAL;
        dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
                           caifdev_setup);
        if (!dev)
                return -ENOMEM;

        ser = netdev_priv(dev);
        ser->tty = tty_kref_get(tty);
        ser->dev = dev;
        debugfs_init(ser, tty);
        tty->receive_room = N_TTY_BUF_SIZE;
        tty->disc_data = ser;
        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        rtnl_lock();
        result = register_netdevice(dev);
        if (result) {
                rtnl_unlock();
                free_netdev(dev);
                return -ENODEV;
        }

        spin_lock(&ser_lock);
        list_add(&ser->node, &ser_list);
        spin_unlock(&ser_lock);
        rtnl_unlock();
        netif_stop_queue(dev);
        update_tty_status(ser);
        return 0;
}

static void ldisc_close(struct tty_struct *tty)
{
        struct ser_device *ser = tty->disc_data;

        tty_kref_put(ser->tty);

        spin_lock(&ser_lock);
        list_move(&ser->node, &ser_release_list);
        spin_unlock(&ser_lock);
        schedule_work(&ser_release_work);
}

/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
        .owner = THIS_MODULE,
        .magic = TTY_LDISC_MAGIC,
        .name = "n_caif",
        .open = ldisc_open,
        .close = ldisc_close,
        .receive_buf = ldisc_receive,
        .write_wakeup = ldisc_tx_wakeup
};

static int register_ldisc(void)
{
        int result;

        result = tty_register_ldisc(N_CAIF, &caif_ldisc);
        if (result < 0) {
                pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
                       result);
                return result;
        }
        return result;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open = caif_net_open,
        .ndo_stop = caif_net_close,
        .ndo_start_xmit = caif_xmit
};

static void caifdev_setup(struct net_device *dev)
{
        struct ser_device *serdev = netdev_priv(dev);

        dev->features = 0;
        dev->netdev_ops = &netdev_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
        dev->mtu = CAIF_MAX_MTU;
        dev->tx_queue_len = 0;
        dev->destructor = free_netdev;
        skb_queue_head_init(&serdev->head);
        serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
        serdev->common.use_frag = true;
        serdev->common.use_stx = ser_use_stx;
        serdev->common.use_fcs = ser_use_fcs;
        serdev->dev = dev;
}

static int caif_net_open(struct net_device *dev)
{
        netif_wake_queue(dev);
        return 0;
}

static int caif_net_close(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}

static int __init caif_ser_init(void)
{
        int ret;

        ret = register_ldisc();
        debugfsdir = debugfs_create_dir("caif_serial", NULL);
        return ret;
}

static void __exit caif_ser_exit(void)
{
        spin_lock(&ser_lock);
        list_splice(&ser_list, &ser_release_list);
        spin_unlock(&ser_lock);
        ser_release(NULL);
        cancel_work_sync(&ser_release_work);
        tty_unregister_ldisc(N_CAIF);
        debugfs_remove_recursive(debugfsdir);
}

module_init(caif_ser_init);
module_exit(caif_ser_exit);
871  drivers/net/caif/caif_spi.c  Normal file
@@ -0,0 +1,871 @@
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Daniel Martensson
 * License terms: GNU General Public License (GPL) version 2.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/if_arp.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_spi.h>

#ifndef CONFIG_CAIF_SPI_SYNC
#define FLAVOR "Flavour: Vanilla.\n"
#else
#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
#endif /* CONFIG_CAIF_SPI_SYNC */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson");
MODULE_DESCRIPTION("CAIF SPI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 : (((pow)-((x)&((pow)-1)))))
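/*
 * Worked example (illustrative, not in the original source): with an
 * 8-byte alignment, PAD_POW2(5, 8) = 8 - (5 & 7) = 3 padding bytes,
 * while PAD_POW2(16, 8) = 0 because 16 & 7 == 0. The second argument
 * must be a power of two for the mask arithmetic to hold.
 */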

static bool spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");

/* SPI frame alignment. */
module_param(spi_frm_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");

/*
 * SPI padding options.
 * Warning: must be a power of 2 (& operation used) and cannot be zero!
 */
module_param(spi_up_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");

module_param(spi_up_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");

module_param(spi_down_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");

module_param(spi_down_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");

#ifdef CONFIG_ARM
#define BYTE_HEX_FMT "%02X"
#else
#define BYTE_HEX_FMT "%02hhX"
#endif

#define SPI_MAX_PAYLOAD_SIZE 4096
/*
 * Threshold values for the SPI packet queue. Flow control will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * deasserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK 100
#define HIGH_WATER_MARK (LOW_WATER_MARK*5)

#ifdef CONFIG_UML

/*
 * We sometimes use UML for debugging, but it cannot handle
 * dma_alloc_coherent so we have to wrap it.
 */
static inline void *dma_alloc(dma_addr_t *daddr)
{
        return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
}

static inline void dma_free(void *cpu_addr, dma_addr_t handle)
{
        kfree(cpu_addr);
}

#else

static inline void *dma_alloc(dma_addr_t *daddr)
{
        return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr,
                                  GFP_KERNEL);
}

static inline void dma_free(void *cpu_addr, dma_addr_t handle)
{
        dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle);
}
#endif /* CONFIG_UML */

#ifdef CONFIG_DEBUG_FS

#define DEBUGFS_BUF_SIZE 4096

static struct dentry *dbgfs_root;

static inline void driver_debugfs_create(void)
{
        dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
}

static inline void driver_debugfs_remove(void)
{
        debugfs_remove(dbgfs_root);
}

static inline void dev_debugfs_rem(struct cfspi *cfspi)
{
        debugfs_remove(cfspi->dbgfs_frame);
        debugfs_remove(cfspi->dbgfs_state);
        debugfs_remove(cfspi->dbgfs_dir);
}

static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char *buf;
        int len = 0;
        ssize_t size;
        struct cfspi *cfspi = file->private_data;

        buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                return 0;

        /* Print out debug information. */
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "CAIF SPI debug information:\n");

        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);

        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "STATE: %d\n", cfspi->dbg_state);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Previous CMD: 0x%x\n", cfspi->pcmd);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Current CMD: 0x%x\n", cfspi->cmd);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Previous TX len: %d\n", cfspi->tx_ppck_len);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Previous RX len: %d\n", cfspi->rx_ppck_len);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Current TX len: %d\n", cfspi->tx_cpck_len);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Current RX len: %d\n", cfspi->rx_cpck_len);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Next TX len: %d\n", cfspi->tx_npck_len);
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Next RX len: %d\n", cfspi->rx_npck_len);

        if (len > DEBUGFS_BUF_SIZE)
                len = DEBUGFS_BUF_SIZE;

        size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);

        return size;
}

static ssize_t print_frame(char *buf, size_t size, char *frm,
                           size_t count, size_t cut)
{
        int len = 0;
        int i;
        for (i = 0; i < count; i++) {
                len += snprintf((buf + len), (size - len),
                                "[0x" BYTE_HEX_FMT "]", frm[i]);
                if ((i == cut) && (count > (cut * 2))) {
                        /* Fast forward. */
                        i = count - cut;
                        len += snprintf((buf + len), (size - len),
                                        "--- %u bytes skipped ---\n",
                                        (unsigned int)(count - (cut * 2)));
                }

                if ((!(i % 10)) && i) {
                        len += snprintf((buf + len), (size - len), "\n");
                }
        }
        len += snprintf((buf + len), (size - len), "\n");
        return len;
}

static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        char *buf;
        int len = 0;
        ssize_t size;
        struct cfspi *cfspi;

        cfspi = file->private_data;
        buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
        if (!buf)
                return 0;

        /* Print out debug information. */
        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Current frame:\n");

        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Tx data (Len: %d):\n", cfspi->tx_cpck_len);

        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
                           cfspi->xfer.va_tx[0],
                           (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);

        len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
                        "Rx data (Len: %d):\n", cfspi->rx_cpck_len);

        len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
                           cfspi->xfer.va_rx,
                           (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);

        size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);

        return size;
}

static const struct file_operations dbgfs_state_fops = {
        .open = simple_open,
        .read = dbgfs_state,
        .owner = THIS_MODULE
};

static const struct file_operations dbgfs_frame_fops = {
        .open = simple_open,
        .read = dbgfs_frame,
        .owner = THIS_MODULE
};

static inline void dev_debugfs_add(struct cfspi *cfspi)
{
        cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
        cfspi->dbgfs_state = debugfs_create_file("state", S_IRUGO,
                                                 cfspi->dbgfs_dir, cfspi,
                                                 &dbgfs_state_fops);
        cfspi->dbgfs_frame = debugfs_create_file("frame", S_IRUGO,
                                                 cfspi->dbgfs_dir, cfspi,
                                                 &dbgfs_frame_fops);
}

inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
{
        cfspi->dbg_state = state;
};
#else

static inline void driver_debugfs_create(void)
{
}

static inline void driver_debugfs_remove(void)
{
}

static inline void dev_debugfs_add(struct cfspi *cfspi)
{
}

static inline void dev_debugfs_rem(struct cfspi *cfspi)
{
}

inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
{
}
#endif /* CONFIG_DEBUG_FS */

static LIST_HEAD(cfspi_list);
static spinlock_t cfspi_list_lock;

/* SPI uplink head alignment. */
static ssize_t show_up_head_align(struct device_driver *driver, char *buf)
{
        return sprintf(buf, "%d\n", spi_up_head_align);
}

static DRIVER_ATTR(up_head_align, S_IRUSR, show_up_head_align, NULL);

/* SPI uplink tail alignment. */
static ssize_t show_up_tail_align(struct device_driver *driver, char *buf)
{
        return sprintf(buf, "%d\n", spi_up_tail_align);
}

static DRIVER_ATTR(up_tail_align, S_IRUSR, show_up_tail_align, NULL);

/* SPI downlink head alignment. */
static ssize_t show_down_head_align(struct device_driver *driver, char *buf)
{
        return sprintf(buf, "%d\n", spi_down_head_align);
}

static DRIVER_ATTR(down_head_align, S_IRUSR, show_down_head_align, NULL);

/* SPI downlink tail alignment. */
static ssize_t show_down_tail_align(struct device_driver *driver, char *buf)
{
        return sprintf(buf, "%d\n", spi_down_tail_align);
}

static DRIVER_ATTR(down_tail_align, S_IRUSR, show_down_tail_align, NULL);

/* SPI frame alignment. */
static ssize_t show_frame_align(struct device_driver *driver, char *buf)
{
        return sprintf(buf, "%d\n", spi_frm_align);
}

static DRIVER_ATTR(frame_align, S_IRUSR, show_frame_align, NULL);

int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
{
        u8 *dst = buf;
        caif_assert(buf);

        if (cfspi->slave && !cfspi->slave_talked)
                cfspi->slave_talked = true;

        do {
                struct sk_buff *skb;
                struct caif_payload_info *info;
                int spad = 0;
                int epad;

                skb = skb_dequeue(&cfspi->chead);
                if (!skb)
                        break;

                /*
                 * Calculate length of frame including SPI padding.
                 * The payload position is found in the control buffer.
                 */
                info = (struct caif_payload_info *)&skb->cb;

                /*
                 * Compute head offset i.e. number of bytes to add to
                 * get the start of the payload aligned.
                 */
                if (spi_up_head_align > 1) {
                        spad = 1 + PAD_POW2((info->hdr_len + 1),
                                            spi_up_head_align);
                        *dst = (u8)(spad - 1);
                        dst += spad;
                }
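                /*
                 * The byte written above encodes the pad count: the
                 * receive side (cfspi_rxfrm below) recovers the same
                 * head offset with "spad = 1 + *src" and skips it.
                 */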

                /* Copy in CAIF frame. */
                skb_copy_bits(skb, 0, dst, skb->len);
                dst += skb->len;
                cfspi->ndev->stats.tx_packets++;
                cfspi->ndev->stats.tx_bytes += skb->len;

                /*
                 * Compute tail offset i.e. number of bytes to add to
                 * get the complete CAIF frame aligned.
                 */
                epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
                dst += epad;

                dev_kfree_skb(skb);

        } while ((dst - buf) < len);

        return dst - buf;
}

int cfspi_xmitlen(struct cfspi *cfspi)
{
        struct sk_buff *skb = NULL;
        int frm_len = 0;
        int pkts = 0;

        /*
         * Decommit previously committed frames.
         * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
         */
        while (skb_peek(&cfspi->chead)) {
                skb = skb_dequeue_tail(&cfspi->chead);
                skb_queue_head(&cfspi->qhead, skb);
        }

        do {
                struct caif_payload_info *info = NULL;
                int spad = 0;
                int epad = 0;

                skb = skb_dequeue(&cfspi->qhead);
                if (!skb)
                        break;

                /*
                 * Calculate length of frame including SPI padding.
                 * The payload position is found in the control buffer.
                 */
                info = (struct caif_payload_info *)&skb->cb;

                /*
                 * Compute head offset i.e. number of bytes to add to
                 * get the start of the payload aligned.
                 */
                if (spi_up_head_align > 1)
                        spad = 1 + PAD_POW2((info->hdr_len + 1),
                                            spi_up_head_align);

                /*
                 * Compute tail offset i.e. number of bytes to add to
                 * get the complete CAIF frame aligned.
                 */
                epad = PAD_POW2((skb->len + spad), spi_up_tail_align);

                if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
                        skb_queue_tail(&cfspi->chead, skb);
                        pkts++;
                        frm_len += skb->len + spad + epad;
                } else {
                        /* Put back packet. */
                        skb_queue_head(&cfspi->qhead, skb);
                        break;
                }
        } while (pkts <= CAIF_MAX_SPI_PKTS);

        /*
         * Send flow on if flow off was previously sent and the queue
         * has now dropped below the low water mark.
         */
        if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
            cfspi->cfdev.flowctrl) {
                cfspi->flow_off_sent = 0;
                cfspi->cfdev.flowctrl(cfspi->ndev, 1);
        }

        return frm_len;
}

static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
{
        struct cfspi *cfspi = (struct cfspi *)ifc->priv;

        /*
         * The slave device is the master on the link. Interrupts before the
         * slave has transmitted are considered spurious.
         */
        if (cfspi->slave && !cfspi->slave_talked) {
                printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
                return;
        }

        if (!in_interrupt())
                spin_lock(&cfspi->lock);
        if (assert) {
                set_bit(SPI_SS_ON, &cfspi->state);
                set_bit(SPI_XFER, &cfspi->state);
        } else {
                set_bit(SPI_SS_OFF, &cfspi->state);
        }
        if (!in_interrupt())
                spin_unlock(&cfspi->lock);

        /* Wake up the xfer thread. */
        if (assert)
                wake_up_interruptible(&cfspi->wait);
}

static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
{
        struct cfspi *cfspi = (struct cfspi *)ifc->priv;

        /* Transfer done, complete work queue */
        complete(&cfspi->comp);
}

static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct cfspi *cfspi = NULL;
        unsigned long flags;
        if (!dev)
                return -EINVAL;

        cfspi = netdev_priv(dev);

        skb_queue_tail(&cfspi->qhead, skb);

        spin_lock_irqsave(&cfspi->lock, flags);
        if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
                /* Wake up xfer thread. */
                wake_up_interruptible(&cfspi->wait);
        }
        spin_unlock_irqrestore(&cfspi->lock, flags);

        /* Send flow off if the number of queued packets is above the
         * high water mark.
         */
        if (!cfspi->flow_off_sent &&
            cfspi->qhead.qlen > cfspi->qd_high_mark &&
            cfspi->cfdev.flowctrl) {
                cfspi->flow_off_sent = 1;
                cfspi->cfdev.flowctrl(cfspi->ndev, 0);
        }

        return 0;
}

int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
{
        u8 *src = buf;

        caif_assert(buf != NULL);

        do {
                int res;
                struct sk_buff *skb = NULL;
                int spad = 0;
                int epad = 0;
                u8 *dst = NULL;
                int pkt_len = 0;

                /*
                 * Compute head offset i.e. number of bytes added to
                 * get the start of the payload aligned.
                 */
                if (spi_down_head_align > 1) {
                        spad = 1 + *src;
                        src += spad;
                }

                /* Read length of CAIF frame (little endian). */
                pkt_len = *src;
                pkt_len |= ((*(src + 1)) << 8) & 0xFF00;
                pkt_len += 2;   /* Add FCS fields. */
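                /*
                 * Example: src[0] = 0x34 and src[1] = 0x12 decode to a
                 * CAIF frame length of 0x1234, giving pkt_len = 0x1236
                 * once the two FCS bytes are added.
                 */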

                /* Get a suitable caif packet and copy in data. */
                skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
                caif_assert(skb != NULL);

                dst = skb_put(skb, pkt_len);
                memcpy(dst, src, pkt_len);
                src += pkt_len;

                skb->protocol = htons(ETH_P_CAIF);
                skb_reset_mac_header(skb);

                /*
                 * Push received packet up the stack.
                 */
                if (!spi_loop)
                        res = netif_rx_ni(skb);
                else
                        res = cfspi_xmit(skb, cfspi->ndev);

                if (!res) {
                        cfspi->ndev->stats.rx_packets++;
                        cfspi->ndev->stats.rx_bytes += pkt_len;
                } else
                        cfspi->ndev->stats.rx_dropped++;

                /*
                 * Compute tail offset i.e. number of bytes added to
                 * get the complete CAIF frame aligned.
                 */
                epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
                src += epad;
        } while ((src - buf) < len);

        return src - buf;
}

static int cfspi_open(struct net_device *dev)
{
        netif_wake_queue(dev);
        return 0;
}

static int cfspi_close(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}

static int cfspi_init(struct net_device *dev)
{
        int res = 0;
        struct cfspi *cfspi = netdev_priv(dev);

        /* Set flow info. */
        cfspi->flow_off_sent = 0;
        cfspi->qd_low_mark = LOW_WATER_MARK;
        cfspi->qd_high_mark = HIGH_WATER_MARK;

        /* Set slave info. */
        if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
                cfspi->slave = true;
                cfspi->slave_talked = false;
        } else {
                cfspi->slave = false;
                cfspi->slave_talked = false;
        }

        /* Allocate DMA buffers. */
        cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
        if (!cfspi->xfer.va_tx[0]) {
                res = -ENODEV;
                goto err_dma_alloc_tx_0;
        }

        cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
        if (!cfspi->xfer.va_rx) {
                res = -ENODEV;
                goto err_dma_alloc_rx;
        }

        /* Initialize the work queue. */
        INIT_WORK(&cfspi->work, cfspi_xfer);

        /* Initialize spin locks. */
        spin_lock_init(&cfspi->lock);

        /* Initialize flow control state. */
        cfspi->flow_stop = false;

        /* Initialize wait queue. */
        init_waitqueue_head(&cfspi->wait);

        /* Create work thread. */
        cfspi->wq = create_singlethread_workqueue(dev->name);
        if (!cfspi->wq) {
                printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
                res = -ENODEV;
                goto err_create_wq;
        }

        /* Initialize the completion used to signal transfer done. */
        init_completion(&cfspi->comp);

        /* Create debugfs entries. */
        dev_debugfs_add(cfspi);

        /* Set up the ifc. */
        cfspi->ifc.ss_cb = cfspi_ss_cb;
        cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
        cfspi->ifc.priv = cfspi;

        /* Add CAIF SPI device to list. */
        spin_lock(&cfspi_list_lock);
        list_add_tail(&cfspi->list, &cfspi_list);
        spin_unlock(&cfspi_list_lock);

        /* Schedule the work queue. */
        queue_work(cfspi->wq, &cfspi->work);

        return 0;

 err_create_wq:
        dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
 err_dma_alloc_rx:
        dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
 err_dma_alloc_tx_0:
        return res;
}

static void cfspi_uninit(struct net_device *dev)
{
        struct cfspi *cfspi = netdev_priv(dev);

        /* Remove from list. */
        spin_lock(&cfspi_list_lock);
        list_del(&cfspi->list);
        spin_unlock(&cfspi_list_lock);

        cfspi->ndev = NULL;
        /* Free DMA buffers. */
        dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
        dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
        set_bit(SPI_TERMINATE, &cfspi->state);
        wake_up_interruptible(&cfspi->wait);
        destroy_workqueue(cfspi->wq);
        /* Destroy debugfs directory and files. */
        dev_debugfs_rem(cfspi);
        return;
}

static const struct net_device_ops cfspi_ops = {
        .ndo_open = cfspi_open,
        .ndo_stop = cfspi_close,
        .ndo_init = cfspi_init,
        .ndo_uninit = cfspi_uninit,
        .ndo_start_xmit = cfspi_xmit
};

static void cfspi_setup(struct net_device *dev)
{
        struct cfspi *cfspi = netdev_priv(dev);
        dev->features = 0;
        dev->netdev_ops = &cfspi_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_NOARP | IFF_POINTOPOINT;
        dev->tx_queue_len = 0;
        dev->mtu = SPI_MAX_PAYLOAD_SIZE;
        dev->destructor = free_netdev;
        skb_queue_head_init(&cfspi->qhead);
        skb_queue_head_init(&cfspi->chead);
        cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
        cfspi->cfdev.use_frag = false;
        cfspi->cfdev.use_stx = false;
        cfspi->cfdev.use_fcs = false;
        cfspi->ndev = dev;
}

int cfspi_spi_probe(struct platform_device *pdev)
{
        struct cfspi *cfspi = NULL;
        struct net_device *ndev;
        struct cfspi_dev *dev;
        int res;
        dev = (struct cfspi_dev *)pdev->dev.platform_data;
        if (!dev)
                return -ENODEV;

        ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
                            NET_NAME_UNKNOWN, cfspi_setup);
        if (!ndev)
                return -ENOMEM;

        cfspi = netdev_priv(ndev);
        netif_stop_queue(ndev);
        cfspi->ndev = ndev;
        cfspi->pdev = pdev;

        /* Assign the SPI device. */
        cfspi->dev = dev;
        /* Assign the device ifc to this SPI interface. */
        dev->ifc = &cfspi->ifc;

        /* Register network device. */
        res = register_netdev(ndev);
        if (res) {
                printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
                goto err_net_reg;
        }
        return res;

 err_net_reg:
        free_netdev(ndev);

        return res;
}

int cfspi_spi_remove(struct platform_device *pdev)
{
        /* Everything is done in cfspi_uninit(). */
        return 0;
}

static void __exit cfspi_exit_module(void)
{
        struct list_head *list_node;
        struct list_head *n;
        struct cfspi *cfspi = NULL;

        list_for_each_safe(list_node, n, &cfspi_list) {
                cfspi = list_entry(list_node, struct cfspi, list);
                unregister_netdev(cfspi->ndev);
        }

        /* Destroy sysfs files. */
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_up_head_align);
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_up_tail_align);
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_down_head_align);
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_down_tail_align);
        driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
        /* Unregister platform driver. */
        platform_driver_unregister(&cfspi_spi_driver);
        /* Destroy debugfs root directory. */
        driver_debugfs_remove();
}

static int __init cfspi_init_module(void)
{
        int result;

        /* Initialize spin lock. */
        spin_lock_init(&cfspi_list_lock);

        /* Register platform driver. */
        result = platform_driver_register(&cfspi_spi_driver);
        if (result) {
                printk(KERN_ERR "Could not register platform SPI driver.\n");
                goto err_dev_register;
        }

        /* Create sysfs files. */
        result = driver_create_file(&cfspi_spi_driver.driver,
                                    &driver_attr_up_head_align);
        if (result) {
                printk(KERN_ERR "Sysfs creation failed 1.\n");
                goto err_create_up_head_align;
        }

        result = driver_create_file(&cfspi_spi_driver.driver,
                                    &driver_attr_up_tail_align);
        if (result) {
                printk(KERN_ERR "Sysfs creation failed 2.\n");
                goto err_create_up_tail_align;
        }

        result = driver_create_file(&cfspi_spi_driver.driver,
                                    &driver_attr_down_head_align);
        if (result) {
                printk(KERN_ERR "Sysfs creation failed 3.\n");
                goto err_create_down_head_align;
        }

        result = driver_create_file(&cfspi_spi_driver.driver,
                                    &driver_attr_down_tail_align);
        if (result) {
                printk(KERN_ERR "Sysfs creation failed 4.\n");
                goto err_create_down_tail_align;
        }

        result = driver_create_file(&cfspi_spi_driver.driver,
                                    &driver_attr_frame_align);
        if (result) {
                printk(KERN_ERR "Sysfs creation failed 5.\n");
                goto err_create_frame_align;
        }
        driver_debugfs_create();
        return result;

 err_create_frame_align:
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_down_tail_align);
 err_create_down_tail_align:
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_down_head_align);
 err_create_down_head_align:
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_up_tail_align);
 err_create_up_tail_align:
        driver_remove_file(&cfspi_spi_driver.driver,
                           &driver_attr_up_head_align);
 err_create_up_head_align:
        platform_driver_unregister(&cfspi_spi_driver);
 err_dev_register:
        return result;
}

module_init(cfspi_init_module);
module_exit(cfspi_exit_module);
254  drivers/net/caif/caif_spi_slave.c  Normal file
@@ -0,0 +1,254 @@
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Daniel Martensson
 * License terms: GNU General Public License (GPL) version 2.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <net/caif/caif_spi.h>

#ifndef CONFIG_CAIF_SPI_SYNC
#define SPI_DATA_POS 0
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
        return cfspi->rx_cpck_len;
}
#else
#define SPI_DATA_POS SPI_CMD_SZ
static inline int forward_to_spi_cmd(struct cfspi *cfspi)
{
        return 0;
}
#endif

int spi_frm_align = 2;

/*
 * SPI padding options.
 * Warning: must be a power of 2 (& operation used) and cannot be zero!
 */
int spi_up_head_align = 1 << 1;
int spi_up_tail_align = 1 << 0;
int spi_down_head_align = 1 << 2;
int spi_down_tail_align = 1 << 1;

#ifdef CONFIG_DEBUG_FS
static inline void debugfs_store_prev(struct cfspi *cfspi)
{
        /* Store previous command for debugging reasons. */
        cfspi->pcmd = cfspi->cmd;
        /* Store previous transfer. */
        cfspi->tx_ppck_len = cfspi->tx_cpck_len;
        cfspi->rx_ppck_len = cfspi->rx_cpck_len;
}
#else
static inline void debugfs_store_prev(struct cfspi *cfspi)
{
}
#endif

void cfspi_xfer(struct work_struct *work)
{
        struct cfspi *cfspi;
        u8 *ptr = NULL;
        unsigned long flags;
        int ret;
        cfspi = container_of(work, struct cfspi, work);

        /* Initialize state. */
        cfspi->cmd = SPI_CMD_EOT;

        for (;;) {

                cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);

                /* Wait for master talk or transmit event. */
                wait_event_interruptible(cfspi->wait,
                                         test_bit(SPI_XFER, &cfspi->state) ||
                                         test_bit(SPI_TERMINATE, &cfspi->state));

                if (test_bit(SPI_TERMINATE, &cfspi->state))
                        return;

#if CFSPI_DBG_PREFILL
                /* Prefill buffers for easier debugging. */
                memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
                memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
#endif  /* CFSPI_DBG_PREFILL */

                cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);

                /* Check whether we have a committed frame. */
                if (cfspi->tx_cpck_len) {
                        int len;

                        cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);

                        /* Copy committed SPI frames after the SPI indication. */
                        ptr = (u8 *) cfspi->xfer.va_tx;
                        ptr += SPI_IND_SZ;
                        len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
                        WARN_ON(len != cfspi->tx_cpck_len);
                }

                cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);

                /* Get length of next frame to commit. */
                cfspi->tx_npck_len = cfspi_xmitlen(cfspi);

                WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);

                /*
                 * Add indication and length at the beginning of the frame,
                 * using little endian.
                 */
                ptr = (u8 *) cfspi->xfer.va_tx;
                *ptr++ = SPI_CMD_IND;
                *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;
                *ptr++ = cfspi->tx_npck_len & 0x00FF;
                *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;

                /* Calculate length of DMAs. */
                cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
                cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;

                /* Add SPI TX frame alignment padding, if necessary. */
                if (cfspi->tx_cpck_len &&
                    (cfspi->xfer.tx_dma_len % spi_frm_align)) {
                        cfspi->xfer.tx_dma_len += spi_frm_align -
                                (cfspi->xfer.tx_dma_len % spi_frm_align);
                }

                /* Add SPI RX frame alignment padding, if necessary. */
                if (cfspi->rx_cpck_len &&
                    (cfspi->xfer.rx_dma_len % spi_frm_align)) {
                        cfspi->xfer.rx_dma_len += spi_frm_align -
                                (cfspi->xfer.rx_dma_len % spi_frm_align);
                }

                cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);

                /* Start transfer. */
                ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
                WARN_ON(ret);

                cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);

                /*
                 * TODO: We might be able to make an assumption if this is the
                 * first loop. Make sure that minimum toggle time is respected.
                 */
                udelay(MIN_TRANSITION_TIME_USEC);

                cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);

                /* Signal that we are ready to receive data. */
                cfspi->dev->sig_xfer(true, cfspi->dev);

                cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);

                /* Wait for transfer completion. */
                wait_for_completion(&cfspi->comp);

                cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);

                if (cfspi->cmd == SPI_CMD_EOT) {
                        /*
                         * Clear the master talk bit. A xfer is always at
                         * least two bursts.
                         */
                        clear_bit(SPI_SS_ON, &cfspi->state);
                }

                cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);

                /* Make sure that the minimum toggle time is respected. */
                if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
                                       cfspi->dev->clk_mhz) <
                    MIN_TRANSITION_TIME_USEC) {
                        udelay(MIN_TRANSITION_TIME_USEC -
                               SPI_XFER_TIME_USEC
                               (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
                }

                cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);

                /* De-assert transfer signal. */
                cfspi->dev->sig_xfer(false, cfspi->dev);

                /* Check whether we received a CAIF packet. */
                if (cfspi->rx_cpck_len) {
                        int len;

                        cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);

                        /* Parse SPI frame. */
                        ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));

                        len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
                        WARN_ON(len != cfspi->rx_cpck_len);
                }

                /* Check the next SPI command and length. */
                ptr = (u8 *) cfspi->xfer.va_rx;

                ptr += forward_to_spi_cmd(cfspi);

                cfspi->cmd = *ptr++;
                cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
                cfspi->rx_npck_len = *ptr++;
                cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;

                WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
                WARN_ON(cfspi->cmd > SPI_CMD_EOT);

                debugfs_store_prev(cfspi);

                /* Check whether the master issued an EOT command. */
                if (cfspi->cmd == SPI_CMD_EOT) {
                        /* Reset state. */
                        cfspi->tx_cpck_len = 0;
                        cfspi->rx_cpck_len = 0;
                } else {
                        /* Update state. */
                        cfspi->tx_cpck_len = cfspi->tx_npck_len;
                        cfspi->rx_cpck_len = cfspi->rx_npck_len;
                }

                /*
                 * Check whether we need to clear the xfer bit.
                 * Spin lock needed for packet insertion.
                 * Test and clear of different bits
                 * are not supported.
                 */
                spin_lock_irqsave(&cfspi->lock, flags);
                if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi) &&
                    !test_bit(SPI_SS_ON, &cfspi->state))
                        clear_bit(SPI_XFER, &cfspi->state);

                spin_unlock_irqrestore(&cfspi->lock, flags);
        }
}

struct platform_driver cfspi_spi_driver = {
        .probe = cfspi_spi_probe,
        .remove = cfspi_spi_remove,
        .driver = {
                .name = "cfspi_sspi",
                .owner = THIS_MODULE,
        },
};
791  drivers/net/caif/caif_virtio.c  Normal file
@@ -0,0 +1,791 @@
|
|||
/*
|
||||
* Copyright (C) ST-Ericsson AB 2013
|
||||
* Authors: Vicram Arv
|
||||
* Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
|
||||
* Sjur Brendeland
|
||||
* License terms: GNU General Public License (GPL) version 2
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/virtio.h>
|
||||
#include <linux/vringh.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/virtio_ids.h>
|
||||
#include <linux/virtio_caif.h>
|
||||
#include <linux/virtio_ring.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <net/caif/caif_dev.h>
|
||||
#include <linux/virtio_config.h>
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR("Vicram Arv");
|
||||
MODULE_AUTHOR("Sjur Brendeland");
|
||||
MODULE_DESCRIPTION("Virtio CAIF Driver");
|
||||
|
||||
/* NAPI schedule quota */
|
||||
#define CFV_DEFAULT_QUOTA 32
|
||||
|
||||
/* Defaults used if virtio config space is unavailable */
|
||||
#define CFV_DEF_MTU_SIZE 4096
|
||||
#define CFV_DEF_HEADROOM 32
|
||||
#define CFV_DEF_TAILROOM 32
|
||||
|
||||
/* Required IP header alignment */
|
||||
#define IP_HDR_ALIGN 4
|
||||
|
||||
/* struct cfv_napi_contxt - NAPI context info
|
||||
* @riov: IOV holding data read from the ring. Note that riov may
|
||||
* still hold data when cfv_rx_poll() returns.
|
||||
* @head: Last descriptor ID we received from vringh_getdesc_kern.
|
||||
* We use this to put descriptor back on the used ring. USHRT_MAX is
|
||||
* used to indicate invalid head-id.
|
||||
*/
|
||||
struct cfv_napi_context {
|
||||
struct vringh_kiov riov;
|
||||
unsigned short head;
|
||||
};
|
||||
|
||||
/* struct cfv_stats - statistics for debugfs
|
||||
* @rx_napi_complete: Number of NAPI completions (RX)
|
||||
* @rx_napi_resched: Number of calls where the full quota was used (RX)
|
||||
* @rx_nomem: Number of SKB alloc failures (RX)
|
||||
* @rx_kicks: Number of RX kicks
|
||||
* @tx_full_ring: Number times TX ring was full
|
||||
* @tx_no_mem: Number of times TX went out of memory
|
||||
* @tx_flow_on: Number of flow on (TX)
|
||||
* @tx_kicks: Number of TX kicks
|
||||
*/
|
||||
struct cfv_stats {
|
||||
u32 rx_napi_complete;
|
||||
u32 rx_napi_resched;
|
||||
u32 rx_nomem;
|
||||
u32 rx_kicks;
|
||||
u32 tx_full_ring;
|
||||
u32 tx_no_mem;
|
||||
u32 tx_flow_on;
|
||||
u32 tx_kicks;
|
||||
};
|
||||
|
||||
/* struct cfv_info - Caif Virtio control structure
|
||||
* @cfdev: caif common header
|
||||
* @vdev: Associated virtio device
|
||||
* @vr_rx: rx/downlink host vring
|
||||
* @vq_tx: tx/uplink virtqueue
|
||||
* @ndev: CAIF link layer device
|
||||
* @watermark_tx: indicates number of free descriptors we need
|
||||
* to reopen the tx-queues after overload.
|
||||
* @tx_lock: protects vq_tx from concurrent use
|
||||
* @tx_release_tasklet: Tasklet for freeing consumed TX buffers
|
||||
* @napi: Napi context used in cfv_rx_poll()
|
||||
* @ctx: Context data used in cfv_rx_poll()
|
||||
* @tx_hr: transmit headroom
|
||||
* @rx_hr: receive headroom
|
||||
* @tx_tr: transmit tail room
|
||||
* @rx_tr: receive tail room
|
||||
* @mtu: transmit max size
|
||||
* @mru: receive max size
|
||||
* @allocsz: size of dma memory reserved for TX buffers
|
||||
* @alloc_addr: virtual address to dma memory for TX buffers
|
||||
* @alloc_dma: dma address to dma memory for TX buffers
|
||||
* @genpool: Gen Pool used for allocating TX buffers
|
||||
* @reserved_mem: Pointer to memory reserve allocated from genpool
|
||||
* @reserved_size: Size of memory reserve allocated from genpool
|
||||
* @stats: Statistics exposed in sysfs
|
||||
* @debugfs: Debugfs dentry for statistic counters
|
||||
*/
|
||||
struct cfv_info {
|
||||
struct caif_dev_common cfdev;
|
||||
struct virtio_device *vdev;
|
||||
struct vringh *vr_rx;
|
||||
struct virtqueue *vq_tx;
|
||||
struct net_device *ndev;
|
||||
unsigned int watermark_tx;
|
||||
/* Protect access to vq_tx */
|
||||
spinlock_t tx_lock;
|
||||
struct tasklet_struct tx_release_tasklet;
|
||||
struct napi_struct napi;
|
||||
struct cfv_napi_context ctx;
|
||||
u16 tx_hr;
|
||||
u16 rx_hr;
|
||||
u16 tx_tr;
|
||||
u16 rx_tr;
|
||||
u32 mtu;
|
||||
u32 mru;
|
||||
size_t allocsz;
|
||||
void *alloc_addr;
|
||||
dma_addr_t alloc_dma;
|
||||
struct gen_pool *genpool;
|
||||
unsigned long reserved_mem;
|
||||
size_t reserved_size;
|
||||
struct cfv_stats stats;
|
||||
struct dentry *debugfs;
|
||||
};
|
||||
|
||||
/* struct buf_info - maintains transmit buffer data handle
|
||||
* @size: size of transmit buffer
|
||||
* @dma_handle: handle to allocated dma device memory area
|
||||
* @vaddr: virtual address mapping to allocated memory area
|
||||
*/
|
||||
struct buf_info {
|
||||
size_t size;
|
||||
u8 *vaddr;
|
||||
};
|
||||
|
||||
/* Called from virtio device, in IRQ context */
|
||||
static void cfv_release_cb(struct virtqueue *vq_tx)
|
||||
{
|
||||
struct cfv_info *cfv = vq_tx->vdev->priv;
|
||||
|
||||
++cfv->stats.tx_kicks;
|
||||
tasklet_schedule(&cfv->tx_release_tasklet);
|
||||
}
|
||||
|
||||
static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
|
||||
{
|
||||
if (!buf_info)
|
||||
return;
|
||||
gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
|
||||
buf_info->size);
|
||||
kfree(buf_info);
|
||||
}
|
||||
|
||||
/* This is invoked whenever the remote processor completed processing
 * a TX msg we just sent, and the buffer is put back to the used ring.
 */
static void cfv_release_used_buf(struct virtqueue *vq_tx)
{
	struct cfv_info *cfv = vq_tx->vdev->priv;
	unsigned long flags;

	BUG_ON(vq_tx != cfv->vq_tx);

	for (;;) {
		unsigned int len;
		struct buf_info *buf_info;

		/* Get used buffer from used ring to recycle used descriptors */
		spin_lock_irqsave(&cfv->tx_lock, flags);
		buf_info = virtqueue_get_buf(vq_tx, &len);
		spin_unlock_irqrestore(&cfv->tx_lock, flags);

		/* Stop looping if there are no more buffers to free */
		if (!buf_info)
			break;

		free_buf_info(cfv, buf_info);

		/* watermark_tx indicates if we previously stopped the tx
		 * queues. If we have enough free slots in the virtio ring,
		 * re-establish the memory reserve and open up the tx queues.
		 */
		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
			continue;

		/* Re-establish memory reserve */
		if (cfv->reserved_mem == 0 && cfv->genpool)
			cfv->reserved_mem =
				gen_pool_alloc(cfv->genpool,
					       cfv->reserved_size);

		/* Open up the tx queues */
		if (cfv->reserved_mem) {
			cfv->watermark_tx =
				virtqueue_get_vring_size(cfv->vq_tx);
			netif_tx_wake_all_queues(cfv->ndev);
			/* Buffers are recycled in cfv_netdev_tx, so
			 * disable notifications when queues are opened.
			 */
			virtqueue_disable_cb(cfv->vq_tx);
			++cfv->stats.tx_flow_on;
		} else {
			/* if no memory reserve, wait for more free slots */
			WARN_ON(cfv->watermark_tx >
				virtqueue_get_vring_size(cfv->vq_tx));
			cfv->watermark_tx +=
				virtqueue_get_vring_size(cfv->vq_tx) / 4;
		}
	}
}

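/* A minimal sketch (illustrative, not part of the driver) of the flow-on
 * arithmetic in cfv_release_used_buf() above: while the reserve cannot be
 * refilled, the reopen threshold is raised by a quarter of the ring per
 * pass. For a hypothetical 256-descriptor vring the watermark would step
 * 64 -> 128 -> 192 -> 256 free slots before the TX queues are woken.
 */
static inline unsigned int cfv_watermark_step_sketch(unsigned int watermark,
						     unsigned int vring_size)
{
	/* mirrors: cfv->watermark_tx += virtqueue_get_vring_size(...) / 4 */
	return watermark + vring_size / 4;
}
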
/* Allocate a SKB and copy packet data to it */
static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
					      struct cfv_info *cfv,
					      u8 *frm, u32 frm_len)
{
	struct sk_buff *skb;
	u32 cfpkt_len, pad_len;

	*err = 0;
	/* Verify that the frame length fits within the MRU and exceeds the
	 * down-link headroom plus tailroom.
	 */
	if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
		netdev_err(cfv->ndev,
			   "Invalid frmlen:%u mru:%u hr:%d tr:%d\n",
			   frm_len, cfv->mru, cfv->rx_hr,
			   cfv->rx_tr);
		*err = -EPROTO;
		return NULL;
	}

	cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
	pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);

	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
	if (!skb) {
		*err = -ENOMEM;
		return NULL;
	}

	skb_reserve(skb, cfv->rx_hr + pad_len);

	memcpy(skb_put(skb, cfpkt_len), frm + cfv->rx_hr, cfpkt_len);
	return skb;
}

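/* Worked example of the padding math above (illustrative numbers, not from
 * the source): with IP_HDR_ALIGN == 4 and rx_hr == 26, a frame buffer at
 * frm == 0x...1000 puts the payload at frm + 26 == 0x...101a, so
 * pad_len = 0x1a & 3 == 2. Those extra pad_len bytes of skb_reserve()
 * compensate for the payload's misalignment relative to IP_HDR_ALIGN.
 */
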
/* Get packets from the host vring */
static int cfv_rx_poll(struct napi_struct *napi, int quota)
{
	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
	int rxcnt = 0;
	int err = 0;
	void *buf;
	struct sk_buff *skb;
	struct vringh_kiov *riov = &cfv->ctx.riov;
	unsigned int skb_len;

again:
	do {
		skb = NULL;

		/* Put the previous iovec back on the used ring and
		 * fetch a new iovec if we have processed all elements.
		 */
		if (riov->i == riov->used) {
			if (cfv->ctx.head != USHRT_MAX) {
				vringh_complete_kern(cfv->vr_rx,
						     cfv->ctx.head,
						     0);
				cfv->ctx.head = USHRT_MAX;
			}

			err = vringh_getdesc_kern(
				cfv->vr_rx,
				riov,
				NULL,
				&cfv->ctx.head,
				GFP_ATOMIC);

			if (err <= 0)
				goto exit;
		}

		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
		/* TODO: Add check on valid buffer address */

		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
					     riov->iov[riov->i].iov_len);
		if (unlikely(err))
			goto exit;

		/* Push received packet up the stack. */
		skb_len = skb->len;
		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfv->ndev;
		err = netif_receive_skb(skb);
		if (unlikely(err)) {
			++cfv->ndev->stats.rx_dropped;
		} else {
			++cfv->ndev->stats.rx_packets;
			cfv->ndev->stats.rx_bytes += skb_len;
		}

		++riov->i;
		++rxcnt;
	} while (rxcnt < quota);

	++cfv->stats.rx_napi_resched;
	goto out;

exit:
	switch (err) {
	case 0:
		++cfv->stats.rx_napi_complete;

		/* Really out of packets? (stolen from virtio_net) */
		napi_complete(napi);
		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
		    napi_schedule_prep(napi)) {
			vringh_notify_disable_kern(cfv->vr_rx);
			__napi_schedule(napi);
			goto again;
		}
		break;

	case -ENOMEM:
		++cfv->stats.rx_nomem;
		dev_kfree_skb(skb);
		/* Stop NAPI poll on OOM, we hope to be polled later */
		napi_complete(napi);
		vringh_notify_enable_kern(cfv->vr_rx);
		break;

	default:
		/* We're doomed, any modem fault is fatal */
		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
		napi_complete(napi);
		vringh_notify_disable_kern(cfv->vr_rx);
		netif_carrier_off(cfv->ndev);
		break;
	}
out:
	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
		vringh_notify(cfv->vr_rx);
	return rxcnt;
}

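/* Note on the completion handshake in cfv_rx_poll() (descriptive only):
 * after napi_complete() the vring notification is re-armed; if
 * vringh_notify_enable_kern() reports that more buffers arrived in the
 * meantime, polling is rescheduled immediately (the "goto again"), which
 * closes the race between the last poll and the next modem kick. As the
 * comment in the code says, the same pattern is used by virtio_net.
 */
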
static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
{
	struct cfv_info *cfv = vdev->priv;

	++cfv->stats.rx_kicks;
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_schedule(&cfv->napi);
}

static void cfv_destroy_genpool(struct cfv_info *cfv)
{
	if (cfv->alloc_addr)
		dma_free_coherent(cfv->vdev->dev.parent->parent,
				  cfv->allocsz, cfv->alloc_addr,
				  cfv->alloc_dma);

	if (!cfv->genpool)
		return;
	gen_pool_free(cfv->genpool, cfv->reserved_mem,
		      cfv->reserved_size);
	gen_pool_destroy(cfv->genpool);
	cfv->genpool = NULL;
}

static int cfv_create_genpool(struct cfv_info *cfv)
{
	int err;

	/* dma_alloc can only allocate whole pages, and we need a more
	 * fine-grained allocation so we use genpool. We ask for space needed
	 * by IP and a full ring. If the dma allocation fails we retry with a
	 * smaller allocation size.
	 */
	err = -ENOMEM;
	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
		return -EINVAL;

	for (;;) {
		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
			netdev_info(cfv->ndev, "Not enough device memory\n");
			return -ENOMEM;
		}

		cfv->alloc_addr = dma_alloc_coherent(
						cfv->vdev->dev.parent->parent,
						cfv->allocsz, &cfv->alloc_dma,
						GFP_ATOMIC);
		if (cfv->alloc_addr)
			break;

		cfv->allocsz = (cfv->allocsz * 3) >> 2;
	}

	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
		   cfv->allocsz);

	/* Allocate on 128-byte boundaries (1 << 7) */
	cfv->genpool = gen_pool_create(7, -1);
	if (!cfv->genpool)
		goto err;

	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
				cfv->allocsz, -1);
	if (err)
		goto err;

	/* Reserve some memory for low memory situations. If we hit the roof
	 * in the memory pool, we stop TX flow and release the reserve.
	 */
	cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
					   cfv->reserved_size);
	if (!cfv->reserved_mem) {
		err = -ENOMEM;
		goto err;
	}

	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
	return 0;
err:
	cfv_destroy_genpool(cfv);
	return err;
}

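/* Illustrative sizing for cfv_create_genpool() above (assumed numbers, not
 * read from any device): with a 256-entry TX vring, ETH_DATA_LEN of 1500
 * and tx_hr + tx_tr == 36, the first attempt asks for
 * 256 * 1536 * 11 / 10 bytes (about 432 kB) of coherent memory. Each
 * failed dma_alloc_coherent() retries with 3/4 of the previous size,
 * until the pool would no longer cover one MTU per possible CPU.
 */
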
/* Enable the CAIF interface and allocate the memory-pool */
static int cfv_netdev_open(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);

	if (cfv_create_genpool(cfv))
		return -ENOMEM;

	netif_carrier_on(netdev);
	napi_enable(&cfv->napi);

	/* Schedule NAPI to read any pending packets */
	napi_schedule(&cfv->napi);
	return 0;
}

/* Disable the CAIF interface and free the memory-pool */
static int cfv_netdev_close(struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	unsigned long flags;
	struct buf_info *buf_info;

	/* Disable interrupts, queues and NAPI polling */
	netif_carrier_off(netdev);
	virtqueue_disable_cb(cfv->vq_tx);
	vringh_notify_disable_kern(cfv->vr_rx);
	napi_disable(&cfv->napi);

	/* Release any TX buffers on both used and available rings */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);
	while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
		free_buf_info(cfv, buf_info);
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* Release all dma allocated memory and destroy the pool */
	cfv_destroy_genpool(cfv);
	return 0;
}

/* Allocate a buffer in dma-memory and copy skb to it */
static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
						  struct sk_buff *skb,
						  struct scatterlist *sg)
{
	struct caif_payload_info *info = (void *)&skb->cb;
	struct buf_info *buf_info = NULL;
	u8 pad_len, hdr_ofs;

	if (!cfv->genpool)
		goto err;

	if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
		netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
			    cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
		goto err;
	}

	buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
	if (unlikely(!buf_info))
		goto err;

	/* Make the IP header aligned in the buffer */
	hdr_ofs = cfv->tx_hr + info->hdr_len;
	pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
	buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;

	/* allocate dma memory buffer */
	buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
	if (unlikely(!buf_info->vaddr))
		goto err;

	/* copy skbuf contents to send buffer */
	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
	sg_init_one(sg, buf_info->vaddr + pad_len,
		    skb->len + cfv->tx_hr + cfv->rx_hr);

	return buf_info;
err:
	kfree(buf_info);
	return NULL;
}

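/* Sketch of the TX buffer produced above (illustrative layout):
 *
 *   vaddr
 *   |<- pad_len ->|<- tx_hr ->|<---- skb->len ---->|<- tx_tr ->|
 *                 ^           ^
 *                 |           payload copied by skb_copy_bits()
 *                 scatterlist entry starts at vaddr + pad_len
 *
 * pad_len is derived from hdr_ofs = tx_hr + info->hdr_len to compensate
 * for the IP header's misalignment relative to IP_HDR_ALIGN.
 */
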
/* Put the CAIF packet on the virtio ring and kick the receiver */
static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	struct buf_info *buf_info;
	struct scatterlist sg;
	unsigned long flags;
	bool flow_off = false;
	int ret;

	/* garbage collect released buffers */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);

	/* Flow-off check takes into account number of cpus to make sure
	 * virtqueue will not be overfilled in any possible smp conditions.
	 *
	 * Flow-on is triggered when sufficient buffers are freed
	 */
	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
		flow_off = true;
		cfv->stats.tx_full_ring++;
	}

	/* If we run out of memory, we release the memory reserve and retry
	 * allocation.
	 */
	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
	if (unlikely(!buf_info)) {
		cfv->stats.tx_no_mem++;
		flow_off = true;

		if (cfv->reserved_mem && cfv->genpool) {
			gen_pool_free(cfv->genpool, cfv->reserved_mem,
				      cfv->reserved_size);
			cfv->reserved_mem = 0;
			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
		}
	}

	if (unlikely(flow_off)) {
		/* Turn flow on when 1/4 of the descriptors are released */
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
		/* Enable notifications of recycled TX buffers */
		virtqueue_enable_cb(cfv->vq_tx);
		netif_tx_stop_all_queues(netdev);
	}

	if (unlikely(!buf_info)) {
		/* If the memory reserve does its job, this shouldn't happen */
		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
		goto err;
	}

	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
	if (unlikely((ret < 0))) {
		/* If flow control works, this shouldn't happen */
		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
			    ret);
		goto err;
	}

	/* update netdev statistics */
	cfv->ndev->stats.tx_packets++;
	cfv->ndev->stats.tx_bytes += skb->len;
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(cfv->vq_tx);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
err:
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
	cfv->ndev->stats.tx_dropped++;
	free_buf_info(cfv, buf_info);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

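/* Why num_present_cpus() in the flow-off test above (descriptive only):
 * stopping the TX queues does not stop packets that other CPUs have
 * already dequeued and are about to transmit, so flow-off triggers while
 * there is still at least one free descriptor per possible concurrent
 * sender. That way virtqueue_add_outbuf() should only fail if flow
 * control itself has failed, as the "If flow control works" warning in
 * cfv_netdev_tx() notes.
 */
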
static void cfv_tx_release_tasklet(unsigned long drv)
{
	struct cfv_info *cfv = (struct cfv_info *)drv;
	cfv_release_used_buf(cfv->vq_tx);
}

static const struct net_device_ops cfv_netdev_ops = {
	.ndo_open = cfv_netdev_open,
	.ndo_stop = cfv_netdev_close,
	.ndo_start_xmit = cfv_netdev_tx,
};

static void cfv_netdev_setup(struct net_device *netdev)
{
	netdev->netdev_ops = &cfv_netdev_ops;
	netdev->type = ARPHRD_CAIF;
	netdev->tx_queue_len = 100;
	netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
	netdev->mtu = CFV_DEF_MTU_SIZE;
	netdev->destructor = free_netdev;
}

/* Create debugfs counters for the device */
static inline void debugfs_init(struct cfv_info *cfv)
{
	cfv->debugfs =
		debugfs_create_dir(netdev_name(cfv->ndev), NULL);

	if (IS_ERR(cfv->debugfs))
		return;

	debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_napi_complete);
	debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_napi_resched);
	debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_nomem);
	debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_kicks);
	debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_full_ring);
	debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_no_mem);
	debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_kicks);
	debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_flow_on);
}

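/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * interface kept its default "cfvrt" name):
 *
 *   cat /sys/kernel/debug/cfvrt/tx-kicks
 *
 * Each file is a read-only (S_IRUSR, mode 0400) u32 that maps 1:1 to a
 * field of struct cfv_stats.
 */
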
/* Set up CAIF for a virtio device */
static int cfv_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs = cfv_release_cb;
	vrh_callback_t *vrh_cbs = cfv_recv;
	const char *names = "output";
	const char *cfv_netdev_name = "cfvrt";
	struct net_device *netdev;
	struct cfv_info *cfv;
	int err = -EINVAL;

	netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
			      NET_NAME_UNKNOWN, cfv_netdev_setup);
	if (!netdev)
		return -ENOMEM;

	cfv = netdev_priv(netdev);
	cfv->vdev = vdev;
	cfv->ndev = netdev;

	spin_lock_init(&cfv->tx_lock);

	/* Get the RX virtio ring. This is a "host side vring". */
	err = -ENODEV;
	if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
		goto err;

	err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
	if (err)
		goto err;

	/* Get the TX virtio ring. This is a "guest side vring". */
	err = vdev->config->find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names);
	if (err)
		goto err;

	/* Get the CAIF configuration from virtio config space, if available */
	if (vdev->config->get) {
		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
			     &cfv->tx_hr);
		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
			     &cfv->rx_hr);
		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
			     &cfv->tx_tr);
		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
			     &cfv->rx_tr);
		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
			     &cfv->mtu);
		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
			     &cfv->mru);
	} else {
		cfv->tx_hr = CFV_DEF_HEADROOM;
		cfv->rx_hr = CFV_DEF_HEADROOM;
		cfv->tx_tr = CFV_DEF_TAILROOM;
		cfv->rx_tr = CFV_DEF_TAILROOM;
		cfv->mtu = CFV_DEF_MTU_SIZE;
		cfv->mru = CFV_DEF_MTU_SIZE;
	}

	netdev->needed_headroom = cfv->tx_hr;
	netdev->needed_tailroom = cfv->tx_tr;

	/* Disable buffer release interrupts unless we have stopped TX queues */
	virtqueue_disable_cb(cfv->vq_tx);

	netdev->mtu = cfv->mtu - cfv->tx_tr;
	vdev->priv = cfv;

	/* Initialize NAPI poll context data */
	vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
	cfv->ctx.head = USHRT_MAX;
	netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);

	tasklet_init(&cfv->tx_release_tasklet,
		     cfv_tx_release_tasklet,
		     (unsigned long)cfv);

	/* Carrier is off until netdevice is opened */
	netif_carrier_off(netdev);

	/* register Netdev */
	err = register_netdev(netdev);
	if (err) {
		dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
		goto err;
	}

	debugfs_init(cfv);

	return 0;
err:
	netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);

	if (cfv->vr_rx)
		vdev->vringh_config->del_vrhs(cfv->vdev);
	if (cfv->vdev)
		vdev->config->del_vqs(cfv->vdev);
	free_netdev(netdev);
	return err;
}

static void cfv_remove(struct virtio_device *vdev)
{
	struct cfv_info *cfv = vdev->priv;

	rtnl_lock();
	dev_close(cfv->ndev);
	rtnl_unlock();

	tasklet_kill(&cfv->tx_release_tasklet);
	debugfs_remove_recursive(cfv->debugfs);

	vringh_kiov_cleanup(&cfv->ctx.riov);
	vdev->config->reset(vdev);
	vdev->vringh_config->del_vrhs(cfv->vdev);
	cfv->vr_rx = NULL;
	vdev->config->del_vqs(cfv->vdev);
	unregister_netdev(cfv->ndev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
};

static struct virtio_driver caif_virtio_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = cfv_probe,
	.remove = cfv_remove,
};

module_virtio_driver(caif_virtio_driver);
MODULE_DEVICE_TABLE(virtio, id_table);