mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-10-29 07:18:51 +01:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
6
drivers/block/aoe/Makefile
Normal file
6
drivers/block/aoe/Makefile
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
#
# Makefile for ATA over Ethernet
#

obj-$(CONFIG_ATA_OVER_ETH)	+= aoe.o
aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
|
||||
240
drivers/block/aoe/aoe.h
Normal file
240
drivers/block/aoe/aoe.h
Normal file
|
|
@ -0,0 +1,240 @@
|
|||
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
|
||||
#define VERSION "85"
|
||||
#define AOE_MAJOR 152
|
||||
#define DEVICE_NAME "aoe"
|
||||
|
||||
/* set AOE_PARTITIONS to 1 to use whole-disks only
|
||||
* default is 16, which is 15 partitions plus the whole disk
|
||||
*/
|
||||
#ifndef AOE_PARTITIONS
|
||||
#define AOE_PARTITIONS (16)
|
||||
#endif
|
||||
|
||||
#define WHITESPACE " \t\v\f\n,"
|
||||
|
||||
enum {
|
||||
AOECMD_ATA,
|
||||
AOECMD_CFG,
|
||||
AOECMD_VEND_MIN = 0xf0,
|
||||
|
||||
AOEFL_RSP = (1<<3),
|
||||
AOEFL_ERR = (1<<2),
|
||||
|
||||
AOEAFL_EXT = (1<<6),
|
||||
AOEAFL_DEV = (1<<4),
|
||||
AOEAFL_ASYNC = (1<<1),
|
||||
AOEAFL_WRITE = (1<<0),
|
||||
|
||||
AOECCMD_READ = 0,
|
||||
AOECCMD_TEST,
|
||||
AOECCMD_PTEST,
|
||||
AOECCMD_SET,
|
||||
AOECCMD_FSET,
|
||||
|
||||
AOE_HVER = 0x10,
|
||||
};
|
||||
|
||||
struct aoe_hdr {
|
||||
unsigned char dst[6];
|
||||
unsigned char src[6];
|
||||
__be16 type;
|
||||
unsigned char verfl;
|
||||
unsigned char err;
|
||||
__be16 major;
|
||||
unsigned char minor;
|
||||
unsigned char cmd;
|
||||
__be32 tag;
|
||||
};
|
||||
|
||||
struct aoe_atahdr {
|
||||
unsigned char aflags;
|
||||
unsigned char errfeat;
|
||||
unsigned char scnt;
|
||||
unsigned char cmdstat;
|
||||
unsigned char lba0;
|
||||
unsigned char lba1;
|
||||
unsigned char lba2;
|
||||
unsigned char lba3;
|
||||
unsigned char lba4;
|
||||
unsigned char lba5;
|
||||
unsigned char res[2];
|
||||
};
|
||||
|
||||
struct aoe_cfghdr {
|
||||
__be16 bufcnt;
|
||||
__be16 fwver;
|
||||
unsigned char scnt;
|
||||
unsigned char aoeccmd;
|
||||
unsigned char cslen[2];
|
||||
};
|
||||
|
||||
enum {
|
||||
DEVFL_UP = 1, /* device is installed in system and ready for AoE->ATA commands */
|
||||
DEVFL_TKILL = (1<<1), /* flag for timer to know when to kill self */
|
||||
DEVFL_EXT = (1<<2), /* device accepts lba48 commands */
|
||||
DEVFL_GDALLOC = (1<<3), /* need to alloc gendisk */
|
||||
DEVFL_GD_NOW = (1<<4), /* allocating gendisk */
|
||||
DEVFL_KICKME = (1<<5), /* slow polling network card catch */
|
||||
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
|
||||
DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
|
||||
DEVFL_FREED = (1<<8), /* device has been cleaned up */
|
||||
};
|
||||
|
||||
enum {
|
||||
DEFAULTBCNT = 2 * 512, /* 2 sectors */
|
||||
MIN_BUFS = 16,
|
||||
NTARGETS = 4,
|
||||
NAOEIFS = 8,
|
||||
NSKBPOOLMAX = 256,
|
||||
NFACTIVE = 61,
|
||||
|
||||
TIMERTICK = HZ / 10,
|
||||
RTTSCALE = 8,
|
||||
RTTDSCALE = 3,
|
||||
RTTAVG_INIT = USEC_PER_SEC / 4 << RTTSCALE,
|
||||
RTTDEV_INIT = RTTAVG_INIT / 4,
|
||||
|
||||
HARD_SCORN_SECS = 10, /* try another remote port after this */
|
||||
MAX_TAINT = 1000, /* cap on aoetgt taint */
|
||||
};
|
||||
|
||||
struct buf {
|
||||
ulong nframesout;
|
||||
struct bio *bio;
|
||||
struct bvec_iter iter;
|
||||
struct request *rq;
|
||||
};
|
||||
|
||||
enum frame_flags {
|
||||
FFL_PROBE = 1,
|
||||
};
|
||||
|
||||
struct frame {
|
||||
struct list_head head;
|
||||
u32 tag;
|
||||
struct timeval sent; /* high-res time packet was sent */
|
||||
u32 sent_jiffs; /* low-res jiffies-based sent time */
|
||||
ulong waited;
|
||||
ulong waited_total;
|
||||
struct aoetgt *t; /* parent target I belong to */
|
||||
struct sk_buff *skb; /* command skb freed on module exit */
|
||||
struct sk_buff *r_skb; /* response skb for async processing */
|
||||
struct buf *buf;
|
||||
struct bvec_iter iter;
|
||||
char flags;
|
||||
};
|
||||
|
||||
struct aoeif {
|
||||
struct net_device *nd;
|
||||
ulong lost;
|
||||
int bcnt;
|
||||
};
|
||||
|
||||
struct aoetgt {
|
||||
unsigned char addr[6];
|
||||
ushort nframes; /* cap on frames to use */
|
||||
struct aoedev *d; /* parent device I belong to */
|
||||
struct list_head ffree; /* list of free frames */
|
||||
struct aoeif ifs[NAOEIFS];
|
||||
struct aoeif *ifp; /* current aoeif in use */
|
||||
ushort nout; /* number of AoE commands outstanding */
|
||||
ushort maxout; /* current value for max outstanding */
|
||||
ushort next_cwnd; /* incr maxout after decrementing to zero */
|
||||
ushort ssthresh; /* slow start threshold */
|
||||
ulong falloc; /* number of allocated frames */
|
||||
int taint; /* how much we want to avoid this aoetgt */
|
||||
int minbcnt;
|
||||
int wpkts, rpkts;
|
||||
char nout_probes;
|
||||
};
|
||||
|
||||
struct aoedev {
|
||||
struct aoedev *next;
|
||||
ulong sysminor;
|
||||
ulong aoemajor;
|
||||
u32 rttavg; /* scaled AoE round trip time average */
|
||||
u32 rttdev; /* scaled round trip time mean deviation */
|
||||
u16 aoeminor;
|
||||
u16 flags;
|
||||
u16 nopen; /* (bd_openers isn't available without sleeping) */
|
||||
u16 fw_ver; /* version of blade's firmware */
|
||||
u16 lasttag; /* last tag sent */
|
||||
u16 useme;
|
||||
ulong ref;
|
||||
struct work_struct work;/* disk create work struct */
|
||||
struct gendisk *gd;
|
||||
struct dentry *debugfs;
|
||||
struct request_queue *blkq;
|
||||
struct hd_geometry geo;
|
||||
sector_t ssize;
|
||||
struct timer_list timer;
|
||||
spinlock_t lock;
|
||||
struct sk_buff_head skbpool;
|
||||
mempool_t *bufpool; /* for deadlock-free Buf allocation */
|
||||
struct { /* pointers to work in progress */
|
||||
struct buf *buf;
|
||||
struct bio *nxbio;
|
||||
struct request *rq;
|
||||
} ip;
|
||||
ulong maxbcnt;
|
||||
struct list_head factive[NFACTIVE]; /* hash of active frames */
|
||||
struct list_head rexmitq; /* deferred retransmissions */
|
||||
struct aoetgt **targets;
|
||||
ulong ntargets; /* number of allocated aoetgt pointers */
|
||||
struct aoetgt **tgt; /* target in use when working */
|
||||
ulong kicked;
|
||||
char ident[512];
|
||||
};
|
||||
|
||||
/* kthread tracking */
|
||||
struct ktstate {
|
||||
struct completion rendez;
|
||||
struct task_struct *task;
|
||||
wait_queue_head_t *waitq;
|
||||
int (*fn) (int);
|
||||
char name[12];
|
||||
spinlock_t *lock;
|
||||
int id;
|
||||
int active;
|
||||
};
|
||||
|
||||
int aoeblk_init(void);
|
||||
void aoeblk_exit(void);
|
||||
void aoeblk_gdalloc(void *);
|
||||
void aoedisk_rm_debugfs(struct aoedev *d);
|
||||
void aoedisk_rm_sysfs(struct aoedev *d);
|
||||
|
||||
int aoechr_init(void);
|
||||
void aoechr_exit(void);
|
||||
void aoechr_error(char *);
|
||||
|
||||
void aoecmd_work(struct aoedev *d);
|
||||
void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor);
|
||||
struct sk_buff *aoecmd_ata_rsp(struct sk_buff *);
|
||||
void aoecmd_cfg_rsp(struct sk_buff *);
|
||||
void aoecmd_sleepwork(struct work_struct *);
|
||||
void aoecmd_wreset(struct aoetgt *t);
|
||||
void aoecmd_cleanslate(struct aoedev *);
|
||||
void aoecmd_exit(void);
|
||||
int aoecmd_init(void);
|
||||
struct sk_buff *aoecmd_ata_id(struct aoedev *);
|
||||
void aoe_freetframe(struct frame *);
|
||||
void aoe_flush_iocq(void);
|
||||
void aoe_flush_iocq_by_index(int);
|
||||
void aoe_end_request(struct aoedev *, struct request *, int);
|
||||
int aoe_ktstart(struct ktstate *k);
|
||||
void aoe_ktstop(struct ktstate *k);
|
||||
|
||||
int aoedev_init(void);
|
||||
void aoedev_exit(void);
|
||||
struct aoedev *aoedev_by_aoeaddr(ulong maj, int min, int do_alloc);
|
||||
void aoedev_downdev(struct aoedev *d);
|
||||
int aoedev_flush(const char __user *str, size_t size);
|
||||
void aoe_failbuf(struct aoedev *, struct buf *);
|
||||
void aoedev_put(struct aoedev *);
|
||||
|
||||
int aoenet_init(void);
|
||||
void aoenet_exit(void);
|
||||
void aoenet_xmit(struct sk_buff_head *);
|
||||
int is_aoe_netif(struct net_device *ifp);
|
||||
int set_aoe_iflist(const char __user *str, size_t size);
|
||||
464
drivers/block/aoe/aoeblk.c
Normal file
464
drivers/block/aoe/aoeblk.c
Normal file
|
|
@ -0,0 +1,464 @@
|
|||
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
|
||||
/*
|
||||
* aoeblk.c
|
||||
* block device routines
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/backing-dev.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/genhd.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <scsi/sg.h>
|
||||
#include "aoe.h"
|
||||
|
||||
static DEFINE_MUTEX(aoeblk_mutex);
|
||||
static struct kmem_cache *buf_pool_cache;
|
||||
static struct dentry *aoe_debugfs_dir;
|
||||
|
||||
/* GPFS needs a larger value than the default. */
|
||||
static int aoe_maxsectors;
|
||||
module_param(aoe_maxsectors, int, 0644);
|
||||
MODULE_PARM_DESC(aoe_maxsectors,
|
||||
"When nonzero, set the maximum number of sectors per I/O request");
|
||||
|
||||
static ssize_t aoedisk_show_state(struct device *dev,
|
||||
struct device_attribute *attr, char *page)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
struct aoedev *d = disk->private_data;
|
||||
|
||||
return snprintf(page, PAGE_SIZE,
|
||||
"%s%s\n",
|
||||
(d->flags & DEVFL_UP) ? "up" : "down",
|
||||
(d->flags & DEVFL_KICKME) ? ",kickme" :
|
||||
(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
|
||||
/* I'd rather see nopen exported so we can ditch closewait */
|
||||
}
|
||||
static ssize_t aoedisk_show_mac(struct device *dev,
|
||||
struct device_attribute *attr, char *page)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
struct aoedev *d = disk->private_data;
|
||||
struct aoetgt *t = d->targets[0];
|
||||
|
||||
if (t == NULL)
|
||||
return snprintf(page, PAGE_SIZE, "none\n");
|
||||
return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
|
||||
}
|
||||
static ssize_t aoedisk_show_netif(struct device *dev,
|
||||
struct device_attribute *attr, char *page)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
struct aoedev *d = disk->private_data;
|
||||
struct net_device *nds[8], **nd, **nnd, **ne;
|
||||
struct aoetgt **t, **te;
|
||||
struct aoeif *ifp, *e;
|
||||
char *p;
|
||||
|
||||
memset(nds, 0, sizeof nds);
|
||||
nd = nds;
|
||||
ne = nd + ARRAY_SIZE(nds);
|
||||
t = d->targets;
|
||||
te = t + d->ntargets;
|
||||
for (; t < te && *t; t++) {
|
||||
ifp = (*t)->ifs;
|
||||
e = ifp + NAOEIFS;
|
||||
for (; ifp < e && ifp->nd; ifp++) {
|
||||
for (nnd = nds; nnd < nd; nnd++)
|
||||
if (*nnd == ifp->nd)
|
||||
break;
|
||||
if (nnd == nd && nd != ne)
|
||||
*nd++ = ifp->nd;
|
||||
}
|
||||
}
|
||||
|
||||
ne = nd;
|
||||
nd = nds;
|
||||
if (*nd == NULL)
|
||||
return snprintf(page, PAGE_SIZE, "none\n");
|
||||
for (p = page; nd < ne; nd++)
|
||||
p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
|
||||
p == page ? "" : ",", (*nd)->name);
|
||||
p += snprintf(p, PAGE_SIZE - (p-page), "\n");
|
||||
return p-page;
|
||||
}
|
||||
/* firmware version */
|
||||
static ssize_t aoedisk_show_fwver(struct device *dev,
|
||||
struct device_attribute *attr, char *page)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
struct aoedev *d = disk->private_data;
|
||||
|
||||
return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
|
||||
}
|
||||
static ssize_t aoedisk_show_payload(struct device *dev,
|
||||
struct device_attribute *attr, char *page)
|
||||
{
|
||||
struct gendisk *disk = dev_to_disk(dev);
|
||||
struct aoedev *d = disk->private_data;
|
||||
|
||||
return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
|
||||
}
|
||||
|
||||
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
|
||||
{
|
||||
struct aoedev *d;
|
||||
struct aoetgt **t, **te;
|
||||
struct aoeif *ifp, *ife;
|
||||
unsigned long flags;
|
||||
char c;
|
||||
|
||||
d = s->private;
|
||||
seq_printf(s, "rttavg: %d rttdev: %d\n",
|
||||
d->rttavg >> RTTSCALE,
|
||||
d->rttdev >> RTTDSCALE);
|
||||
seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
|
||||
seq_printf(s, "kicked: %ld\n", d->kicked);
|
||||
seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
|
||||
seq_printf(s, "ref: %ld\n", d->ref);
|
||||
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
t = d->targets;
|
||||
te = t + d->ntargets;
|
||||
for (; t < te && *t; t++) {
|
||||
c = '\t';
|
||||
seq_printf(s, "falloc: %ld\n", (*t)->falloc);
|
||||
seq_printf(s, "ffree: %p\n",
|
||||
list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
|
||||
seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
|
||||
(*t)->maxout, (*t)->nframes);
|
||||
seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
|
||||
seq_printf(s, "\ttaint:%d\n", (*t)->taint);
|
||||
seq_printf(s, "\tr:%d\n", (*t)->rpkts);
|
||||
seq_printf(s, "\tw:%d\n", (*t)->wpkts);
|
||||
ifp = (*t)->ifs;
|
||||
ife = ifp + ARRAY_SIZE((*t)->ifs);
|
||||
for (; ifp->nd && ifp < ife; ifp++) {
|
||||
seq_printf(s, "%c%s", c, ifp->nd->name);
|
||||
c = ',';
|
||||
}
|
||||
seq_puts(s, "\n");
|
||||
}
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aoe_debugfs_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return single_open(file, aoedisk_debugfs_show, inode->i_private);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
|
||||
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
|
||||
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
|
||||
static struct device_attribute dev_attr_firmware_version = {
|
||||
.attr = { .name = "firmware-version", .mode = S_IRUGO },
|
||||
.show = aoedisk_show_fwver,
|
||||
};
|
||||
static DEVICE_ATTR(payload, S_IRUGO, aoedisk_show_payload, NULL);
|
||||
|
||||
static struct attribute *aoe_attrs[] = {
|
||||
&dev_attr_state.attr,
|
||||
&dev_attr_mac.attr,
|
||||
&dev_attr_netif.attr,
|
||||
&dev_attr_firmware_version.attr,
|
||||
&dev_attr_payload.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group attr_group = {
|
||||
.attrs = aoe_attrs,
|
||||
};
|
||||
|
||||
static const struct file_operations aoe_debugfs_fops = {
|
||||
.open = aoe_debugfs_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
static void
|
||||
aoedisk_add_debugfs(struct aoedev *d)
|
||||
{
|
||||
struct dentry *entry;
|
||||
char *p;
|
||||
|
||||
if (aoe_debugfs_dir == NULL)
|
||||
return;
|
||||
p = strchr(d->gd->disk_name, '/');
|
||||
if (p == NULL)
|
||||
p = d->gd->disk_name;
|
||||
else
|
||||
p++;
|
||||
BUG_ON(*p == '\0');
|
||||
entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
|
||||
&aoe_debugfs_fops);
|
||||
if (IS_ERR_OR_NULL(entry)) {
|
||||
pr_info("aoe: cannot create debugfs file for %s\n",
|
||||
d->gd->disk_name);
|
||||
return;
|
||||
}
|
||||
BUG_ON(d->debugfs);
|
||||
d->debugfs = entry;
|
||||
}
|
||||
void
|
||||
aoedisk_rm_debugfs(struct aoedev *d)
|
||||
{
|
||||
debugfs_remove(d->debugfs);
|
||||
d->debugfs = NULL;
|
||||
}
|
||||
|
||||
static int
|
||||
aoedisk_add_sysfs(struct aoedev *d)
|
||||
{
|
||||
return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
|
||||
}
|
||||
void
|
||||
aoedisk_rm_sysfs(struct aoedev *d)
|
||||
{
|
||||
sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
|
||||
}
|
||||
|
||||
static int
|
||||
aoeblk_open(struct block_device *bdev, fmode_t mode)
|
||||
{
|
||||
struct aoedev *d = bdev->bd_disk->private_data;
|
||||
ulong flags;
|
||||
|
||||
if (!virt_addr_valid(d)) {
|
||||
pr_crit("aoe: invalid device pointer in %s\n",
|
||||
__func__);
|
||||
WARN_ON(1);
|
||||
return -ENODEV;
|
||||
}
|
||||
if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
|
||||
return -ENODEV;
|
||||
|
||||
mutex_lock(&aoeblk_mutex);
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
|
||||
d->nopen++;
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
mutex_unlock(&aoeblk_mutex);
|
||||
return 0;
|
||||
}
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
mutex_unlock(&aoeblk_mutex);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static void
|
||||
aoeblk_release(struct gendisk *disk, fmode_t mode)
|
||||
{
|
||||
struct aoedev *d = disk->private_data;
|
||||
ulong flags;
|
||||
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
|
||||
if (--d->nopen == 0) {
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
aoecmd_cfg(d->aoemajor, d->aoeminor);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
}
|
||||
|
||||
static void
|
||||
aoeblk_request(struct request_queue *q)
|
||||
{
|
||||
struct aoedev *d;
|
||||
struct request *rq;
|
||||
|
||||
d = q->queuedata;
|
||||
if ((d->flags & DEVFL_UP) == 0) {
|
||||
pr_info_ratelimited("aoe: device %ld.%d is not up\n",
|
||||
d->aoemajor, d->aoeminor);
|
||||
while ((rq = blk_peek_request(q))) {
|
||||
blk_start_request(rq);
|
||||
aoe_end_request(d, rq, 1);
|
||||
}
|
||||
return;
|
||||
}
|
||||
aoecmd_work(d);
|
||||
}
|
||||
|
||||
static int
|
||||
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
||||
{
|
||||
struct aoedev *d = bdev->bd_disk->private_data;
|
||||
|
||||
if ((d->flags & DEVFL_UP) == 0) {
|
||||
printk(KERN_ERR "aoe: disk not up\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
geo->cylinders = d->geo.cylinders;
|
||||
geo->heads = d->geo.heads;
|
||||
geo->sectors = d->geo.sectors;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
|
||||
{
|
||||
struct aoedev *d;
|
||||
|
||||
if (!arg)
|
||||
return -EINVAL;
|
||||
|
||||
d = bdev->bd_disk->private_data;
|
||||
if ((d->flags & DEVFL_UP) == 0) {
|
||||
pr_err("aoe: disk not up\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (cmd == HDIO_GET_IDENTITY) {
|
||||
if (!copy_to_user((void __user *) arg, &d->ident,
|
||||
sizeof(d->ident)))
|
||||
return 0;
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* udev calls scsi_id, which uses SG_IO, resulting in noise */
|
||||
if (cmd != SG_IO)
|
||||
pr_info("aoe: unknown ioctl 0x%x\n", cmd);
|
||||
|
||||
return -ENOTTY;
|
||||
}
|
||||
|
||||
static const struct block_device_operations aoe_bdops = {
|
||||
.open = aoeblk_open,
|
||||
.release = aoeblk_release,
|
||||
.ioctl = aoeblk_ioctl,
|
||||
.getgeo = aoeblk_getgeo,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/* alloc_disk and add_disk can sleep */
|
||||
void
|
||||
aoeblk_gdalloc(void *vp)
|
||||
{
|
||||
struct aoedev *d = vp;
|
||||
struct gendisk *gd;
|
||||
mempool_t *mp;
|
||||
struct request_queue *q;
|
||||
enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
|
||||
ulong flags;
|
||||
int late = 0;
|
||||
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
if (d->flags & DEVFL_GDALLOC
|
||||
&& !(d->flags & DEVFL_TKILL)
|
||||
&& !(d->flags & DEVFL_GD_NOW))
|
||||
d->flags |= DEVFL_GD_NOW;
|
||||
else
|
||||
late = 1;
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
if (late)
|
||||
return;
|
||||
|
||||
gd = alloc_disk(AOE_PARTITIONS);
|
||||
if (gd == NULL) {
|
||||
pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
|
||||
d->aoemajor, d->aoeminor);
|
||||
goto err;
|
||||
}
|
||||
|
||||
mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
|
||||
buf_pool_cache);
|
||||
if (mp == NULL) {
|
||||
printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
|
||||
d->aoemajor, d->aoeminor);
|
||||
goto err_disk;
|
||||
}
|
||||
q = blk_init_queue(aoeblk_request, &d->lock);
|
||||
if (q == NULL) {
|
||||
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
|
||||
d->aoemajor, d->aoeminor);
|
||||
goto err_mempool;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
WARN_ON(!(d->flags & DEVFL_GD_NOW));
|
||||
WARN_ON(!(d->flags & DEVFL_GDALLOC));
|
||||
WARN_ON(d->flags & DEVFL_TKILL);
|
||||
WARN_ON(d->gd);
|
||||
WARN_ON(d->flags & DEVFL_UP);
|
||||
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
|
||||
q->backing_dev_info.name = "aoe";
|
||||
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
|
||||
d->bufpool = mp;
|
||||
d->blkq = gd->queue = q;
|
||||
q->queuedata = d;
|
||||
d->gd = gd;
|
||||
if (aoe_maxsectors)
|
||||
blk_queue_max_hw_sectors(q, aoe_maxsectors);
|
||||
gd->major = AOE_MAJOR;
|
||||
gd->first_minor = d->sysminor;
|
||||
gd->fops = &aoe_bdops;
|
||||
gd->private_data = d;
|
||||
set_capacity(gd, d->ssize);
|
||||
snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
|
||||
d->aoemajor, d->aoeminor);
|
||||
|
||||
d->flags &= ~DEVFL_GDALLOC;
|
||||
d->flags |= DEVFL_UP;
|
||||
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
|
||||
add_disk(gd);
|
||||
aoedisk_add_sysfs(d);
|
||||
aoedisk_add_debugfs(d);
|
||||
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
WARN_ON(!(d->flags & DEVFL_GD_NOW));
|
||||
d->flags &= ~DEVFL_GD_NOW;
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
return;
|
||||
|
||||
err_mempool:
|
||||
mempool_destroy(mp);
|
||||
err_disk:
|
||||
put_disk(gd);
|
||||
err:
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
d->flags &= ~DEVFL_GD_NOW;
|
||||
schedule_work(&d->work);
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
}
|
||||
|
||||
void
|
||||
aoeblk_exit(void)
|
||||
{
|
||||
debugfs_remove_recursive(aoe_debugfs_dir);
|
||||
aoe_debugfs_dir = NULL;
|
||||
kmem_cache_destroy(buf_pool_cache);
|
||||
}
|
||||
|
||||
int __init
|
||||
aoeblk_init(void)
|
||||
{
|
||||
buf_pool_cache = kmem_cache_create("aoe_bufs",
|
||||
sizeof(struct buf),
|
||||
0, 0, NULL);
|
||||
if (buf_pool_cache == NULL)
|
||||
return -ENOMEM;
|
||||
aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
|
||||
if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
|
||||
pr_info("aoe: cannot create debugfs directory\n");
|
||||
aoe_debugfs_dir = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
320
drivers/block/aoe/aoechr.c
Normal file
320
drivers/block/aoe/aoechr.c
Normal file
|
|
@ -0,0 +1,320 @@
|
|||
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
|
||||
/*
|
||||
* aoechr.c
|
||||
* AoE character device driver
|
||||
*/
|
||||
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/completion.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/export.h>
|
||||
#include "aoe.h"
|
||||
|
||||
enum {
|
||||
//MINOR_STAT = 1, (moved to sysfs)
|
||||
MINOR_ERR = 2,
|
||||
MINOR_DISCOVER,
|
||||
MINOR_INTERFACES,
|
||||
MINOR_REVALIDATE,
|
||||
MINOR_FLUSH,
|
||||
MSGSZ = 2048,
|
||||
NMSG = 100, /* message backlog to retain */
|
||||
};
|
||||
|
||||
struct aoe_chardev {
|
||||
ulong minor;
|
||||
char name[32];
|
||||
};
|
||||
|
||||
enum { EMFL_VALID = 1 };
|
||||
|
||||
struct ErrMsg {
|
||||
short flags;
|
||||
short len;
|
||||
char *msg;
|
||||
};
|
||||
|
||||
static DEFINE_MUTEX(aoechr_mutex);
|
||||
|
||||
/* A ring buffer of error messages, to be read through
|
||||
* "/dev/etherd/err". When no messages are present,
|
||||
* readers will block waiting for messages to appear.
|
||||
*/
|
||||
static struct ErrMsg emsgs[NMSG];
|
||||
static int emsgs_head_idx, emsgs_tail_idx;
|
||||
static struct completion emsgs_comp;
|
||||
static spinlock_t emsgs_lock;
|
||||
static int nblocked_emsgs_readers;
|
||||
static struct class *aoe_class;
|
||||
static struct aoe_chardev chardevs[] = {
|
||||
{ MINOR_ERR, "err" },
|
||||
{ MINOR_DISCOVER, "discover" },
|
||||
{ MINOR_INTERFACES, "interfaces" },
|
||||
{ MINOR_REVALIDATE, "revalidate" },
|
||||
{ MINOR_FLUSH, "flush" },
|
||||
};
|
||||
|
||||
/* broadcast a config query to all shelves and slots */
static int
discover(void)
{
	aoecmd_cfg(0xffff, 0xff);
	return 0;
}
|
||||
|
||||
static int
|
||||
interfaces(const char __user *str, size_t size)
|
||||
{
|
||||
if (set_aoe_iflist(str, size)) {
|
||||
printk(KERN_ERR
|
||||
"aoe: could not set interface list: too many interfaces\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
revalidate(const char __user *str, size_t size)
|
||||
{
|
||||
int major, minor, n;
|
||||
ulong flags;
|
||||
struct aoedev *d;
|
||||
struct sk_buff *skb;
|
||||
char buf[16];
|
||||
|
||||
if (size >= sizeof buf)
|
||||
return -EINVAL;
|
||||
buf[sizeof buf - 1] = '\0';
|
||||
if (copy_from_user(buf, str, size))
|
||||
return -EFAULT;
|
||||
|
||||
n = sscanf(buf, "e%d.%d", &major, &minor);
|
||||
if (n != 2) {
|
||||
pr_err("aoe: invalid device specification %s\n", buf);
|
||||
return -EINVAL;
|
||||
}
|
||||
d = aoedev_by_aoeaddr(major, minor, 0);
|
||||
if (!d)
|
||||
return -EINVAL;
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
aoecmd_cleanslate(d);
|
||||
aoecmd_cfg(major, minor);
|
||||
loop:
|
||||
skb = aoecmd_ata_id(d);
|
||||
spin_unlock_irqrestore(&d->lock, flags);
|
||||
/* try again if we are able to sleep a bit,
|
||||
* otherwise give up this revalidation
|
||||
*/
|
||||
if (!skb && !msleep_interruptible(250)) {
|
||||
spin_lock_irqsave(&d->lock, flags);
|
||||
goto loop;
|
||||
}
|
||||
aoedev_put(d);
|
||||
if (skb) {
|
||||
struct sk_buff_head queue;
|
||||
__skb_queue_head_init(&queue);
|
||||
__skb_queue_tail(&queue, skb);
|
||||
aoenet_xmit(&queue);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
aoechr_error(char *msg)
|
||||
{
|
||||
struct ErrMsg *em;
|
||||
char *mp;
|
||||
ulong flags, n;
|
||||
|
||||
n = strlen(msg);
|
||||
|
||||
spin_lock_irqsave(&emsgs_lock, flags);
|
||||
|
||||
em = emsgs + emsgs_tail_idx;
|
||||
if ((em->flags & EMFL_VALID)) {
|
||||
bail: spin_unlock_irqrestore(&emsgs_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
mp = kmemdup(msg, n, GFP_ATOMIC);
|
||||
if (mp == NULL) {
|
||||
printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
|
||||
goto bail;
|
||||
}
|
||||
|
||||
em->msg = mp;
|
||||
em->flags |= EMFL_VALID;
|
||||
em->len = n;
|
||||
|
||||
emsgs_tail_idx++;
|
||||
emsgs_tail_idx %= ARRAY_SIZE(emsgs);
|
||||
|
||||
spin_unlock_irqrestore(&emsgs_lock, flags);
|
||||
|
||||
if (nblocked_emsgs_readers)
|
||||
complete(&emsgs_comp);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
switch ((unsigned long) filp->private_data) {
|
||||
default:
|
||||
printk(KERN_INFO "aoe: can't write to that file.\n");
|
||||
break;
|
||||
case MINOR_DISCOVER:
|
||||
ret = discover();
|
||||
break;
|
||||
case MINOR_INTERFACES:
|
||||
ret = interfaces(buf, cnt);
|
||||
break;
|
||||
case MINOR_REVALIDATE:
|
||||
ret = revalidate(buf, cnt);
|
||||
break;
|
||||
case MINOR_FLUSH:
|
||||
ret = aoedev_flush(buf, cnt);
|
||||
break;
|
||||
}
|
||||
if (ret == 0)
|
||||
ret = cnt;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
aoechr_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
int n, i;
|
||||
|
||||
mutex_lock(&aoechr_mutex);
|
||||
n = iminor(inode);
|
||||
filp->private_data = (void *) (unsigned long) n;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
|
||||
if (chardevs[i].minor == n) {
|
||||
mutex_unlock(&aoechr_mutex);
|
||||
return 0;
|
||||
}
|
||||
mutex_unlock(&aoechr_mutex);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* release handler: nothing to clean up */
static int
aoechr_rel(struct inode *inode, struct file *filp)
{
	return 0;
}
|
||||
|
||||
static ssize_t
|
||||
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
|
||||
{
|
||||
unsigned long n;
|
||||
char *mp;
|
||||
struct ErrMsg *em;
|
||||
ssize_t len;
|
||||
ulong flags;
|
||||
|
||||
n = (unsigned long) filp->private_data;
|
||||
if (n != MINOR_ERR)
|
||||
return -EFAULT;
|
||||
|
||||
spin_lock_irqsave(&emsgs_lock, flags);
|
||||
|
||||
for (;;) {
|
||||
em = emsgs + emsgs_head_idx;
|
||||
if ((em->flags & EMFL_VALID) != 0)
|
||||
break;
|
||||
if (filp->f_flags & O_NDELAY) {
|
||||
spin_unlock_irqrestore(&emsgs_lock, flags);
|
||||
return -EAGAIN;
|
||||
}
|
||||
nblocked_emsgs_readers++;
|
||||
|
||||
spin_unlock_irqrestore(&emsgs_lock, flags);
|
||||
|
||||
n = wait_for_completion_interruptible(&emsgs_comp);
|
||||
|
||||
spin_lock_irqsave(&emsgs_lock, flags);
|
||||
|
||||
nblocked_emsgs_readers--;
|
||||
|
||||
if (n) {
|
||||
spin_unlock_irqrestore(&emsgs_lock, flags);
|
||||
return -ERESTARTSYS;
|
||||
}
|
||||
}
|
||||
if (em->len > cnt) {
|
||||
spin_unlock_irqrestore(&emsgs_lock, flags);
|
||||
return -EAGAIN;
|
||||
}
|
||||
mp = em->msg;
|
||||
len = em->len;
|
||||
em->msg = NULL;
|
||||
em->flags &= ~EMFL_VALID;
|
||||
|
||||
emsgs_head_idx++;
|
||||
emsgs_head_idx %= ARRAY_SIZE(emsgs);
|
||||
|
||||
spin_unlock_irqrestore(&emsgs_lock, flags);
|
||||
|
||||
n = copy_to_user(buf, mp, len);
|
||||
kfree(mp);
|
||||
return n == 0 ? len : -EFAULT;
|
||||
}
|
||||
|
||||
static const struct file_operations aoe_fops = {
|
||||
.write = aoechr_write,
|
||||
.read = aoechr_read,
|
||||
.open = aoechr_open,
|
||||
.release = aoechr_rel,
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = noop_llseek,
|
||||
};
|
||||
|
||||
/* place the aoe character devices under /dev/etherd/ */
static char *aoe_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
|
||||
|
||||
int __init
|
||||
aoechr_init(void)
|
||||
{
|
||||
int n, i;
|
||||
|
||||
n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
|
||||
if (n < 0) {
|
||||
printk(KERN_ERR "aoe: can't register char device\n");
|
||||
return n;
|
||||
}
|
||||
init_completion(&emsgs_comp);
|
||||
spin_lock_init(&emsgs_lock);
|
||||
aoe_class = class_create(THIS_MODULE, "aoe");
|
||||
if (IS_ERR(aoe_class)) {
|
||||
unregister_chrdev(AOE_MAJOR, "aoechr");
|
||||
return PTR_ERR(aoe_class);
|
||||
}
|
||||
aoe_class->devnode = aoe_devnode;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
|
||||
device_create(aoe_class, NULL,
|
||||
MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
|
||||
chardevs[i].name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
aoechr_exit(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
|
||||
device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
|
||||
class_destroy(aoe_class);
|
||||
unregister_chrdev(AOE_MAJOR, "aoechr");
|
||||
}
|
||||
|
||||
1826
drivers/block/aoe/aoecmd.c
Normal file
1826
drivers/block/aoe/aoecmd.c
Normal file
File diff suppressed because it is too large
Load diff
526
drivers/block/aoe/aoedev.c
Normal file
526
drivers/block/aoe/aoedev.c
Normal file
|
|
@ -0,0 +1,526 @@
|
|||
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
|
||||
/*
|
||||
* aoedev.c
|
||||
* AoE device utility functions; maintains device list.
|
||||
*/
|
||||
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/kdev_t.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/string.h>
|
||||
#include "aoe.h"
|
||||
|
||||
static void dummy_timer(ulong);
|
||||
static void freetgt(struct aoedev *d, struct aoetgt *t);
|
||||
static void skbpoolfree(struct aoedev *d);
|
||||
|
||||
static int aoe_dyndevs = 1;
|
||||
module_param(aoe_dyndevs, int, 0644);
|
||||
MODULE_PARM_DESC(aoe_dyndevs, "Use dynamic minor numbers for devices.");
|
||||
|
||||
static struct aoedev *devlist;
|
||||
static DEFINE_SPINLOCK(devlist_lock);
|
||||
|
||||
/* Because some systems will have one, many, or no
|
||||
* - partitions,
|
||||
* - slots per shelf,
|
||||
* - or shelves,
|
||||
* we need some flexibility in the way the minor numbers
|
||||
* are allocated. So they are dynamic.
|
||||
*/
|
||||
#define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS)
|
||||
|
||||
static DEFINE_SPINLOCK(used_minors_lock);
|
||||
static DECLARE_BITMAP(used_minors, N_DEVS);
|
||||
|
||||
static int
|
||||
minor_get_dyn(ulong *sysminor)
|
||||
{
|
||||
ulong flags;
|
||||
ulong n;
|
||||
int error = 0;
|
||||
|
||||
spin_lock_irqsave(&used_minors_lock, flags);
|
||||
n = find_first_zero_bit(used_minors, N_DEVS);
|
||||
if (n < N_DEVS)
|
||||
set_bit(n, used_minors);
|
||||
else
|
||||
error = -1;
|
||||
spin_unlock_irqrestore(&used_minors_lock, flags);
|
||||
|
||||
*sysminor = n * AOE_PARTITIONS;
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
 * minor_get_static - reserve a minor using the legacy fixed mapping.
 *
 * Maps AoE address (aoemaj, aoemin) to bitmap slot aoemaj*16 + aoemin
 * and reserves it if free.  Stores the resulting system minor through
 * *sysminor and returns 0 on success, -1 on any failure (slot number
 * out of range, or already in use).
 */
static int
minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin)
{
	ulong flags;
	ulong n;
	int error = 0;
	enum {
		/* for backwards compatibility when !aoe_dyndevs,
		 * a static number of supported slots per shelf */
		NPERSHELF = 16,
	};

	/* slots per shelf is fixed; larger slot numbers can't be mapped */
	if (aoemin >= NPERSHELF) {
		pr_err("aoe: %s %d slots per shelf\n",
			"static minor device numbers support only",
			NPERSHELF);
		error = -1;
		goto out;
	}

	n = aoemaj * NPERSHELF + aoemin;
	if (n >= N_DEVS) {
		pr_err("aoe: %s with e%ld.%d\n",
			"cannot use static minor device numbers",
			aoemaj, aoemin);
		error = -1;
		goto out;
	}

	spin_lock_irqsave(&used_minors_lock, flags);
	if (test_bit(n, used_minors)) {
		pr_err("aoe: %s %lu\n",
			"existing device already has static minor number",
			n);
		error = -1;
	} else
		set_bit(n, used_minors);
	spin_unlock_irqrestore(&used_minors_lock, flags);
	*sysminor = n * AOE_PARTITIONS;
out:
	return error;
}
|
||||
|
||||
static int
|
||||
minor_get(ulong *sysminor, ulong aoemaj, int aoemin)
|
||||
{
|
||||
if (aoe_dyndevs)
|
||||
return minor_get_dyn(sysminor);
|
||||
else
|
||||
return minor_get_static(sysminor, aoemaj, aoemin);
|
||||
}
|
||||
|
||||
/*
 * minor_free - return a system minor number to the allocation bitmap.
 *
 * The argument is the system minor (slot * AOE_PARTITIONS), so divide
 * back down to the bitmap slot.  BUGs if the slot is out of range or
 * was not actually reserved — both indicate driver-internal corruption.
 */
static void
minor_free(ulong minor)
{
	ulong flags;

	minor /= AOE_PARTITIONS;
	BUG_ON(minor >= N_DEVS);

	spin_lock_irqsave(&used_minors_lock, flags);
	BUG_ON(!test_bit(minor, used_minors));
	clear_bit(minor, used_minors);
	spin_unlock_irqrestore(&used_minors_lock, flags);
}
|
||||
|
||||
/*
|
||||
* Users who grab a pointer to the device with aoedev_by_aoeaddr
|
||||
* automatically get a reference count and must be responsible
|
||||
* for performing a aoedev_put. With the addition of async
|
||||
* kthread processing I'm no longer confident that we can
|
||||
* guarantee consistency in the face of device flushes.
|
||||
*
|
||||
* For the time being, we only bother to add extra references for
|
||||
* frames sitting on the iocq. When the kthreads finish processing
|
||||
* these frames, they will aoedev_put the device.
|
||||
*/
|
||||
|
||||
/*
 * aoedev_put - drop a reference taken by aoedev_by_aoeaddr.
 *
 * The count is protected by devlist_lock rather than being atomic;
 * flush() reads d->ref under the same lock to decide whether a device
 * may be taken down.
 */
void
aoedev_put(struct aoedev *d)
{
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);
	d->ref--;
	spin_unlock_irqrestore(&devlist_lock, flags);
}
|
||||
|
||||
/*
 * dummy_timer - keep a 1 Hz per-device timer alive.
 *
 * Re-arms itself every HZ jiffies until the device is marked
 * DEVFL_TKILL, after which it simply stops rescheduling.  vp is the
 * struct aoedev pointer stashed in timer.data at device creation.
 */
static void
dummy_timer(ulong vp)
{
	struct aoedev *d;

	d = (struct aoedev *)vp;
	if (d->flags & DEVFL_TKILL)
		return;		/* device is being torn down; let the timer die */
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
}
|
||||
|
||||
/*
 * aoe_failip - fail the in-process request on a downed device.
 *
 * Fails the current buf, then walks the not-yet-issued bios of the
 * in-process request, marking each !BIO_UPTODATE and decrementing the
 * outstanding-bio count kept in rq->special.  When that count reaches
 * zero the whole request is completed with an error.
 */
static void
aoe_failip(struct aoedev *d)
{
	struct request *rq;
	struct bio *bio;
	unsigned long n;

	aoe_failbuf(d, d->ip.buf);

	rq = d->ip.rq;
	if (rq == NULL)
		return;		/* nothing in process */
	while ((bio = d->ip.nxbio)) {
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
		d->ip.nxbio = bio->bi_next;
		/* rq->special holds the count of bios still outstanding */
		n = (unsigned long) rq->special;
		rq->special = (void *) --n;
	}
	if ((unsigned long) rq->special == 0)
		aoe_end_request(d, rq, 0);
}
|
||||
|
||||
/*
 * downdev_frame - retire one outstanding frame while taking a device down.
 *
 * Unlinks the frame from whatever list it is on, fails its buf (if it
 * carries one) after dropping the buf's outstanding-frame count, and
 * returns the frame to its target's free list.
 */
static void
downdev_frame(struct list_head *pos)
{
	struct frame *f;

	f = list_entry(pos, struct frame, head);
	list_del(pos);
	if (f->buf) {
		f->buf->nframesout--;
		aoe_failbuf(f->t->d, f->buf);
	}
	aoe_freetframe(f);
}
|
||||
|
||||
/*
 * aoedev_downdev - take a device out of service.
 *
 * Clears DEVFL_UP, fails every outstanding frame (active hash lists
 * and the retransmit queue), resets each target's congestion window
 * state, fails the in-process request, fast-fails everything still
 * queued on the block layer, and zeroes the advertised capacity.
 * Caller is expected to hold the device lock.
 */
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt *t, **tt, **te;
	struct list_head *head, *pos, *nx;
	struct request *rq;
	int i;

	d->flags &= ~DEVFL_UP;

	/* clean out active and to-be-retransmitted buffers */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head)
			downdev_frame(pos);
	}
	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head)
		downdev_frame(pos);

	/* reset window dressings */
	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && (t = *tt); tt++) {
		aoecmd_wreset(t);
		t->nout = 0;
	}

	/* clean out the in-process request (if any) */
	aoe_failip(d);

	/* fast fail all pending I/O */
	if (d->blkq) {
		while ((rq = blk_peek_request(d->blkq))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
	}

	if (d->gd)
		set_capacity(d->gd, 0);
}
|
||||
|
||||
/* return whether the user asked for this particular
|
||||
* device to be flushed
|
||||
*/
|
||||
static int
|
||||
user_req(char *s, size_t slen, struct aoedev *d)
|
||||
{
|
||||
const char *p;
|
||||
size_t lim;
|
||||
|
||||
if (!d->gd)
|
||||
return 0;
|
||||
p = kbasename(d->gd->disk_name);
|
||||
lim = sizeof(d->gd->disk_name);
|
||||
lim -= p - d->gd->disk_name;
|
||||
if (slen < lim)
|
||||
lim = slen;
|
||||
|
||||
return !strncmp(s, p, lim);
|
||||
}
|
||||
|
||||
/*
 * freedev - release a device's resources; may sleep.
 *
 * Only one caller wins the race to free: under the device lock,
 * DEVFL_FREEING is set exactly once for a DEVFL_TKILL'd device, and
 * losers return immediately.  The winner stops the dummy timer,
 * removes the gendisk and its sysfs/debugfs entries, frees all
 * targets, pools, and the minor number, then marks the device
 * DEVFL_FREED so flush() pass three can unlink and kfree it.
 */
static void
freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int freeing = 0;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_TKILL
	&& !(d->flags & DEVFL_FREEING)) {
		d->flags |= DEVFL_FREEING;
		freeing = 1;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	if (!freeing)
		return;		/* someone else is (or was) freeing this device */

	del_timer_sync(&d->timer);
	if (d->gd) {
		aoedisk_rm_debugfs(d);
		aoedisk_rm_sysfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
		blk_cleanup_queue(d->blkq);
	}
	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		freetgt(d, *t);
	if (d->bufpool)
		mempool_destroy(d->bufpool);
	skbpoolfree(d);
	minor_free(d->sysminor);

	/* the struct itself is reclaimed later, by flush() pass three */
	spin_lock_irqsave(&d->lock, flags);
	d->flags |= DEVFL_FREED;
	spin_unlock_irqrestore(&d->lock, flags);
}
|
||||
|
||||
/* whether flush() is being called from module unload */
enum flush_parms {
	NOT_EXITING = 0,
	EXITING = 1,
};

/*
 * flush - take down and free aoe devices in three passes.
 *
 * str/cnt may name a single device ("eX.Y"), or "all" to force
 * flushing of up devices too; with neither, only devices that are
 * down, idle, and unreferenced are flushed.  exiting (module unload)
 * takes everything down unconditionally.
 *
 * Pass one (no sleeping, under devlist_lock): aoedev_downdev and mark
 * DEVFL_TKILL.  Pass two: freedev(), which may sleep, so the locks are
 * dropped and the scan restarted after each call.  Pass three: unlink
 * and kfree devices that freedev marked DEVFL_FREED.
 */
static int
flush(const char __user *str, size_t cnt, int exiting)
{
	ulong flags;
	struct aoedev *d, **dd;
	char buf[16];
	int all = 0;
	int specified = 0;	/* flush a specific device */
	unsigned int skipflags;

	/* devices in these transitional states are never auto-flushed */
	skipflags = DEVFL_GDALLOC | DEVFL_NEWSIZE | DEVFL_TKILL;

	if (!exiting && cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		all = !strncmp(buf, "all", 3);
		if (!all)
			specified = 1;
	}

	flush_scheduled_work();
	/* pass one: without sleeping, do aoedev_downdev */
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);
		if (exiting) {
			/* unconditionally take each device down */
		} else if (specified) {
			if (!user_req(buf, cnt, d))
				goto cont;
		} else if ((!all && (d->flags & DEVFL_UP))
		|| d->flags & skipflags
		|| d->nopen
		|| d->ref)
			goto cont;

		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
cont:
		spin_unlock(&d->lock);
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	/* pass two: call freedev, which might sleep,
	 * for aoedevs marked with DEVFL_TKILL
	 */
restart:
	spin_lock_irqsave(&devlist_lock, flags);
	for (d = devlist; d; d = d->next) {
		spin_lock(&d->lock);
		if (d->flags & DEVFL_TKILL
		&& !(d->flags & DEVFL_FREEING)) {
			/* drop both locks before the sleeping call,
			 * then rescan from the head of the list */
			spin_unlock(&d->lock);
			spin_unlock_irqrestore(&devlist_lock, flags);
			freedev(d);
			goto restart;
		}
		spin_unlock(&d->lock);
	}

	/* pass three: remove aoedevs marked with DEVFL_FREED */
	for (dd = &devlist, d = *dd; d; d = *dd) {
		struct aoedev *doomed = NULL;

		spin_lock(&d->lock);
		if (d->flags & DEVFL_FREED) {
			*dd = d->next;
			doomed = d;
		} else {
			dd = &d->next;
		}
		spin_unlock(&d->lock);
		if (doomed)
			kfree(doomed->targets);
		kfree(doomed);	/* kfree(NULL) is a no-op for kept devices */
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	return 0;
}
|
||||
|
||||
/*
 * aoedev_flush - userspace-facing entry point for device flushing.
 * Thin wrapper around flush() for the non-unload case.
 */
int
aoedev_flush(const char __user *str, size_t cnt)
{
	return flush(str, cnt, NOT_EXITING);
}
|
||||
|
||||
/* This has been confirmed to occur once with Tms=3*1000 due to the
 * driver changing link and not processing its transmit ring. The
 * problem is hard enough to solve by returning an error that I'm
 * still punting on "solving" this.
 */
static void
skbfree(struct sk_buff *skb)
{
	enum { Sms = 250, Tms = 30 * 1000};
	int i = Tms / Sms;

	if (skb == NULL)
		return;
	/* wait (up to Tms, polling every Sms) until we hold the only
	 * reference to the skb data; a driver may still be transmitting */
	while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
		msleep(Sms);
	if (i < 0) {
		/* timed out: deliberately leak rather than free data
		 * something else may still reference */
		printk(KERN_ERR
			"aoe: %s holds ref: %s\n",
			skb->dev ? skb->dev->name : "netif",
			"cannot free skb -- memory leaked.");
		return;
	}
	/* strip any paged fragments and payload before freeing */
	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	dev_kfree_skb(skb);
}
|
||||
|
||||
/*
 * skbpoolfree - free every skb in the device's preallocated pool
 * and reinitialize the (now empty) queue head.
 */
static void
skbpoolfree(struct aoedev *d)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&d->skbpool, skb, tmp)
		skbfree(skb);

	__skb_queue_head_init(&d->skbpool);
}
|
||||
|
||||
/* find it or allocate it */
|
||||
struct aoedev *
|
||||
aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
|
||||
{
|
||||
struct aoedev *d;
|
||||
int i;
|
||||
ulong flags;
|
||||
ulong sysminor = 0;
|
||||
|
||||
spin_lock_irqsave(&devlist_lock, flags);
|
||||
|
||||
for (d=devlist; d; d=d->next)
|
||||
if (d->aoemajor == maj && d->aoeminor == min) {
|
||||
spin_lock(&d->lock);
|
||||
if (d->flags & DEVFL_TKILL) {
|
||||
spin_unlock(&d->lock);
|
||||
d = NULL;
|
||||
goto out;
|
||||
}
|
||||
d->ref++;
|
||||
spin_unlock(&d->lock);
|
||||
break;
|
||||
}
|
||||
if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0)
|
||||
goto out;
|
||||
d = kcalloc(1, sizeof *d, GFP_ATOMIC);
|
||||
if (!d)
|
||||
goto out;
|
||||
d->targets = kcalloc(NTARGETS, sizeof(*d->targets), GFP_ATOMIC);
|
||||
if (!d->targets) {
|
||||
kfree(d);
|
||||
d = NULL;
|
||||
goto out;
|
||||
}
|
||||
d->ntargets = NTARGETS;
|
||||
INIT_WORK(&d->work, aoecmd_sleepwork);
|
||||
spin_lock_init(&d->lock);
|
||||
skb_queue_head_init(&d->skbpool);
|
||||
init_timer(&d->timer);
|
||||
d->timer.data = (ulong) d;
|
||||
d->timer.function = dummy_timer;
|
||||
d->timer.expires = jiffies + HZ;
|
||||
add_timer(&d->timer);
|
||||
d->bufpool = NULL; /* defer to aoeblk_gdalloc */
|
||||
d->tgt = d->targets;
|
||||
d->ref = 1;
|
||||
for (i = 0; i < NFACTIVE; i++)
|
||||
INIT_LIST_HEAD(&d->factive[i]);
|
||||
INIT_LIST_HEAD(&d->rexmitq);
|
||||
d->sysminor = sysminor;
|
||||
d->aoemajor = maj;
|
||||
d->aoeminor = min;
|
||||
d->rttavg = RTTAVG_INIT;
|
||||
d->rttdev = RTTDEV_INIT;
|
||||
d->next = devlist;
|
||||
devlist = d;
|
||||
out:
|
||||
spin_unlock_irqrestore(&devlist_lock, flags);
|
||||
return d;
|
||||
}
|
||||
|
||||
/*
 * freetgt - free one target: drop its interface references, free all
 * frames (and their skbs) on the target's free list, then the target
 * itself.  d is unused here but kept for signature symmetry with the
 * other per-device helpers.
 */
static void
freetgt(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct list_head *pos, *nx, *head;
	struct aoeif *ifp;

	/* ifs[] is packed; the first NULL nd terminates the list */
	for (ifp = t->ifs; ifp < &t->ifs[NAOEIFS]; ++ifp) {
		if (!ifp->nd)
			break;
		dev_put(ifp->nd);
	}

	head = &t->ffree;
	list_for_each_safe(pos, nx, head) {
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		skbfree(f->skb);
		kfree(f);
	}
	kfree(t);
}
|
||||
|
||||
/*
 * aoedev_exit - module-unload teardown: drain deferred work, then
 * flush (take down and free) every device unconditionally.
 */
void
aoedev_exit(void)
{
	flush_scheduled_work();
	flush(NULL, 0, EXITING);
}
|
||||
|
||||
/*
 * aoedev_init - device-layer init hook; nothing to set up
 * (static initializers cover the locks and bitmap), always succeeds.
 */
int __init
aoedev_init(void)
{
	return 0;
}
|
||||
115
drivers/block/aoe/aoemain.c
Normal file
115
drivers/block/aoe/aoemain.c
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
|
||||
/*
|
||||
* aoemain.c
|
||||
* Module initialization routines, discover timer
|
||||
*/
|
||||
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include "aoe.h"
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Sam Hopkins <sah@coraid.com>");
|
||||
MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels");
|
||||
MODULE_VERSION(VERSION);
|
||||
|
||||
enum { TINIT, TRUN, TKILL };
|
||||
|
||||
/*
 * discover_timer - self-rearming once-a-minute discovery broadcast.
 *
 * Doubles as its own control interface: called with TINIT to start,
 * TKILL to stop, and re-enters itself with TRUN as the timer callback
 * (vp is the timer's data field).  The TINIT and TKILL cases fall
 * through deliberately.
 */
static void
discover_timer(ulong vp)
{
	static struct timer_list t;
	static volatile ulong die;
	static spinlock_t lock;
	ulong flags;
	enum { DTIMERTICK = HZ * 60 }; /* one minute */

	switch (vp) {
	case TINIT:
		init_timer(&t);
		spin_lock_init(&lock);
		t.data = TRUN;
		t.function = discover_timer;
		die = 0;
		/* fall through: start the first tick immediately */
	case TRUN:
		spin_lock_irqsave(&lock, flags);
		if (!die) {	/* don't re-arm once TKILL has run */
			t.expires = jiffies + DTIMERTICK;
			add_timer(&t);
		}
		spin_unlock_irqrestore(&lock, flags);

		/* broadcast a config query to all shelves/slots */
		aoecmd_cfg(0xffff, 0xff);
		return;
	case TKILL:
		spin_lock_irqsave(&lock, flags);
		die = 1;
		spin_unlock_irqrestore(&lock, flags);

		del_timer_sync(&t);
		/* fall through */
	default:
		return;
	}
}
|
||||
|
||||
/*
 * aoe_exit - module unload: stop discovery, then tear the layers down
 * in reverse order of initialization.
 */
static void
aoe_exit(void)
{
	discover_timer(TKILL);

	aoenet_exit();
	unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
	aoecmd_exit();
	aoechr_exit();
	aoedev_exit();
	aoeblk_exit();		/* free cache after de-allocating bufs */
}
|
||||
|
||||
/*
 * aoe_init - module load: bring up each subsystem in order
 * (dev, chr, blk, net, cmd), register the block major, and start the
 * discovery timer.  On any failure, unwind everything already
 * initialized via the goto chain and return the error.
 */
static int __init
aoe_init(void)
{
	int ret;

	ret = aoedev_init();
	if (ret)
		return ret;
	ret = aoechr_init();
	if (ret)
		goto chr_fail;
	ret = aoeblk_init();
	if (ret)
		goto blk_fail;
	ret = aoenet_init();
	if (ret)
		goto net_fail;
	ret = aoecmd_init();
	if (ret)
		goto cmd_fail;
	ret = register_blkdev(AOE_MAJOR, DEVICE_NAME);
	if (ret < 0) {
		printk(KERN_ERR "aoe: can't register major\n");
		goto blkreg_fail;
	}
	printk(KERN_INFO "aoe: AoE v%s initialised.\n", VERSION);
	discover_timer(TINIT);
	return 0;
	/* unwind in reverse order; each label undoes one successful step */
 blkreg_fail:
	aoecmd_exit();
 cmd_fail:
	aoenet_exit();
 net_fail:
	aoeblk_exit();
 blk_fail:
	aoechr_exit();
 chr_fail:
	aoedev_exit();

	printk(KERN_INFO "aoe: initialisation failure.\n");
	return ret;
}
|
||||
|
||||
module_init(aoe_init);
|
||||
module_exit(aoe_exit);
|
||||
|
||||
223
drivers/block/aoe/aoenet.c
Normal file
223
drivers/block/aoe/aoenet.c
Normal file
|
|
@ -0,0 +1,223 @@
|
|||
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
|
||||
/*
|
||||
* aoenet.c
|
||||
* Ethernet portion of AoE driver
|
||||
*/
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include "aoe.h"
|
||||
|
||||
#define NECODES 5
|
||||
|
||||
static char *aoe_errlist[] =
|
||||
{
|
||||
"no such error",
|
||||
"unrecognized command code",
|
||||
"bad argument parameter",
|
||||
"device unavailable",
|
||||
"config string present",
|
||||
"unsupported version"
|
||||
};
|
||||
|
||||
enum {
|
||||
IFLISTSZ = 1024,
|
||||
};
|
||||
|
||||
static char aoe_iflist[IFLISTSZ];
|
||||
module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600);
|
||||
MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=dev1[,dev2...]");
|
||||
|
||||
static wait_queue_head_t txwq;
|
||||
static struct ktstate kts;
|
||||
|
||||
#ifndef MODULE
|
||||
static int __init aoe_iflist_setup(char *str)
|
||||
{
|
||||
strncpy(aoe_iflist, str, IFLISTSZ);
|
||||
aoe_iflist[IFLISTSZ - 1] = '\0';
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("aoe_iflist=", aoe_iflist_setup);
|
||||
#endif
|
||||
|
||||
static spinlock_t txlock;
|
||||
static struct sk_buff_head skbtxq;
|
||||
|
||||
/* enters with txlock held */
/*
 * tx - kthread worker: drain the transmit queue.
 *
 * Drops txlock around dev_queue_xmit (which may sleep/schedule) and
 * reacquires it before the next dequeue; the skb is already unlinked,
 * so dropping the lock is safe.  NET_XMIT_DROP is only logged — the
 * retransmit machinery recovers lost packets.  Always returns 0.
 */
static int
tx(int id) __must_hold(&txlock)
{
	struct sk_buff *skb;
	struct net_device *ifp;

	while ((skb = skb_dequeue(&skbtxq))) {
		spin_unlock_irq(&txlock);
		ifp = skb->dev;
		if (dev_queue_xmit(skb) == NET_XMIT_DROP && net_ratelimit())
			pr_warn("aoe: packet could not be sent on %s. %s\n",
				ifp ? ifp->name : "netif",
				"consider increasing tx_queue_len");
		spin_lock_irq(&txlock);
	}
	return 0;
}
|
||||
|
||||
int
|
||||
is_aoe_netif(struct net_device *ifp)
|
||||
{
|
||||
register char *p, *q;
|
||||
register int len;
|
||||
|
||||
if (aoe_iflist[0] == '\0')
|
||||
return 1;
|
||||
|
||||
p = aoe_iflist + strspn(aoe_iflist, WHITESPACE);
|
||||
for (; *p; p = q + strspn(q, WHITESPACE)) {
|
||||
q = p + strcspn(p, WHITESPACE);
|
||||
if (q != p)
|
||||
len = q - p;
|
||||
else
|
||||
len = strlen(p); /* last token in aoe_iflist */
|
||||
|
||||
if (strlen(ifp->name) == len && !strncmp(ifp->name, p, len))
|
||||
return 1;
|
||||
if (q == p)
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
set_aoe_iflist(const char __user *user_str, size_t size)
|
||||
{
|
||||
if (size >= IFLISTSZ)
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(aoe_iflist, user_str, size)) {
|
||||
printk(KERN_INFO "aoe: copy from user failed\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
aoe_iflist[size] = 0x00;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * aoenet_xmit - hand a queue of outgoing skbs to the tx kthread.
 *
 * Moves each skb from the caller's queue onto the shared skbtxq
 * (under txlock) and wakes the transmit thread after each one.
 */
void
aoenet_xmit(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *tmp;
	ulong flags;

	skb_queue_walk_safe(queue, skb, tmp) {
		__skb_unlink(skb, queue);
		spin_lock_irqsave(&txlock, flags);
		skb_queue_tail(&skbtxq, skb);
		spin_unlock_irqrestore(&txlock, flags);
		wake_up(&txwq);
	}
}
|
||||
|
||||
/*
 * (1) len doesn't include the header by default. I want this.
 */
/*
 * aoenet_rcv - packet_type handler for ETH_P_AOE frames.
 *
 * Filters to init_net and permitted interfaces, makes sure the AoE +
 * ATA headers are linear, screens out non-responses, our own tag
 * space (bit 31), and error packets, then dispatches by command.
 * aoecmd_ata_rsp may consume the skb (returning NULL); any skb still
 * owned here is freed before returning.  Always returns 0.
 */
static int
aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	u32 n;
	int sn;

	if (dev_net(ifp) != &init_net)
		goto exit;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		return 0;
	if (!is_aoe_netif(ifp))
		goto exit;
	skb_push(skb, ETH_HLEN);	/* (1) */
	/* ensure both headers are in the linear area before casting */
	sn = sizeof(*h) + sizeof(*ah);
	if (skb->len >= sn) {
		sn -= skb_headlen(skb);
		if (sn > 0 && !__pskb_pull_tail(skb, sn))
			goto exit;
	}
	h = (struct aoe_hdr *) skb->data;
	n = get_unaligned_be32(&h->tag);
	/* ignore requests and tags from our own tag space (bit 31) */
	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
		goto exit;

	if (h->verfl & AOEFL_ERR) {
		n = h->err;
		if (n > NECODES)
			n = 0;	/* unknown code: report "no such error" */
		if (net_ratelimit())
			printk(KERN_ERR
				"%s%d.%d@%s; ecode=%d '%s'\n",
				"aoe: error packet from ",
				get_unaligned_be16(&h->major),
				h->minor, skb->dev->name,
				h->err, aoe_errlist[n]);
		goto exit;
	}

	switch (h->cmd) {
	case AOECMD_ATA:
		/* ata_rsp may keep skb for later processing or give it back */
		skb = aoecmd_ata_rsp(skb);
		break;
	case AOECMD_CFG:
		aoecmd_cfg_rsp(skb);
		break;
	default:
		if (h->cmd >= AOECMD_VEND_MIN)
			break;	/* don't complain about vendor commands */
		pr_info("aoe: unknown AoE command type 0x%02x\n", h->cmd);
		break;
	}

	if (!skb)
		return 0;	/* ownership was transferred */
exit:
	dev_kfree_skb(skb);
	return 0;
}
|
||||
|
||||
/* packet handler registration: deliver all ETH_P_AOE frames to aoenet_rcv */
static struct packet_type aoe_pt __read_mostly = {
	.type = __constant_htons(ETH_P_AOE),
	.func = aoenet_rcv,
};
|
||||
|
||||
/*
 * aoenet_init - set up the transmit queue, its lock and waitqueue,
 * start the tx kthread, and register the AoE packet handler.
 * Returns 0 on success, -EAGAIN if the kthread can't be started.
 */
int __init
aoenet_init(void)
{
	skb_queue_head_init(&skbtxq);
	init_waitqueue_head(&txwq);
	spin_lock_init(&txlock);
	kts.lock = &txlock;
	kts.fn = tx;
	kts.waitq = &txwq;
	kts.id = 0;
	snprintf(kts.name, sizeof(kts.name), "aoe_tx%d", kts.id);
	if (aoe_ktstart(&kts))
		return -EAGAIN;
	dev_add_pack(&aoe_pt);
	return 0;
}
|
||||
|
||||
/*
 * aoenet_exit - stop the tx kthread, drop any queued skbs, and
 * unregister the AoE packet handler.
 */
void
aoenet_exit(void)
{
	aoe_ktstop(&kts);
	skb_queue_purge(&skbtxq);
	dev_remove_pack(&aoe_pt);
}
|
||||
|
||||
Loading…
Add table
Add a link
Reference in a new issue