Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 01:08:03 -04:00)
Fixed MTP to work with TWRP
Commit f6dfaef42e: 50820 changed files with 20846062 additions and 0 deletions
drivers/vhost/Kconfig (new file, 34 lines)
@@ -0,0 +1,34 @@
config VHOST_NET
	tristate "Host kernel accelerator for virtio net"
	depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
	select VHOST
	select VHOST_RING
	---help---
	  This kernel module can be loaded in host kernel to accelerate
	  guest networking with virtio_net. Not to be confused with virtio_net
	  module itself which needs to be loaded in guest kernel.

	  To compile this driver as a module, choose M here: the module will
	  be called vhost_net.

config VHOST_SCSI
	tristate "VHOST_SCSI TCM fabric driver"
	depends on TARGET_CORE && EVENTFD && m
	select VHOST
	select VHOST_RING
	default n
	---help---
	  Say M here to enable the vhost_scsi TCM fabric module
	  for use with virtio-scsi guests

config VHOST_RING
	tristate
	---help---
	  This option is selected by any driver which needs to access
	  the host side of a virtio ring.

config VHOST
	tristate
	---help---
	  This option is selected by any driver which needs to access
	  the core of vhost.
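For reference, the options above would normally be built as modules. The fragment below is a minimal sketch of the resulting .config entries, taken from the option names in this Kconfig; whether VHOST_SCSI is actually selectable also depends on TARGET_CORE being enabled elsewhere in the tree.

CONFIG_VHOST_NET=m
CONFIG_VHOST_SCSI=m
# CONFIG_VHOST and CONFIG_VHOST_RING have no prompt; they are pulled in via "select".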
drivers/vhost/Makefile (new file, 8 lines)
@@ -0,0 +1,8 @@
obj-$(CONFIG_VHOST_NET) += vhost_net.o
vhost_net-y := net.o

obj-$(CONFIG_VHOST_SCSI) += vhost_scsi.o
vhost_scsi-y := scsi.o

obj-$(CONFIG_VHOST_RING) += vringh.o
obj-$(CONFIG_VHOST)	+= vhost.o
drivers/vhost/net.c (new file, 1164 lines): diff suppressed because it is too large
drivers/vhost/scsi.c (new file, 2409 lines): diff suppressed because it is too large
drivers/vhost/test.c (new file, 338 lines)
@@ -0,0 +1,338 @@
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, int %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);

	f->private_data = n;

	return 0;
}

static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n);
	return 0;
}

static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* start polling new socket */
		oldpriv = vq->private_data;
		vq->private_data = priv;

		r = vhost_init_used(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;
	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};

static int vhost_test_init(void)
{
	return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);

static void vhost_test_exit(void)
{
	misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");
drivers/vhost/test.h (new file, 7 lines)
@@ -0,0 +1,7 @@
#ifndef LINUX_VHOST_TEST_H
#define LINUX_VHOST_TEST_H

/* Start a given test on the virtio null device. 0 stops all tests. */
#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)

#endif
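For orientation, the ioctl above is issued against the misc device that test.c registers. A hypothetical userspace sketch (not part of this commit) might look like the following, assuming the module is loaded, the node appears as /dev/vhost-test, and the vrings were already configured with the usual VHOST_SET_VRING_* calls (otherwise the run request fails with -EFAULT):

/* Hypothetical example, not part of this commit: start and stop the
 * vhost-test null device from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)	/* mirrors test.h */

int main(void)
{
	int run, fd = open("/dev/vhost-test", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vhost-test");
		return 1;
	}
	if (ioctl(fd, VHOST_SET_OWNER, NULL))	/* claim the device */
		perror("VHOST_SET_OWNER");

	run = 1;				/* non-zero starts the test */
	if (ioctl(fd, VHOST_TEST_RUN, &run))
		perror("VHOST_TEST_RUN");

	run = 0;				/* 0 stops all tests */
	if (ioctl(fd, VHOST_TEST_RUN, &run))
		perror("VHOST_TEST_RUN");

	close(fd);
	return 0;
}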
drivers/vhost/vhost.c (new file, 1558 lines): diff suppressed because it is too large
drivers/vhost/vhost.h (new file, 179 lines)
@@ -0,0 +1,179 @@
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_device;

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head node;
	vhost_work_fn_t fn;
	wait_queue_head_t done;
	int flushing;
	unsigned queue_seq;
	unsigned done_seq;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct vhost_work work;
	unsigned long mask;
	struct vhost_dev *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);

struct vhost_log {
	u64 addr;
	u64 len;
};

struct vhost_virtqueue;

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Last used index value we have signalled on */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_memory *memory;
	void *private_data;
	unsigned acked_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
};

struct vhost_dev {
	struct vhost_memory *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};

void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_memory *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);        \
	} while (0)

enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL),
};

static inline int vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1 << bit);
}
#endif
drivers/vhost/vringh.c (new file, 1010 lines): diff suppressed because it is too large