mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
28
net/vmw_vsock/Kconfig
Normal file
28
net/vmw_vsock/Kconfig
Normal file
|
@ -0,0 +1,28 @@
|
|||
#
|
||||
# Vsock protocol
|
||||
#
|
||||
|
||||
config VSOCKETS
|
||||
tristate "Virtual Socket protocol"
|
||||
help
|
||||
Virtual Socket Protocol is a socket protocol similar to TCP/IP
|
||||
allowing communication between Virtual Machines and hypervisor
|
||||
or host.
|
||||
|
||||
You should also select one or more hypervisor-specific transports
|
||||
below.
|
||||
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called vsock. If unsure, say N.
|
||||
|
||||
config VMWARE_VMCI_VSOCKETS
|
||||
tristate "VMware VMCI transport for Virtual Sockets"
|
||||
depends on VSOCKETS && VMWARE_VMCI
|
||||
help
|
||||
This module implements a VMCI transport for Virtual Sockets.
|
||||
|
||||
Enable this transport if your Virtual Machine runs on a VMware
|
||||
hypervisor.
|
||||
|
||||
To compile this driver as a module, choose M here: the module
|
||||
will be called vmw_vsock_vmci_transport. If unsure, say N.
|
7
net/vmw_vsock/Makefile
Normal file
7
net/vmw_vsock/Makefile
Normal file
|
@ -0,0 +1,7 @@
|
|||
obj-$(CONFIG_VSOCKETS) += vsock.o
|
||||
obj-$(CONFIG_VMWARE_VMCI_VSOCKETS) += vmw_vsock_vmci_transport.o
|
||||
|
||||
vsock-y += af_vsock.o vsock_addr.o
|
||||
|
||||
vmw_vsock_vmci_transport-y += vmci_transport.o vmci_transport_notify.o \
|
||||
vmci_transport_notify_qstate.o
|
2001
net/vmw_vsock/af_vsock.c
Normal file
2001
net/vmw_vsock/af_vsock.c
Normal file
File diff suppressed because it is too large
Load diff
2171
net/vmw_vsock/vmci_transport.c
Normal file
2171
net/vmw_vsock/vmci_transport.c
Normal file
File diff suppressed because it is too large
Load diff
142
net/vmw_vsock/vmci_transport.h
Normal file
142
net/vmw_vsock/vmci_transport.h
Normal file
|
@ -0,0 +1,142 @@
|
|||
/*
|
||||
* VMware vSockets Driver
|
||||
*
|
||||
* Copyright (C) 2013 VMware, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation version 2 and no later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef _VMCI_TRANSPORT_H_
|
||||
#define _VMCI_TRANSPORT_H_
|
||||
|
||||
#include <linux/vmw_vmci_defs.h>
|
||||
#include <linux/vmw_vmci_api.h>
|
||||
|
||||
#include <net/vsock_addr.h>
|
||||
#include <net/af_vsock.h>
|
||||
|
||||
/* If the packet format changes in a release then this should change too. */
|
||||
#define VMCI_TRANSPORT_PACKET_VERSION 1
|
||||
|
||||
/* The resource ID on which control packets are sent. */
|
||||
#define VMCI_TRANSPORT_PACKET_RID 1
|
||||
|
||||
/* The resource ID on which control packets are sent to the hypervisor. */
|
||||
#define VMCI_TRANSPORT_HYPERVISOR_PACKET_RID 15
|
||||
|
||||
#define VSOCK_PROTO_INVALID 0
|
||||
#define VSOCK_PROTO_PKT_ON_NOTIFY (1 << 0)
|
||||
#define VSOCK_PROTO_ALL_SUPPORTED (VSOCK_PROTO_PKT_ON_NOTIFY)
|
||||
|
||||
#define vmci_trans(_vsk) ((struct vmci_transport *)((_vsk)->trans))
|
||||
|
||||
enum vmci_transport_packet_type {
|
||||
VMCI_TRANSPORT_PACKET_TYPE_INVALID = 0,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_OFFER,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_WROTE,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_READ,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_RST,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
|
||||
VMCI_TRANSPORT_PACKET_TYPE_MAX
|
||||
};
|
||||
|
||||
struct vmci_transport_waiting_info {
|
||||
u64 generation;
|
||||
u64 offset;
|
||||
};
|
||||
|
||||
/* Control packet type for STREAM sockets. DGRAMs have no control packets nor
|
||||
* special packet header for data packets, they are just raw VMCI DGRAM
|
||||
* messages. For STREAMs, control packets are sent over the control channel
|
||||
* while data is written and read directly from queue pairs with no packet
|
||||
* format.
|
||||
*/
|
||||
struct vmci_transport_packet {
|
||||
struct vmci_datagram dg;
|
||||
u8 version;
|
||||
u8 type;
|
||||
u16 proto;
|
||||
u32 src_port;
|
||||
u32 dst_port;
|
||||
u32 _reserved2;
|
||||
union {
|
||||
u64 size;
|
||||
u64 mode;
|
||||
struct vmci_handle handle;
|
||||
struct vmci_transport_waiting_info wait;
|
||||
} u;
|
||||
};
|
||||
|
||||
struct vmci_transport_notify_pkt {
|
||||
u64 write_notify_window;
|
||||
u64 write_notify_min_window;
|
||||
bool peer_waiting_read;
|
||||
bool peer_waiting_write;
|
||||
bool peer_waiting_write_detected;
|
||||
bool sent_waiting_read;
|
||||
bool sent_waiting_write;
|
||||
struct vmci_transport_waiting_info peer_waiting_read_info;
|
||||
struct vmci_transport_waiting_info peer_waiting_write_info;
|
||||
u64 produce_q_generation;
|
||||
u64 consume_q_generation;
|
||||
};
|
||||
|
||||
struct vmci_transport_notify_pkt_q_state {
|
||||
u64 write_notify_window;
|
||||
u64 write_notify_min_window;
|
||||
bool peer_waiting_write;
|
||||
bool peer_waiting_write_detected;
|
||||
};
|
||||
|
||||
union vmci_transport_notify {
|
||||
struct vmci_transport_notify_pkt pkt;
|
||||
struct vmci_transport_notify_pkt_q_state pkt_q_state;
|
||||
};
|
||||
|
||||
/* Our transport-specific data. */
|
||||
struct vmci_transport {
|
||||
/* For DGRAMs. */
|
||||
struct vmci_handle dg_handle;
|
||||
/* For STREAMs. */
|
||||
struct vmci_handle qp_handle;
|
||||
struct vmci_qp *qpair;
|
||||
u64 produce_size;
|
||||
u64 consume_size;
|
||||
u64 queue_pair_size;
|
||||
u64 queue_pair_min_size;
|
||||
u64 queue_pair_max_size;
|
||||
u32 attach_sub_id;
|
||||
u32 detach_sub_id;
|
||||
union vmci_transport_notify notify;
|
||||
struct vmci_transport_notify_ops *notify_ops;
|
||||
};
|
||||
|
||||
int vmci_transport_register(void);
|
||||
void vmci_transport_unregister(void);
|
||||
|
||||
int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
|
||||
struct sockaddr_vm *src);
|
||||
int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
|
||||
struct sockaddr_vm *src);
|
||||
int vmci_transport_send_wrote(struct sock *sk);
|
||||
int vmci_transport_send_read(struct sock *sk);
|
||||
int vmci_transport_send_waiting_write(struct sock *sk,
|
||||
struct vmci_transport_waiting_info *wait);
|
||||
int vmci_transport_send_waiting_read(struct sock *sk,
|
||||
struct vmci_transport_waiting_info *wait);
|
||||
|
||||
#endif
|
680
net/vmw_vsock/vmci_transport_notify.c
Normal file
680
net/vmw_vsock/vmci_transport_notify.c
Normal file
|
@ -0,0 +1,680 @@
|
|||
/*
|
||||
* VMware vSockets Driver
|
||||
*
|
||||
* Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation version 2 and no later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/socket.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
#include "vmci_transport_notify.h"
|
||||
|
||||
#define PKT_FIELD(vsk, field_name) (vmci_trans(vsk)->notify.pkt.field_name)
|
||||
|
||||
static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
bool retval;
|
||||
u64 notify_limit;
|
||||
|
||||
if (!PKT_FIELD(vsk, peer_waiting_write))
|
||||
return false;
|
||||
|
||||
#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
|
||||
/* When the sender blocks, we take that as a sign that the sender is
|
||||
* faster than the receiver. To reduce the transmit rate of the sender,
|
||||
* we delay the sending of the read notification by decreasing the
|
||||
* write_notify_window. The notification is delayed until the number of
|
||||
* bytes used in the queue drops below the write_notify_window.
|
||||
*/
|
||||
|
||||
if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
|
||||
PKT_FIELD(vsk, peer_waiting_write_detected) = true;
|
||||
if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
|
||||
PKT_FIELD(vsk, write_notify_window) =
|
||||
PKT_FIELD(vsk, write_notify_min_window);
|
||||
} else {
|
||||
PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
|
||||
if (PKT_FIELD(vsk, write_notify_window) <
|
||||
PKT_FIELD(vsk, write_notify_min_window))
|
||||
PKT_FIELD(vsk, write_notify_window) =
|
||||
PKT_FIELD(vsk, write_notify_min_window);
|
||||
|
||||
}
|
||||
}
|
||||
notify_limit = vmci_trans(vsk)->consume_size -
|
||||
PKT_FIELD(vsk, write_notify_window);
|
||||
#else
|
||||
notify_limit = 0;
|
||||
#endif
|
||||
|
||||
/* For now we ignore the wait information and just see if the free
|
||||
* space exceeds the notify limit. Note that improving this function
|
||||
* to be more intelligent will not require a protocol change and will
|
||||
* retain compatibility between endpoints with mixed versions of this
|
||||
* function.
|
||||
*
|
||||
* The notify_limit is used to delay notifications in the case where
|
||||
* flow control is enabled. Below the test is expressed in terms of
|
||||
* free space in the queue: if free_space > ConsumeSize -
|
||||
* write_notify_window then notify An alternate way of expressing this
|
||||
* is to rewrite the expression to use the data ready in the receive
|
||||
* queue: if write_notify_window > bufferReady then notify as
|
||||
* free_space == ConsumeSize - bufferReady.
|
||||
*/
|
||||
retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
|
||||
notify_limit;
|
||||
#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
|
||||
if (retval) {
|
||||
/*
|
||||
* Once we notify the peer, we reset the detected flag so the
|
||||
* next wait will again cause a decrease in the window size.
|
||||
*/
|
||||
|
||||
PKT_FIELD(vsk, peer_waiting_write_detected) = false;
|
||||
}
|
||||
#endif
|
||||
return retval;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool vmci_transport_notify_waiting_read(struct vsock_sock *vsk)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
if (!PKT_FIELD(vsk, peer_waiting_read))
|
||||
return false;
|
||||
|
||||
/* For now we ignore the wait information and just see if there is any
|
||||
* data for our peer to read. Note that improving this function to be
|
||||
* more intelligent will not require a protocol change and will retain
|
||||
* compatibility between endpoints with mixed versions of this
|
||||
* function.
|
||||
*/
|
||||
return vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) > 0;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
vmci_transport_handle_waiting_read(struct sock *sk,
|
||||
struct vmci_transport_packet *pkt,
|
||||
bool bottom_half,
|
||||
struct sockaddr_vm *dst,
|
||||
struct sockaddr_vm *src)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
struct vsock_sock *vsk;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
|
||||
PKT_FIELD(vsk, peer_waiting_read) = true;
|
||||
memcpy(&PKT_FIELD(vsk, peer_waiting_read_info), &pkt->u.wait,
|
||||
sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
|
||||
|
||||
if (vmci_transport_notify_waiting_read(vsk)) {
|
||||
bool sent;
|
||||
|
||||
if (bottom_half)
|
||||
sent = vmci_transport_send_wrote_bh(dst, src) > 0;
|
||||
else
|
||||
sent = vmci_transport_send_wrote(sk) > 0;
|
||||
|
||||
if (sent)
|
||||
PKT_FIELD(vsk, peer_waiting_read) = false;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
vmci_transport_handle_waiting_write(struct sock *sk,
|
||||
struct vmci_transport_packet *pkt,
|
||||
bool bottom_half,
|
||||
struct sockaddr_vm *dst,
|
||||
struct sockaddr_vm *src)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
struct vsock_sock *vsk;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
|
||||
PKT_FIELD(vsk, peer_waiting_write) = true;
|
||||
memcpy(&PKT_FIELD(vsk, peer_waiting_write_info), &pkt->u.wait,
|
||||
sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
|
||||
|
||||
if (vmci_transport_notify_waiting_write(vsk)) {
|
||||
bool sent;
|
||||
|
||||
if (bottom_half)
|
||||
sent = vmci_transport_send_read_bh(dst, src) > 0;
|
||||
else
|
||||
sent = vmci_transport_send_read(sk) > 0;
|
||||
|
||||
if (sent)
|
||||
PKT_FIELD(vsk, peer_waiting_write) = false;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
vmci_transport_handle_read(struct sock *sk,
|
||||
struct vmci_transport_packet *pkt,
|
||||
bool bottom_half,
|
||||
struct sockaddr_vm *dst, struct sockaddr_vm *src)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
struct vsock_sock *vsk;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
PKT_FIELD(vsk, sent_waiting_write) = false;
|
||||
#endif
|
||||
|
||||
sk->sk_write_space(sk);
|
||||
}
|
||||
|
||||
static bool send_waiting_read(struct sock *sk, u64 room_needed)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
struct vsock_sock *vsk;
|
||||
struct vmci_transport_waiting_info waiting_info;
|
||||
u64 tail;
|
||||
u64 head;
|
||||
u64 room_left;
|
||||
bool ret;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
|
||||
if (PKT_FIELD(vsk, sent_waiting_read))
|
||||
return true;
|
||||
|
||||
if (PKT_FIELD(vsk, write_notify_window) <
|
||||
vmci_trans(vsk)->consume_size)
|
||||
PKT_FIELD(vsk, write_notify_window) =
|
||||
min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
|
||||
vmci_trans(vsk)->consume_size);
|
||||
|
||||
vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head);
|
||||
room_left = vmci_trans(vsk)->consume_size - head;
|
||||
if (room_needed >= room_left) {
|
||||
waiting_info.offset = room_needed - room_left;
|
||||
waiting_info.generation =
|
||||
PKT_FIELD(vsk, consume_q_generation) + 1;
|
||||
} else {
|
||||
waiting_info.offset = head + room_needed;
|
||||
waiting_info.generation = PKT_FIELD(vsk, consume_q_generation);
|
||||
}
|
||||
|
||||
ret = vmci_transport_send_waiting_read(sk, &waiting_info) > 0;
|
||||
if (ret)
|
||||
PKT_FIELD(vsk, sent_waiting_read) = true;
|
||||
|
||||
return ret;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool send_waiting_write(struct sock *sk, u64 room_needed)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
struct vsock_sock *vsk;
|
||||
struct vmci_transport_waiting_info waiting_info;
|
||||
u64 tail;
|
||||
u64 head;
|
||||
u64 room_left;
|
||||
bool ret;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
|
||||
if (PKT_FIELD(vsk, sent_waiting_write))
|
||||
return true;
|
||||
|
||||
vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head);
|
||||
room_left = vmci_trans(vsk)->produce_size - tail;
|
||||
if (room_needed + 1 >= room_left) {
|
||||
/* Wraps around to current generation. */
|
||||
waiting_info.offset = room_needed + 1 - room_left;
|
||||
waiting_info.generation = PKT_FIELD(vsk, produce_q_generation);
|
||||
} else {
|
||||
waiting_info.offset = tail + room_needed + 1;
|
||||
waiting_info.generation =
|
||||
PKT_FIELD(vsk, produce_q_generation) - 1;
|
||||
}
|
||||
|
||||
ret = vmci_transport_send_waiting_write(sk, &waiting_info) > 0;
|
||||
if (ret)
|
||||
PKT_FIELD(vsk, sent_waiting_write) = true;
|
||||
|
||||
return ret;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
static int vmci_transport_send_read_notification(struct sock *sk)
|
||||
{
|
||||
struct vsock_sock *vsk;
|
||||
bool sent_read;
|
||||
unsigned int retries;
|
||||
int err;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
sent_read = false;
|
||||
retries = 0;
|
||||
err = 0;
|
||||
|
||||
if (vmci_transport_notify_waiting_write(vsk)) {
|
||||
/* Notify the peer that we have read, retrying the send on
|
||||
* failure up to our maximum value. XXX For now we just log
|
||||
* the failure, but later we should schedule a work item to
|
||||
* handle the resend until it succeeds. That would require
|
||||
* keeping track of work items in the vsk and cleaning them up
|
||||
* upon socket close.
|
||||
*/
|
||||
while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
|
||||
!sent_read &&
|
||||
retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
|
||||
err = vmci_transport_send_read(sk);
|
||||
if (err >= 0)
|
||||
sent_read = true;
|
||||
|
||||
retries++;
|
||||
}
|
||||
|
||||
if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS)
|
||||
pr_err("%p unable to send read notify to peer\n", sk);
|
||||
else
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
PKT_FIELD(vsk, peer_waiting_write) = false;
|
||||
#endif
|
||||
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static void
|
||||
vmci_transport_handle_wrote(struct sock *sk,
|
||||
struct vmci_transport_packet *pkt,
|
||||
bool bottom_half,
|
||||
struct sockaddr_vm *dst, struct sockaddr_vm *src)
|
||||
{
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
PKT_FIELD(vsk, sent_waiting_read) = false;
|
||||
#endif
|
||||
sk->sk_data_ready(sk);
|
||||
}
|
||||
|
||||
static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
|
||||
{
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
|
||||
PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
|
||||
PKT_FIELD(vsk, peer_waiting_read) = false;
|
||||
PKT_FIELD(vsk, peer_waiting_write) = false;
|
||||
PKT_FIELD(vsk, peer_waiting_write_detected) = false;
|
||||
PKT_FIELD(vsk, sent_waiting_read) = false;
|
||||
PKT_FIELD(vsk, sent_waiting_write) = false;
|
||||
PKT_FIELD(vsk, produce_q_generation) = 0;
|
||||
PKT_FIELD(vsk, consume_q_generation) = 0;
|
||||
|
||||
memset(&PKT_FIELD(vsk, peer_waiting_read_info), 0,
|
||||
sizeof(PKT_FIELD(vsk, peer_waiting_read_info)));
|
||||
memset(&PKT_FIELD(vsk, peer_waiting_write_info), 0,
|
||||
sizeof(PKT_FIELD(vsk, peer_waiting_write_info)));
|
||||
}
|
||||
|
||||
static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
|
||||
{
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_poll_in(struct sock *sk,
|
||||
size_t target, bool *data_ready_now)
|
||||
{
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
if (vsock_stream_has_data(vsk)) {
|
||||
*data_ready_now = true;
|
||||
} else {
|
||||
/* We can't read right now because there is nothing in the
|
||||
* queue. Ask for notifications when there is something to
|
||||
* read.
|
||||
*/
|
||||
if (sk->sk_state == SS_CONNECTED) {
|
||||
if (!send_waiting_read(sk, 1))
|
||||
return -1;
|
||||
|
||||
}
|
||||
*data_ready_now = false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_poll_out(struct sock *sk,
|
||||
size_t target, bool *space_avail_now)
|
||||
{
|
||||
s64 produce_q_free_space;
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
produce_q_free_space = vsock_stream_has_space(vsk);
|
||||
if (produce_q_free_space > 0) {
|
||||
*space_avail_now = true;
|
||||
return 0;
|
||||
} else if (produce_q_free_space == 0) {
|
||||
/* This is a connected socket but we can't currently send data.
|
||||
* Notify the peer that we are waiting if the queue is full. We
|
||||
* only send a waiting write if the queue is full because
|
||||
* otherwise we end up in an infinite WAITING_WRITE, READ,
|
||||
* WAITING_WRITE, READ, etc. loop. Treat failing to send the
|
||||
* notification as a socket error, passing that back through
|
||||
* the mask.
|
||||
*/
|
||||
if (!send_waiting_write(sk, 1))
|
||||
return -1;
|
||||
|
||||
*space_avail_now = false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_recv_init(
|
||||
struct sock *sk,
|
||||
size_t target,
|
||||
struct vmci_transport_recv_notify_data *data)
|
||||
{
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
|
||||
data->consume_head = 0;
|
||||
data->produce_tail = 0;
|
||||
#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
|
||||
data->notify_on_block = false;
|
||||
|
||||
if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
|
||||
PKT_FIELD(vsk, write_notify_min_window) = target + 1;
|
||||
if (PKT_FIELD(vsk, write_notify_window) <
|
||||
PKT_FIELD(vsk, write_notify_min_window)) {
|
||||
/* If the current window is smaller than the new
|
||||
* minimal window size, we need to reevaluate whether
|
||||
* we need to notify the sender. If the number of ready
|
||||
* bytes are smaller than the new window, we need to
|
||||
* send a notification to the sender before we block.
|
||||
*/
|
||||
|
||||
PKT_FIELD(vsk, write_notify_window) =
|
||||
PKT_FIELD(vsk, write_notify_min_window);
|
||||
data->notify_on_block = true;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_recv_pre_block(
|
||||
struct sock *sk,
|
||||
size_t target,
|
||||
struct vmci_transport_recv_notify_data *data)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
/* Notify our peer that we are waiting for data to read. */
|
||||
if (!send_waiting_read(sk, target)) {
|
||||
err = -EHOSTUNREACH;
|
||||
return err;
|
||||
}
|
||||
#ifdef VSOCK_OPTIMIZATION_FLOW_CONTROL
|
||||
if (data->notify_on_block) {
|
||||
err = vmci_transport_send_read_notification(sk);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
data->notify_on_block = false;
|
||||
}
|
||||
#endif
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_recv_pre_dequeue(
|
||||
struct sock *sk,
|
||||
size_t target,
|
||||
struct vmci_transport_recv_notify_data *data)
|
||||
{
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
/* Now consume up to len bytes from the queue. Note that since we have
|
||||
* the socket locked we should copy at least ready bytes.
|
||||
*/
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair,
|
||||
&data->produce_tail,
|
||||
&data->consume_head);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_recv_post_dequeue(
|
||||
struct sock *sk,
|
||||
size_t target,
|
||||
ssize_t copied,
|
||||
bool data_read,
|
||||
struct vmci_transport_recv_notify_data *data)
|
||||
{
|
||||
struct vsock_sock *vsk;
|
||||
int err;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
err = 0;
|
||||
|
||||
if (data_read) {
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
/* Detect a wrap-around to maintain queue generation. Note
|
||||
* that this is safe since we hold the socket lock across the
|
||||
* two queue pair operations.
|
||||
*/
|
||||
if (copied >=
|
||||
vmci_trans(vsk)->consume_size - data->consume_head)
|
||||
PKT_FIELD(vsk, consume_q_generation)++;
|
||||
#endif
|
||||
|
||||
err = vmci_transport_send_read_notification(sk);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_send_init(
|
||||
struct sock *sk,
|
||||
struct vmci_transport_send_notify_data *data)
|
||||
{
|
||||
#ifdef VSOCK_OPTIMIZATION_WAITING_NOTIFY
|
||||
data->consume_head = 0;
|
||||
data->produce_tail = 0;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_send_pre_block(
|
||||
struct sock *sk,
|
||||
struct vmci_transport_send_notify_data *data)
|
||||
{
|
||||
/* Notify our peer that we are waiting for room to write. */
|
||||
if (!send_waiting_write(sk, 1))
|
||||
return -EHOSTUNREACH;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_send_pre_enqueue(
|
||||
struct sock *sk,
|
||||
struct vmci_transport_send_notify_data *data)
|
||||
{
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair,
|
||||
&data->produce_tail,
|
||||
&data->consume_head);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_send_post_enqueue(
|
||||
struct sock *sk,
|
||||
ssize_t written,
|
||||
struct vmci_transport_send_notify_data *data)
|
||||
{
|
||||
int err = 0;
|
||||
struct vsock_sock *vsk;
|
||||
bool sent_wrote = false;
|
||||
int retries = 0;
|
||||
|
||||
vsk = vsock_sk(sk);
|
||||
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
/* Detect a wrap-around to maintain queue generation. Note that this
|
||||
* is safe since we hold the socket lock across the two queue pair
|
||||
* operations.
|
||||
*/
|
||||
if (written >= vmci_trans(vsk)->produce_size - data->produce_tail)
|
||||
PKT_FIELD(vsk, produce_q_generation)++;
|
||||
|
||||
#endif
|
||||
|
||||
if (vmci_transport_notify_waiting_read(vsk)) {
|
||||
/* Notify the peer that we have written, retrying the send on
|
||||
* failure up to our maximum value. See the XXX comment for the
|
||||
* corresponding piece of code in StreamRecvmsg() for potential
|
||||
* improvements.
|
||||
*/
|
||||
while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
|
||||
!sent_wrote &&
|
||||
retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
|
||||
err = vmci_transport_send_wrote(sk);
|
||||
if (err >= 0)
|
||||
sent_wrote = true;
|
||||
|
||||
retries++;
|
||||
}
|
||||
|
||||
if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
|
||||
pr_err("%p unable to send wrote notify to peer\n", sk);
|
||||
return err;
|
||||
} else {
|
||||
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
|
||||
PKT_FIELD(vsk, peer_waiting_read) = false;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static void
|
||||
vmci_transport_notify_pkt_handle_pkt(
|
||||
struct sock *sk,
|
||||
struct vmci_transport_packet *pkt,
|
||||
bool bottom_half,
|
||||
struct sockaddr_vm *dst,
|
||||
struct sockaddr_vm *src, bool *pkt_processed)
|
||||
{
|
||||
bool processed = false;
|
||||
|
||||
switch (pkt->type) {
|
||||
case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
|
||||
vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
|
||||
processed = true;
|
||||
break;
|
||||
case VMCI_TRANSPORT_PACKET_TYPE_READ:
|
||||
vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
|
||||
processed = true;
|
||||
break;
|
||||
case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
|
||||
vmci_transport_handle_waiting_write(sk, pkt, bottom_half,
|
||||
dst, src);
|
||||
processed = true;
|
||||
break;
|
||||
|
||||
case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
|
||||
vmci_transport_handle_waiting_read(sk, pkt, bottom_half,
|
||||
dst, src);
|
||||
processed = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pkt_processed)
|
||||
*pkt_processed = processed;
|
||||
}
|
||||
|
||||
static void vmci_transport_notify_pkt_process_request(struct sock *sk)
|
||||
{
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
|
||||
if (vmci_trans(vsk)->consume_size <
|
||||
PKT_FIELD(vsk, write_notify_min_window))
|
||||
PKT_FIELD(vsk, write_notify_min_window) =
|
||||
vmci_trans(vsk)->consume_size;
|
||||
}
|
||||
|
||||
static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
|
||||
{
|
||||
struct vsock_sock *vsk = vsock_sk(sk);
|
||||
|
||||
PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
|
||||
if (vmci_trans(vsk)->consume_size <
|
||||
PKT_FIELD(vsk, write_notify_min_window))
|
||||
PKT_FIELD(vsk, write_notify_min_window) =
|
||||
vmci_trans(vsk)->consume_size;
|
||||
}
|
||||
|
||||
/* Socket control packet based operations. */
|
||||
struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops = {
|
||||
vmci_transport_notify_pkt_socket_init,
|
||||
vmci_transport_notify_pkt_socket_destruct,
|
||||
vmci_transport_notify_pkt_poll_in,
|
||||
vmci_transport_notify_pkt_poll_out,
|
||||
vmci_transport_notify_pkt_handle_pkt,
|
||||
vmci_transport_notify_pkt_recv_init,
|
||||
vmci_transport_notify_pkt_recv_pre_block,
|
||||
vmci_transport_notify_pkt_recv_pre_dequeue,
|
||||
vmci_transport_notify_pkt_recv_post_dequeue,
|
||||
vmci_transport_notify_pkt_send_init,
|
||||
vmci_transport_notify_pkt_send_pre_block,
|
||||
vmci_transport_notify_pkt_send_pre_enqueue,
|
||||
vmci_transport_notify_pkt_send_post_enqueue,
|
||||
vmci_transport_notify_pkt_process_request,
|
||||
vmci_transport_notify_pkt_process_negotiate,
|
||||
};
|
83
net/vmw_vsock/vmci_transport_notify.h
Normal file
83
net/vmw_vsock/vmci_transport_notify.h
Normal file
|
@ -0,0 +1,83 @@
|
|||
/*
|
||||
* VMware vSockets Driver
|
||||
*
|
||||
* Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation version 2 and no later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#ifndef __VMCI_TRANSPORT_NOTIFY_H__
#define __VMCI_TRANSPORT_NOTIFY_H__

#include <linux/types.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/vm_sockets.h>

#include "vmci_transport.h"

/* Comment this out to compare with old protocol. */
#define VSOCK_OPTIMIZATION_WAITING_NOTIFY 1
#if defined(VSOCK_OPTIMIZATION_WAITING_NOTIFY)
/* Comment this out to remove flow control for "new" protocol */
#define VSOCK_OPTIMIZATION_FLOW_CONTROL 1
#endif

/* Maximum number of attempts at sending a notification datagram before
 * giving up (see the retry loops in the notify implementations).
 */
#define VMCI_TRANSPORT_MAX_DGRAM_RESENDS 10

/* Scratch state threaded through the recv_* notification callbacks for a
 * single receive operation.
 */
struct vmci_transport_recv_notify_data {
	u64 consume_head;	/* queue-position bookkeeping for recv */
	u64 produce_tail;	/* queue-position bookkeeping for recv */
	bool notify_on_block;	/* when set, send a READ notification to the
				 * peer before the receiver blocks
				 */
};

/* Scratch state threaded through the send_* notification callbacks for a
 * single send operation.
 */
struct vmci_transport_send_notify_data {
	u64 consume_head;	/* queue-position bookkeeping for send */
	u64 produce_tail;	/* queue-position bookkeeping for send */
};

/* Socket notification callbacks.
 *
 * Two implementations of this ops table exist (declared below); which one
 * is used depends on the protocol version negotiated with the peer.
 */
struct vmci_transport_notify_ops {
	/* Initialize per-socket notify state on socket creation. */
	void (*socket_init) (struct sock *sk);
	/* Tear down per-socket notify state on socket destruction. */
	void (*socket_destruct) (struct vsock_sock *vsk);
	/* poll() support: report whether data is ready to read. */
	int (*poll_in) (struct sock *sk, size_t target,
			bool *data_ready_now);
	/* poll() support: report whether space is available to write. */
	int (*poll_out) (struct sock *sk, size_t target,
			 bool *space_avail_now);
	/* Dispatch an incoming notification control packet; *pkt_processed
	 * (may be NULL) reports whether the packet type was recognized.
	 */
	void (*handle_notify_pkt) (struct sock *sk,
				   struct vmci_transport_packet *pkt,
				   bool bottom_half, struct sockaddr_vm *dst,
				   struct sockaddr_vm *src,
				   bool *pkt_processed);
	/* Hooks around the receive path (init/block/dequeue phases). */
	int (*recv_init) (struct sock *sk, size_t target,
			  struct vmci_transport_recv_notify_data *data);
	int (*recv_pre_block) (struct sock *sk, size_t target,
			       struct vmci_transport_recv_notify_data *data);
	int (*recv_pre_dequeue) (struct sock *sk, size_t target,
				 struct vmci_transport_recv_notify_data *data);
	int (*recv_post_dequeue) (struct sock *sk, size_t target,
				  ssize_t copied, bool data_read,
				  struct vmci_transport_recv_notify_data *data);
	/* Hooks around the send path (init/block/enqueue phases). */
	int (*send_init) (struct sock *sk,
			  struct vmci_transport_send_notify_data *data);
	int (*send_pre_block) (struct sock *sk,
			       struct vmci_transport_send_notify_data *data);
	int (*send_pre_enqueue) (struct sock *sk,
				 struct vmci_transport_send_notify_data *data);
	int (*send_post_enqueue) (struct sock *sk, ssize_t written,
				  struct vmci_transport_send_notify_data *data);
	/* Called while processing connection REQUEST/NEGOTIATE packets. */
	void (*process_request) (struct sock *sk);
	void (*process_negotiate) (struct sock *sk);
};

extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_ops;
extern struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops;

#endif /* __VMCI_TRANSPORT_NOTIFY_H__ */
|
438
net/vmw_vsock/vmci_transport_notify_qstate.c
Normal file
438
net/vmw_vsock/vmci_transport_notify_qstate.c
Normal file
|
@ -0,0 +1,438 @@
|
|||
/*
|
||||
* VMware vSockets Driver
|
||||
*
|
||||
* Copyright (C) 2009-2013 VMware, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation version 2 and no later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/socket.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
#include "vmci_transport_notify.h"
|
||||
|
||||
/* Shorthand accessor for the qstate-notify bookkeeping kept inside the
 * socket's transport private data.
 */
#define PKT_FIELD(vsk, field_name) \
	(vmci_trans(vsk)->notify.pkt_q_state.field_name)
|
||||
|
||||
/* Decide whether the peer should now be sent a READ notification.
 *
 * Returns false immediately unless the peer has signalled that it is
 * waiting to write.  On the first detection of a waiting writer the
 * write_notify_window is shrunk (never below write_notify_min_window),
 * which delays the notification until enough queue space has been freed;
 * this throttles a sender that is faster than the receiver.  Returns true
 * once free space in the consume queue exceeds the computed limit, also
 * re-arming the detection flag for the next cycle.
 */
static bool vmci_transport_notify_waiting_write(struct vsock_sock *vsk)
{
	bool retval;
	u64 notify_limit;

	if (!PKT_FIELD(vsk, peer_waiting_write))
		return false;

	/* When the sender blocks, we take that as a sign that the sender is
	 * faster than the receiver. To reduce the transmit rate of the sender,
	 * we delay the sending of the read notification by decreasing the
	 * write_notify_window. The notification is delayed until the number of
	 * bytes used in the queue drops below the write_notify_window.
	 */

	if (!PKT_FIELD(vsk, peer_waiting_write_detected)) {
		PKT_FIELD(vsk, peer_waiting_write_detected) = true;
		if (PKT_FIELD(vsk, write_notify_window) < PAGE_SIZE) {
			/* Window already small: clamp straight to minimum. */
			PKT_FIELD(vsk, write_notify_window) =
				PKT_FIELD(vsk, write_notify_min_window);
		} else {
			PKT_FIELD(vsk, write_notify_window) -= PAGE_SIZE;
			if (PKT_FIELD(vsk, write_notify_window) <
			    PKT_FIELD(vsk, write_notify_min_window))
				PKT_FIELD(vsk, write_notify_window) =
					PKT_FIELD(vsk, write_notify_min_window);

		}
	}
	notify_limit = vmci_trans(vsk)->consume_size -
		PKT_FIELD(vsk, write_notify_window);

	/* The notify_limit is used to delay notifications in the case where
	 * flow control is enabled. Below the test is expressed in terms of
	 * free space in the queue: if free_space > ConsumeSize -
	 * write_notify_window then notify An alternate way of expressing this
	 * is to rewrite the expression to use the data ready in the receive
	 * queue: if write_notify_window > bufferReady then notify as
	 * free_space == ConsumeSize - bufferReady.
	 */

	retval = vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair) >
		notify_limit;

	if (retval) {
		/* Once we notify the peer, we reset the detected flag so the
		 * next wait will again cause a decrease in the window size.
		 */

		PKT_FIELD(vsk, peer_waiting_write_detected) = false;
	}
	return retval;
}
|
||||
|
||||
/* Handle an incoming READ control packet from the peer: wake any writers
 * blocked on this socket via sk_write_space().  The packet and address
 * arguments are unused here but are part of the common handler signature.
 */
static void
vmci_transport_handle_read(struct sock *sk,
			   struct vmci_transport_packet *pkt,
			   bool bottom_half,
			   struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
	sk->sk_write_space(sk);
}
|
||||
|
||||
/* Handle an incoming WROTE control packet from the peer: wake any readers
 * blocked on this socket via sk_data_ready().  The packet and address
 * arguments are unused here but are part of the common handler signature.
 */
static void
vmci_transport_handle_wrote(struct sock *sk,
			    struct vmci_transport_packet *pkt,
			    bool bottom_half,
			    struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
	sk->sk_data_ready(sk);
}
|
||||
|
||||
/* Grow the write notify window by one page (capped at the consume queue
 * size).  Called when the receiver is about to block or polls with no
 * data available, undoing the throttling applied in
 * vmci_transport_notify_waiting_write().
 */
static void vsock_block_update_write_window(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (PKT_FIELD(vsk, write_notify_window) < vmci_trans(vsk)->consume_size)
		PKT_FIELD(vsk, write_notify_window) =
			min(PKT_FIELD(vsk, write_notify_window) + PAGE_SIZE,
			    vmci_trans(vsk)->consume_size);
}
|
||||
|
||||
/* If flow control says the peer should be told we read, send a READ
 * control packet, retrying up to VMCI_TRANSPORT_MAX_DGRAM_RESENDS times.
 * On success the peer_waiting_write flag is cleared; exhausting all
 * retries is only logged.  Returns the last send result (0 if no
 * notification was needed).
 */
static int vmci_transport_send_read_notification(struct sock *sk)
{
	struct vsock_sock *vsk;
	bool sent_read;
	unsigned int retries;
	int err;

	vsk = vsock_sk(sk);
	sent_read = false;
	retries = 0;
	err = 0;

	if (vmci_transport_notify_waiting_write(vsk)) {
		/* Notify the peer that we have read, retrying the send on
		 * failure up to our maximum value. XXX For now we just log
		 * the failure, but later we should schedule a work item to
		 * handle the resend until it succeeds. That would require
		 * keeping track of work items in the vsk and cleaning them up
		 * upon socket close.
		 */
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_read &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_read(sk);
			if (err >= 0)
				sent_read = true;

			retries++;
		}

		if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_read)
			pr_err("%p unable to send read notification to peer\n",
			       sk);
		else
			PKT_FIELD(vsk, peer_waiting_write) = false;

	}
	return err;
}
|
||||
|
||||
/* Initialize the qstate-notify bookkeeping for a newly created socket:
 * both windows start at one page and no waiting writer is recorded.
 */
static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
	PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
	PKT_FIELD(vsk, peer_waiting_write) = false;
	PKT_FIELD(vsk, peer_waiting_write_detected) = false;
}
|
||||
|
||||
/* Reset the qstate-notify bookkeeping on socket destruction (same values
 * as socket_init; there is no dynamically allocated state to free).
 */
static void vmci_transport_notify_pkt_socket_destruct(struct vsock_sock *vsk)
{
	PKT_FIELD(vsk, write_notify_window) = PAGE_SIZE;
	PKT_FIELD(vsk, write_notify_min_window) = PAGE_SIZE;
	PKT_FIELD(vsk, peer_waiting_write) = false;
	PKT_FIELD(vsk, peer_waiting_write_detected) = false;
}
|
||||
|
||||
/* poll() support: set *data_ready_now according to whether the stream has
 * data to read.  When it does not, and the socket is connected, widen the
 * write notify window so the peer will notify us when data arrives.
 * Always returns 0.
 */
static int
vmci_transport_notify_pkt_poll_in(struct sock *sk,
				  size_t target, bool *data_ready_now)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (vsock_stream_has_data(vsk)) {
		*data_ready_now = true;
	} else {
		/* We can't read right now because there is nothing in the
		 * queue. Ask for notifications when there is something to
		 * read.
		 */
		if (sk->sk_state == SS_CONNECTED)
			vsock_block_update_write_window(sk);
		*data_ready_now = false;
	}

	return 0;
}
|
||||
|
||||
/* poll() support: set *space_avail_now according to whether the stream
 * has free space to write.  NOTE(review): when vsock_stream_has_space()
 * returns a negative value (error), *space_avail_now is left untouched —
 * presumably the caller pre-initializes it; confirm at the call site.
 * Always returns 0.
 */
static int
vmci_transport_notify_pkt_poll_out(struct sock *sk,
				   size_t target, bool *space_avail_now)
{
	s64 produce_q_free_space;
	struct vsock_sock *vsk = vsock_sk(sk);

	produce_q_free_space = vsock_stream_has_space(vsk);
	if (produce_q_free_space > 0) {
		*space_avail_now = true;
		return 0;
	} else if (produce_q_free_space == 0) {
		/* This is a connected socket but we can't currently send data.
		 * Nothing else to do.
		 */
		*space_avail_now = false;
	}

	return 0;
}
|
||||
|
||||
/* Prepare per-receive notify state.  Raises write_notify_min_window to
 * cover the requested read size (target + 1), and if that pushes the
 * minimum above the current window, the window is raised too and
 * notify_on_block is set so the peer is notified before we sleep.
 * Always returns 0.
 */
static int
vmci_transport_notify_pkt_recv_init(
			struct sock *sk,
			size_t target,
			struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	data->consume_head = 0;
	data->produce_tail = 0;
	data->notify_on_block = false;

	if (PKT_FIELD(vsk, write_notify_min_window) < target + 1) {
		PKT_FIELD(vsk, write_notify_min_window) = target + 1;
		if (PKT_FIELD(vsk, write_notify_window) <
		    PKT_FIELD(vsk, write_notify_min_window)) {
			/* If the current window is smaller than the new
			 * minimal window size, we need to reevaluate whether
			 * we need to notify the sender. If the number of ready
			 * bytes are smaller than the new window, we need to
			 * send a notification to the sender before we block.
			 */

			PKT_FIELD(vsk, write_notify_window) =
				PKT_FIELD(vsk, write_notify_min_window);
			data->notify_on_block = true;
		}
	}

	return 0;
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_recv_pre_block(
|
||||
struct sock *sk,
|
||||
size_t target,
|
||||
struct vmci_transport_recv_notify_data *data)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
vsock_block_update_write_window(sk);
|
||||
|
||||
if (data->notify_on_block) {
|
||||
err = vmci_transport_send_read_notification(sk);
|
||||
if (err < 0)
|
||||
return err;
|
||||
data->notify_on_block = false;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Called after data has been dequeued from the consume queue.  If data
 * was actually read, record whether the queue had been completely full
 * (free space now equals the bytes just copied), in which case the peer
 * is marked as waiting to write; then send a READ notification if flow
 * control allows and re-run sk_data_ready().  Returns a negative error
 * if the notification send fails, else 0.
 */
static int
vmci_transport_notify_pkt_recv_post_dequeue(
				struct sock *sk,
				size_t target,
				ssize_t copied,
				bool data_read,
				struct vmci_transport_recv_notify_data *data)
{
	struct vsock_sock *vsk;
	int err;
	bool was_full = false;
	u64 free_space;

	vsk = vsock_sk(sk);
	err = 0;

	if (data_read) {
		/* Barrier: order the dequeue against the free-space read
		 * below so the fullness test sees the updated queue state.
		 */
		smp_mb();

		free_space =
			vmci_qpair_consume_free_space(vmci_trans(vsk)->qpair);
		was_full = free_space == copied;

		if (was_full)
			PKT_FIELD(vsk, peer_waiting_write) = true;

		err = vmci_transport_send_read_notification(sk);
		if (err < 0)
			return err;

		/* See the comment in
		 * vmci_transport_notify_pkt_send_post_enqueue().
		 */
		sk->sk_data_ready(sk);
	}

	return err;
}
|
||||
|
||||
static int
|
||||
vmci_transport_notify_pkt_send_init(
|
||||
struct sock *sk,
|
||||
struct vmci_transport_send_notify_data *data)
|
||||
{
|
||||
data->consume_head = 0;
|
||||
data->produce_tail = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Called after data has been enqueued on the produce queue.  If the queue
 * had been empty before this write (bytes now ready equals the bytes just
 * written), the peer may be blocked waiting for data, so send a WROTE
 * notification, retrying up to VMCI_TRANSPORT_MAX_DGRAM_RESENDS times;
 * exhausting all retries is logged.  Returns the last send result.
 */
static int
vmci_transport_notify_pkt_send_post_enqueue(
				struct sock *sk,
				ssize_t written,
				struct vmci_transport_send_notify_data *data)
{
	int err = 0;
	struct vsock_sock *vsk;
	bool sent_wrote = false;
	bool was_empty;
	int retries = 0;

	vsk = vsock_sk(sk);

	/* Barrier: order the enqueue against the buf-ready read below so
	 * the emptiness test sees the updated queue state.
	 */
	smp_mb();

	was_empty =
		vmci_qpair_produce_buf_ready(vmci_trans(vsk)->qpair) == written;
	if (was_empty) {
		while (!(vsk->peer_shutdown & RCV_SHUTDOWN) &&
		       !sent_wrote &&
		       retries < VMCI_TRANSPORT_MAX_DGRAM_RESENDS) {
			err = vmci_transport_send_wrote(sk);
			if (err >= 0)
				sent_wrote = true;

			retries++;
		}
	}

	if (retries >= VMCI_TRANSPORT_MAX_DGRAM_RESENDS && !sent_wrote) {
		pr_err("%p unable to send wrote notification to peer\n",
		       sk);
		return err;
	}

	return err;
}
|
||||
|
||||
static void
|
||||
vmci_transport_notify_pkt_handle_pkt(
|
||||
struct sock *sk,
|
||||
struct vmci_transport_packet *pkt,
|
||||
bool bottom_half,
|
||||
struct sockaddr_vm *dst,
|
||||
struct sockaddr_vm *src, bool *pkt_processed)
|
||||
{
|
||||
bool processed = false;
|
||||
|
||||
switch (pkt->type) {
|
||||
case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
|
||||
vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
|
||||
processed = true;
|
||||
break;
|
||||
case VMCI_TRANSPORT_PACKET_TYPE_READ:
|
||||
vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
|
||||
processed = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if (pkt_processed)
|
||||
*pkt_processed = processed;
|
||||
}
|
||||
|
||||
/* Connection REQUEST processing: start with the write notify window at
 * the full consume queue size, and cap the minimum window at that size.
 */
static void vmci_transport_notify_pkt_process_request(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
	    PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}
|
||||
|
||||
/* Connection NEGOTIATE processing: same window setup as
 * vmci_transport_notify_pkt_process_request().
 */
static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	PKT_FIELD(vsk, write_notify_window) = vmci_trans(vsk)->consume_size;
	if (vmci_trans(vsk)->consume_size <
	    PKT_FIELD(vsk, write_notify_min_window))
		PKT_FIELD(vsk, write_notify_min_window) =
			vmci_trans(vsk)->consume_size;
}
|
||||
|
||||
/* recv_pre_dequeue hook: the qstate protocol needs no work here; the
 * callback exists only to satisfy the common ops interface.
 */
static int
vmci_transport_notify_pkt_recv_pre_dequeue(
				struct sock *sk,
				size_t target,
				struct vmci_transport_recv_notify_data *data)
{
	return 0;		/* NOP for QState. */
}
|
||||
|
||||
/* send_pre_block hook: the qstate protocol needs no work here; the
 * callback exists only to satisfy the common ops interface.
 */
static int
vmci_transport_notify_pkt_send_pre_block(
				struct sock *sk,
				struct vmci_transport_send_notify_data *data)
{
	return 0;		/* NOP for QState. */
}
|
||||
|
||||
/* send_pre_enqueue hook: the qstate protocol needs no work here; the
 * callback exists only to satisfy the common ops interface.
 */
static int
vmci_transport_notify_pkt_send_pre_enqueue(
				struct sock *sk,
				struct vmci_transport_send_notify_data *data)
{
	return 0;		/* NOP for QState. */
}
|
||||
|
||||
/* Socket always on control packet based operations. */
|
||||
struct vmci_transport_notify_ops vmci_transport_notify_pkt_q_state_ops = {
|
||||
vmci_transport_notify_pkt_socket_init,
|
||||
vmci_transport_notify_pkt_socket_destruct,
|
||||
vmci_transport_notify_pkt_poll_in,
|
||||
vmci_transport_notify_pkt_poll_out,
|
||||
vmci_transport_notify_pkt_handle_pkt,
|
||||
vmci_transport_notify_pkt_recv_init,
|
||||
vmci_transport_notify_pkt_recv_pre_block,
|
||||
vmci_transport_notify_pkt_recv_pre_dequeue,
|
||||
vmci_transport_notify_pkt_recv_post_dequeue,
|
||||
vmci_transport_notify_pkt_send_init,
|
||||
vmci_transport_notify_pkt_send_pre_block,
|
||||
vmci_transport_notify_pkt_send_pre_enqueue,
|
||||
vmci_transport_notify_pkt_send_post_enqueue,
|
||||
vmci_transport_notify_pkt_process_request,
|
||||
vmci_transport_notify_pkt_process_negotiate,
|
||||
};
|
75
net/vmw_vsock/vsock_addr.c
Normal file
75
net/vmw_vsock/vsock_addr.c
Normal file
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* VMware vSockets Driver
|
||||
*
|
||||
* Copyright (C) 2007-2012 VMware, Inc. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation version 2 and no later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/socket.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/vsock_addr.h>
|
||||
|
||||
/* Initialize @addr as an AF_VSOCK address with the given context id and
 * port.  The whole structure (including svm_zero) is cleared first.
 */
void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
{
	memset(addr, 0, sizeof(*addr));
	addr->svm_family = AF_VSOCK;
	addr->svm_cid = cid;
	addr->svm_port = port;
}
EXPORT_SYMBOL_GPL(vsock_addr_init);
|
||||
|
||||
/* Validate @addr for use as a vSockets address.
 *
 * Returns 0 on success, -EFAULT for a NULL pointer, -EAFNOSUPPORT if the
 * address family is not AF_VSOCK, and -EINVAL if the reserved area is not
 * zeroed.
 *
 * NOTE(review): only svm_zero[0] is checked, not the entire reserved
 * array — confirm this is intentional against the sockaddr_vm ABI.
 */
int vsock_addr_validate(const struct sockaddr_vm *addr)
{
	if (!addr)
		return -EFAULT;

	if (addr->svm_family != AF_VSOCK)
		return -EAFNOSUPPORT;

	if (addr->svm_zero[0] != 0)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(vsock_addr_validate);
|
||||
|
||||
bool vsock_addr_bound(const struct sockaddr_vm *addr)
|
||||
{
|
||||
return addr->svm_port != VMADDR_PORT_ANY;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vsock_addr_bound);
|
||||
|
||||
/* Reset @addr to the fully-wildcarded state (VMADDR_CID_ANY,
 * VMADDR_PORT_ANY), clearing the rest of the structure.
 */
void vsock_addr_unbind(struct sockaddr_vm *addr)
{
	vsock_addr_init(addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
}
EXPORT_SYMBOL_GPL(vsock_addr_unbind);
|
||||
|
||||
bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,
|
||||
const struct sockaddr_vm *other)
|
||||
{
|
||||
return addr->svm_cid == other->svm_cid &&
|
||||
addr->svm_port == other->svm_port;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(vsock_addr_equals_addr);
|
||||
|
||||
/* Cast a generic struct sockaddr to a struct sockaddr_vm.
 *
 * Fails with -EFAULT if @len is too small to hold a sockaddr_vm;
 * otherwise *@out_addr aliases the caller's storage (no copy is made,
 * and the cast drops const) and the result of validating the address
 * is returned.
 */
int vsock_addr_cast(const struct sockaddr *addr,
		    size_t len, struct sockaddr_vm **out_addr)
{
	if (len < sizeof(**out_addr))
		return -EFAULT;

	*out_addr = (struct sockaddr_vm *)addr;
	return vsock_addr_validate(*out_addr);
}
EXPORT_SYMBOL_GPL(vsock_addr_cast);
|
Loading…
Add table
Add a link
Reference in a new issue