mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 17:18:05 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
44
net/rxrpc/Kconfig
Normal file
44
net/rxrpc/Kconfig
Normal file
|
@ -0,0 +1,44 @@
|
|||
#
|
||||
# RxRPC session sockets
|
||||
#
|
||||
|
||||
config AF_RXRPC
|
||||
tristate "RxRPC session sockets"
|
||||
depends on INET
|
||||
select CRYPTO
|
||||
select KEYS
|
||||
help
|
||||
Say Y or M here to include support for RxRPC session sockets (just
|
||||
the transport part, not the presentation part: (un)marshalling is
|
||||
left to the application).
|
||||
|
||||
These are used for AFS kernel filesystem and userspace utilities.
|
||||
|
||||
This module at the moment only supports client operations and is
|
||||
currently incomplete.
|
||||
|
||||
See Documentation/networking/rxrpc.txt.
|
||||
|
||||
|
||||
config AF_RXRPC_DEBUG
|
||||
bool "RxRPC dynamic debugging"
|
||||
depends on AF_RXRPC
|
||||
help
|
||||
Say Y here to make runtime controllable debugging messages appear.
|
||||
|
||||
See Documentation/networking/rxrpc.txt.
|
||||
|
||||
|
||||
config RXKAD
|
||||
tristate "RxRPC Kerberos security"
|
||||
depends on AF_RXRPC
|
||||
select CRYPTO
|
||||
select CRYPTO_MANAGER
|
||||
select CRYPTO_BLKCIPHER
|
||||
select CRYPTO_PCBC
|
||||
select CRYPTO_FCRYPT
|
||||
help
|
||||
Provide kerberos 4 and AFS kaserver security handling for AF_RXRPC
|
||||
through the use of the key retention service.
|
||||
|
||||
See Documentation/networking/rxrpc.txt.
|
28
net/rxrpc/Makefile
Normal file
28
net/rxrpc/Makefile
Normal file
|
@ -0,0 +1,28 @@
|
|||
#
|
||||
# Makefile for Linux kernel RxRPC
|
||||
#
|
||||
|
||||
af-rxrpc-y := \
|
||||
af_rxrpc.o \
|
||||
ar-accept.o \
|
||||
ar-ack.o \
|
||||
ar-call.o \
|
||||
ar-connection.o \
|
||||
ar-connevent.o \
|
||||
ar-error.o \
|
||||
ar-input.o \
|
||||
ar-key.o \
|
||||
ar-local.o \
|
||||
ar-output.o \
|
||||
ar-peer.o \
|
||||
ar-recvmsg.o \
|
||||
ar-security.o \
|
||||
ar-skbuff.o \
|
||||
ar-transport.o
|
||||
|
||||
af-rxrpc-$(CONFIG_PROC_FS) += ar-proc.o
|
||||
af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
|
||||
|
||||
obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
|
||||
|
||||
obj-$(CONFIG_RXKAD) += rxkad.o
|
899
net/rxrpc/af_rxrpc.c
Normal file
899
net/rxrpc/af_rxrpc.c
Normal file
|
@ -0,0 +1,899 @@
|
|||
/* AF_RXRPC implementation
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/key-type.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
MODULE_DESCRIPTION("RxRPC network protocol");
|
||||
MODULE_AUTHOR("Red Hat, Inc.");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_NETPROTO(PF_RXRPC);
|
||||
|
||||
unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
|
||||
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
|
||||
MODULE_PARM_DESC(debug, "RxRPC debugging mask");
|
||||
|
||||
static int sysctl_rxrpc_max_qlen __read_mostly = 10;
|
||||
|
||||
static struct proto rxrpc_proto;
|
||||
static const struct proto_ops rxrpc_rpc_ops;
|
||||
|
||||
/* local epoch for detecting local-end reset */
|
||||
__be32 rxrpc_epoch;
|
||||
|
||||
/* current debugging ID */
|
||||
atomic_t rxrpc_debug_id;
|
||||
|
||||
/* count of skbs currently in use */
|
||||
atomic_t rxrpc_n_skbs;
|
||||
|
||||
struct workqueue_struct *rxrpc_workqueue;
|
||||
|
||||
static void rxrpc_sock_destructor(struct sock *);
|
||||
|
||||
/*
|
||||
* see if an RxRPC socket is currently writable
|
||||
*/
|
||||
static inline int rxrpc_writable(struct sock *sk)
|
||||
{
|
||||
return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
|
||||
}
|
||||
|
||||
/*
|
||||
* wait for write bufferage to become available
|
||||
*/
|
||||
static void rxrpc_write_space(struct sock *sk)
|
||||
{
|
||||
_enter("%p", sk);
|
||||
rcu_read_lock();
|
||||
if (rxrpc_writable(sk)) {
|
||||
struct socket_wq *wq = rcu_dereference(sk->sk_wq);
|
||||
|
||||
if (wq_has_sleeper(wq))
|
||||
wake_up_interruptible(&wq->wait);
|
||||
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/*
|
||||
* validate an RxRPC address
|
||||
*/
|
||||
static int rxrpc_validate_address(struct rxrpc_sock *rx,
|
||||
struct sockaddr_rxrpc *srx,
|
||||
int len)
|
||||
{
|
||||
if (len < sizeof(struct sockaddr_rxrpc))
|
||||
return -EINVAL;
|
||||
|
||||
if (srx->srx_family != AF_RXRPC)
|
||||
return -EAFNOSUPPORT;
|
||||
|
||||
if (srx->transport_type != SOCK_DGRAM)
|
||||
return -ESOCKTNOSUPPORT;
|
||||
|
||||
len -= offsetof(struct sockaddr_rxrpc, transport);
|
||||
if (srx->transport_len < sizeof(sa_family_t) ||
|
||||
srx->transport_len > len)
|
||||
return -EINVAL;
|
||||
|
||||
if (srx->transport.family != rx->proto)
|
||||
return -EAFNOSUPPORT;
|
||||
|
||||
switch (srx->transport.family) {
|
||||
case AF_INET:
|
||||
_debug("INET: %x @ %pI4",
|
||||
ntohs(srx->transport.sin.sin_port),
|
||||
&srx->transport.sin.sin_addr);
|
||||
if (srx->transport_len > 8)
|
||||
memset((void *)&srx->transport + 8, 0,
|
||||
srx->transport_len - 8);
|
||||
break;
|
||||
|
||||
case AF_INET6:
|
||||
default:
|
||||
return -EAFNOSUPPORT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* bind a local address to an RxRPC socket
|
||||
*/
|
||||
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
|
||||
{
|
||||
struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
|
||||
struct sock *sk = sock->sk;
|
||||
struct rxrpc_local *local;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
|
||||
__be16 service_id;
|
||||
int ret;
|
||||
|
||||
_enter("%p,%p,%d", rx, saddr, len);
|
||||
|
||||
ret = rxrpc_validate_address(rx, srx, len);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
|
||||
lock_sock(&rx->sk);
|
||||
|
||||
if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
|
||||
ret = -EINVAL;
|
||||
goto error_unlock;
|
||||
}
|
||||
|
||||
memcpy(&rx->srx, srx, sizeof(rx->srx));
|
||||
|
||||
/* find a local transport endpoint if we don't have one already */
|
||||
local = rxrpc_lookup_local(&rx->srx);
|
||||
if (IS_ERR(local)) {
|
||||
ret = PTR_ERR(local);
|
||||
goto error_unlock;
|
||||
}
|
||||
|
||||
rx->local = local;
|
||||
if (srx->srx_service) {
|
||||
service_id = htons(srx->srx_service);
|
||||
write_lock_bh(&local->services_lock);
|
||||
list_for_each_entry(prx, &local->services, listen_link) {
|
||||
if (prx->service_id == service_id)
|
||||
goto service_in_use;
|
||||
}
|
||||
|
||||
rx->service_id = service_id;
|
||||
list_add_tail(&rx->listen_link, &local->services);
|
||||
write_unlock_bh(&local->services_lock);
|
||||
|
||||
rx->sk.sk_state = RXRPC_SERVER_BOUND;
|
||||
} else {
|
||||
rx->sk.sk_state = RXRPC_CLIENT_BOUND;
|
||||
}
|
||||
|
||||
release_sock(&rx->sk);
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
|
||||
service_in_use:
|
||||
ret = -EADDRINUSE;
|
||||
write_unlock_bh(&local->services_lock);
|
||||
error_unlock:
|
||||
release_sock(&rx->sk);
|
||||
error:
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* set the number of pending calls permitted on a listening socket
|
||||
*/
|
||||
static int rxrpc_listen(struct socket *sock, int backlog)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sk);
|
||||
int ret;
|
||||
|
||||
_enter("%p,%d", rx, backlog);
|
||||
|
||||
lock_sock(&rx->sk);
|
||||
|
||||
switch (rx->sk.sk_state) {
|
||||
case RXRPC_UNCONNECTED:
|
||||
ret = -EADDRNOTAVAIL;
|
||||
break;
|
||||
case RXRPC_CLIENT_BOUND:
|
||||
case RXRPC_CLIENT_CONNECTED:
|
||||
default:
|
||||
ret = -EBUSY;
|
||||
break;
|
||||
case RXRPC_SERVER_BOUND:
|
||||
ASSERT(rx->local != NULL);
|
||||
sk->sk_max_ack_backlog = backlog;
|
||||
rx->sk.sk_state = RXRPC_SERVER_LISTENING;
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
release_sock(&rx->sk);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* find a transport by address
|
||||
*/
|
||||
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
|
||||
struct sockaddr *addr,
|
||||
int addr_len, int flags,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
|
||||
struct rxrpc_transport *trans;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
|
||||
struct rxrpc_peer *peer;
|
||||
|
||||
_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
|
||||
|
||||
ASSERT(rx->local != NULL);
|
||||
ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);
|
||||
|
||||
if (rx->srx.transport_type != srx->transport_type)
|
||||
return ERR_PTR(-ESOCKTNOSUPPORT);
|
||||
if (rx->srx.transport.family != srx->transport.family)
|
||||
return ERR_PTR(-EAFNOSUPPORT);
|
||||
|
||||
/* find a remote transport endpoint from the local one */
|
||||
peer = rxrpc_get_peer(srx, gfp);
|
||||
if (IS_ERR(peer))
|
||||
return ERR_CAST(peer);
|
||||
|
||||
/* find a transport */
|
||||
trans = rxrpc_get_transport(rx->local, peer, gfp);
|
||||
rxrpc_put_peer(peer);
|
||||
_leave(" = %p", trans);
|
||||
return trans;
|
||||
}
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_begin_call - Allow a kernel service to begin a call
|
||||
* @sock: The socket on which to make the call
|
||||
* @srx: The address of the peer to contact (defaults to socket setting)
|
||||
* @key: The security context to use (defaults to socket setting)
|
||||
* @user_call_ID: The ID to use
|
||||
*
|
||||
* Allow a kernel service to begin a call on the nominated socket. This just
|
||||
* sets up all the internal tracking structures and allocates connection and
|
||||
* call IDs as appropriate. The call to be used is returned.
|
||||
*
|
||||
* The default socket destination address and security may be overridden by
|
||||
* supplying @srx and @key.
|
||||
*/
|
||||
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
|
||||
struct sockaddr_rxrpc *srx,
|
||||
struct key *key,
|
||||
unsigned long user_call_ID,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_conn_bundle *bundle;
|
||||
struct rxrpc_transport *trans;
|
||||
struct rxrpc_call *call;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
|
||||
__be16 service_id;
|
||||
|
||||
_enter(",,%x,%lx", key_serial(key), user_call_ID);
|
||||
|
||||
lock_sock(&rx->sk);
|
||||
|
||||
if (srx) {
|
||||
trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
|
||||
sizeof(*srx), 0, gfp);
|
||||
if (IS_ERR(trans)) {
|
||||
call = ERR_CAST(trans);
|
||||
trans = NULL;
|
||||
goto out_notrans;
|
||||
}
|
||||
} else {
|
||||
trans = rx->trans;
|
||||
if (!trans) {
|
||||
call = ERR_PTR(-ENOTCONN);
|
||||
goto out_notrans;
|
||||
}
|
||||
atomic_inc(&trans->usage);
|
||||
}
|
||||
|
||||
service_id = rx->service_id;
|
||||
if (srx)
|
||||
service_id = htons(srx->srx_service);
|
||||
|
||||
if (!key)
|
||||
key = rx->key;
|
||||
if (key && !key->payload.data)
|
||||
key = NULL; /* a no-security key */
|
||||
|
||||
bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
|
||||
if (IS_ERR(bundle)) {
|
||||
call = ERR_CAST(bundle);
|
||||
goto out;
|
||||
}
|
||||
|
||||
call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
|
||||
gfp);
|
||||
rxrpc_put_bundle(trans, bundle);
|
||||
out:
|
||||
rxrpc_put_transport(trans);
|
||||
out_notrans:
|
||||
release_sock(&rx->sk);
|
||||
_leave(" = %p", call);
|
||||
return call;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
|
||||
* @call: The call to end
|
||||
*
|
||||
* Allow a kernel service to end a call it was using. The call must be
|
||||
* complete before this is called (the call should be aborted if necessary).
|
||||
*/
|
||||
void rxrpc_kernel_end_call(struct rxrpc_call *call)
|
||||
{
|
||||
_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
|
||||
rxrpc_remove_user_ID(call->socket, call);
|
||||
rxrpc_put_call(call);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_end_call);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
|
||||
* @sock: The socket to intercept received messages on
|
||||
* @interceptor: The function to pass the messages to
|
||||
*
|
||||
* Allow a kernel service to intercept messages heading for the Rx queue on an
|
||||
* RxRPC socket. They get passed to the specified function instead.
|
||||
* @interceptor should free the socket buffers it is given. @interceptor is
|
||||
* called with the socket receive queue spinlock held and softirqs disabled -
|
||||
* this ensures that the messages will be delivered in the right order.
|
||||
*/
|
||||
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
|
||||
rxrpc_interceptor_t interceptor)
|
||||
{
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
|
||||
|
||||
_enter("");
|
||||
rx->interceptor = interceptor;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
|
||||
|
||||
/*
|
||||
* connect an RxRPC socket
|
||||
* - this just targets it at a specific destination; no actual connection
|
||||
* negotiation takes place
|
||||
*/
|
||||
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
|
||||
int addr_len, int flags)
|
||||
{
|
||||
struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
|
||||
struct sock *sk = sock->sk;
|
||||
struct rxrpc_transport *trans;
|
||||
struct rxrpc_local *local;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sk);
|
||||
int ret;
|
||||
|
||||
_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);
|
||||
|
||||
ret = rxrpc_validate_address(rx, srx, addr_len);
|
||||
if (ret < 0) {
|
||||
_leave(" = %d [bad addr]", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
lock_sock(&rx->sk);
|
||||
|
||||
switch (rx->sk.sk_state) {
|
||||
case RXRPC_UNCONNECTED:
|
||||
/* find a local transport endpoint if we don't have one already */
|
||||
ASSERTCMP(rx->local, ==, NULL);
|
||||
rx->srx.srx_family = AF_RXRPC;
|
||||
rx->srx.srx_service = 0;
|
||||
rx->srx.transport_type = srx->transport_type;
|
||||
rx->srx.transport_len = sizeof(sa_family_t);
|
||||
rx->srx.transport.family = srx->transport.family;
|
||||
local = rxrpc_lookup_local(&rx->srx);
|
||||
if (IS_ERR(local)) {
|
||||
release_sock(&rx->sk);
|
||||
return PTR_ERR(local);
|
||||
}
|
||||
rx->local = local;
|
||||
rx->sk.sk_state = RXRPC_CLIENT_BOUND;
|
||||
case RXRPC_CLIENT_BOUND:
|
||||
break;
|
||||
case RXRPC_CLIENT_CONNECTED:
|
||||
release_sock(&rx->sk);
|
||||
return -EISCONN;
|
||||
default:
|
||||
release_sock(&rx->sk);
|
||||
return -EBUSY; /* server sockets can't connect as well */
|
||||
}
|
||||
|
||||
trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
|
||||
GFP_KERNEL);
|
||||
if (IS_ERR(trans)) {
|
||||
release_sock(&rx->sk);
|
||||
_leave(" = %ld", PTR_ERR(trans));
|
||||
return PTR_ERR(trans);
|
||||
}
|
||||
|
||||
rx->trans = trans;
|
||||
rx->service_id = htons(srx->srx_service);
|
||||
rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;
|
||||
|
||||
release_sock(&rx->sk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* send a message through an RxRPC socket
|
||||
* - in a client this does a number of things:
|
||||
* - finds/sets up a connection for the security specified (if any)
|
||||
* - initiates a call (ID in control data)
|
||||
* - ends the request phase of a call (if MSG_MORE is not set)
|
||||
* - sends a call data packet
|
||||
* - may send an abort (abort code in control data)
|
||||
*/
|
||||
static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
|
||||
struct msghdr *m, size_t len)
|
||||
{
|
||||
struct rxrpc_transport *trans;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
|
||||
int ret;
|
||||
|
||||
_enter(",{%d},,%zu", rx->sk.sk_state, len);
|
||||
|
||||
if (m->msg_flags & MSG_OOB)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (m->msg_name) {
|
||||
ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
|
||||
if (ret < 0) {
|
||||
_leave(" = %d [bad addr]", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
trans = NULL;
|
||||
lock_sock(&rx->sk);
|
||||
|
||||
if (m->msg_name) {
|
||||
ret = -EISCONN;
|
||||
trans = rxrpc_name_to_transport(sock, m->msg_name,
|
||||
m->msg_namelen, 0, GFP_KERNEL);
|
||||
if (IS_ERR(trans)) {
|
||||
ret = PTR_ERR(trans);
|
||||
trans = NULL;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
trans = rx->trans;
|
||||
if (trans)
|
||||
atomic_inc(&trans->usage);
|
||||
}
|
||||
|
||||
switch (rx->sk.sk_state) {
|
||||
case RXRPC_SERVER_LISTENING:
|
||||
if (!m->msg_name) {
|
||||
ret = rxrpc_server_sendmsg(iocb, rx, m, len);
|
||||
break;
|
||||
}
|
||||
case RXRPC_SERVER_BOUND:
|
||||
case RXRPC_CLIENT_BOUND:
|
||||
if (!m->msg_name) {
|
||||
ret = -ENOTCONN;
|
||||
break;
|
||||
}
|
||||
case RXRPC_CLIENT_CONNECTED:
|
||||
ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
|
||||
break;
|
||||
default:
|
||||
ret = -ENOTCONN;
|
||||
break;
|
||||
}
|
||||
|
||||
out:
|
||||
release_sock(&rx->sk);
|
||||
if (trans)
|
||||
rxrpc_put_transport(trans);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* set RxRPC socket options
|
||||
*/
|
||||
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
|
||||
char __user *optval, unsigned int optlen)
|
||||
{
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
|
||||
unsigned int min_sec_level;
|
||||
int ret;
|
||||
|
||||
_enter(",%d,%d,,%d", level, optname, optlen);
|
||||
|
||||
lock_sock(&rx->sk);
|
||||
ret = -EOPNOTSUPP;
|
||||
|
||||
if (level == SOL_RXRPC) {
|
||||
switch (optname) {
|
||||
case RXRPC_EXCLUSIVE_CONNECTION:
|
||||
ret = -EINVAL;
|
||||
if (optlen != 0)
|
||||
goto error;
|
||||
ret = -EISCONN;
|
||||
if (rx->sk.sk_state != RXRPC_UNCONNECTED)
|
||||
goto error;
|
||||
set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
|
||||
goto success;
|
||||
|
||||
case RXRPC_SECURITY_KEY:
|
||||
ret = -EINVAL;
|
||||
if (rx->key)
|
||||
goto error;
|
||||
ret = -EISCONN;
|
||||
if (rx->sk.sk_state != RXRPC_UNCONNECTED)
|
||||
goto error;
|
||||
ret = rxrpc_request_key(rx, optval, optlen);
|
||||
goto error;
|
||||
|
||||
case RXRPC_SECURITY_KEYRING:
|
||||
ret = -EINVAL;
|
||||
if (rx->key)
|
||||
goto error;
|
||||
ret = -EISCONN;
|
||||
if (rx->sk.sk_state != RXRPC_UNCONNECTED)
|
||||
goto error;
|
||||
ret = rxrpc_server_keyring(rx, optval, optlen);
|
||||
goto error;
|
||||
|
||||
case RXRPC_MIN_SECURITY_LEVEL:
|
||||
ret = -EINVAL;
|
||||
if (optlen != sizeof(unsigned int))
|
||||
goto error;
|
||||
ret = -EISCONN;
|
||||
if (rx->sk.sk_state != RXRPC_UNCONNECTED)
|
||||
goto error;
|
||||
ret = get_user(min_sec_level,
|
||||
(unsigned int __user *) optval);
|
||||
if (ret < 0)
|
||||
goto error;
|
||||
ret = -EINVAL;
|
||||
if (min_sec_level > RXRPC_SECURITY_MAX)
|
||||
goto error;
|
||||
rx->min_sec_level = min_sec_level;
|
||||
goto success;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
success:
|
||||
ret = 0;
|
||||
error:
|
||||
release_sock(&rx->sk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* permit an RxRPC socket to be polled
|
||||
*/
|
||||
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
|
||||
poll_table *wait)
|
||||
{
|
||||
unsigned int mask;
|
||||
struct sock *sk = sock->sk;
|
||||
|
||||
sock_poll_wait(file, sk_sleep(sk), wait);
|
||||
mask = 0;
|
||||
|
||||
/* the socket is readable if there are any messages waiting on the Rx
|
||||
* queue */
|
||||
if (!skb_queue_empty(&sk->sk_receive_queue))
|
||||
mask |= POLLIN | POLLRDNORM;
|
||||
|
||||
/* the socket is writable if there is space to add new data to the
|
||||
* socket; there is no guarantee that any particular call in progress
|
||||
* on the socket may have space in the Tx ACK window */
|
||||
if (rxrpc_writable(sk))
|
||||
mask |= POLLOUT | POLLWRNORM;
|
||||
|
||||
return mask;
|
||||
}
|
||||
|
||||
/*
|
||||
* create an RxRPC socket
|
||||
*/
|
||||
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
|
||||
int kern)
|
||||
{
|
||||
struct rxrpc_sock *rx;
|
||||
struct sock *sk;
|
||||
|
||||
_enter("%p,%d", sock, protocol);
|
||||
|
||||
if (!net_eq(net, &init_net))
|
||||
return -EAFNOSUPPORT;
|
||||
|
||||
/* we support transport protocol UDP only */
|
||||
if (protocol != PF_INET)
|
||||
return -EPROTONOSUPPORT;
|
||||
|
||||
if (sock->type != SOCK_DGRAM)
|
||||
return -ESOCKTNOSUPPORT;
|
||||
|
||||
sock->ops = &rxrpc_rpc_ops;
|
||||
sock->state = SS_UNCONNECTED;
|
||||
|
||||
sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
|
||||
if (!sk)
|
||||
return -ENOMEM;
|
||||
|
||||
sock_init_data(sock, sk);
|
||||
sk->sk_state = RXRPC_UNCONNECTED;
|
||||
sk->sk_write_space = rxrpc_write_space;
|
||||
sk->sk_max_ack_backlog = sysctl_rxrpc_max_qlen;
|
||||
sk->sk_destruct = rxrpc_sock_destructor;
|
||||
|
||||
rx = rxrpc_sk(sk);
|
||||
rx->proto = protocol;
|
||||
rx->calls = RB_ROOT;
|
||||
|
||||
INIT_LIST_HEAD(&rx->listen_link);
|
||||
INIT_LIST_HEAD(&rx->secureq);
|
||||
INIT_LIST_HEAD(&rx->acceptq);
|
||||
rwlock_init(&rx->call_lock);
|
||||
memset(&rx->srx, 0, sizeof(rx->srx));
|
||||
|
||||
_leave(" = 0 [%p]", rx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* RxRPC socket destructor
|
||||
*/
|
||||
static void rxrpc_sock_destructor(struct sock *sk)
|
||||
{
|
||||
_enter("%p", sk);
|
||||
|
||||
rxrpc_purge_queue(&sk->sk_receive_queue);
|
||||
|
||||
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
|
||||
WARN_ON(!sk_unhashed(sk));
|
||||
WARN_ON(sk->sk_socket);
|
||||
|
||||
if (!sock_flag(sk, SOCK_DEAD)) {
|
||||
printk("Attempt to release alive rxrpc socket: %p\n", sk);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* release an RxRPC socket
|
||||
*/
|
||||
static int rxrpc_release_sock(struct sock *sk)
|
||||
{
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sk);
|
||||
|
||||
_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
|
||||
|
||||
/* declare the socket closed for business */
|
||||
sock_orphan(sk);
|
||||
sk->sk_shutdown = SHUTDOWN_MASK;
|
||||
|
||||
spin_lock_bh(&sk->sk_receive_queue.lock);
|
||||
sk->sk_state = RXRPC_CLOSE;
|
||||
spin_unlock_bh(&sk->sk_receive_queue.lock);
|
||||
|
||||
ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);
|
||||
|
||||
if (!list_empty(&rx->listen_link)) {
|
||||
write_lock_bh(&rx->local->services_lock);
|
||||
list_del(&rx->listen_link);
|
||||
write_unlock_bh(&rx->local->services_lock);
|
||||
}
|
||||
|
||||
/* try to flush out this socket */
|
||||
rxrpc_release_calls_on_socket(rx);
|
||||
flush_workqueue(rxrpc_workqueue);
|
||||
rxrpc_purge_queue(&sk->sk_receive_queue);
|
||||
|
||||
if (rx->conn) {
|
||||
rxrpc_put_connection(rx->conn);
|
||||
rx->conn = NULL;
|
||||
}
|
||||
|
||||
if (rx->bundle) {
|
||||
rxrpc_put_bundle(rx->trans, rx->bundle);
|
||||
rx->bundle = NULL;
|
||||
}
|
||||
if (rx->trans) {
|
||||
rxrpc_put_transport(rx->trans);
|
||||
rx->trans = NULL;
|
||||
}
|
||||
if (rx->local) {
|
||||
rxrpc_put_local(rx->local);
|
||||
rx->local = NULL;
|
||||
}
|
||||
|
||||
key_put(rx->key);
|
||||
rx->key = NULL;
|
||||
key_put(rx->securities);
|
||||
rx->securities = NULL;
|
||||
sock_put(sk);
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* release an RxRPC BSD socket on close() or equivalent
|
||||
*/
|
||||
static int rxrpc_release(struct socket *sock)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
|
||||
_enter("%p{%p}", sock, sk);
|
||||
|
||||
if (!sk)
|
||||
return 0;
|
||||
|
||||
sock->sk = NULL;
|
||||
|
||||
return rxrpc_release_sock(sk);
|
||||
}
|
||||
|
||||
/*
|
||||
* RxRPC network protocol
|
||||
*/
|
||||
static const struct proto_ops rxrpc_rpc_ops = {
|
||||
.family = PF_UNIX,
|
||||
.owner = THIS_MODULE,
|
||||
.release = rxrpc_release,
|
||||
.bind = rxrpc_bind,
|
||||
.connect = rxrpc_connect,
|
||||
.socketpair = sock_no_socketpair,
|
||||
.accept = sock_no_accept,
|
||||
.getname = sock_no_getname,
|
||||
.poll = rxrpc_poll,
|
||||
.ioctl = sock_no_ioctl,
|
||||
.listen = rxrpc_listen,
|
||||
.shutdown = sock_no_shutdown,
|
||||
.setsockopt = rxrpc_setsockopt,
|
||||
.getsockopt = sock_no_getsockopt,
|
||||
.sendmsg = rxrpc_sendmsg,
|
||||
.recvmsg = rxrpc_recvmsg,
|
||||
.mmap = sock_no_mmap,
|
||||
.sendpage = sock_no_sendpage,
|
||||
};
|
||||
|
||||
static struct proto rxrpc_proto = {
|
||||
.name = "RXRPC",
|
||||
.owner = THIS_MODULE,
|
||||
.obj_size = sizeof(struct rxrpc_sock),
|
||||
.max_header = sizeof(struct rxrpc_header),
|
||||
};
|
||||
|
||||
static const struct net_proto_family rxrpc_family_ops = {
|
||||
.family = PF_RXRPC,
|
||||
.create = rxrpc_create,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/*
|
||||
* initialise and register the RxRPC protocol
|
||||
*/
|
||||
static int __init af_rxrpc_init(void)
|
||||
{
|
||||
int ret = -1;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
|
||||
|
||||
rxrpc_epoch = htonl(get_seconds());
|
||||
|
||||
ret = -ENOMEM;
|
||||
rxrpc_call_jar = kmem_cache_create(
|
||||
"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
|
||||
SLAB_HWCACHE_ALIGN, NULL);
|
||||
if (!rxrpc_call_jar) {
|
||||
printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
|
||||
goto error_call_jar;
|
||||
}
|
||||
|
||||
rxrpc_workqueue = alloc_workqueue("krxrpcd", 0, 1);
|
||||
if (!rxrpc_workqueue) {
|
||||
printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
|
||||
goto error_work_queue;
|
||||
}
|
||||
|
||||
ret = proto_register(&rxrpc_proto, 1);
|
||||
if (ret < 0) {
|
||||
printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
|
||||
goto error_proto;
|
||||
}
|
||||
|
||||
ret = sock_register(&rxrpc_family_ops);
|
||||
if (ret < 0) {
|
||||
printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
|
||||
goto error_sock;
|
||||
}
|
||||
|
||||
ret = register_key_type(&key_type_rxrpc);
|
||||
if (ret < 0) {
|
||||
printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
|
||||
goto error_key_type;
|
||||
}
|
||||
|
||||
ret = register_key_type(&key_type_rxrpc_s);
|
||||
if (ret < 0) {
|
||||
printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
|
||||
goto error_key_type_s;
|
||||
}
|
||||
|
||||
ret = rxrpc_sysctl_init();
|
||||
if (ret < 0) {
|
||||
printk(KERN_CRIT "RxRPC: Cannot register sysctls\n");
|
||||
goto error_sysctls;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
proc_create("rxrpc_calls", 0, init_net.proc_net, &rxrpc_call_seq_fops);
|
||||
proc_create("rxrpc_conns", 0, init_net.proc_net,
|
||||
&rxrpc_connection_seq_fops);
|
||||
#endif
|
||||
return 0;
|
||||
|
||||
error_sysctls:
|
||||
unregister_key_type(&key_type_rxrpc_s);
|
||||
error_key_type_s:
|
||||
unregister_key_type(&key_type_rxrpc);
|
||||
error_key_type:
|
||||
sock_unregister(PF_RXRPC);
|
||||
error_sock:
|
||||
proto_unregister(&rxrpc_proto);
|
||||
error_proto:
|
||||
destroy_workqueue(rxrpc_workqueue);
|
||||
error_work_queue:
|
||||
kmem_cache_destroy(rxrpc_call_jar);
|
||||
error_call_jar:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* unregister the RxRPC protocol
|
||||
*/
|
||||
static void __exit af_rxrpc_exit(void)
|
||||
{
|
||||
_enter("");
|
||||
rxrpc_sysctl_exit();
|
||||
unregister_key_type(&key_type_rxrpc_s);
|
||||
unregister_key_type(&key_type_rxrpc);
|
||||
sock_unregister(PF_RXRPC);
|
||||
proto_unregister(&rxrpc_proto);
|
||||
rxrpc_destroy_all_calls();
|
||||
rxrpc_destroy_all_connections();
|
||||
rxrpc_destroy_all_transports();
|
||||
rxrpc_destroy_all_peers();
|
||||
rxrpc_destroy_all_locals();
|
||||
|
||||
ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
|
||||
|
||||
_debug("flush scheduled work");
|
||||
flush_workqueue(rxrpc_workqueue);
|
||||
remove_proc_entry("rxrpc_conns", init_net.proc_net);
|
||||
remove_proc_entry("rxrpc_calls", init_net.proc_net);
|
||||
destroy_workqueue(rxrpc_workqueue);
|
||||
kmem_cache_destroy(rxrpc_call_jar);
|
||||
_leave("");
|
||||
}
|
||||
|
||||
module_init(af_rxrpc_init);
|
||||
module_exit(af_rxrpc_exit);
|
510
net/rxrpc/ar-accept.c
Normal file
510
net/rxrpc/ar-accept.c
Normal file
|
@ -0,0 +1,510 @@
|
|||
/* incoming call handling
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/errqueue.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include <net/ip.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
|
||||
* generate a connection-level abort
|
||||
*/
|
||||
static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
|
||||
struct rxrpc_header *hdr)
|
||||
{
|
||||
struct msghdr msg;
|
||||
struct kvec iov[1];
|
||||
size_t len;
|
||||
int ret;
|
||||
|
||||
_enter("%d,,", local->debug_id);
|
||||
|
||||
msg.msg_name = &srx->transport.sin;
|
||||
msg.msg_namelen = sizeof(srx->transport.sin);
|
||||
msg.msg_control = NULL;
|
||||
msg.msg_controllen = 0;
|
||||
msg.msg_flags = 0;
|
||||
|
||||
hdr->seq = 0;
|
||||
hdr->type = RXRPC_PACKET_TYPE_BUSY;
|
||||
hdr->flags = 0;
|
||||
hdr->userStatus = 0;
|
||||
hdr->_rsvd = 0;
|
||||
|
||||
iov[0].iov_base = hdr;
|
||||
iov[0].iov_len = sizeof(*hdr);
|
||||
|
||||
len = iov[0].iov_len;
|
||||
|
||||
hdr->serial = htonl(1);
|
||||
_proto("Tx BUSY %%%u", ntohl(hdr->serial));
|
||||
|
||||
ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
|
||||
if (ret < 0) {
|
||||
_leave(" = -EAGAIN [sendmsg failed: %d]", ret);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * accept an incoming call that needs peer, transport and/or connection setting
 * up
 * - looks up/creates the peer, transport, connection and call records for the
 *   first DATA packet of an incoming client call, then attaches the call to
 *   the listening socket
 * - returns 0 on success or a negative errno; -EBUSY maps to a BUSY reply,
 *   -ECONNREFUSED to an INVALID_OPERATION rejection (see the caller)
 */
static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
				      struct rxrpc_sock *rx,
				      struct sk_buff *skb,
				      struct sockaddr_rxrpc *srx)
{
	struct rxrpc_connection *conn;
	struct rxrpc_transport *trans;
	struct rxrpc_skb_priv *sp, *nsp;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct sk_buff *notification;
	int ret;

	_enter("");

	sp = rxrpc_skb(skb);

	/* get a notification message to send to the server app */
	notification = alloc_skb(0, GFP_NOFS);
	if (!notification) {
		_debug("no memory");
		ret = -ENOMEM;
		goto error_nofree;
	}
	rxrpc_new_skb(notification);
	notification->mark = RXRPC_SKB_MARK_NEW_CALL;

	/* peer and transport failures are reported as -EBUSY so the sender
	 * retries later rather than being hard-refused */
	peer = rxrpc_get_peer(srx, GFP_NOIO);
	if (IS_ERR(peer)) {
		_debug("no peer");
		ret = -EBUSY;
		goto error;
	}

	trans = rxrpc_get_transport(local, peer, GFP_NOIO);
	rxrpc_put_peer(peer);
	if (IS_ERR(trans)) {
		_debug("no trans");
		ret = -EBUSY;
		goto error;
	}

	conn = rxrpc_incoming_connection(trans, &sp->hdr, GFP_NOIO);
	rxrpc_put_transport(trans);
	if (IS_ERR(conn)) {
		_debug("no conn");
		ret = PTR_ERR(conn);
		goto error;
	}

	/* the call keeps its own ref on the connection; ours is dropped here */
	call = rxrpc_incoming_call(rx, conn, &sp->hdr, GFP_NOIO);
	rxrpc_put_connection(conn);
	if (IS_ERR(call)) {
		_debug("no call");
		ret = PTR_ERR(call);
		goto error;
	}

	/* attach the call to the socket */
	read_lock_bh(&local->services_lock);
	if (rx->sk.sk_state == RXRPC_CLOSE)
		goto invalid_service;

	write_lock(&rx->call_lock);
	/* INIT_ACCEPT guards against the same call being queued twice */
	if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
		rxrpc_get_call(call);

		spin_lock(&call->conn->state_lock);
		if (sp->hdr.securityIndex > 0 &&
		    call->conn->state == RXRPC_CONN_SERVER_UNSECURED) {
			/* secured service: park the call on the secure queue
			 * and kick the connection to issue a CHALLENGE first */
			_debug("await conn sec");
			list_add_tail(&call->accept_link, &rx->secureq);
			call->conn->state = RXRPC_CONN_SERVER_CHALLENGING;
			atomic_inc(&call->conn->usage);
			set_bit(RXRPC_CONN_CHALLENGE, &call->conn->events);
			rxrpc_queue_conn(call->conn);
		} else {
			/* no security step needed: put the call straight on
			 * the accept queue and notify the server app */
			_debug("conn ready");
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
			list_add_tail(&call->accept_link, &rx->acceptq);
			rxrpc_get_call(call);
			nsp = rxrpc_skb(notification);
			nsp->call = call;

			ASSERTCMP(atomic_read(&call->usage), >=, 3);

			_debug("notify");
			spin_lock(&call->lock);
			ret = rxrpc_queue_rcv_skb(call, notification, true,
						  false);
			spin_unlock(&call->lock);
			/* ownership of the notification skb has passed to the
			 * call's rx queue */
			notification = NULL;
			BUG_ON(ret < 0);
		}
		spin_unlock(&call->conn->state_lock);

		_debug("queued");
	}
	write_unlock(&rx->call_lock);

	/* process the DATA packet that initiated all this */
	_debug("process");
	rxrpc_fast_process_packet(call, skb);

	_debug("done");
	read_unlock_bh(&local->services_lock);
	rxrpc_free_skb(notification);
	rxrpc_put_call(call);
	_leave(" = 0");
	return 0;

invalid_service:
	/* the listening socket closed under us: schedule release of the call
	 * we just created */
	_debug("invalid");
	read_unlock_bh(&local->services_lock);

	read_lock_bh(&call->state_lock);
	/* NOTE(review): RXRPC_CALL_RELEASE is tested in ->flags here but set
	 * in ->events; the other release paths in this file test
	 * RXRPC_CALL_RELEASED in ->flags instead — presumably the same was
	 * intended here; confirm against the flag/event enum definitions */
	if (!test_bit(RXRPC_CALL_RELEASE, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
		rxrpc_get_call(call);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
	ret = -ECONNREFUSED;
error:
	rxrpc_free_skb(notification);
error_nofree:
	_leave(" = %d", ret);
	return ret;
}
|
||||
|
||||
/*
 * accept incoming calls that need peer, transport and/or connection setting up
 * - the packets we get are all incoming client DATA packets that have seq == 1
 * - work item on rxrpc_local::acceptor; drains local->accept_queue, mapping
 *   each packet's fate to a BUSY reply, a rejection or an accepted call
 */
void rxrpc_accept_incoming_calls(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, acceptor);
	struct rxrpc_skb_priv *sp;
	struct sockaddr_rxrpc srx;
	struct rxrpc_sock *rx;
	struct sk_buff *skb;
	__be16 service_id;
	int ret;

	_enter("%d", local->debug_id);

	/* pin the local endpoint for the duration of the loop; bail if it is
	 * already being torn down */
	read_lock_bh(&rxrpc_local_lock);
	if (atomic_read(&local->usage) > 0)
		rxrpc_get_local(local);
	else
		local = NULL;
	read_unlock_bh(&rxrpc_local_lock);
	if (!local) {
		_leave(" [local dead]");
		return;
	}

process_next_packet:
	skb = skb_dequeue(&local->accept_queue);
	if (!skb) {
		rxrpc_put_local(local);
		_leave("\n");
		return;
	}

	_net("incoming call skb %p", skb);

	sp = rxrpc_skb(skb);

	/* determine the remote address */
	memset(&srx, 0, sizeof(srx));
	srx.srx_family = AF_RXRPC;
	srx.transport.family = local->srx.transport.family;
	srx.transport_type = local->srx.transport_type;
	switch (srx.transport.family) {
	case AF_INET:
		srx.transport_len = sizeof(struct sockaddr_in);
		srx.transport.sin.sin_port = udp_hdr(skb)->source;
		srx.transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
		break;
	default:
		/* only IPv4 transports are handled; anything else gets a
		 * BUSY response */
		goto busy;
	}

	/* get the socket providing the service */
	service_id = sp->hdr.serviceId;
	read_lock_bh(&local->services_lock);
	list_for_each_entry(rx, &local->services, listen_link) {
		if (rx->service_id == service_id &&
		    rx->sk.sk_state != RXRPC_CLOSE)
			goto found_service;
	}
	read_unlock_bh(&local->services_lock);
	goto invalid_service;

found_service:
	_debug("found service %hd", ntohs(rx->service_id));
	/* reserve an accept-queue slot and a socket ref before dropping the
	 * services lock */
	if (sk_acceptq_is_full(&rx->sk))
		goto backlog_full;
	sk_acceptq_added(&rx->sk);
	sock_hold(&rx->sk);
	read_unlock_bh(&local->services_lock);

	ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
	if (ret < 0)
		sk_acceptq_removed(&rx->sk);
	sock_put(&rx->sk);
	switch (ret) {
	case -ECONNRESET: /* old calls are ignored */
	case -ECONNABORTED: /* aborted calls are reaborted or ignored */
	case 0:
		goto process_next_packet;
	case -ECONNREFUSED:
		goto invalid_service;
	case -EBUSY:
		goto busy;
	case -EKEYREJECTED:
		goto security_mismatch;
	default:
		BUG();
	}

backlog_full:
	read_unlock_bh(&local->services_lock);
busy:
	/* tell the sender to retry later */
	rxrpc_busy(local, &srx, &sp->hdr);
	rxrpc_free_skb(skb);
	goto process_next_packet;

invalid_service:
	/* no live socket offers this service: hard-reject the packet
	 * (rxrpc_reject_packet consumes the skb) */
	skb->priority = RX_INVALID_OPERATION;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;

	/* can't change connection security type mid-flow */
security_mismatch:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	goto process_next_packet;
}
|
||||
|
||||
/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - returns the accepted call (with a ref held for the caller) or an
 *   ERR_PTR: -ENODATA if nothing is queued, -EBADSLT if user_call_ID is
 *   already in use, or the call's failure errno if it died while queued
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* check the user ID isn't already in use */
	/* (the walk also leaves parent/pp pointing at the insertion slot used
	 * below if the ID is free) */
	ret = -EBADSLT;
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto out;
	}

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->user_call_ID = user_call_ID;
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();
	if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
		BUG();
	rxrpc_queue_call(call);

	/* this ref is returned to the caller */
	rxrpc_get_call(call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
|
||||
|
||||
/*
 * handle rejectance of a call by userspace
 * - reject the call at the front of the queue
 * - a still-live call is marked busy (a BUSY response will be generated by
 *   the call event processor); returns 0 on success, -ENODATA if the accept
 *   queue is empty, or the call's failure errno if it already died
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	ret = -ENODATA;
	if (list_empty(&rx->acceptq))
		goto out;

	/* dequeue the first call and check it's still valid */
	call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		/* hand the BUSY transmission off to the call processor */
		call->state = RXRPC_CALL_SERVER_BUSY;
		if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events))
			rxrpc_queue_call(call);
		ret = 0;
		goto out_release;
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_LOCALLY_ABORTED:
		ret = -ECONNABORTED;
		goto out_release;
	case RXRPC_CALL_NETWORK_ERROR:
		ret = call->conn->error;
		goto out_release;
	case RXRPC_CALL_DEAD:
		ret = -ETIME;
		goto out_discard;
	default:
		BUG();
	}

	/* if the call is already dying or dead, then we leave the socket's ref
	 * on it to be released by rxrpc_dead_call_expired() as induced by
	 * rxrpc_release_call() */
out_release:
	_debug("release %p", call);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
		rxrpc_queue_call(call);
out_discard:
	write_unlock_bh(&call->state_lock);
	_debug("discard %p", call);
out:
	write_unlock(&rx->call_lock);
	_leave(" = %d", ret);
	return ret;
}
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
|
||||
* @sock: The socket on which the impending call is waiting
|
||||
* @user_call_ID: The tag to attach to the call
|
||||
*
|
||||
* Allow a kernel service to accept an incoming call, assuming the incoming
|
||||
* call is still valid.
|
||||
*/
|
||||
struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
|
||||
unsigned long user_call_ID)
|
||||
{
|
||||
struct rxrpc_call *call;
|
||||
|
||||
_enter(",%lx", user_call_ID);
|
||||
call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID);
|
||||
_leave(" = %p", call);
|
||||
return call;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_accept_call);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
|
||||
* @sock: The socket on which the impending call is waiting
|
||||
*
|
||||
* Allow a kernel service to reject an incoming call with a BUSY message,
|
||||
* assuming the incoming call is still valid.
|
||||
*/
|
||||
int rxrpc_kernel_reject_call(struct socket *sock)
|
||||
{
|
||||
int ret;
|
||||
|
||||
_enter("");
|
||||
ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_reject_call);
|
1355
net/rxrpc/ar-ack.c
Normal file
1355
net/rxrpc/ar-ack.c
Normal file
File diff suppressed because it is too large
Load diff
1019
net/rxrpc/ar-call.c
Normal file
1019
net/rxrpc/ar-call.c
Normal file
File diff suppressed because it is too large
Load diff
928
net/rxrpc/ar-connection.c
Normal file
928
net/rxrpc/ar-connection.c
Normal file
|
@ -0,0 +1,928 @@
|
|||
/* RxRPC virtual connection handler
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

/* global list of all extant connections, guarded by rxrpc_connection_lock */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
/* delayed work item that reaps connections idle longer than
 * rxrpc_connection_expiry */
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
|
||||
|
||||
/*
|
||||
* allocate a new client connection bundle
|
||||
*/
|
||||
static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_conn_bundle *bundle;
|
||||
|
||||
_enter("");
|
||||
|
||||
bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
|
||||
if (bundle) {
|
||||
INIT_LIST_HEAD(&bundle->unused_conns);
|
||||
INIT_LIST_HEAD(&bundle->avail_conns);
|
||||
INIT_LIST_HEAD(&bundle->busy_conns);
|
||||
init_waitqueue_head(&bundle->chanwait);
|
||||
atomic_set(&bundle->usage, 1);
|
||||
}
|
||||
|
||||
_leave(" = %p", bundle);
|
||||
return bundle;
|
||||
}
|
||||
|
||||
/*
|
||||
* compare bundle parameters with what we're looking for
|
||||
* - return -ve, 0 or +ve
|
||||
*/
|
||||
static inline
|
||||
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
|
||||
struct key *key, __be16 service_id)
|
||||
{
|
||||
return (bundle->service_id - service_id) ?:
|
||||
((unsigned long) bundle->key - (unsigned long) key);
|
||||
}
|
||||
|
||||
/*
|
||||
* get bundle of client connections that a client socket can make use of
|
||||
*/
|
||||
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
|
||||
struct rxrpc_transport *trans,
|
||||
struct key *key,
|
||||
__be16 service_id,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_conn_bundle *bundle, *candidate;
|
||||
struct rb_node *p, *parent, **pp;
|
||||
|
||||
_enter("%p{%x},%x,%hx,",
|
||||
rx, key_serial(key), trans->debug_id, ntohs(service_id));
|
||||
|
||||
if (rx->trans == trans && rx->bundle) {
|
||||
atomic_inc(&rx->bundle->usage);
|
||||
return rx->bundle;
|
||||
}
|
||||
|
||||
/* search the extant bundles first for one that matches the specified
|
||||
* user ID */
|
||||
spin_lock(&trans->client_lock);
|
||||
|
||||
p = trans->bundles.rb_node;
|
||||
while (p) {
|
||||
bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
|
||||
|
||||
if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
|
||||
p = p->rb_left;
|
||||
else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
|
||||
p = p->rb_right;
|
||||
else
|
||||
goto found_extant_bundle;
|
||||
}
|
||||
|
||||
spin_unlock(&trans->client_lock);
|
||||
|
||||
/* not yet present - create a candidate for a new record and then
|
||||
* redo the search */
|
||||
candidate = rxrpc_alloc_bundle(gfp);
|
||||
if (!candidate) {
|
||||
_leave(" = -ENOMEM");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
candidate->key = key_get(key);
|
||||
candidate->service_id = service_id;
|
||||
|
||||
spin_lock(&trans->client_lock);
|
||||
|
||||
pp = &trans->bundles.rb_node;
|
||||
parent = NULL;
|
||||
while (*pp) {
|
||||
parent = *pp;
|
||||
bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
|
||||
|
||||
if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
|
||||
pp = &(*pp)->rb_left;
|
||||
else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
|
||||
pp = &(*pp)->rb_right;
|
||||
else
|
||||
goto found_extant_second;
|
||||
}
|
||||
|
||||
/* second search also failed; add the new bundle */
|
||||
bundle = candidate;
|
||||
candidate = NULL;
|
||||
|
||||
rb_link_node(&bundle->node, parent, pp);
|
||||
rb_insert_color(&bundle->node, &trans->bundles);
|
||||
spin_unlock(&trans->client_lock);
|
||||
_net("BUNDLE new on trans %d", trans->debug_id);
|
||||
if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
|
||||
atomic_inc(&bundle->usage);
|
||||
rx->bundle = bundle;
|
||||
}
|
||||
_leave(" = %p [new]", bundle);
|
||||
return bundle;
|
||||
|
||||
/* we found the bundle in the list immediately */
|
||||
found_extant_bundle:
|
||||
atomic_inc(&bundle->usage);
|
||||
spin_unlock(&trans->client_lock);
|
||||
_net("BUNDLE old on trans %d", trans->debug_id);
|
||||
if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
|
||||
atomic_inc(&bundle->usage);
|
||||
rx->bundle = bundle;
|
||||
}
|
||||
_leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
|
||||
return bundle;
|
||||
|
||||
/* we found the bundle on the second time through the list */
|
||||
found_extant_second:
|
||||
atomic_inc(&bundle->usage);
|
||||
spin_unlock(&trans->client_lock);
|
||||
kfree(candidate);
|
||||
_net("BUNDLE old2 on trans %d", trans->debug_id);
|
||||
if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
|
||||
atomic_inc(&bundle->usage);
|
||||
rx->bundle = bundle;
|
||||
}
|
||||
_leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
|
||||
return bundle;
|
||||
}
|
||||
|
||||
/*
 * release a bundle
 * - drops one usage ref; on the last ref the bundle is unlinked from the
 *   transport's tree and destroyed (it must hold no connections by then)
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

	/* atomic_dec_and_lock takes client_lock only when the count hits 0,
	 * so unlinking from trans->bundles cannot race with lookups */
	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}
|
||||
|
||||
/*
|
||||
* allocate a new connection
|
||||
*/
|
||||
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_connection *conn;
|
||||
|
||||
_enter("");
|
||||
|
||||
conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
|
||||
if (conn) {
|
||||
INIT_WORK(&conn->processor, &rxrpc_process_connection);
|
||||
INIT_LIST_HEAD(&conn->bundle_link);
|
||||
conn->calls = RB_ROOT;
|
||||
skb_queue_head_init(&conn->rx_queue);
|
||||
rwlock_init(&conn->lock);
|
||||
spin_lock_init(&conn->state_lock);
|
||||
atomic_set(&conn->usage, 1);
|
||||
conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
||||
conn->avail_calls = RXRPC_MAXCALLS;
|
||||
conn->size_align = 4;
|
||||
conn->header_size = sizeof(struct rxrpc_header);
|
||||
}
|
||||
|
||||
_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
|
||||
return conn;
|
||||
}
|
||||
|
||||
/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 * - IDs advance by RXRPC_CID_INC and wrap back to RXRPC_CID_INC (never 0);
 *   tree ordering is (epoch, real_conn_id)
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 real_conn_id;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

	/* advance the per-transport counter, skipping the 0 ID on wrap */
	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (real_conn_id < xconn->real_conn_id)
			p = &(*p)->rb_left;
		else if (real_conn_id > xconn->real_conn_id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->real_conn_id = real_conn_id;
	conn->cid = htonl(real_conn_id);
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		real_conn_id += RXRPC_CID_INC;
		if (real_conn_id < RXRPC_CID_INC) {
			/* counter wrapped: restart the search from the lowest
			 * valid ID */
			real_conn_id = RXRPC_CID_INC;
			conn->trans->conn_idcounter = real_conn_id;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		/* a gap before the next in-order node means the candidate ID
		 * is free */
		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    real_conn_id < xconn->real_conn_id)
			goto attempt_insertion;
	}
}
|
||||
|
||||
/*
 * add a call to a connection's call-by-ID tree
 * - keyed on the network-order call ID; a duplicate ID is a fatal logic
 *   error (BUG) since the caller just allocated a fresh ID
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	__be32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		/* NB: comparison is on big-endian values, so tree order is
		 * not numeric host order; lookups use the same keys */
		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}
|
||||
|
||||
/*
 * connect a call on an exclusive connection
 * - the socket keeps a single private connection (rx->conn), created lazily
 *   on first use; the call is then attached to a free channel on it
 * - returns 0, -ENOMEM, a security-init errno, or -ENOSR if all channels
 *   are in use
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
				   struct rxrpc_transport *trans,
				   __be16 service_id,
				   struct rxrpc_call *call,
				   gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int chan, ret;

	_enter("");

	conn = rx->conn;
	if (!conn) {
		/* not yet present - create a candidate for a new connection
		 * and then redo the check */
		conn = rxrpc_alloc_connection(gfp);
		if (!conn) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		conn->trans = trans;
		conn->bundle = NULL;
		conn->service_id = service_id;
		conn->epoch = rxrpc_epoch;
		conn->in_clientflag = 0;
		conn->out_clientflag = RXRPC_CLIENT_INITIATED;
		conn->cid = 0;
		conn->state = RXRPC_CONN_CLIENT;
		/* one channel is reserved for the call being connected now */
		conn->avail_calls = RXRPC_MAXCALLS - 1;
		conn->security_level = rx->min_sec_level;
		conn->key = key_get(rx->key);

		ret = rxrpc_init_client_conn_security(conn);
		if (ret < 0) {
			key_put(conn->key);
			kfree(conn);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&conn->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);
		atomic_inc(&trans->usage);

		_net("CONNECT EXCL new %d on TRANS %d",
		     conn->debug_id, conn->trans->debug_id);

		/* rx->conn takes over the initial usage ref from the alloc */
		rxrpc_assign_connection_id(conn);
		rx->conn = conn;
	} else {
		spin_lock(&trans->client_lock);
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	goto no_free_channels;

found_channel:
	/* the channel slot holds its own ref on the connection */
	atomic_inc(&conn->usage);
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	/* the low CID bits encode the channel number */
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);
	_leave(" = 0");
	return 0;

no_free_channels:
	spin_unlock(&trans->client_lock);
	_leave(" = -ENOSR");
	return -ENOSR;
}
|
||||
|
||||
/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 * - takes a free channel from the bundle's available/unused connections,
 *   allocating new connections on demand (capped at 20 per bundle, waiting
 *   on bundle->chanwait when the cap is hit and gfp allows sleeping)
 * - returns 0, -ENOMEM, -EAGAIN (cap hit, non-blocking gfp), -ERESTARTSYS,
 *   or a security-init errno
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_transport *trans,
		       struct rxrpc_conn_bundle *bundle,
		       struct rxrpc_call *call,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate;
	int chan, ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,%lx,", rx, call->user_call_ID);

	/* exclusive-connection sockets bypass the bundle machinery */
	if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
		return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
					       call, gfp);

	spin_lock(&trans->client_lock);
	for (;;) {
		/* see if the bundle has a call slot available */
		if (!list_empty(&bundle->avail_conns)) {
			_debug("avail");
			conn = list_entry(bundle->avail_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			/* discard dead/aborted connections from the list */
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			/* taking the last free channel moves the connection
			 * onto the busy list */
			if (--conn->avail_calls == 0)
				list_move(&conn->bundle_link,
					  &bundle->busy_conns);
			ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			break;
		}

		if (!list_empty(&bundle->unused_conns)) {
			_debug("unused");
			conn = list_entry(bundle->unused_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			/* first use of this connection: claim one channel and
			 * promote it to the available list */
			ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
			conn->avail_calls = RXRPC_MAXCALLS - 1;
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			list_move(&conn->bundle_link, &bundle->avail_conns);
			break;
		}

		/* need to allocate a new connection */
		_debug("get new conn [%d]", bundle->num_conns);

		spin_unlock(&trans->client_lock);

		if (signal_pending(current))
			goto interrupted;

		if (bundle->num_conns >= 20) {
			_debug("too many conns");

			if (!(gfp & __GFP_WAIT)) {
				_leave(" = -EAGAIN");
				return -EAGAIN;
			}

			/* sleep until a channel or a connection slot frees up
			 * (classic add-to-waitqueue / recheck / schedule) */
			add_wait_queue(&bundle->chanwait, &myself);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (bundle->num_conns < 20 ||
				    !list_empty(&bundle->unused_conns) ||
				    !list_empty(&bundle->avail_conns))
					break;
				if (signal_pending(current))
					goto interrupted_dequeue;
				schedule();
			}
			remove_wait_queue(&bundle->chanwait, &myself);
			__set_current_state(TASK_RUNNING);
			spin_lock(&trans->client_lock);
			continue;
		}

		/* not yet present - create a candidate for a new connection and then
		 * redo the check */
		candidate = rxrpc_alloc_connection(gfp);
		if (!candidate) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		candidate->trans = trans;
		candidate->bundle = bundle;
		candidate->service_id = bundle->service_id;
		candidate->epoch = rxrpc_epoch;
		candidate->in_clientflag = 0;
		candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->cid = 0;
		candidate->state = RXRPC_CONN_CLIENT;
		candidate->avail_calls = RXRPC_MAXCALLS;
		candidate->security_level = rx->min_sec_level;
		candidate->key = key_get(bundle->key);

		ret = rxrpc_init_client_conn_security(candidate);
		if (ret < 0) {
			key_put(candidate->key);
			kfree(candidate);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&candidate->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);

		list_add(&candidate->bundle_link, &bundle->unused_conns);
		bundle->num_conns++;
		atomic_inc(&bundle->usage);
		atomic_inc(&trans->usage);

		_net("CONNECT new %d on TRANS %d",
		     candidate->debug_id, candidate->trans->debug_id);

		rxrpc_assign_connection_id(candidate);
		if (candidate->security)
			candidate->security->prime_packet_security(candidate);

		/* leave the candidate lurking in zombie mode attached to the
		 * bundle until we're ready for it */
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 * - we're holding a reference on the bundle
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	ASSERT(conn->channels[0] == NULL ||
	       conn->channels[1] == NULL ||
	       conn->channels[2] == NULL ||
	       conn->channels[3] == NULL);
	BUG();

found_channel:
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	/* the low CID bits encode the channel number */
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);

	_leave(" = 0");
	return 0;

interrupted_dequeue:
	remove_wait_queue(&bundle->chanwait, &myself);
	__set_current_state(TASK_RUNNING);
interrupted:
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}
|
||||
|
||||
/*
 * get a record of an incoming connection
 * - looks up trans->server_conns by {epoch, connection ID}; if no record
 *   exists, allocates one and inserts it, redoing the search under the write
 *   lock in case of a race
 * - returns the connection with its usage count incremented, or an ERR_PTR
 *   (-ENOMEM on allocation failure, -EKEYREJECTED on security-index mismatch)
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 conn_id;

	_enter("");

	/* only client-initiated packets can create server-side records */
	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		/* tree is ordered first by epoch, then by connection ID
		 * (NOTE(review): epoch is compared in __be32 form — ordering
		 * is by wire representation, consistent with the insert path
		 * below) */
		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
	candidate->real_conn_id = conn_id;
	candidate->state = RXRPC_CONN_SERVER;
	/* a secured service must pass the challenge/response exchange before
	 * the connection is considered usable */
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	/* repeat the search under the write lock: someone may have inserted
	 * the same connection whilst the lock was dropped */
	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (conn_id < conn->real_conn_id)
			pp = &(*pp)->rb_left;
		else if (conn_id > conn->real_conn_id)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	/* also put it on the global connection list for the reaper */
	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	/* discard the now-unneeded candidate */
	kfree(candidate);
	goto success;

security_mismatch:
	/* kfree(NULL) is a no-op, so this is safe on the first-search path
	 * where no candidate was allocated */
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}
|
||||
|
||||
/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 * - chooses the server or client tree from the packet's CLIENT_INITIATED flag
 * - returns the connection with its usage count incremented, or NULL if no
 *   match is found
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
					       struct rxrpc_header *hdr)
{
	struct rxrpc_connection *conn;
	struct rb_node *p;
	__be32 epoch;
	u32 conn_id;

	_enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);

	read_lock_bh(&trans->conn_lock);

	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
	epoch = hdr->epoch;

	/* a client-initiated packet targets one of our server connections;
	 * otherwise it's a reply on one of our client connections */
	if (hdr->flags & RXRPC_CLIENT_INITIATED)
		p = trans->server_conns.rb_node;
	else
		p = trans->client_conns.rb_node;

	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		/* ordered by epoch (in __be32 form) then connection ID,
		 * matching the insertion order used elsewhere */
		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found;
	}

	read_unlock_bh(&trans->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	/* take a ref before dropping the lock so the reaper can't free it */
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	_leave(" = %p", conn);
	return conn;
}
|
||||
|
||||
/*
 * release a virtual connection
 * - drops the caller's reference; when the count hits zero the connection is
 *   left for the reaper work item to destroy after an expiry period
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

	/* timestamp before the decrement so the reaper sees a valid put_time
	 * as soon as the count reaches zero */
	conn->put_time = get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		/* schedule an immediate reap pass; the reaper itself applies
		 * the expiry grace period */
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}
|
||||
|
||||
/*
 * destroy a virtual connection
 * - called only from the reaper once the usage count has reached zero and the
 *   connection has been unlinked from all lists/trees
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	/* no calls may still be attached at this point */
	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_clear_conn_security(conn);
	/* release the transport ref taken when the conn was inserted */
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}
|
||||
|
||||
/*
 * reap dead connections
 * - work item that walks the global connection list, moving connections that
 *   have been unused for longer than rxrpc_connection_expiry onto a local
 *   graveyard list, then destroys them outside the locks
 * - reschedules itself for the earliest not-yet-expired zero-usage connection
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		/* still in use - skip without taking the per-trans locks */
		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		/* lock order: client_lock, then conn_lock, both nested inside
		 * rxrpc_connection_lock */
		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
		reap_time = conn->put_time + rxrpc_connection_expiry;

		if (atomic_read(&conn->usage) > 0) {
			/* re-check under the locks: it may have been revived
			 * since the unlocked test above */
			;
		} else if (reap_time <= now) {
			/* expired: unlink from the global list and from the
			 * transport's client or server tree */
			list_move_tail(&conn->link, &graveyard);
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			/* not yet expired - remember the soonest deadline */
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}
|
||||
|
||||
/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 * - used at module unload: zeroing the expiry makes every zero-usage
 *   connection immediately reapable, then a final reap pass is queued
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	rxrpc_connection_expiry = 0;
	/* cancel any pending delayed pass, then run one right away with the
	 * zeroed expiry in effect */
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}
|
405
net/rxrpc/ar-connevent.c
Normal file
405
net/rxrpc/ar-connevent.c
Normal file
|
@ -0,0 +1,405 @@
|
|||
/* connection-level event handling
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/errqueue.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include <net/ip.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
 * pass a connection-level abort onto all calls on that connection
 * - marks every not-yet-complete call with the given state and abort code and
 *   queues it for its event processor to deliver the abort
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn, int state,
			      u32 abort_code)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("{%d},%x", conn->debug_id, abort_code);

	read_lock_bh(&conn->lock);

	for (p = rb_first(&conn->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, conn_node);
		write_lock(&call->state_lock);
		if (call->state <= RXRPC_CALL_COMPLETE) {
			call->state = state;
			call->abort_code = abort_code;
			/* pick the event matching whether the abort was
			 * generated locally or received from the peer */
			if (state == RXRPC_CALL_LOCALLY_ABORTED)
				set_bit(RXRPC_CALL_CONN_ABORT, &call->events);
			else
				set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		write_unlock(&call->state_lock);
	}

	read_unlock_bh(&conn->lock);
	_leave("");
}
|
||||
|
||||
/*
 * generate a connection-level abort
 * - moves the connection to LOCALLY_ABORTED (unless already aborted), aborts
 *   all its calls, and transmits an ABORT packet carrying abort_code
 * - returns 0 on success or if the connection was already dead, -EAGAIN if
 *   the abort packet could not be sent (caller may retry)
 */
static int rxrpc_abort_connection(struct rxrpc_connection *conn,
				  u32 error, u32 abort_code)
{
	struct rxrpc_header hdr;
	struct msghdr msg;
	struct kvec iov[2];
	__be32 word;
	size_t len;
	int ret;

	_enter("%d,,%u,%u", conn->debug_id, error, abort_code);

	/* generate a connection-level abort */
	spin_lock_bh(&conn->state_lock);
	if (conn->state < RXRPC_CONN_REMOTELY_ABORTED) {
		conn->state = RXRPC_CONN_LOCALLY_ABORTED;
		conn->error = error;
		spin_unlock_bh(&conn->state_lock);
	} else {
		/* already aborted one way or the other - nothing to do */
		spin_unlock_bh(&conn->state_lock);
		_leave(" = 0 [already dead]");
		return 0;
	}

	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code);

	msg.msg_name = &conn->trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* build the wire header for the ABORT packet; callNumber 0 marks it
	 * as connection-level rather than call-level */
	hdr.epoch = conn->epoch;
	hdr.cid = conn->cid;
	hdr.callNumber = 0;
	hdr.seq = 0;
	hdr.type = RXRPC_PACKET_TYPE_ABORT;
	hdr.flags = conn->out_clientflag;
	hdr.userStatus = 0;
	hdr.securityIndex = conn->security_ix;
	hdr._rsvd = 0;
	hdr.serviceId = conn->service_id;

	/* the abort code travels as a network-order word after the header */
	word = htonl(abort_code);

	iov[0].iov_base = &hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = &word;
	iov[1].iov_len = sizeof(word);

	len = iov[0].iov_len + iov[1].iov_len;

	/* serial is assigned as late as possible before transmission */
	hdr.serial = htonl(atomic_inc_return(&conn->serial));
	_proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);

	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}
|
||||
|
||||
/*
|
||||
* mark a call as being on a now-secured channel
|
||||
* - must be called with softirqs disabled
|
||||
*/
|
||||
static void rxrpc_call_is_secure(struct rxrpc_call *call)
|
||||
{
|
||||
_enter("%p", call);
|
||||
if (call) {
|
||||
read_lock(&call->state_lock);
|
||||
if (call->state < RXRPC_CALL_COMPLETE &&
|
||||
!test_and_set_bit(RXRPC_CALL_SECURED, &call->events))
|
||||
rxrpc_queue_call(call);
|
||||
read_unlock(&call->state_lock);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * connection-level Rx packet processor
 * - handles ABORT, CHALLENGE and RESPONSE packets addressed to the connection
 *   (callNumber 0); anything else is a protocol error
 * - returns 0 on success, -ECONNABORTED if the connection is/becomes aborted,
 *   -EPROTO on malformed input, or a security-module error code
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 tmp;
	u32 serial;
	int loop, ret;

	/* nothing more to process once the connection has been aborted */
	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		kleave(" = -ECONNABORTED [%u]", conn->state);
		return -ECONNABORTED;
	}

	serial = ntohl(sp->hdr.serial);

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, serial);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_ABORT:
		/* the abort code is the first word of the payload */
		if (skb_copy_bits(skb, 0, &tmp, sizeof(tmp)) < 0)
			return -EPROTO;
		_proto("Rx ABORT %%%u { ac=%d }", serial, ntohl(tmp));

		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
				  ntohl(tmp));
		return -ECONNABORTED;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		/* only meaningful if a security class is attached */
		if (conn->security)
			return conn->security->respond_to_challenge(
				conn, skb, _abort_code);
		return -EPROTO;

	case RXRPC_PACKET_TYPE_RESPONSE:
		if (!conn->security)
			return -EPROTO;

		ret = conn->security->verify_response(conn, skb, _abort_code);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(conn);
		if (ret < 0)
			return ret;

		conn->security->prime_packet_security(conn);
		read_lock_bh(&conn->lock);
		spin_lock(&conn->state_lock);

		/* the response completed the challenge: mark the connection
		 * secure and wake any calls waiting on its channels */
		if (conn->state == RXRPC_CONN_SERVER_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVER;
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(conn->channels[loop]);
		}

		spin_unlock(&conn->state_lock);
		read_unlock_bh(&conn->lock);
		return 0;

	default:
		_leave(" = -EPROTO [%u]", sp->hdr.type);
		return -EPROTO;
	}
}
|
||||
|
||||
/*
 * set up security and issue a challenge
 * - looks up the server's security class for this connection if not already
 *   keyed, then asks it to transmit a CHALLENGE packet
 * - on any failure the whole connection is aborted with an appropriate code
 */
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
	u32 abort_code;
	int ret;

	_enter("{%d}", conn->debug_id);

	/* security index 0 (plain) connections never get here */
	ASSERT(conn->security_ix != 0);

	if (!conn->key) {
		_debug("set up security");
		ret = rxrpc_init_server_conn_security(conn);
		switch (ret) {
		case 0:
			break;
		case -ENOENT:
			/* no matching server key */
			abort_code = RX_CALL_DEAD;
			goto abort;
		default:
			abort_code = RXKADNOAUTH;
			goto abort;
		}
	}

	ASSERT(conn->security != NULL);

	if (conn->security->issue_challenge(conn) < 0) {
		abort_code = RX_CALL_DEAD;
		ret = -ENOMEM;
		goto abort;
	}

	_leave("");
	return;

abort:
	_debug("abort %d, %d", ret, abort_code);
	/* ret is negative here; pass the positive errno as the error */
	rxrpc_abort_connection(conn, -ret, abort_code);
	_leave(" [aborted]");
}
|
||||
|
||||
/*
 * connection-level event processor
 * - work item: handles a pending security-challenge event and then drains the
 *   connection's Rx queue through rxrpc_process_event()
 * - each queued skb holds its own reference on the connection, released as
 *   the skb is consumed; the extra ref taken here covers the processing run
 */
void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);
	struct sk_buff *skb;
	u32 abort_code = RX_PROTOCOL_ERROR;
	int ret;

	_enter("{%d}", conn->debug_id);

	atomic_inc(&conn->usage);

	if (test_and_clear_bit(RXRPC_CONN_CHALLENGE, &conn->events)) {
		rxrpc_secure_connection(conn);
		/* drop the ref that accompanied the CHALLENGE event */
		rxrpc_put_connection(conn);
	}

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		ret = rxrpc_process_event(conn, skb, &abort_code);
		switch (ret) {
		case -EPROTO:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
			goto protocol_error;
		case -EAGAIN:
			/* transient failure - push the skb back and retry
			 * later, keeping its connection ref */
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
			rxrpc_put_connection(conn);
			rxrpc_free_skb(skb);
			break;
		}
	}

out:
	rxrpc_put_connection(conn);
	_leave("");
	return;

requeue_and_leave:
	skb_queue_head(&conn->rx_queue, skb);
	goto out;

protocol_error:
	/* if the abort itself couldn't be sent, requeue and try again */
	if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
		goto requeue_and_leave;
	rxrpc_put_connection(conn);
	rxrpc_free_skb(skb);
	_leave(" [EPROTO]");
	goto out;
}
|
||||
|
||||
/*
 * put a packet up for transport-level abort
 * - queues the offending skb on the local endpoint's reject queue and kicks
 *   the rejecter work item, taking a ref on the endpoint for the skb
 */
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);

	/* the endpoint must still be live; a zero count here means someone
	 * queued against a dying endpoint */
	if (!atomic_inc_not_zero(&local->usage)) {
		printk("resurrected on reject\n");
		BUG();
	}

	skb_queue_tail(&local->reject_queue, skb);
	rxrpc_queue_work(&local->rejecter);
}
|
||||
|
||||
/*
 * reject packets through the local endpoint
 * - work item: drains the reject queue, sending back an ABORT packet for each
 *   queued skb (IPv4 only); the abort code was stashed in skb->priority
 * - each queued skb carries a ref on the local endpoint, dropped per skb; a
 *   further ref is taken here to cover the run itself
 */
void rxrpc_reject_packets(struct work_struct *work)
{
	union {
		struct sockaddr sa;
		struct sockaddr_in sin;
	} sa;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_header hdr;
	struct rxrpc_local *local;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;

	local = container_of(work, struct rxrpc_local, rejecter);
	rxrpc_get_local(local);

	_enter("%d", local->debug_id);

	/* reply layout: header followed by the 32-bit abort code */
	iov[0].iov_base = &hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);
	size = sizeof(hdr) + sizeof(code);

	msg.msg_name = &sa;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa.sa_family = local->srx.transport.family;
	switch (sa.sa.sa_family) {
	case AF_INET:
		msg.msg_namelen = sizeof(sa.sin);
		break;
	default:
		msg.msg_namelen = 0;
		break;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.type = RXRPC_PACKET_TYPE_ABORT;

	while ((skb = skb_dequeue(&local->reject_queue))) {
		sp = rxrpc_skb(skb);
		switch (sa.sa.sa_family) {
		case AF_INET:
			/* bounce the abort straight back to the sender */
			sa.sin.sin_port = udp_hdr(skb)->source;
			sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
			code = htonl(skb->priority);

			hdr.epoch = sp->hdr.epoch;
			hdr.cid = sp->hdr.cid;
			hdr.callNumber = sp->hdr.callNumber;
			hdr.serviceId = sp->hdr.serviceId;
			hdr.flags = sp->hdr.flags;
			/* flip the direction bit and clear everything else */
			hdr.flags ^= RXRPC_CLIENT_INITIATED;
			hdr.flags &= RXRPC_CLIENT_INITIATED;

			/* best-effort: the send result is deliberately
			 * ignored for a reject */
			kernel_sendmsg(local->socket, &msg, iov, 2, size);
			break;

		default:
			break;
		}

		rxrpc_free_skb(skb);
		/* drop the per-skb ref taken by rxrpc_reject_packet() */
		rxrpc_put_local(local);
	}

	rxrpc_put_local(local);
	_leave("");
}
|
236
net/rxrpc/ar-error.c
Normal file
236
net/rxrpc/ar-error.c
Normal file
|
@ -0,0 +1,236 @@
|
|||
/* Error message handling (ICMP)
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/errqueue.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include <net/ip.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
 * handle an error received on the local endpoint
 * - sk_error_report hook for the UDP socket: pulls one skb off the socket's
 *   error queue, applies any ICMP Fragmentation-Needed MTU reduction to the
 *   peer, and hands the skb (with a transport ref) to the error-handler work
 *   item for call-level processing
 */
void rxrpc_UDP_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;
	__be32 addr;
	__be16 port;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}

	rxrpc_new_skb(skb);

	/* the offending destination address is embedded in the packet */
	serr = SKB_EXT_ERR(skb);
	addr = *(__be32 *)(skb_network_header(skb) + serr->addr_offset);
	port = serr->port;

	_net("Rx UDP Error from %pI4:%hu", &addr, ntohs(port));
	_debug("Msg l:%d d:%d", skb->len, skb->data_len);

	peer = rxrpc_find_peer(local, addr, port);
	if (IS_ERR(peer)) {
		rxrpc_free_skb(skb);
		_leave(" [no peer]");
		return;
	}

	trans = rxrpc_find_transport(local, peer);
	if (!trans) {
		rxrpc_put_peer(peer);
		rxrpc_free_skb(skb);
		_leave(" [no trans]");
		return;
	}

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED
	    ) {
		u32 mtu = serr->ee.ee_info;

		_net("Rx Received ICMP Fragmentation Needed (%d)", mtu);

		/* wind down the local interface MTU */
		if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
			peer->if_mtu = mtu;
			_net("I/F MTU %u", mtu);
		}

		if (mtu == 0) {
			/* they didn't give us a size, estimate one */
			mtu = peer->if_mtu;
			if (mtu > 1500) {
				mtu >>= 1;
				if (mtu < 1500)
					mtu = 1500;
			} else {
				mtu -= 100;
				if (mtu < peer->hdrsize)
					mtu = peer->hdrsize + 4;
			}
		}

		if (mtu < peer->mtu) {
			spin_lock_bh(&peer->lock);
			peer->mtu = mtu;
			peer->maxdata = peer->mtu - peer->hdrsize;
			spin_unlock_bh(&peer->lock);
			_net("Net MTU %u (maxdata %u)",
			     peer->mtu, peer->maxdata);
		}
	}

	rxrpc_put_peer(peer);

	/* pass the transport ref to error_handler to release */
	skb_queue_tail(&trans->error_queue, skb);
	rxrpc_queue_work(&trans->error_handler);

	_leave("");
}
|
||||
|
||||
/*
 * deal with UDP error messages
 * - work item: takes one skb off the transport's error queue, maps the
 *   extended-error report to a POSIX error number, and if unrecoverable
 *   marks every call targeting the peer with a network error
 * - releases the transport ref handed over by rxrpc_UDP_error_report()
 */
void rxrpc_UDP_error_handler(struct work_struct *work)
{
	struct sock_extended_err *ee;
	struct sock_exterr_skb *serr;
	struct rxrpc_transport *trans =
		container_of(work, struct rxrpc_transport, error_handler);
	struct sk_buff *skb;
	int err;

	_enter("");

	skb = skb_dequeue(&trans->error_queue);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	ee = &serr->ee;

	_net("Rx Error o=%d t=%d c=%d e=%d",
	     ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);

	err = ee->ee_errno;

	/* translate ICMP codes into errno values (kept positive here; the
	 * sign convention is applied where net_error is consumed) */
	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				err = ENETUNREACH;
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				err = EHOSTUNREACH;
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				err = ECONNREFUSED;
				break;
			case ICMP_FRAG_NEEDED:
				_net("Rx Received ICMP Fragmentation Needed (%d)",
				     ee->ee_info);
				err = 0; /* dealt with elsewhere */
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				err = ENETUNREACH;
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				err = EHOSTUNREACH;
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }",
		       ee->ee_errno);
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }",
		       ee->ee_origin);
		break;
	}

	/* terminate all the affected calls if there's an unrecoverable
	 * error */
	if (err) {
		struct rxrpc_call *call, *_n;

		_debug("ISSUE ERROR %d", err);

		spin_lock_bh(&trans->peer->lock);
		trans->peer->net_error = err;

		list_for_each_entry_safe(call, _n, &trans->peer->error_targets,
					 error_link) {
			write_lock(&call->state_lock);
			if (call->state != RXRPC_CALL_COMPLETE &&
			    call->state < RXRPC_CALL_NETWORK_ERROR) {
				call->state = RXRPC_CALL_NETWORK_ERROR;
				set_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
				rxrpc_queue_call(call);
			}
			write_unlock(&call->state_lock);
			list_del_init(&call->error_link);
		}

		spin_unlock_bh(&trans->peer->lock);
	}

	/* more errors may have arrived whilst we were busy */
	if (!skb_queue_empty(&trans->error_queue))
		rxrpc_queue_work(&trans->error_handler);

	rxrpc_free_skb(skb);
	rxrpc_put_transport(trans);
	_leave("");
}
|
767
net/rxrpc/ar-input.c
Normal file
767
net/rxrpc/ar-input.c
Normal file
|
@ -0,0 +1,767 @@
|
|||
/* RxRPC packet reception
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/errqueue.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/udp.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/* human-readable names for the RxRPC packet types, indexed by sp->hdr.type;
 * unassigned slots are rendered as "?NN" */
const char *rxrpc_pkts[] = {
	"?00",
	"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
	"?09", "?10", "?11", "?12", "?13", "?14", "?15"
};
|
||||
|
||||
/*
 * queue a packet for recvmsg to pass to userspace
 * - the caller must hold a lock on call->lock
 * - must not be called with interrupts disabled (sk_filter() disables BH's)
 * - eats the packet whether successful or not
 * - there must be just one reference to the packet, which the caller passes to
 *   this function
 * - returns 0 on success or a negative sk_filter() error; on any path where
 *   the skb is not queued, the skb's call ref is dropped and the skb freed
 */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
			bool force, bool terminal)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = call->socket;
	struct sock *sk;
	int ret;

	_enter(",,%d,%d", force, terminal);

	ASSERT(!irqs_disabled());

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, call);

	/* if we've already posted the terminal message for a call, then we
	 * don't post any more */
	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		_debug("already terminated");
		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
		return 0;
	}

	sk = &rx->sk;

	if (!force) {
		/* cast skb->rcvbuf to unsigned... It's pointless, but
		 * reduces number of warnings when compiling with -W
		 * --ANK */
		// ret = -ENOBUFS;
		// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		// (unsigned int) sk->sk_rcvbuf)
		// goto out;

		ret = sk_filter(sk, skb);
		if (ret < 0)
			goto out;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* re-check the terminal/released/closed state under the queue lock
	 * before committing the skb to the socket */
	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
	    !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    call->socket->sk.sk_state != RXRPC_CLOSE) {
		skb->destructor = rxrpc_packet_destructor;
		skb->dev = NULL;
		skb->sk = sk;
		/* charge the skb against the socket's receive buffer */
		atomic_add(skb->truesize, &sk->sk_rmem_alloc);

		if (terminal) {
			_debug("<<<< TERMINAL MESSAGE >>>>");
			set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
		}

		/* allow interception by a kernel service */
		if (rx->interceptor) {
			rx->interceptor(sk, call->user_call_ID, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);
		} else {
			_net("post skb %p", skb);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);

			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk);
		}
		/* ownership transferred - don't free it below */
		skb = NULL;
	} else {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}
	ret = 0;

out:
	/* release the socket buffer */
	if (skb) {
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
	}

	_leave(" = %d", ret);
	return ret;
}
|
||||
|
||||
/*
 * process a DATA packet, posting the packet to the appropriate queue
 * - eats the packet if successful (the skb must not be touched by the caller
 *   after a 0 return; on error the caller still owns it)
 * - returns 0 if consumed (posted, queued or discarded), -EBADMSG on a data
 *   packet arriving after the last packet was already seen, or a negative
 *   error from rxrpc_queue_rcv_skb()
 */
static int rxrpc_fast_process_data(struct rxrpc_call *call,
				   struct sk_buff *skb, u32 seq)
{
	struct rxrpc_skb_priv *sp;
	bool terminal;
	int ret, ackbit, ack;

	_enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, NULL);

	spin_lock(&call->lock);

	/* a call past COMPLETE no longer wants data; just throw it away */
	if (call->state > RXRPC_CALL_COMPLETE)
		goto discard;

	/* receive-window bookkeeping invariants: expect >= post >= recv >= eaten */
	ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
	ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
	ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);

	/* sequence numbers below the posting point are retransmits we've
	 * already delivered */
	if (seq < call->rx_data_post) {
		_debug("dup #%u [-%u]", seq, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		ret = -ENOBUFS;
		goto discard_and_ack;
	}

	/* we may already have the packet in the out of sequence queue */
	ackbit = seq - (call->rx_data_eaten + 1);
	ASSERTCMP(ackbit, >=, 0);
	if (__test_and_set_bit(ackbit, call->ackr_window)) {
		_debug("dup oos #%u [%u,%u]",
		       seq, call->rx_data_eaten, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		goto discard_and_ack;
	}

	/* packet lies beyond the advertised receive window: undo the bit we
	 * just set and tell the peer off */
	if (seq >= call->ackr_win_top) {
		_debug("exceed #%u [%u]", seq, call->ackr_win_top);
		__clear_bit(ackbit, call->ackr_window);
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		goto discard_and_ack;
	}

	if (seq == call->rx_data_expect) {
		/* exactly the packet we expected next */
		clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
		call->rx_data_expect++;
	} else if (seq > call->rx_data_expect) {
		/* a gap: remember the new high-water mark; only send an
		 * OUT_OF_SEQUENCE ack if we'd already noted an OOS condition */
		_debug("oos #%u [%u]", seq, call->rx_data_expect);
		call->rx_data_expect = seq + 1;
		if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			goto enqueue_and_ack;
		}
		goto enqueue_packet;
	}

	/* in-window but not the next packet to post - defer it */
	if (seq != call->rx_data_post) {
		_debug("ahead #%u [%u]", seq, call->rx_data_post);
		goto enqueue_packet;
	}

	/* data after the advertised last packet is a protocol violation */
	if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
		goto protocol_error;

	/* if the packet need security things doing to it, then it goes down
	 * the slow path */
	if (call->conn->security)
		goto enqueue_packet;

	sp->call = call;
	rxrpc_get_call(call);
	/* terminal = last data packet of a reply (i.e. not client-initiated) */
	terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
		    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
	ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOBUFS) {
			/* couldn't accept it after all - retract the ack bit */
			__clear_bit(ackbit, call->ackr_window);
			ack = RXRPC_ACK_NOSPACE;
			goto discard_and_ack;
		}
		goto out;
	}

	/* ownership passed to the socket Rx queue */
	skb = NULL;

	_debug("post #%u", seq);
	ASSERTCMP(call->rx_data_post, ==, seq);
	call->rx_data_post++;

	if (sp->hdr.flags & RXRPC_LAST_PACKET)
		set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);

	/* if we've reached an out of sequence packet then we need to drain
	 * that queue into the socket Rx queue now */
	if (call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		read_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
			rxrpc_queue_call(call);
		read_unlock(&call->state_lock);
	}

	spin_unlock(&call->lock);
	atomic_inc(&call->ackr_not_idle);
	rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
	_leave(" = 0 [posted]");
	return 0;

protocol_error:
	ret = -EBADMSG;
out:
	spin_unlock(&call->lock);
	_leave(" = %d", ret);
	return ret;

discard_and_ack:
	/* note: __rxrpc_propose_ACK is called with call->lock still held */
	_debug("discard and ACK packet %p", skb);
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
discard:
	spin_unlock(&call->lock);
	rxrpc_free_skb(skb);
	_leave(" = 0 [discarded]");
	return 0;

enqueue_and_ack:
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
enqueue_packet:
	/* hand the packet to the call processor via the deferred Rx queue */
	_net("defer skb %p", skb);
	spin_unlock(&call->lock);
	skb_queue_tail(&call->rx_queue, skb);
	atomic_inc(&call->ackr_not_idle);
	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD)
		rxrpc_queue_call(call);
	read_unlock(&call->state_lock);
	_leave(" = 0 [queued]");
	return 0;
}
|
||||
|
||||
/*
 * assume an implicit ACKALL of the transmission phase of a client socket upon
 * reception of the first reply packet
 * - only acts if the call is in CLIENT_AWAIT_REPLY; any other state is left
 *   untouched
 */
static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
{
	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		call->acks_latest = serial;

		_debug("implicit ACKALL %%%u", call->acks_latest);
		set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
		write_unlock_bh(&call->state_lock);

		/* nothing of ours needs retransmitting any more; cancel the
		 * resend timer and clear the related event/flag bits if the
		 * timer isn't currently running its handler */
		if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
			clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
			clear_bit(RXRPC_CALL_RESEND, &call->events);
			clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		}
		break;

	default:
		write_unlock_bh(&call->state_lock);
		break;
	}
}
|
||||
|
||||
/*
 * post an incoming packet to the nominated call to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 _abort_code;
	u32 serial, hi_serial, seq, abort_code;

	_enter("%p,%p", call, skb);

	ASSERT(!irqs_disabled());

#if 0 // INJECT RX ERROR
	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		static int skip = 0;
		if (++skip == 3) {
			printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
			skip = 0;
			goto free_packet;
		}
	}
#endif

	/* track the latest serial number on this connection for ACK packet
	 * information */
	serial = ntohl(sp->hdr.serial);
	hi_serial = atomic_read(&call->conn->hi_serial);
	/* lock-free monotonic max update; cmpxchg returns the current value,
	 * so the loop retries until hi_serial >= serial */
	while (serial > hi_serial)
		hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
					   serial);

	/* request ACK generation for any ACK or DATA packet that requests
	 * it */
	if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		_proto("ACK Requested on %%%u", serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_ABORT:
		_debug("abort");

		/* the abort code is the first word of the payload */
		if (skb_copy_bits(skb, 0, &_abort_code,
				  sizeof(_abort_code)) < 0)
			goto protocol_error;

		abort_code = ntohl(_abort_code);
		_proto("Rx ABORT %%%u { %x }", serial, abort_code);

		write_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_REMOTELY_ABORTED;
			call->abort_code = abort_code;
			set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		goto free_packet_unlock;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", serial);

		/* only a client (out_clientflag clear means we're the server)
		 * may legitimately receive BUSY */
		if (call->conn->out_clientflag)
			goto protocol_error;

		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_SERVER_BUSY;
			set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
			rxrpc_queue_call(call);
			/* fall through - already marked busy, just free */
		case RXRPC_CALL_SERVER_BUSY:
			goto free_packet_unlock;
		default:
			goto protocol_error_locked;
		}

	default:
		_proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial);
		goto protocol_error;

	case RXRPC_PACKET_TYPE_DATA:
		seq = ntohl(sp->hdr.seq);

		_proto("Rx DATA %%%u { #%u }", serial, seq);

		/* DATA sequence numbers start at 1; 0 is invalid */
		if (seq == 0)
			goto protocol_error;

		call->ackr_prev_seq = sp->hdr.seq;

		/* received data implicitly ACKs all of the request packets we
		 * sent when we're acting as a client */
		if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
			rxrpc_assume_implicit_ackall(call, serial);

		switch (rxrpc_fast_process_data(call, skb, seq)) {
		case 0:
			/* the callee consumed the skb */
			skb = NULL;
			goto done;

		default:
			BUG();

		/* data packet received beyond the last packet */
		case -EBADMSG:
			goto protocol_error;
		}

	case RXRPC_PACKET_TYPE_ACKALL:
	case RXRPC_PACKET_TYPE_ACK:
		/* ACK processing is done in process context */
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD) {
			skb_queue_tail(&call->rx_queue, skb);
			rxrpc_queue_call(call);
			skb = NULL;
		}
		read_unlock_bh(&call->state_lock);
		goto free_packet;
	}

protocol_error:
	_debug("protocol error");
	write_lock_bh(&call->state_lock);
protocol_error_locked:
	/* entered with state_lock write-held */
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
free_packet_unlock:
	write_unlock_bh(&call->state_lock);
free_packet:
	rxrpc_free_skb(skb);
done:
	_leave("");
}
|
||||
|
||||
/*
 * split up a jumbo data packet
 * - each subpacket except the last is RXRPC_JUMBO_DATALEN bytes, followed by
 *   a jumbo header describing the next subpacket
 * - subpackets are peeled off the front as clones and fed to
 *   rxrpc_fast_process_packet() one at a time, with seq/serial incremented
 *   for each
 */
static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
				       struct sk_buff *jumbo)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *part;

	_enter(",{%u,%u}", jumbo->data_len, jumbo->len);

	sp = rxrpc_skb(jumbo);

	do {
		sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;

		/* make a clone to represent the first subpacket in what's left
		 * of the jumbo packet */
		part = skb_clone(jumbo, GFP_ATOMIC);
		if (!part) {
			/* simply ditch the tail in the event of ENOMEM */
			pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
			break;
		}
		rxrpc_new_skb(part);

		/* the clone carries only the first subpacket's worth of data */
		pskb_trim(part, RXRPC_JUMBO_DATALEN);

		/* advance the remainder past the subpacket we just cloned */
		if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
			goto protocol_error;

		/* read and strip the jumbo header describing the next part */
		if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;
		if (!pskb_pull(jumbo, sizeof(jhdr)))
			BUG();

		/* each subpacket consumes consecutive seq and serial numbers */
		sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1);
		sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1);
		sp->hdr.flags = jhdr.flags;
		sp->hdr._rsvd = jhdr._rsvd;

		_proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1);

		rxrpc_fast_process_packet(call, part);
		part = NULL;

	} while (sp->hdr.flags & RXRPC_JUMBO_PACKET);

	/* whatever remains is the final (or only surviving) subpacket */
	rxrpc_fast_process_packet(call, jumbo);
	_leave("");
	return;

protocol_error:
	_debug("protocol error");
	rxrpc_free_skb(part);
	rxrpc_free_skb(jumbo);
	write_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock_bh(&call->state_lock);
	_leave("");
}
|
||||
|
||||
/*
 * post an incoming packet to the appropriate call/socket to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
				      struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;

	_enter("%p,%p", call, skb);

	sp = rxrpc_skb(skb);

	_debug("extant call [%d]", call->state);

	read_lock(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_LOCALLY_ABORTED:
		/* try to get the (re)send of the abort going; if the ABORT
		 * event was already pending, treat the call as dead below */
		if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) {
			rxrpc_queue_call(call);
			goto free_unlock;
		}
		/* fall through */
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_NETWORK_ERROR:
	case RXRPC_CALL_DEAD:
		goto dead_call;
	case RXRPC_CALL_COMPLETE:
	case RXRPC_CALL_CLIENT_FINAL_ACK:
		/* complete server call */
		if (call->conn->in_clientflag)
			goto dead_call;
		/* resend last packet of a completed call */
		_debug("final ack again");
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
		rxrpc_queue_call(call);
		goto free_unlock;
	default:
		break;
	}

	read_unlock(&call->state_lock);
	rxrpc_get_call(call);

	/* jumbo DATA packets get split into subpackets first */
	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_process_jumbo_packet(call, skb);
	else
		rxrpc_fast_process_packet(call, skb);

	rxrpc_put_call(call);
	goto done;

dead_call:
	/* bounce anything but an ABORT back at the sender */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		skb->priority = RX_CALL_DEAD;
		rxrpc_reject_packet(call->conn->trans->local, skb);
		goto unlock;
	}
free_unlock:
	rxrpc_free_skb(skb);
unlock:
	read_unlock(&call->state_lock);
done:
	_leave("");
}
|
||||
|
||||
/*
 * post connection-level events to the connection
 * - this includes challenges, responses and some aborts
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	/* pin the connection for the queued work item; presumably the
	 * connection event processor drops this ref — confirm in the
	 * conn-event handling code */
	atomic_inc(&conn->usage);
	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}
|
||||
|
||||
static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
|
||||
struct sk_buff *skb,
|
||||
struct rxrpc_skb_priv *sp)
|
||||
{
|
||||
struct rxrpc_peer *peer;
|
||||
struct rxrpc_transport *trans;
|
||||
struct rxrpc_connection *conn;
|
||||
|
||||
peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr,
|
||||
udp_hdr(skb)->source);
|
||||
if (IS_ERR(peer))
|
||||
goto cant_find_conn;
|
||||
|
||||
trans = rxrpc_find_transport(local, peer);
|
||||
rxrpc_put_peer(peer);
|
||||
if (!trans)
|
||||
goto cant_find_conn;
|
||||
|
||||
conn = rxrpc_find_connection(trans, &sp->hdr);
|
||||
rxrpc_put_transport(trans);
|
||||
if (!conn)
|
||||
goto cant_find_conn;
|
||||
|
||||
return conn;
|
||||
cant_find_conn:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 * - this is the UDP socket's data-ready callback: it pulls one datagram,
 *   validates the RxRPC header and routes the packet to a connection, an
 *   existing call, or the new-call acceptor; anything unroutable is
 *   rejected or freed
 */
void rxrpc_data_ready(struct sock *sk)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local;
	struct sk_buff *skb;
	int ret;

	_enter("%p", sk);

	ASSERT(!irqs_disabled());

	/* pin the local endpoint attached to this UDP socket, unless it's
	 * already on its way out (usage == 0) */
	read_lock_bh(&rxrpc_local_lock);
	local = sk->sk_user_data;
	if (local && atomic_read(&local->usage) > 0)
		rxrpc_get_local(local);
	else
		local = NULL;
	read_unlock_bh(&rxrpc_local_lock);
	if (!local) {
		_leave(" [local dead]");
		return;
	}

	skb = skb_recv_datagram(sk, 0, 1, &ret);
	if (!skb) {
		rxrpc_put_local(local);
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	rxrpc_new_skb(skb);

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb);
		rxrpc_put_local(local);
		UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* the socket buffer we have is owned by UDP, with UDP's data all over
	 * it, but we really want our own */
	skb_orphan(skb);
	sp = rxrpc_skb(skb);
	memset(sp, 0, sizeof(*sp));

	_net("Rx UDP packet from %08x:%04hu",
	     ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr,
			  sizeof(sp->hdr)) < 0)
		goto bad_message;
	/* strip the UDP and RxRPC headers so the payload is at the front */
	if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr)))
		BUG();

	_net("Rx RxRPC %s ep=%x call=%x:%x",
	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
	     ntohl(sp->hdr.epoch),
	     ntohl(sp->hdr.cid),
	     ntohl(sp->hdr.callNumber));

	if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) {
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	/* DATA packets must carry both a call number and a sequence number */
	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
		goto bad_message;

	if (sp->hdr.callNumber == 0) {
		/* This is a connection-level packet. These should be
		 * fairly rare, so the extra overhead of looking them up the
		 * old-fashioned way doesn't really hurt */
		struct rxrpc_connection *conn;

		conn = rxrpc_conn_from_local(local, skb, sp);
		if (!conn)
			goto cant_route_call;

		_debug("CONN %p {%d}", conn, conn->debug_id);
		rxrpc_post_packet_to_conn(conn, skb);
		rxrpc_put_connection(conn);
	} else {
		struct rxrpc_call *call;
		u8 in_clientflag = 0;

		if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
			in_clientflag = RXRPC_CLIENT_INITIATED;
		call = rxrpc_find_call_hash(in_clientflag, sp->hdr.cid,
					    sp->hdr.callNumber, sp->hdr.epoch,
					    sp->hdr.serviceId, local, AF_INET,
					    (u8 *)&ip_hdr(skb)->saddr);
		if (call)
			rxrpc_post_packet_to_call(call, skb);
		else
			goto cant_route_call;
	}
	rxrpc_put_local(local);
	return;

cant_route_call:
	_debug("can't route call");
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
	    sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		/* the first packet of a client call may be starting a new
		 * incoming call - hand it to the acceptor */
		if (sp->hdr.seq == cpu_to_be32(1)) {
			_debug("first packet");
			skb_queue_tail(&local->accept_queue, skb);
			rxrpc_queue_work(&local->acceptor);
			rxrpc_put_local(local);
			_leave(" [incoming]");
			return;
		}
		skb->priority = RX_INVALID_OPERATION;
	} else {
		skb->priority = RX_CALL_DEAD;
	}

	/* never reject an ABORT - that could loop between two peers */
	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		_debug("reject type %d",sp->hdr.type);
		rxrpc_reject_packet(local, skb);
	}
	rxrpc_put_local(local);
	_leave(" [no call]");
	return;

bad_message:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	rxrpc_put_local(local);
	_leave(" [badmsg]");
}
|
812
net/rxrpc/ar-internal.h
Normal file
812
net/rxrpc/ar-internal.h
Normal file
|
@ -0,0 +1,812 @@
|
|||
/* AF_RXRPC internal definitions
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <rxrpc/packet.h>

/* debug aid: check an atomic_t hasn't been poisoned by the slab allocator
 * (compiled out by default) */
#if 0
#define CHECK_SLAB_OKAY(X) \
	BUG_ON(atomic_read((X)) >> (sizeof(atomic_t) - 2) == \
	       (POISON_FREE << 8 | POISON_FREE))
#else
#define CHECK_SLAB_OKAY(X) do {} while(0)
#endif

/* fcrypt block size and an 8-byte-aligned block for crypto operations */
#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

/* helpers for punting work onto the shared rxrpc workqueue */
#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

/* kick the processor work item of a call or connection */
#define rxrpc_queue_call(CALL)	rxrpc_queue_work(&(CALL)->processor)
#define rxrpc_queue_conn(CONN)	rxrpc_queue_work(&(CONN)->processor)
|
||||
/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNCONNECTED = 0,
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_CLIENT_CONNECTED,		/* client is connected */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_CLOSE,			/* socket is being closed */
};
|
||||
|
||||
/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_interceptor_t	interceptor;	/* kernel service Rx interceptor function */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_transport	*trans;		/* transport handler */
	struct rxrpc_conn_bundle *bundle;	/* virtual connection bundle */
	struct rxrpc_connection	*conn;		/* exclusive virtual connection */
	struct list_head	listen_link;	/* link in the local endpoint's listen list */
	struct list_head	secureq;	/* calls awaiting connection security clearance */
	struct list_head	acceptq;	/* calls awaiting acceptance */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* outstanding calls on this socket */
	unsigned long		flags;
#define RXRPC_SOCK_EXCLUSIVE_CONN	1	/* exclusive connection for a client socket */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	struct sockaddr_rxrpc	srx;		/* local address */
	sa_family_t		proto;		/* protocol created with */
	__be16			service_id;	/* service ID of local/remote service */
};

/* convert a struct sock pointer to its enclosing rxrpc socket */
#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
|
||||
|
||||
/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	struct rxrpc_call	*call;		/* call with which associated */
	unsigned long		resend_at;	/* time in jiffies at which to resend */
	union {
		unsigned int	offset;		/* offset into buffer of next read */
		int		remain;		/* amount of space remaining for next write */
		u32		error;		/* network error code */
		bool		need_resend;	/* T if needs resending */
	};

	struct rxrpc_header	hdr;		/* RxRPC packet header from this packet */
};

/* access the RxRPC private data stashed in an sk_buff's control block */
#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
|
||||
|
||||
/* sendmsg() control commands */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};
|
||||
|
||||
/*
 * RxRPC security module interface
 * - ops table implemented by each security class (e.g. rxkad)
 */
struct rxrpc_security {
	struct module		*owner;		/* providing module */
	struct list_head	link;		/* link in master list */
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *);

	/* prime a connection's packet security */
	void (*prime_packet_security)(struct rxrpc_connection *);

	/* impose security on a packet */
	int (*secure_packet)(const struct rxrpc_call *,
			     struct sk_buff *,
			     size_t,
			     void *);

	/* verify the security on a received packet */
	int (*verify_packet)(const struct rxrpc_call *, struct sk_buff *,
			     u32 *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *,
				    u32 *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *,
			       u32 *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};
|
||||
|
||||
/*
 * RxRPC local transport endpoint definition
 * - matched by local port, address and protocol type
 */
struct rxrpc_local {
	struct socket		*socket;	/* my UDP socket */
	struct work_struct	destroyer;	/* endpoint destroyer */
	struct work_struct	acceptor;	/* incoming call processor */
	struct work_struct	rejecter;	/* packet reject writer */
	struct list_head	services;	/* services listening on this endpoint */
	struct list_head	link;		/* link in endpoint list */
	struct rw_semaphore	defrag_sem;	/* control re-enablement of IP DF bit */
	struct sk_buff_head	accept_queue;	/* incoming calls awaiting acceptance */
	struct sk_buff_head	reject_queue;	/* packets awaiting rejection */
	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	atomic_t		usage;		/* reference count */
	int			debug_id;	/* debug ID for printks */
	volatile char		error_rcvd;	/* T if received ICMP error outstanding */
	struct sockaddr_rxrpc	srx;		/* local address */
};
|
||||
|
||||
/*
 * RxRPC remote transport endpoint definition
 * - matched by remote port, address and protocol type
 * - holds the connection ID counter for connections between the two endpoints
 */
struct rxrpc_peer {
	struct work_struct	destroyer;	/* peer destroyer */
	struct list_head	link;		/* link in master peer list */
	struct list_head	error_targets;	/* targets for net error distribution */
	spinlock_t		lock;		/* access lock */
	atomic_t		usage;		/* reference count */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	int			net_error;	/* network error distributed */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	suseconds_t		rtt;		/* current RTT estimate (in uS) */
	unsigned int		rtt_point;	/* next entry at which to insert */
	unsigned int		rtt_usage;	/* amount of cache actually used */
	suseconds_t		rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
};
|
||||
|
||||
/*
 * RxRPC point-to-point transport / connection manager definition
 * - handles a bundle of connections between two endpoints
 * - matched by { local, peer }
 */
struct rxrpc_transport {
	struct rxrpc_local	*local;		/* local transport endpoint */
	struct rxrpc_peer	*peer;		/* remote transport endpoint */
	struct work_struct	error_handler;	/* network error distributor */
	struct rb_root		bundles;	/* client connection bundles on this transport */
	struct rb_root		client_conns;	/* client connections on this transport */
	struct rb_root		server_conns;	/* server connections on this transport */
	struct list_head	link;		/* link in master session list */
	struct sk_buff_head	error_queue;	/* error packets awaiting processing */
	time_t			put_time;	/* time at which to reap */
	spinlock_t		client_lock;	/* client connection allocation lock */
	rwlock_t		conn_lock;	/* lock for active/dead connections */
	atomic_t		usage;		/* reference count */
	int			debug_id;	/* debug ID for printks */
	unsigned int		conn_idcounter;	/* connection ID counter (client) */
};
|
||||
|
||||
/*
 * RxRPC client connection bundle
 * - matched by { transport, service_id, key }
 */
struct rxrpc_conn_bundle {
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	unused_conns;	/* unused connections in this bundle */
	struct list_head	avail_conns;	/* available connections in this bundle */
	struct list_head	busy_conns;	/* busy connections in this bundle */
	struct key		*key;		/* security for this bundle */
	wait_queue_head_t	chanwait;	/* wait for channel to become available */
	atomic_t		usage;		/* reference count */
	int			debug_id;	/* debug ID for printks */
	unsigned short		num_conns;	/* number of connections in this bundle */
	__be16			service_id;	/* service ID */
	u8			security_ix;	/* security type */
};
|
||||
|
||||
/*
 * RxRPC connection definition
 * - matched by { transport, service_id, conn_id, direction, key }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_transport	*trans;		/* transport session */
	struct rxrpc_conn_bundle *bundle;	/* connection bundle (client) */
	struct work_struct	processor;	/* connection event processor */
	struct rb_node		node;		/* node in transport's lookup tree */
	struct list_head	link;		/* link in master connection list */
	struct list_head	bundle_link;	/* link in bundle */
	struct rb_root		calls;		/* calls on this connection */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */
	struct rxrpc_call	*channels[RXRPC_MAXCALLS]; /* channels (active calls) */
	struct rxrpc_security	*security;	/* applied security module */
	struct key		*key;		/* security for this connection (client) */
	struct key		*server_key;	/* security for this service */
	struct crypto_blkcipher	*cipher;	/* encryption handle */
	struct rxrpc_crypt	csum_iv;	/* packet checksum base */
	unsigned long		events;
#define RXRPC_CONN_CHALLENGE	0		/* send challenge packet */
	time_t			put_time;	/* time at which to reap */
	rwlock_t		lock;		/* access lock */
	spinlock_t		state_lock;	/* state-change lock */
	atomic_t		usage;		/* reference count */
	u32			real_conn_id;	/* connection ID (host-endian) */
	enum {					/* current state of connection */
		RXRPC_CONN_UNUSED,		/* - connection not yet attempted */
		RXRPC_CONN_CLIENT,		/* - client connection */
		RXRPC_CONN_SERVER_UNSECURED,	/* - server unsecured connection */
		RXRPC_CONN_SERVER_CHALLENGING,	/* - server challenging for security */
		RXRPC_CONN_SERVER,		/* - server secured connection */
		RXRPC_CONN_REMOTELY_ABORTED,	/* - conn aborted by peer */
		RXRPC_CONN_LOCALLY_ABORTED,	/* - conn aborted locally */
		RXRPC_CONN_NETWORK_ERROR,	/* - conn terminated by network error */
	} state;
	int			error;		/* error code for local abort */
	int			debug_id;	/* debug ID for printks */
	unsigned int		call_counter;	/* call ID counter */
	atomic_t		serial;		/* packet serial number counter */
	atomic_t		hi_serial;	/* highest serial number received */
	u8			avail_calls;	/* number of calls available */
	u8			size_align;	/* data size alignment (for security) */
	u8			header_size;	/* rxrpc + security header size */
	u8			security_size;	/* security header size */
	u32			security_level;	/* security level negotiated */
	u32			security_nonce;	/* response re-use preventer */

	/* the following are all in net order */
	__be32			epoch;		/* epoch of this connection */
	__be32			cid;		/* connection ID */
	__be16			service_id;	/* service ID */
	u8			security_ix;	/* security type */
	u8			in_clientflag;	/* RXRPC_CLIENT_INITIATED if we are server */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
};
|
||||
|
||||
/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_sock	*socket;	/* socket responsible */
	struct timer_list	lifetimer;	/* lifetime remaining on call */
	struct timer_list	deadspan;	/* reap timer for re-ACK'ing, etc */
	struct timer_list	ack_timer;	/* ACK generation timer */
	struct timer_list	resend_timer;	/* Tx resend timer */
	struct work_struct	destroyer;	/* call destroyer */
	struct work_struct	processor;	/* packet processor and ACK generator */
	struct list_head	link;		/* link in master call list */
	struct list_head	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* calls awaiting acceptance */
	struct rb_node		sock_node;	/* node in socket call tree */
	struct rb_node		conn_node;	/* node in connection call tree */
	struct sk_buff_head	rx_queue;	/* received packets */
	struct sk_buff_head	rx_oos_queue;	/* packets received out of sequence */
	struct sk_buff		*tx_pending;	/* Tx socket buffer being filled */
	wait_queue_head_t	tx_waitq;	/* wait for Tx window space to become available */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		creation_jif;	/* time of call creation */
	unsigned long		flags;		/* bit flags, RXRPC_CALL_* below */
#define RXRPC_CALL_RELEASED	0	/* call has been released - no more message to userspace */
#define RXRPC_CALL_TERMINAL_MSG	1	/* call has given the socket its final message */
#define RXRPC_CALL_RCVD_LAST	2	/* all packets received */
#define RXRPC_CALL_RUN_RTIMER	3	/* Tx resend timer started */
#define RXRPC_CALL_TX_SOFT_ACK	4	/* sent some soft ACKs */
#define RXRPC_CALL_PROC_BUSY	5	/* the processor is busy */
#define RXRPC_CALL_INIT_ACCEPT	6	/* acceptance was initiated */
#define RXRPC_CALL_HAS_USERID	7	/* has a user ID attached */
#define RXRPC_CALL_EXPECT_OOS	8	/* expect out of sequence packets */
	unsigned long		events;		/* pending-event bits for the processor work item */
#define RXRPC_CALL_RCVD_ACKALL	0	/* ACKALL or reply received */
#define RXRPC_CALL_RCVD_BUSY	1	/* busy packet received */
#define RXRPC_CALL_RCVD_ABORT	2	/* abort packet received */
#define RXRPC_CALL_RCVD_ERROR	3	/* network error received */
#define RXRPC_CALL_ACK_FINAL	4	/* need to generate final ACK (and release call) */
#define RXRPC_CALL_ACK		5	/* need to generate ACK */
#define RXRPC_CALL_REJECT_BUSY	6	/* need to generate busy message */
#define RXRPC_CALL_ABORT	7	/* need to generate abort */
#define RXRPC_CALL_CONN_ABORT	8	/* local connection abort generated */
#define RXRPC_CALL_RESEND_TIMER	9	/* Tx resend timer expired */
#define RXRPC_CALL_RESEND	10	/* Tx resend required */
#define RXRPC_CALL_DRAIN_RX_OOS	11	/* drain the Rx out of sequence queue */
#define RXRPC_CALL_LIFE_TIMER	12	/* call's lifetimer ran out */
#define RXRPC_CALL_ACCEPTED	13	/* incoming call accepted by userspace app */
#define RXRPC_CALL_SECURED	14	/* incoming call's connection is now secure */
#define RXRPC_CALL_POST_ACCEPT	15	/* need to post an "accept?" message to the app */
#define RXRPC_CALL_RELEASE	16	/* need to release the call's resources */

	spinlock_t		lock;		/* general access lock */
	rwlock_t		state_lock;	/* lock for state transition */
	atomic_t		usage;		/* reference count */
	atomic_t		sequence;	/* Tx data packet sequence counter */
	u32			abort_code;	/* local/remote abort code */
	enum {					/* current state of call */
		RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
		RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
		RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
		RXRPC_CALL_CLIENT_FINAL_ACK,	/* - client sending final ACK phase */
		RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
		RXRPC_CALL_SERVER_ACCEPTING,	/* - server accepting request */
		RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
		RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
		RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
		RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
		RXRPC_CALL_COMPLETE,		/* - call completed */
		RXRPC_CALL_SERVER_BUSY,		/* - call rejected by busy server */
		RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
		RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
		RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
		RXRPC_CALL_DEAD,		/* - call is dead */
	} state;
	int			debug_id;	/* debug ID for printks */
	u8			channel;	/* connection channel occupied by this call */

	/* transmission-phase ACK management */
	u8			acks_head;	/* offset into window of first entry */
	u8			acks_tail;	/* offset into window of last entry */
	u8			acks_winsz;	/* size of un-ACK'd window */
	u8			acks_unacked;	/* lowest unacked packet in last ACK received */
	int			acks_latest;	/* serial number of latest ACK received */
	rxrpc_seq_t		acks_hard;	/* highest definitively ACK'd msg seq */
	unsigned long		*acks_window;	/* sent packet window
						 * - elements are pointers with LSB set if ACK'd
						 */

	/* receive-phase ACK management */
	rxrpc_seq_t		rx_data_expect;	/* next data seq ID expected to be received */
	rxrpc_seq_t		rx_data_post;	/* next data seq ID expected to be posted */
	rxrpc_seq_t		rx_data_recv;	/* last data seq ID encountered by recvmsg */
	rxrpc_seq_t		rx_data_eaten;	/* last data seq ID consumed by recvmsg */
	rxrpc_seq_t		rx_first_oos;	/* first packet in rx_oos_queue (or 0) */
	rxrpc_seq_t		ackr_win_top;	/* top of ACK window (rx_data_eaten is bottom) */
	rxrpc_seq_net_t		ackr_prev_seq;	/* previous sequence number received */
	u8			ackr_reason;	/* reason to ACK */
	__be32			ackr_serial;	/* serial of packet being ACK'd */
	atomic_t		ackr_not_idle;	/* number of packets in Rx queue */

	/* received packet records, 1 bit per record */
#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
	unsigned long		ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];

	struct hlist_node	hash_node;	/* link in the call hash, keyed by hash_key */
	unsigned long		hash_key;	/* Full hash key */
	u8			in_clientflag;	/* Copy of conn->in_clientflag for hashing */
	struct rxrpc_local	*local;		/* Local endpoint. Used for hashing. */
	sa_family_t		proto;		/* Frame protocol */
	/* the following should all be in net order */
	__be32			cid;		/* connection ID + channel index */
	__be32			call_id;	/* call ID on connection */
	__be32			epoch;		/* epoch of this connection */
	__be16			service_id;	/* service ID */
	union {					/* Peer IP address for hashing */
		__be32	ipv4_addr;
		__u8	ipv6_addr[16];		/* Anticipates eventual IPv6 support */
	} peer_ip;
};
|
||||
|
||||
/*
 * locally abort an RxRPC call
 * - records the abort code, moves the call to LOCALLY_ABORTED and raises the
 *   ABORT event for the call processor; does nothing if the call has already
 *   reached a terminal state (state >= RXRPC_CALL_COMPLETE)
 */
static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		call->abort_code = abort_code;
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		/* the processor work item transmits the ABORT packet when it
		 * sees this event bit */
		set_bit(RXRPC_CALL_ABORT, &call->events);
	}
	write_unlock_bh(&call->state_lock);
}
|
||||
|
||||
/*
|
||||
* af_rxrpc.c
|
||||
*/
|
||||
extern atomic_t rxrpc_n_skbs;
|
||||
extern __be32 rxrpc_epoch;
|
||||
extern atomic_t rxrpc_debug_id;
|
||||
extern struct workqueue_struct *rxrpc_workqueue;
|
||||
|
||||
/*
|
||||
* ar-accept.c
|
||||
*/
|
||||
void rxrpc_accept_incoming_calls(struct work_struct *);
|
||||
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
|
||||
int rxrpc_reject_call(struct rxrpc_sock *);
|
||||
|
||||
/*
|
||||
* ar-ack.c
|
||||
*/
|
||||
extern unsigned rxrpc_requested_ack_delay;
|
||||
extern unsigned rxrpc_soft_ack_delay;
|
||||
extern unsigned rxrpc_idle_ack_delay;
|
||||
extern unsigned rxrpc_rx_window_size;
|
||||
extern unsigned rxrpc_rx_mtu;
|
||||
extern unsigned rxrpc_rx_jumbo_max;
|
||||
|
||||
void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
|
||||
void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
|
||||
void rxrpc_process_call(struct work_struct *);
|
||||
|
||||
/*
|
||||
* ar-call.c
|
||||
*/
|
||||
extern unsigned rxrpc_max_call_lifetime;
|
||||
extern unsigned rxrpc_dead_call_expiry;
|
||||
extern struct kmem_cache *rxrpc_call_jar;
|
||||
extern struct list_head rxrpc_calls;
|
||||
extern rwlock_t rxrpc_call_lock;
|
||||
|
||||
struct rxrpc_call *rxrpc_find_call_hash(u8, __be32, __be32, __be32,
|
||||
__be16, void *, sa_family_t, const u8 *);
|
||||
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
|
||||
struct rxrpc_transport *,
|
||||
struct rxrpc_conn_bundle *,
|
||||
unsigned long, int, gfp_t);
|
||||
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
|
||||
struct rxrpc_connection *,
|
||||
struct rxrpc_header *, gfp_t);
|
||||
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
|
||||
void rxrpc_release_call(struct rxrpc_call *);
|
||||
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
|
||||
void __rxrpc_put_call(struct rxrpc_call *);
|
||||
void __exit rxrpc_destroy_all_calls(void);
|
||||
|
||||
/*
|
||||
* ar-connection.c
|
||||
*/
|
||||
extern unsigned rxrpc_connection_expiry;
|
||||
extern struct list_head rxrpc_connections;
|
||||
extern rwlock_t rxrpc_connection_lock;
|
||||
|
||||
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
|
||||
struct rxrpc_transport *,
|
||||
struct key *, __be16, gfp_t);
|
||||
void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
|
||||
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
|
||||
struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
|
||||
void rxrpc_put_connection(struct rxrpc_connection *);
|
||||
void __exit rxrpc_destroy_all_connections(void);
|
||||
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
|
||||
struct rxrpc_header *);
|
||||
extern struct rxrpc_connection *
|
||||
rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
|
||||
gfp_t);
|
||||
|
||||
/*
|
||||
* ar-connevent.c
|
||||
*/
|
||||
void rxrpc_process_connection(struct work_struct *);
|
||||
void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
|
||||
void rxrpc_reject_packets(struct work_struct *);
|
||||
|
||||
/*
|
||||
* ar-error.c
|
||||
*/
|
||||
void rxrpc_UDP_error_report(struct sock *);
|
||||
void rxrpc_UDP_error_handler(struct work_struct *);
|
||||
|
||||
/*
|
||||
* ar-input.c
|
||||
*/
|
||||
extern const char *rxrpc_pkts[];
|
||||
|
||||
void rxrpc_data_ready(struct sock *);
|
||||
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
|
||||
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
|
||||
|
||||
/*
|
||||
* ar-local.c
|
||||
*/
|
||||
extern rwlock_t rxrpc_local_lock;
|
||||
|
||||
struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
|
||||
void rxrpc_put_local(struct rxrpc_local *);
|
||||
void __exit rxrpc_destroy_all_locals(void);
|
||||
|
||||
/*
|
||||
* ar-key.c
|
||||
*/
|
||||
extern struct key_type key_type_rxrpc;
|
||||
extern struct key_type key_type_rxrpc_s;
|
||||
|
||||
int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
|
||||
int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
|
||||
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
|
||||
u32);
|
||||
|
||||
/*
|
||||
* ar-output.c
|
||||
*/
|
||||
extern unsigned rxrpc_resend_timeout;
|
||||
|
||||
int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
|
||||
int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
|
||||
struct rxrpc_transport *, struct msghdr *, size_t);
|
||||
int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *,
|
||||
size_t);
|
||||
|
||||
/*
|
||||
* ar-peer.c
|
||||
*/
|
||||
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
|
||||
void rxrpc_put_peer(struct rxrpc_peer *);
|
||||
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
|
||||
void __exit rxrpc_destroy_all_peers(void);
|
||||
|
||||
/*
|
||||
* ar-proc.c
|
||||
*/
|
||||
extern const char *const rxrpc_call_states[];
|
||||
extern const struct file_operations rxrpc_call_seq_fops;
|
||||
extern const struct file_operations rxrpc_connection_seq_fops;
|
||||
|
||||
/*
|
||||
* ar-recvmsg.c
|
||||
*/
|
||||
void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
|
||||
int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
|
||||
int);
|
||||
|
||||
/*
|
||||
* ar-security.c
|
||||
*/
|
||||
int rxrpc_register_security(struct rxrpc_security *);
|
||||
void rxrpc_unregister_security(struct rxrpc_security *);
|
||||
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
|
||||
int rxrpc_init_server_conn_security(struct rxrpc_connection *);
|
||||
int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
|
||||
void *);
|
||||
int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
|
||||
void rxrpc_clear_conn_security(struct rxrpc_connection *);
|
||||
|
||||
/*
|
||||
* ar-skbuff.c
|
||||
*/
|
||||
void rxrpc_packet_destructor(struct sk_buff *);
|
||||
|
||||
/*
|
||||
* ar-transport.c
|
||||
*/
|
||||
extern unsigned rxrpc_transport_expiry;
|
||||
|
||||
struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
|
||||
struct rxrpc_peer *, gfp_t);
|
||||
void rxrpc_put_transport(struct rxrpc_transport *);
|
||||
void __exit rxrpc_destroy_all_transports(void);
|
||||
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
|
||||
struct rxrpc_peer *);
|
||||
|
||||
/*
|
||||
* sysctl.c
|
||||
*/
|
||||
#ifdef CONFIG_SYSCTL
|
||||
extern int __init rxrpc_sysctl_init(void);
|
||||
extern void rxrpc_sysctl_exit(void);
|
||||
#else
|
||||
static inline int __init rxrpc_sysctl_init(void) { return 0; }
|
||||
static inline void rxrpc_sysctl_exit(void) {}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* debug tracing
|
||||
*/
|
||||
extern unsigned int rxrpc_debug;
|
||||
|
||||
#define dbgprintk(FMT,...) \
|
||||
printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
|
||||
|
||||
#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
|
||||
#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
|
||||
#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
|
||||
#define kproto(FMT,...) dbgprintk("### "FMT ,##__VA_ARGS__)
|
||||
#define knet(FMT,...) dbgprintk("@@@ "FMT ,##__VA_ARGS__)
|
||||
|
||||
|
||||
#if defined(__KDEBUG)
|
||||
#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
|
||||
#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
|
||||
#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
|
||||
#define _proto(FMT,...) kproto(FMT,##__VA_ARGS__)
|
||||
#define _net(FMT,...) knet(FMT,##__VA_ARGS__)
|
||||
|
||||
#elif defined(CONFIG_AF_RXRPC_DEBUG)
|
||||
#define RXRPC_DEBUG_KENTER 0x01
|
||||
#define RXRPC_DEBUG_KLEAVE 0x02
|
||||
#define RXRPC_DEBUG_KDEBUG 0x04
|
||||
#define RXRPC_DEBUG_KPROTO 0x08
|
||||
#define RXRPC_DEBUG_KNET 0x10
|
||||
|
||||
#define _enter(FMT,...) \
|
||||
do { \
|
||||
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \
|
||||
kenter(FMT,##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define _leave(FMT,...) \
|
||||
do { \
|
||||
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \
|
||||
kleave(FMT,##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define _debug(FMT,...) \
|
||||
do { \
|
||||
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \
|
||||
kdebug(FMT,##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define _proto(FMT,...) \
|
||||
do { \
|
||||
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KPROTO)) \
|
||||
kproto(FMT,##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define _net(FMT,...) \
|
||||
do { \
|
||||
if (unlikely(rxrpc_debug & RXRPC_DEBUG_KNET)) \
|
||||
knet(FMT,##__VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#else
|
||||
#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
|
||||
#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
|
||||
#define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
|
||||
#define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__)
|
||||
#define _net(FMT,...) no_printk("@@@ "FMT ,##__VA_ARGS__)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* debug assertion checking
|
||||
*/
|
||||
#if 1 // defined(__KDEBUGALL)
|
||||
|
||||
#define ASSERT(X) \
|
||||
do { \
|
||||
if (unlikely(!(X))) { \
|
||||
printk(KERN_ERR "\n"); \
|
||||
printk(KERN_ERR "RxRPC: Assertion failed\n"); \
|
||||
BUG(); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define ASSERTCMP(X, OP, Y) \
|
||||
do { \
|
||||
if (unlikely(!((X) OP (Y)))) { \
|
||||
printk(KERN_ERR "\n"); \
|
||||
printk(KERN_ERR "RxRPC: Assertion failed\n"); \
|
||||
printk(KERN_ERR "%lu " #OP " %lu is false\n", \
|
||||
(unsigned long)(X), (unsigned long)(Y)); \
|
||||
printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
|
||||
(unsigned long)(X), (unsigned long)(Y)); \
|
||||
BUG(); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define ASSERTIF(C, X) \
|
||||
do { \
|
||||
if (unlikely((C) && !(X))) { \
|
||||
printk(KERN_ERR "\n"); \
|
||||
printk(KERN_ERR "RxRPC: Assertion failed\n"); \
|
||||
BUG(); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define ASSERTIFCMP(C, X, OP, Y) \
|
||||
do { \
|
||||
if (unlikely((C) && !((X) OP (Y)))) { \
|
||||
printk(KERN_ERR "\n"); \
|
||||
printk(KERN_ERR "RxRPC: Assertion failed\n"); \
|
||||
printk(KERN_ERR "%lu " #OP " %lu is false\n", \
|
||||
(unsigned long)(X), (unsigned long)(Y)); \
|
||||
printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
|
||||
(unsigned long)(X), (unsigned long)(Y)); \
|
||||
BUG(); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#else
|
||||
|
||||
#define ASSERT(X) \
|
||||
do { \
|
||||
} while(0)
|
||||
|
||||
#define ASSERTCMP(X, OP, Y) \
|
||||
do { \
|
||||
} while(0)
|
||||
|
||||
#define ASSERTIF(C, X) \
|
||||
do { \
|
||||
} while(0)
|
||||
|
||||
#define ASSERTIFCMP(C, X, OP, Y) \
|
||||
do { \
|
||||
} while(0)
|
||||
|
||||
#endif /* __KDEBUGALL */
|
||||
|
||||
/*
 * socket buffer accounting / leak finding
 */

/*
 * note the allocation/adoption of a socket buffer
 * - leak-tracing hook; the counting is currently compiled out
 */
static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
{
	//_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_inc(&rxrpc_n_skbs);
}

#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)
|
||||
|
||||
/*
 * note the destruction of a socket buffer that was freed elsewhere
 * - leak-tracing hook; the counting is currently compiled out
 */
static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
{
	//_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
	//atomic_dec(&rxrpc_n_skbs);
}

#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)
|
||||
|
||||
/*
 * free a socket buffer (NULL is tolerated)
 * - sanity-checks the skb's refcount slab object before freeing
 */
static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
{
	if (skb) {
		CHECK_SLAB_OKAY(&skb->users);
		//_net("free skb %p %s [%d]",
		//     skb, fn, atomic_read(&rxrpc_n_skbs));
		//atomic_dec(&rxrpc_n_skbs);
		kfree_skb(skb);
	}
}

#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)
|
||||
|
||||
static inline void rxrpc_purge_queue(struct sk_buff_head *list)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
while ((skb = skb_dequeue((list))) != NULL)
|
||||
rxrpc_free_skb(skb);
|
||||
}
|
||||
|
||||
/*
 * get an extra reference on a local endpoint
 * - logs the calling function if the count was observed going 0 -> 1
 */
static inline void __rxrpc_get_local(struct rxrpc_local *local, const char *f)
{
	CHECK_SLAB_OKAY(&local->usage);
	/* NOTE(review): a 0->1 transition here implies the caller held no
	 * reference (use-after-put); confirm whether this should be fatal
	 * like rxrpc_get_call's BUG() rather than just a printk */
	if (atomic_inc_return(&local->usage) == 1)
		printk("resurrected (%s)\n", f);
}

#define rxrpc_get_local(LOCAL) __rxrpc_get_local((LOCAL), __func__)
|
||||
|
||||
#define rxrpc_get_call(CALL) \
|
||||
do { \
|
||||
CHECK_SLAB_OKAY(&(CALL)->usage); \
|
||||
if (atomic_inc_return(&(CALL)->usage) == 1) \
|
||||
BUG(); \
|
||||
} while(0)
|
||||
|
||||
#define rxrpc_put_call(CALL) \
|
||||
do { \
|
||||
__rxrpc_put_call(CALL); \
|
||||
} while(0)
|
1247
net/rxrpc/ar-key.c
Normal file
1247
net/rxrpc/ar-key.c
Normal file
File diff suppressed because it is too large
Load diff
310
net/rxrpc/ar-local.c
Normal file
310
net/rxrpc/ar-local.c
Normal file
|
@ -0,0 +1,310 @@
|
|||
/* AF_RXRPC local endpoint management
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/slab.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
static LIST_HEAD(rxrpc_locals);
|
||||
DEFINE_RWLOCK(rxrpc_local_lock);
|
||||
static DECLARE_RWSEM(rxrpc_local_sem);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_local_wq);
|
||||
|
||||
static void rxrpc_destroy_local(struct work_struct *work);
|
||||
|
||||
/*
 * allocate a new local
 * - initialises the work items, lists, locks and queues and copies in the
 *   transport address; the socket itself is opened later by
 *   rxrpc_create_local()
 * - returns the new record with one reference held, or NULL on OOM
 */
static
struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		INIT_WORK(&local->destroyer, &rxrpc_destroy_local);
		INIT_WORK(&local->acceptor, &rxrpc_accept_incoming_calls);
		INIT_WORK(&local->rejecter, &rxrpc_reject_packets);
		INIT_LIST_HEAD(&local->services);
		INIT_LIST_HEAD(&local->link);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->accept_queue);
		skb_queue_head_init(&local->reject_queue);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		/* the caller owns the initial reference */
		atomic_set(&local->usage, 1);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
	}

	_leave(" = %p", local);
	return local;
}
|
||||
|
||||
/*
 * create the local socket
 * - must be called with rxrpc_local_sem writelocked
 * - opens the kernel UDP socket for the endpoint, binds it if an address was
 *   supplied, enables ICMP error reception and PMTU discovery, adds the
 *   endpoint to the global list and hooks up the data-ready/error callbacks
 * - returns 0 or a negative errno; on failure the socket is torn down and
 *   local->socket is reset to NULL
 */
static int rxrpc_create_local(struct rxrpc_local *local)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d}", local, local->srx.transport_type);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
			       &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *) &local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed");
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* publish the endpoint on the global list */
	write_lock_bh(&rxrpc_local_lock);
	list_add(&local->link, &rxrpc_locals);
	write_unlock_bh(&rxrpc_local_lock);

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_UDP_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}
|
||||
|
||||
/*
 * create a new local endpoint using the specified UDP address
 * - first searches for an existing endpoint with a matching transport
 *   type/family/address and takes a reference on it if found
 * - otherwise allocates a new record and opens its socket
 * - returns the endpoint or an ERR_PTR on failure
 */
struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	int ret;

	_enter("{%d,%u,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport.family,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	/* the semaphore serialises lookup/creation against destruction */
	down_write(&rxrpc_local_sem);

	/* see if we have a suitable local local endpoint already */
	read_lock_bh(&rxrpc_local_lock);

	list_for_each_entry(local, &rxrpc_locals, link) {
		_debug("CMP {%d,%u,%pI4+%hu}",
		       local->srx.transport_type,
		       local->srx.transport.family,
		       &local->srx.transport.sin.sin_addr,
		       ntohs(local->srx.transport.sin.sin_port));

		if (local->srx.transport_type != srx->transport_type ||
		    local->srx.transport.family != srx->transport.family)
			continue;

		switch (srx->transport.family) {
		case AF_INET:
			if (local->srx.transport.sin.sin_port !=
			    srx->transport.sin.sin_port)
				continue;
			if (memcmp(&local->srx.transport.sin.sin_addr,
				   &srx->transport.sin.sin_addr,
				   sizeof(struct in_addr)) != 0)
				continue;
			goto found_local;

		default:
			/* only IPv4 transports are implemented */
			BUG();
		}
	}

	read_unlock_bh(&rxrpc_local_lock);

	/* we didn't find one, so we need to create one */
	local = rxrpc_alloc_local(srx);
	if (!local) {
		up_write(&rxrpc_local_sem);
		return ERR_PTR(-ENOMEM);
	}

	ret = rxrpc_create_local(local);
	if (ret < 0) {
		up_write(&rxrpc_local_sem);
		kfree(local);
		_leave(" = %d", ret);
		return ERR_PTR(ret);
	}

	up_write(&rxrpc_local_sem);

	_net("LOCAL new %d {%d,%u,%pI4+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [new]", local);
	return local;

found_local:
	/* take the reference before the list lock is dropped so the endpoint
	 * can't be reaped under us */
	rxrpc_get_local(local);
	read_unlock_bh(&rxrpc_local_lock);
	up_write(&rxrpc_local_sem);

	_net("LOCAL old %d {%d,%u,%pI4+%hu}",
	     local->debug_id,
	     local->srx.transport_type,
	     local->srx.transport.family,
	     &local->srx.transport.sin.sin_addr,
	     ntohs(local->srx.transport.sin.sin_port));

	_leave(" = %p [reuse]", local);
	return local;
}
|
||||
|
||||
/*
 * release a local endpoint
 * - drops a reference; when the count hits zero the destroyer work item is
 *   queued to tear the endpoint down
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%p{u=%d}", local, atomic_read(&local->usage));

	ASSERTCMP(atomic_read(&local->usage), >, 0);

	/* to prevent a race, the decrement and the dequeue must be effectively
	 * atomic */
	write_lock_bh(&rxrpc_local_lock);
	if (unlikely(atomic_dec_and_test(&local->usage))) {
		_debug("destroy local");
		rxrpc_queue_work(&local->destroyer);
	}
	write_unlock_bh(&rxrpc_local_lock);
	_leave("");
}
|
||||
|
||||
/*
|
||||
* destroy a local endpoint
|
||||
*/
|
||||
static void rxrpc_destroy_local(struct work_struct *work)
|
||||
{
|
||||
struct rxrpc_local *local =
|
||||
container_of(work, struct rxrpc_local, destroyer);
|
||||
|
||||
_enter("%p{%d}", local, atomic_read(&local->usage));
|
||||
|
||||
down_write(&rxrpc_local_sem);
|
||||
|
||||
write_lock_bh(&rxrpc_local_lock);
|
||||
if (atomic_read(&local->usage) > 0) {
|
||||
write_unlock_bh(&rxrpc_local_lock);
|
||||
up_read(&rxrpc_local_sem);
|
||||
_leave(" [resurrected]");
|
||||
return;
|
||||
}
|
||||
|
||||
list_del(&local->link);
|
||||
local->socket->sk->sk_user_data = NULL;
|
||||
write_unlock_bh(&rxrpc_local_lock);
|
||||
|
||||
downgrade_write(&rxrpc_local_sem);
|
||||
|
||||
ASSERT(list_empty(&local->services));
|
||||
ASSERT(!work_pending(&local->acceptor));
|
||||
ASSERT(!work_pending(&local->rejecter));
|
||||
|
||||
/* finish cleaning up the local descriptor */
|
||||
rxrpc_purge_queue(&local->accept_queue);
|
||||
rxrpc_purge_queue(&local->reject_queue);
|
||||
kernel_sock_shutdown(local->socket, SHUT_RDWR);
|
||||
sock_release(local->socket);
|
||||
|
||||
up_read(&rxrpc_local_sem);
|
||||
|
||||
_net("DESTROY LOCAL %d", local->debug_id);
|
||||
kfree(local);
|
||||
|
||||
if (list_empty(&rxrpc_locals))
|
||||
wake_up_all(&rxrpc_local_wq);
|
||||
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/*
 * preemptively destroy all local local endpoint rather than waiting for
 * them to be destroyed
 * - called on module unload; sleeps uninterruptibly on rxrpc_local_wq until
 *   the destroyer work items have emptied the global endpoint list
 */
void __exit rxrpc_destroy_all_locals(void)
{
	DECLARE_WAITQUEUE(myself,current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_locals)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_local_wq, &myself);

		while (!list_empty(&rxrpc_locals)) {
			schedule();
			/* re-arm before the next list check to avoid a missed
			 * wakeup */
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_local_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}
|
746
net/rxrpc/ar-output.c
Normal file
746
net/rxrpc/ar-output.c
Normal file
|
@ -0,0 +1,746 @@
|
|||
/* RxRPC packet transmission
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/net.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/circ_buf.h>
|
||||
#include <linux/export.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
|
||||
* Time till packet resend (in jiffies).
|
||||
*/
|
||||
unsigned rxrpc_resend_timeout = 4 * HZ;
|
||||
|
||||
static int rxrpc_send_data(struct kiocb *iocb,
|
||||
struct rxrpc_sock *rx,
|
||||
struct rxrpc_call *call,
|
||||
struct msghdr *msg, size_t len);
|
||||
|
||||
/*
 * extract control messages from the sendmsg() control buffer
 * - fills in *user_call_ID, *command and *abort_code from SOL_RXRPC cmsgs
 * - *command defaults to RXRPC_CMD_SEND_DATA; RXRPC_ABORT and RXRPC_ACCEPT
 *   override it and are mutually exclusive
 * - returns 0, -EINVAL on a malformed/conflicting control buffer, or
 *   -EISCONN if RXRPC_ACCEPT is used on a non-server socket
 */
static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
			      unsigned long *user_call_ID,
			      enum rxrpc_command *command,
			      u32 *abort_code,
			      bool server)
{
	struct cmsghdr *cmsg;
	int len;

	*command = RXRPC_CMD_SEND_DATA;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		/* payload length, excluding the aligned cmsg header */
		len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			/* a compat (32-bit) task passes a u32 ID */
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				*user_call_ID = *(u32 *) CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				*user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			_debug("User Call ID %lx", *user_call_ID);
			break;

		case RXRPC_ABORT:
			/* may not be combined with another command cmsg */
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(*abort_code))
				return -EINVAL;
			*abort_code = *(unsigned int *) CMSG_DATA(cmsg);
			_debug("Abort %x", *abort_code);
			/* abort code 0 is not a valid abort reason */
			if (*abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			/* may not be combined with another command cmsg */
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_ACCEPT;
			/* ACCEPT carries no payload */
			if (len != 0)
				return -EINVAL;
			/* only a server (listening) socket may accept */
			if (!server)
				return -EISCONN;
			break;

		default:
			return -EINVAL;
		}
	}

	_leave(" = 0");
	return 0;
}
|
||||
|
||||
/*
|
||||
* abort a call, sending an ABORT packet to the peer
|
||||
*/
|
||||
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
|
||||
{
|
||||
write_lock_bh(&call->state_lock);
|
||||
|
||||
if (call->state <= RXRPC_CALL_COMPLETE) {
|
||||
call->state = RXRPC_CALL_LOCALLY_ABORTED;
|
||||
call->abort_code = abort_code;
|
||||
set_bit(RXRPC_CALL_ABORT, &call->events);
|
||||
del_timer_sync(&call->resend_timer);
|
||||
del_timer_sync(&call->ack_timer);
|
||||
clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
|
||||
clear_bit(RXRPC_CALL_ACK, &call->events);
|
||||
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
|
||||
rxrpc_queue_call(call);
|
||||
}
|
||||
|
||||
write_unlock_bh(&call->state_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* send a message forming part of a client call through an RxRPC socket
|
||||
* - caller holds the socket locked
|
||||
* - the socket may be either a client socket or a server socket
|
||||
*/
|
||||
int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
|
||||
struct rxrpc_transport *trans, struct msghdr *msg,
|
||||
size_t len)
|
||||
{
|
||||
struct rxrpc_conn_bundle *bundle;
|
||||
enum rxrpc_command cmd;
|
||||
struct rxrpc_call *call;
|
||||
unsigned long user_call_ID = 0;
|
||||
struct key *key;
|
||||
__be16 service_id;
|
||||
u32 abort_code = 0;
|
||||
int ret;
|
||||
|
||||
_enter("");
|
||||
|
||||
ASSERT(trans != NULL);
|
||||
|
||||
ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
|
||||
false);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
bundle = NULL;
|
||||
if (trans) {
|
||||
service_id = rx->service_id;
|
||||
if (msg->msg_name) {
|
||||
DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx,
|
||||
msg->msg_name);
|
||||
service_id = htons(srx->srx_service);
|
||||
}
|
||||
key = rx->key;
|
||||
if (key && !rx->key->payload.data)
|
||||
key = NULL;
|
||||
bundle = rxrpc_get_bundle(rx, trans, key, service_id,
|
||||
GFP_KERNEL);
|
||||
if (IS_ERR(bundle))
|
||||
return PTR_ERR(bundle);
|
||||
}
|
||||
|
||||
call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
|
||||
abort_code == 0, GFP_KERNEL);
|
||||
if (trans)
|
||||
rxrpc_put_bundle(trans, bundle);
|
||||
if (IS_ERR(call)) {
|
||||
_leave(" = %ld", PTR_ERR(call));
|
||||
return PTR_ERR(call);
|
||||
}
|
||||
|
||||
_debug("CALL %d USR %lx ST %d on CONN %p",
|
||||
call->debug_id, call->user_call_ID, call->state, call->conn);
|
||||
|
||||
if (call->state >= RXRPC_CALL_COMPLETE) {
|
||||
/* it's too late for this call */
|
||||
ret = -ESHUTDOWN;
|
||||
} else if (cmd == RXRPC_CMD_SEND_ABORT) {
|
||||
rxrpc_send_abort(call, abort_code);
|
||||
} else if (cmd != RXRPC_CMD_SEND_DATA) {
|
||||
ret = -EINVAL;
|
||||
} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
|
||||
/* request phase complete for this client call */
|
||||
ret = -EPROTO;
|
||||
} else {
|
||||
ret = rxrpc_send_data(iocb, rx, call, msg, len);
|
||||
}
|
||||
|
||||
rxrpc_put_call(call);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_send_data - Allow a kernel service to send data on a call
|
||||
* @call: The call to send data through
|
||||
* @msg: The data to send
|
||||
* @len: The amount of data to send
|
||||
*
|
||||
* Allow a kernel service to send data on a call. The call must be in an state
|
||||
* appropriate to sending data. No control data should be supplied in @msg,
|
||||
* nor should an address be supplied. MSG_MORE should be flagged if there's
|
||||
* more data to come, otherwise this data will end the transmission phase.
|
||||
*/
|
||||
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
|
||||
size_t len)
|
||||
{
|
||||
int ret;
|
||||
|
||||
_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
|
||||
|
||||
ASSERTCMP(msg->msg_name, ==, NULL);
|
||||
ASSERTCMP(msg->msg_control, ==, NULL);
|
||||
|
||||
lock_sock(&call->socket->sk);
|
||||
|
||||
_debug("CALL %d USR %lx ST %d on CONN %p",
|
||||
call->debug_id, call->user_call_ID, call->state, call->conn);
|
||||
|
||||
if (call->state >= RXRPC_CALL_COMPLETE) {
|
||||
ret = -ESHUTDOWN; /* it's too late for this call */
|
||||
} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
|
||||
call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
|
||||
call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
|
||||
ret = -EPROTO; /* request phase complete for this client call */
|
||||
} else {
|
||||
mm_segment_t oldfs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
|
||||
set_fs(oldfs);
|
||||
}
|
||||
|
||||
release_sock(&call->socket->sk);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_send_data);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_abort_call - Allow a kernel service to abort a call
|
||||
* @call: The call to be aborted
|
||||
* @abort_code: The abort code to stick into the ABORT packet
|
||||
*
|
||||
* Allow a kernel service to abort a call, if it's still in an abortable state.
|
||||
*/
|
||||
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
|
||||
{
|
||||
_enter("{%d},%d", call->debug_id, abort_code);
|
||||
|
||||
lock_sock(&call->socket->sk);
|
||||
|
||||
_debug("CALL %d USR %lx ST %d on CONN %p",
|
||||
call->debug_id, call->user_call_ID, call->state, call->conn);
|
||||
|
||||
if (call->state < RXRPC_CALL_COMPLETE)
|
||||
rxrpc_send_abort(call, abort_code);
|
||||
|
||||
release_sock(&call->socket->sk);
|
||||
_leave("");
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
|
||||
|
||||
/*
|
||||
* send a message through a server socket
|
||||
* - caller holds the socket locked
|
||||
*/
|
||||
int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
|
||||
struct msghdr *msg, size_t len)
|
||||
{
|
||||
enum rxrpc_command cmd;
|
||||
struct rxrpc_call *call;
|
||||
unsigned long user_call_ID = 0;
|
||||
u32 abort_code = 0;
|
||||
int ret;
|
||||
|
||||
_enter("");
|
||||
|
||||
ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
|
||||
true);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (cmd == RXRPC_CMD_ACCEPT) {
|
||||
call = rxrpc_accept_call(rx, user_call_ID);
|
||||
if (IS_ERR(call))
|
||||
return PTR_ERR(call);
|
||||
rxrpc_put_call(call);
|
||||
return 0;
|
||||
}
|
||||
|
||||
call = rxrpc_find_server_call(rx, user_call_ID);
|
||||
if (!call)
|
||||
return -EBADSLT;
|
||||
if (call->state >= RXRPC_CALL_COMPLETE) {
|
||||
ret = -ESHUTDOWN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (cmd) {
|
||||
case RXRPC_CMD_SEND_DATA:
|
||||
if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
|
||||
call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
|
||||
call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
|
||||
/* Tx phase not yet begun for this call */
|
||||
ret = -EPROTO;
|
||||
break;
|
||||
}
|
||||
|
||||
ret = rxrpc_send_data(iocb, rx, call, msg, len);
|
||||
break;
|
||||
|
||||
case RXRPC_CMD_SEND_ABORT:
|
||||
rxrpc_send_abort(call, abort_code);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
out:
|
||||
rxrpc_put_call(call);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* send a packet through the transport endpoint
|
||||
*/
|
||||
int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
|
||||
{
|
||||
struct kvec iov[1];
|
||||
struct msghdr msg;
|
||||
int ret, opt;
|
||||
|
||||
_enter(",{%d}", skb->len);
|
||||
|
||||
iov[0].iov_base = skb->head;
|
||||
iov[0].iov_len = skb->len;
|
||||
|
||||
msg.msg_name = &trans->peer->srx.transport.sin;
|
||||
msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
|
||||
msg.msg_control = NULL;
|
||||
msg.msg_controllen = 0;
|
||||
msg.msg_flags = 0;
|
||||
|
||||
/* send the packet with the don't fragment bit set if we currently
|
||||
* think it's small enough */
|
||||
if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
|
||||
down_read(&trans->local->defrag_sem);
|
||||
/* send the packet by UDP
|
||||
* - returns -EMSGSIZE if UDP would have to fragment the packet
|
||||
* to go out of the interface
|
||||
* - in which case, we'll have processed the ICMP error
|
||||
* message and update the peer record
|
||||
*/
|
||||
ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
|
||||
iov[0].iov_len);
|
||||
|
||||
up_read(&trans->local->defrag_sem);
|
||||
if (ret == -EMSGSIZE)
|
||||
goto send_fragmentable;
|
||||
|
||||
_leave(" = %d [%u]", ret, trans->peer->maxdata);
|
||||
return ret;
|
||||
}
|
||||
|
||||
send_fragmentable:
|
||||
/* attempt to send this message with fragmentation enabled */
|
||||
_debug("send fragment");
|
||||
|
||||
down_write(&trans->local->defrag_sem);
|
||||
opt = IP_PMTUDISC_DONT;
|
||||
ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER,
|
||||
(char *) &opt, sizeof(opt));
|
||||
if (ret == 0) {
|
||||
ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
|
||||
iov[0].iov_len);
|
||||
|
||||
opt = IP_PMTUDISC_DO;
|
||||
kernel_setsockopt(trans->local->socket, SOL_IP,
|
||||
IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
|
||||
}
|
||||
|
||||
up_write(&trans->local->defrag_sem);
|
||||
_leave(" = %d [frag %u]", ret, trans->peer->maxdata);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* wait for space to appear in the transmit/ACK window
|
||||
* - caller holds the socket locked
|
||||
*/
|
||||
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
|
||||
struct rxrpc_call *call,
|
||||
long *timeo)
|
||||
{
|
||||
DECLARE_WAITQUEUE(myself, current);
|
||||
int ret;
|
||||
|
||||
_enter(",{%d},%ld",
|
||||
CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
|
||||
*timeo);
|
||||
|
||||
add_wait_queue(&call->tx_waitq, &myself);
|
||||
|
||||
for (;;) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
ret = 0;
|
||||
if (CIRC_SPACE(call->acks_head, call->acks_tail,
|
||||
call->acks_winsz) > 0)
|
||||
break;
|
||||
if (signal_pending(current)) {
|
||||
ret = sock_intr_errno(*timeo);
|
||||
break;
|
||||
}
|
||||
|
||||
release_sock(&rx->sk);
|
||||
*timeo = schedule_timeout(*timeo);
|
||||
lock_sock(&rx->sk);
|
||||
}
|
||||
|
||||
remove_wait_queue(&call->tx_waitq, &myself);
|
||||
set_current_state(TASK_RUNNING);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* attempt to schedule an instant Tx resend
|
||||
*/
|
||||
static inline void rxrpc_instant_resend(struct rxrpc_call *call)
|
||||
{
|
||||
read_lock_bh(&call->state_lock);
|
||||
if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
|
||||
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
|
||||
if (call->state < RXRPC_CALL_COMPLETE &&
|
||||
!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
|
||||
rxrpc_queue_call(call);
|
||||
}
|
||||
read_unlock_bh(&call->state_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* queue a packet for transmission, set the resend timer and attempt
|
||||
* to send the packet immediately
|
||||
*/
|
||||
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
|
||||
bool last)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
int ret;
|
||||
|
||||
_net("queue skb %p [%d]", skb, call->acks_head);
|
||||
|
||||
ASSERT(call->acks_window != NULL);
|
||||
call->acks_window[call->acks_head] = (unsigned long) skb;
|
||||
smp_wmb();
|
||||
call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);
|
||||
|
||||
if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
|
||||
_debug("________awaiting reply/ACK__________");
|
||||
write_lock_bh(&call->state_lock);
|
||||
switch (call->state) {
|
||||
case RXRPC_CALL_CLIENT_SEND_REQUEST:
|
||||
call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
|
||||
break;
|
||||
case RXRPC_CALL_SERVER_ACK_REQUEST:
|
||||
call->state = RXRPC_CALL_SERVER_SEND_REPLY;
|
||||
if (!last)
|
||||
break;
|
||||
case RXRPC_CALL_SERVER_SEND_REPLY:
|
||||
call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
write_unlock_bh(&call->state_lock);
|
||||
}
|
||||
|
||||
_proto("Tx DATA %%%u { #%u }",
|
||||
ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
|
||||
|
||||
sp->need_resend = false;
|
||||
sp->resend_at = jiffies + rxrpc_resend_timeout;
|
||||
if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
|
||||
_debug("run timer");
|
||||
call->resend_timer.expires = sp->resend_at;
|
||||
add_timer(&call->resend_timer);
|
||||
}
|
||||
|
||||
/* attempt to cancel the rx-ACK timer, deferring reply transmission if
|
||||
* we're ACK'ing the request phase of an incoming call */
|
||||
ret = -EAGAIN;
|
||||
if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
|
||||
/* the packet may be freed by rxrpc_process_call() before this
|
||||
* returns */
|
||||
ret = rxrpc_send_packet(call->conn->trans, skb);
|
||||
_net("sent skb %p", skb);
|
||||
} else {
|
||||
_debug("failed to delete ACK timer");
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
_debug("need instant resend %d", ret);
|
||||
sp->need_resend = true;
|
||||
rxrpc_instant_resend(call);
|
||||
}
|
||||
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/*
|
||||
* send data through a socket
|
||||
* - must be called in process context
|
||||
* - caller holds the socket locked
|
||||
*/
|
||||
static int rxrpc_send_data(struct kiocb *iocb,
|
||||
struct rxrpc_sock *rx,
|
||||
struct rxrpc_call *call,
|
||||
struct msghdr *msg, size_t len)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp;
|
||||
unsigned char __user *from;
|
||||
struct sk_buff *skb;
|
||||
struct iovec *iov;
|
||||
struct sock *sk = &rx->sk;
|
||||
long timeo;
|
||||
bool more;
|
||||
int ret, ioc, segment, copied;
|
||||
|
||||
_enter(",,,{%zu},%zu", msg->msg_iovlen, len);
|
||||
|
||||
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
|
||||
|
||||
/* this should be in poll */
|
||||
clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
|
||||
|
||||
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
|
||||
return -EPIPE;
|
||||
|
||||
iov = msg->msg_iov;
|
||||
ioc = msg->msg_iovlen - 1;
|
||||
from = iov->iov_base;
|
||||
segment = iov->iov_len;
|
||||
iov++;
|
||||
more = msg->msg_flags & MSG_MORE;
|
||||
|
||||
skb = call->tx_pending;
|
||||
call->tx_pending = NULL;
|
||||
|
||||
copied = 0;
|
||||
do {
|
||||
int copy;
|
||||
|
||||
if (segment > len)
|
||||
segment = len;
|
||||
|
||||
_debug("SEGMENT %d @%p", segment, from);
|
||||
|
||||
if (!skb) {
|
||||
size_t size, chunk, max, space;
|
||||
|
||||
_debug("alloc");
|
||||
|
||||
if (CIRC_SPACE(call->acks_head, call->acks_tail,
|
||||
call->acks_winsz) <= 0) {
|
||||
ret = -EAGAIN;
|
||||
if (msg->msg_flags & MSG_DONTWAIT)
|
||||
goto maybe_error;
|
||||
ret = rxrpc_wait_for_tx_window(rx, call,
|
||||
&timeo);
|
||||
if (ret < 0)
|
||||
goto maybe_error;
|
||||
}
|
||||
|
||||
max = call->conn->trans->peer->maxdata;
|
||||
max -= call->conn->security_size;
|
||||
max &= ~(call->conn->size_align - 1UL);
|
||||
|
||||
chunk = max;
|
||||
if (chunk > len && !more)
|
||||
chunk = len;
|
||||
|
||||
space = chunk + call->conn->size_align;
|
||||
space &= ~(call->conn->size_align - 1UL);
|
||||
|
||||
size = space + call->conn->header_size;
|
||||
|
||||
_debug("SIZE: %zu/%zu/%zu", chunk, space, size);
|
||||
|
||||
/* create a buffer that we can retain until it's ACK'd */
|
||||
skb = sock_alloc_send_skb(
|
||||
sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
|
||||
if (!skb)
|
||||
goto maybe_error;
|
||||
|
||||
rxrpc_new_skb(skb);
|
||||
|
||||
_debug("ALLOC SEND %p", skb);
|
||||
|
||||
ASSERTCMP(skb->mark, ==, 0);
|
||||
|
||||
_debug("HS: %u", call->conn->header_size);
|
||||
skb_reserve(skb, call->conn->header_size);
|
||||
skb->len += call->conn->header_size;
|
||||
|
||||
sp = rxrpc_skb(skb);
|
||||
sp->remain = chunk;
|
||||
if (sp->remain > skb_tailroom(skb))
|
||||
sp->remain = skb_tailroom(skb);
|
||||
|
||||
_net("skb: hr %d, tr %d, hl %d, rm %d",
|
||||
skb_headroom(skb),
|
||||
skb_tailroom(skb),
|
||||
skb_headlen(skb),
|
||||
sp->remain);
|
||||
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
}
|
||||
|
||||
_debug("append");
|
||||
sp = rxrpc_skb(skb);
|
||||
|
||||
/* append next segment of data to the current buffer */
|
||||
copy = skb_tailroom(skb);
|
||||
ASSERTCMP(copy, >, 0);
|
||||
if (copy > segment)
|
||||
copy = segment;
|
||||
if (copy > sp->remain)
|
||||
copy = sp->remain;
|
||||
|
||||
_debug("add");
|
||||
ret = skb_add_data(skb, from, copy);
|
||||
_debug("added");
|
||||
if (ret < 0)
|
||||
goto efault;
|
||||
sp->remain -= copy;
|
||||
skb->mark += copy;
|
||||
copied += copy;
|
||||
|
||||
len -= copy;
|
||||
segment -= copy;
|
||||
from += copy;
|
||||
while (segment == 0 && ioc > 0) {
|
||||
from = iov->iov_base;
|
||||
segment = iov->iov_len;
|
||||
iov++;
|
||||
ioc--;
|
||||
}
|
||||
if (len == 0) {
|
||||
segment = 0;
|
||||
ioc = 0;
|
||||
}
|
||||
|
||||
/* check for the far side aborting the call or a network error
|
||||
* occurring */
|
||||
if (call->state > RXRPC_CALL_COMPLETE)
|
||||
goto call_aborted;
|
||||
|
||||
/* add the packet to the send queue if it's now full */
|
||||
if (sp->remain <= 0 || (segment == 0 && !more)) {
|
||||
struct rxrpc_connection *conn = call->conn;
|
||||
uint32_t seq;
|
||||
size_t pad;
|
||||
|
||||
/* pad out if we're using security */
|
||||
if (conn->security) {
|
||||
pad = conn->security_size + skb->mark;
|
||||
pad = conn->size_align - pad;
|
||||
pad &= conn->size_align - 1;
|
||||
_debug("pad %zu", pad);
|
||||
if (pad)
|
||||
memset(skb_put(skb, pad), 0, pad);
|
||||
}
|
||||
|
||||
seq = atomic_inc_return(&call->sequence);
|
||||
|
||||
sp->hdr.epoch = conn->epoch;
|
||||
sp->hdr.cid = call->cid;
|
||||
sp->hdr.callNumber = call->call_id;
|
||||
sp->hdr.seq = htonl(seq);
|
||||
sp->hdr.serial =
|
||||
htonl(atomic_inc_return(&conn->serial));
|
||||
sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
|
||||
sp->hdr.userStatus = 0;
|
||||
sp->hdr.securityIndex = conn->security_ix;
|
||||
sp->hdr._rsvd = 0;
|
||||
sp->hdr.serviceId = conn->service_id;
|
||||
|
||||
sp->hdr.flags = conn->out_clientflag;
|
||||
if (len == 0 && !more)
|
||||
sp->hdr.flags |= RXRPC_LAST_PACKET;
|
||||
else if (CIRC_SPACE(call->acks_head, call->acks_tail,
|
||||
call->acks_winsz) > 1)
|
||||
sp->hdr.flags |= RXRPC_MORE_PACKETS;
|
||||
if (more && seq & 1)
|
||||
sp->hdr.flags |= RXRPC_REQUEST_ACK;
|
||||
|
||||
ret = rxrpc_secure_packet(
|
||||
call, skb, skb->mark,
|
||||
skb->head + sizeof(struct rxrpc_header));
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
memcpy(skb->head, &sp->hdr,
|
||||
sizeof(struct rxrpc_header));
|
||||
rxrpc_queue_packet(call, skb, segment == 0 && !more);
|
||||
skb = NULL;
|
||||
}
|
||||
|
||||
} while (segment > 0);
|
||||
|
||||
success:
|
||||
ret = copied;
|
||||
out:
|
||||
call->tx_pending = skb;
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
|
||||
call_aborted:
|
||||
rxrpc_free_skb(skb);
|
||||
if (call->state == RXRPC_CALL_NETWORK_ERROR)
|
||||
ret = call->conn->trans->peer->net_error;
|
||||
else
|
||||
ret = -ECONNABORTED;
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
|
||||
maybe_error:
|
||||
if (copied)
|
||||
goto success;
|
||||
goto out;
|
||||
|
||||
efault:
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
303
net/rxrpc/ar-peer.c
Normal file
303
net/rxrpc/ar-peer.c
Normal file
|
@ -0,0 +1,303 @@
|
|||
/* RxRPC remote transport endpoint management
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/icmp.h>
|
||||
#include <linux/slab.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/route.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
static LIST_HEAD(rxrpc_peers);
|
||||
static DEFINE_RWLOCK(rxrpc_peer_lock);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);
|
||||
|
||||
static void rxrpc_destroy_peer(struct work_struct *work);
|
||||
|
||||
/*
|
||||
* assess the MTU size for the network interface through which this peer is
|
||||
* reached
|
||||
*/
|
||||
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
|
||||
{
|
||||
struct rtable *rt;
|
||||
struct flowi4 fl4;
|
||||
|
||||
peer->if_mtu = 1500;
|
||||
|
||||
rt = ip_route_output_ports(&init_net, &fl4, NULL,
|
||||
peer->srx.transport.sin.sin_addr.s_addr, 0,
|
||||
htons(7000), htons(7001),
|
||||
IPPROTO_UDP, 0, 0);
|
||||
if (IS_ERR(rt)) {
|
||||
_leave(" [route err %ld]", PTR_ERR(rt));
|
||||
return;
|
||||
}
|
||||
|
||||
peer->if_mtu = dst_mtu(&rt->dst);
|
||||
dst_release(&rt->dst);
|
||||
|
||||
_leave(" [if_mtu %u]", peer->if_mtu);
|
||||
}
|
||||
|
||||
/*
|
||||
* allocate a new peer
|
||||
*/
|
||||
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_peer *peer;
|
||||
|
||||
_enter("");
|
||||
|
||||
peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
|
||||
if (peer) {
|
||||
INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
|
||||
INIT_LIST_HEAD(&peer->link);
|
||||
INIT_LIST_HEAD(&peer->error_targets);
|
||||
spin_lock_init(&peer->lock);
|
||||
atomic_set(&peer->usage, 1);
|
||||
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
||||
memcpy(&peer->srx, srx, sizeof(*srx));
|
||||
|
||||
rxrpc_assess_MTU_size(peer);
|
||||
peer->mtu = peer->if_mtu;
|
||||
|
||||
if (srx->transport.family == AF_INET) {
|
||||
peer->hdrsize = sizeof(struct iphdr);
|
||||
switch (srx->transport_type) {
|
||||
case SOCK_DGRAM:
|
||||
peer->hdrsize += sizeof(struct udphdr);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
|
||||
peer->hdrsize += sizeof(struct rxrpc_header);
|
||||
peer->maxdata = peer->mtu - peer->hdrsize;
|
||||
}
|
||||
|
||||
_leave(" = %p", peer);
|
||||
return peer;
|
||||
}
|
||||
|
||||
/*
|
||||
* obtain a remote transport endpoint for the specified address
|
||||
*/
|
||||
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_peer *peer, *candidate;
|
||||
const char *new = "old";
|
||||
int usage;
|
||||
|
||||
_enter("{%d,%d,%pI4+%hu}",
|
||||
srx->transport_type,
|
||||
srx->transport_len,
|
||||
&srx->transport.sin.sin_addr,
|
||||
ntohs(srx->transport.sin.sin_port));
|
||||
|
||||
/* search the peer list first */
|
||||
read_lock_bh(&rxrpc_peer_lock);
|
||||
list_for_each_entry(peer, &rxrpc_peers, link) {
|
||||
_debug("check PEER %d { u=%d t=%d l=%d }",
|
||||
peer->debug_id,
|
||||
atomic_read(&peer->usage),
|
||||
peer->srx.transport_type,
|
||||
peer->srx.transport_len);
|
||||
|
||||
if (atomic_read(&peer->usage) > 0 &&
|
||||
peer->srx.transport_type == srx->transport_type &&
|
||||
peer->srx.transport_len == srx->transport_len &&
|
||||
memcmp(&peer->srx.transport,
|
||||
&srx->transport,
|
||||
srx->transport_len) == 0)
|
||||
goto found_extant_peer;
|
||||
}
|
||||
read_unlock_bh(&rxrpc_peer_lock);
|
||||
|
||||
/* not yet present - create a candidate for a new record and then
|
||||
* redo the search */
|
||||
candidate = rxrpc_alloc_peer(srx, gfp);
|
||||
if (!candidate) {
|
||||
_leave(" = -ENOMEM");
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
write_lock_bh(&rxrpc_peer_lock);
|
||||
|
||||
list_for_each_entry(peer, &rxrpc_peers, link) {
|
||||
if (atomic_read(&peer->usage) > 0 &&
|
||||
peer->srx.transport_type == srx->transport_type &&
|
||||
peer->srx.transport_len == srx->transport_len &&
|
||||
memcmp(&peer->srx.transport,
|
||||
&srx->transport,
|
||||
srx->transport_len) == 0)
|
||||
goto found_extant_second;
|
||||
}
|
||||
|
||||
/* we can now add the new candidate to the list */
|
||||
peer = candidate;
|
||||
candidate = NULL;
|
||||
usage = atomic_read(&peer->usage);
|
||||
|
||||
list_add_tail(&peer->link, &rxrpc_peers);
|
||||
write_unlock_bh(&rxrpc_peer_lock);
|
||||
new = "new";
|
||||
|
||||
success:
|
||||
_net("PEER %s %d {%d,%u,%pI4+%hu}",
|
||||
new,
|
||||
peer->debug_id,
|
||||
peer->srx.transport_type,
|
||||
peer->srx.transport.family,
|
||||
&peer->srx.transport.sin.sin_addr,
|
||||
ntohs(peer->srx.transport.sin.sin_port));
|
||||
|
||||
_leave(" = %p {u=%d}", peer, usage);
|
||||
return peer;
|
||||
|
||||
/* we found the peer in the list immediately */
|
||||
found_extant_peer:
|
||||
usage = atomic_inc_return(&peer->usage);
|
||||
read_unlock_bh(&rxrpc_peer_lock);
|
||||
goto success;
|
||||
|
||||
/* we found the peer on the second time through the list */
|
||||
found_extant_second:
|
||||
usage = atomic_inc_return(&peer->usage);
|
||||
write_unlock_bh(&rxrpc_peer_lock);
|
||||
kfree(candidate);
|
||||
goto success;
|
||||
}
|
||||
|
||||
/*
|
||||
* find the peer associated with a packet
|
||||
*/
|
||||
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
|
||||
__be32 addr, __be16 port)
|
||||
{
|
||||
struct rxrpc_peer *peer;
|
||||
|
||||
_enter("");
|
||||
|
||||
/* search the peer list */
|
||||
read_lock_bh(&rxrpc_peer_lock);
|
||||
|
||||
if (local->srx.transport.family == AF_INET &&
|
||||
local->srx.transport_type == SOCK_DGRAM
|
||||
) {
|
||||
list_for_each_entry(peer, &rxrpc_peers, link) {
|
||||
if (atomic_read(&peer->usage) > 0 &&
|
||||
peer->srx.transport_type == SOCK_DGRAM &&
|
||||
peer->srx.transport.family == AF_INET &&
|
||||
peer->srx.transport.sin.sin_port == port &&
|
||||
peer->srx.transport.sin.sin_addr.s_addr == addr)
|
||||
goto found_UDP_peer;
|
||||
}
|
||||
|
||||
goto new_UDP_peer;
|
||||
}
|
||||
|
||||
read_unlock_bh(&rxrpc_peer_lock);
|
||||
_leave(" = -EAFNOSUPPORT");
|
||||
return ERR_PTR(-EAFNOSUPPORT);
|
||||
|
||||
found_UDP_peer:
|
||||
_net("Rx UDP DGRAM from peer %d", peer->debug_id);
|
||||
atomic_inc(&peer->usage);
|
||||
read_unlock_bh(&rxrpc_peer_lock);
|
||||
_leave(" = %p", peer);
|
||||
return peer;
|
||||
|
||||
new_UDP_peer:
|
||||
_net("Rx UDP DGRAM from NEW peer");
|
||||
read_unlock_bh(&rxrpc_peer_lock);
|
||||
_leave(" = -EBUSY [new]");
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
/*
|
||||
* release a remote transport endpoint
|
||||
*/
|
||||
void rxrpc_put_peer(struct rxrpc_peer *peer)
|
||||
{
|
||||
_enter("%p{u=%d}", peer, atomic_read(&peer->usage));
|
||||
|
||||
ASSERTCMP(atomic_read(&peer->usage), >, 0);
|
||||
|
||||
if (likely(!atomic_dec_and_test(&peer->usage))) {
|
||||
_leave(" [in use]");
|
||||
return;
|
||||
}
|
||||
|
||||
rxrpc_queue_work(&peer->destroyer);
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/*
|
||||
* destroy a remote transport endpoint
|
||||
*/
|
||||
static void rxrpc_destroy_peer(struct work_struct *work)
|
||||
{
|
||||
struct rxrpc_peer *peer =
|
||||
container_of(work, struct rxrpc_peer, destroyer);
|
||||
|
||||
_enter("%p{%d}", peer, atomic_read(&peer->usage));
|
||||
|
||||
write_lock_bh(&rxrpc_peer_lock);
|
||||
list_del(&peer->link);
|
||||
write_unlock_bh(&rxrpc_peer_lock);
|
||||
|
||||
_net("DESTROY PEER %d", peer->debug_id);
|
||||
kfree(peer);
|
||||
|
||||
if (list_empty(&rxrpc_peers))
|
||||
wake_up_all(&rxrpc_peer_wq);
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/*
|
||||
* preemptively destroy all the peer records from a transport endpoint rather
|
||||
* than waiting for them to time out
|
||||
*/
|
||||
void __exit rxrpc_destroy_all_peers(void)
|
||||
{
|
||||
DECLARE_WAITQUEUE(myself,current);
|
||||
|
||||
_enter("");
|
||||
|
||||
/* we simply have to wait for them to go away */
|
||||
if (!list_empty(&rxrpc_peers)) {
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
add_wait_queue(&rxrpc_peer_wq, &myself);
|
||||
|
||||
while (!list_empty(&rxrpc_peers)) {
|
||||
schedule();
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
}
|
||||
|
||||
remove_wait_queue(&rxrpc_peer_wq, &myself);
|
||||
set_current_state(TASK_RUNNING);
|
||||
}
|
||||
|
||||
_leave("");
|
||||
}
|
192
net/rxrpc/ar-proc.c
Normal file
192
net/rxrpc/ar-proc.c
Normal file
|
@ -0,0 +1,192 @@
|
|||
/* /proc/net/ support for AF_RXRPC
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
static const char *const rxrpc_conn_states[] = {
|
||||
[RXRPC_CONN_UNUSED] = "Unused ",
|
||||
[RXRPC_CONN_CLIENT] = "Client ",
|
||||
[RXRPC_CONN_SERVER_UNSECURED] = "SvUnsec ",
|
||||
[RXRPC_CONN_SERVER_CHALLENGING] = "SvChall ",
|
||||
[RXRPC_CONN_SERVER] = "SvSecure",
|
||||
[RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort",
|
||||
[RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort",
|
||||
[RXRPC_CONN_NETWORK_ERROR] = "NetError",
|
||||
};
|
||||
|
||||
/*
|
||||
* generate a list of extant and dead calls in /proc/net/rxrpc_calls
|
||||
*/
|
||||
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
|
||||
{
|
||||
read_lock(&rxrpc_call_lock);
|
||||
return seq_list_start_head(&rxrpc_calls, *_pos);
|
||||
}
|
||||
|
||||
/* seq_file next op: advance to the next call record */
static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &rxrpc_calls, pos);
}
|
||||
|
||||
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
|
||||
{
|
||||
read_unlock(&rxrpc_call_lock);
|
||||
}
|
||||
|
||||
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
|
||||
{
|
||||
struct rxrpc_transport *trans;
|
||||
struct rxrpc_call *call;
|
||||
char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
|
||||
|
||||
if (v == &rxrpc_calls) {
|
||||
seq_puts(seq,
|
||||
"Proto Local Remote "
|
||||
" SvID ConnID CallID End Use State Abort "
|
||||
" UserID\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
call = list_entry(v, struct rxrpc_call, link);
|
||||
trans = call->conn->trans;
|
||||
|
||||
sprintf(lbuff, "%pI4:%u",
|
||||
&trans->local->srx.transport.sin.sin_addr,
|
||||
ntohs(trans->local->srx.transport.sin.sin_port));
|
||||
|
||||
sprintf(rbuff, "%pI4:%u",
|
||||
&trans->peer->srx.transport.sin.sin_addr,
|
||||
ntohs(trans->peer->srx.transport.sin.sin_port));
|
||||
|
||||
seq_printf(seq,
|
||||
"UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
|
||||
" %-8.8s %08x %lx\n",
|
||||
lbuff,
|
||||
rbuff,
|
||||
ntohs(call->conn->service_id),
|
||||
ntohl(call->conn->cid),
|
||||
ntohl(call->call_id),
|
||||
call->conn->in_clientflag ? "Svc" : "Clt",
|
||||
atomic_read(&call->usage),
|
||||
rxrpc_call_states[call->state],
|
||||
call->abort_code,
|
||||
call->user_call_ID);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct seq_operations rxrpc_call_seq_ops = {
|
||||
.start = rxrpc_call_seq_start,
|
||||
.next = rxrpc_call_seq_next,
|
||||
.stop = rxrpc_call_seq_stop,
|
||||
.show = rxrpc_call_seq_show,
|
||||
};
|
||||
|
||||
static int rxrpc_call_seq_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &rxrpc_call_seq_ops);
|
||||
}
|
||||
|
||||
const struct file_operations rxrpc_call_seq_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = rxrpc_call_seq_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
||||
|
||||
/*
|
||||
* generate a list of extant virtual connections in /proc/net/rxrpc_conns
|
||||
*/
|
||||
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
|
||||
{
|
||||
read_lock(&rxrpc_connection_lock);
|
||||
return seq_list_start_head(&rxrpc_connections, *_pos);
|
||||
}
|
||||
|
||||
static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
|
||||
loff_t *pos)
|
||||
{
|
||||
return seq_list_next(v, &rxrpc_connections, pos);
|
||||
}
|
||||
|
||||
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
|
||||
{
|
||||
read_unlock(&rxrpc_connection_lock);
|
||||
}
|
||||
|
||||
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
|
||||
{
|
||||
struct rxrpc_connection *conn;
|
||||
struct rxrpc_transport *trans;
|
||||
char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
|
||||
|
||||
if (v == &rxrpc_connections) {
|
||||
seq_puts(seq,
|
||||
"Proto Local Remote "
|
||||
" SvID ConnID Calls End Use State Key "
|
||||
" Serial ISerial\n"
|
||||
);
|
||||
return 0;
|
||||
}
|
||||
|
||||
conn = list_entry(v, struct rxrpc_connection, link);
|
||||
trans = conn->trans;
|
||||
|
||||
sprintf(lbuff, "%pI4:%u",
|
||||
&trans->local->srx.transport.sin.sin_addr,
|
||||
ntohs(trans->local->srx.transport.sin.sin_port));
|
||||
|
||||
sprintf(rbuff, "%pI4:%u",
|
||||
&trans->peer->srx.transport.sin.sin_addr,
|
||||
ntohs(trans->peer->srx.transport.sin.sin_port));
|
||||
|
||||
seq_printf(seq,
|
||||
"UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
|
||||
" %s %08x %08x %08x\n",
|
||||
lbuff,
|
||||
rbuff,
|
||||
ntohs(conn->service_id),
|
||||
ntohl(conn->cid),
|
||||
conn->call_counter,
|
||||
conn->in_clientflag ? "Svc" : "Clt",
|
||||
atomic_read(&conn->usage),
|
||||
rxrpc_conn_states[conn->state],
|
||||
key_serial(conn->key),
|
||||
atomic_read(&conn->serial),
|
||||
atomic_read(&conn->hi_serial));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct seq_operations rxrpc_connection_seq_ops = {
|
||||
.start = rxrpc_connection_seq_start,
|
||||
.next = rxrpc_connection_seq_next,
|
||||
.stop = rxrpc_connection_seq_stop,
|
||||
.show = rxrpc_connection_seq_show,
|
||||
};
|
||||
|
||||
|
||||
static int rxrpc_connection_seq_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
return seq_open(file, &rxrpc_connection_seq_ops);
|
||||
}
|
||||
|
||||
const struct file_operations rxrpc_connection_seq_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = rxrpc_connection_seq_open,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = seq_release,
|
||||
};
|
424
net/rxrpc/ar-recvmsg.c
Normal file
424
net/rxrpc/ar-recvmsg.c
Normal file
|
@ -0,0 +1,424 @@
|
|||
/* RxRPC recvmsg() implementation
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/export.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
|
||||
* removal a call's user ID from the socket tree to make the user ID available
|
||||
* again and so that it won't be seen again in association with that call
|
||||
*/
|
||||
void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
|
||||
{
|
||||
_debug("RELEASE CALL %d", call->debug_id);
|
||||
|
||||
if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
|
||||
write_lock_bh(&rx->call_lock);
|
||||
rb_erase(&call->sock_node, &call->socket->calls);
|
||||
clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
|
||||
write_unlock_bh(&rx->call_lock);
|
||||
}
|
||||
|
||||
read_lock_bh(&call->state_lock);
|
||||
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
|
||||
!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
|
||||
rxrpc_queue_call(call);
|
||||
read_unlock_bh(&call->state_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* receive a message from an RxRPC socket
|
||||
* - we need to be careful about two or more threads calling recvmsg
|
||||
* simultaneously
|
||||
*/
|
||||
int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
|
||||
struct msghdr *msg, size_t len, int flags)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp;
|
||||
struct rxrpc_call *call = NULL, *continue_call = NULL;
|
||||
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
|
||||
struct sk_buff *skb;
|
||||
long timeo;
|
||||
int copy, ret, ullen, offset, copied = 0;
|
||||
u32 abort_code;
|
||||
|
||||
DEFINE_WAIT(wait);
|
||||
|
||||
_enter(",,,%zu,%d", len, flags);
|
||||
|
||||
if (flags & (MSG_OOB | MSG_TRUNC))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
|
||||
|
||||
timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
|
||||
msg->msg_flags |= MSG_MORE;
|
||||
|
||||
lock_sock(&rx->sk);
|
||||
|
||||
for (;;) {
|
||||
/* return immediately if a client socket has no outstanding
|
||||
* calls */
|
||||
if (RB_EMPTY_ROOT(&rx->calls)) {
|
||||
if (copied)
|
||||
goto out;
|
||||
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
|
||||
release_sock(&rx->sk);
|
||||
if (continue_call)
|
||||
rxrpc_put_call(continue_call);
|
||||
return -ENODATA;
|
||||
}
|
||||
}
|
||||
|
||||
/* get the next message on the Rx queue */
|
||||
skb = skb_peek(&rx->sk.sk_receive_queue);
|
||||
if (!skb) {
|
||||
/* nothing remains on the queue */
|
||||
if (copied &&
|
||||
(flags & MSG_PEEK || timeo == 0))
|
||||
goto out;
|
||||
|
||||
/* wait for a message to turn up */
|
||||
release_sock(&rx->sk);
|
||||
prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
|
||||
TASK_INTERRUPTIBLE);
|
||||
ret = sock_error(&rx->sk);
|
||||
if (ret)
|
||||
goto wait_error;
|
||||
|
||||
if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
|
||||
if (signal_pending(current))
|
||||
goto wait_interrupted;
|
||||
timeo = schedule_timeout(timeo);
|
||||
}
|
||||
finish_wait(sk_sleep(&rx->sk), &wait);
|
||||
lock_sock(&rx->sk);
|
||||
continue;
|
||||
}
|
||||
|
||||
peek_next_packet:
|
||||
sp = rxrpc_skb(skb);
|
||||
call = sp->call;
|
||||
ASSERT(call != NULL);
|
||||
|
||||
_debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
|
||||
|
||||
/* make sure we wait for the state to be updated in this call */
|
||||
spin_lock_bh(&call->lock);
|
||||
spin_unlock_bh(&call->lock);
|
||||
|
||||
if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
|
||||
_debug("packet from released call");
|
||||
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
|
||||
BUG();
|
||||
rxrpc_free_skb(skb);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* determine whether to continue last data receive */
|
||||
if (continue_call) {
|
||||
_debug("maybe cont");
|
||||
if (call != continue_call ||
|
||||
skb->mark != RXRPC_SKB_MARK_DATA) {
|
||||
release_sock(&rx->sk);
|
||||
rxrpc_put_call(continue_call);
|
||||
_leave(" = %d [noncont]", copied);
|
||||
return copied;
|
||||
}
|
||||
}
|
||||
|
||||
rxrpc_get_call(call);
|
||||
|
||||
/* copy the peer address and timestamp */
|
||||
if (!continue_call) {
|
||||
if (msg->msg_name) {
|
||||
size_t len =
|
||||
sizeof(call->conn->trans->peer->srx);
|
||||
memcpy(msg->msg_name,
|
||||
&call->conn->trans->peer->srx, len);
|
||||
msg->msg_namelen = len;
|
||||
}
|
||||
sock_recv_ts_and_drops(msg, &rx->sk, skb);
|
||||
}
|
||||
|
||||
/* receive the message */
|
||||
if (skb->mark != RXRPC_SKB_MARK_DATA)
|
||||
goto receive_non_data_message;
|
||||
|
||||
_debug("recvmsg DATA #%u { %d, %d }",
|
||||
ntohl(sp->hdr.seq), skb->len, sp->offset);
|
||||
|
||||
if (!continue_call) {
|
||||
/* only set the control data once per recvmsg() */
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
|
||||
ullen, &call->user_call_ID);
|
||||
if (ret < 0)
|
||||
goto copy_error;
|
||||
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
|
||||
}
|
||||
|
||||
ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
|
||||
ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
|
||||
call->rx_data_recv = ntohl(sp->hdr.seq);
|
||||
|
||||
ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
|
||||
|
||||
offset = sp->offset;
|
||||
copy = skb->len - offset;
|
||||
if (copy > len - copied)
|
||||
copy = len - copied;
|
||||
|
||||
ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy);
|
||||
|
||||
if (ret < 0)
|
||||
goto copy_error;
|
||||
|
||||
/* handle piecemeal consumption of data packets */
|
||||
_debug("copied %d+%d", copy, copied);
|
||||
|
||||
offset += copy;
|
||||
copied += copy;
|
||||
|
||||
if (!(flags & MSG_PEEK))
|
||||
sp->offset = offset;
|
||||
|
||||
if (sp->offset < skb->len) {
|
||||
_debug("buffer full");
|
||||
ASSERTCMP(copied, ==, len);
|
||||
break;
|
||||
}
|
||||
|
||||
/* we transferred the whole data packet */
|
||||
if (sp->hdr.flags & RXRPC_LAST_PACKET) {
|
||||
_debug("last");
|
||||
if (call->conn->out_clientflag) {
|
||||
/* last byte of reply received */
|
||||
ret = copied;
|
||||
goto terminal_message;
|
||||
}
|
||||
|
||||
/* last bit of request received */
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
_debug("eat packet");
|
||||
if (skb_dequeue(&rx->sk.sk_receive_queue) !=
|
||||
skb)
|
||||
BUG();
|
||||
rxrpc_free_skb(skb);
|
||||
}
|
||||
msg->msg_flags &= ~MSG_MORE;
|
||||
break;
|
||||
}
|
||||
|
||||
/* move on to the next data message */
|
||||
_debug("next");
|
||||
if (!continue_call)
|
||||
continue_call = sp->call;
|
||||
else
|
||||
rxrpc_put_call(call);
|
||||
call = NULL;
|
||||
|
||||
if (flags & MSG_PEEK) {
|
||||
_debug("peek next");
|
||||
skb = skb->next;
|
||||
if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
|
||||
break;
|
||||
goto peek_next_packet;
|
||||
}
|
||||
|
||||
_debug("eat packet");
|
||||
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
|
||||
BUG();
|
||||
rxrpc_free_skb(skb);
|
||||
}
|
||||
|
||||
/* end of non-terminal data packet reception for the moment */
|
||||
_debug("end rcv data");
|
||||
out:
|
||||
release_sock(&rx->sk);
|
||||
if (call)
|
||||
rxrpc_put_call(call);
|
||||
if (continue_call)
|
||||
rxrpc_put_call(continue_call);
|
||||
_leave(" = %d [data]", copied);
|
||||
return copied;
|
||||
|
||||
/* handle non-DATA messages such as aborts, incoming connections and
|
||||
* final ACKs */
|
||||
receive_non_data_message:
|
||||
_debug("non-data");
|
||||
|
||||
if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
|
||||
_debug("RECV NEW CALL");
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
|
||||
if (ret < 0)
|
||||
goto copy_error;
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
|
||||
BUG();
|
||||
rxrpc_free_skb(skb);
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
|
||||
ullen, &call->user_call_ID);
|
||||
if (ret < 0)
|
||||
goto copy_error;
|
||||
ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
|
||||
|
||||
switch (skb->mark) {
|
||||
case RXRPC_SKB_MARK_DATA:
|
||||
BUG();
|
||||
case RXRPC_SKB_MARK_FINAL_ACK:
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
|
||||
break;
|
||||
case RXRPC_SKB_MARK_BUSY:
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
|
||||
break;
|
||||
case RXRPC_SKB_MARK_REMOTE_ABORT:
|
||||
abort_code = call->abort_code;
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
|
||||
break;
|
||||
case RXRPC_SKB_MARK_NET_ERROR:
|
||||
_debug("RECV NET ERROR %d", sp->error);
|
||||
abort_code = sp->error;
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
|
||||
break;
|
||||
case RXRPC_SKB_MARK_LOCAL_ERROR:
|
||||
_debug("RECV LOCAL ERROR %d", sp->error);
|
||||
abort_code = sp->error;
|
||||
ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
|
||||
&abort_code);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
goto copy_error;
|
||||
|
||||
terminal_message:
|
||||
_debug("terminal");
|
||||
msg->msg_flags &= ~MSG_MORE;
|
||||
msg->msg_flags |= MSG_EOR;
|
||||
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
_net("free terminal skb %p", skb);
|
||||
if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
|
||||
BUG();
|
||||
rxrpc_free_skb(skb);
|
||||
rxrpc_remove_user_ID(rx, call);
|
||||
}
|
||||
|
||||
release_sock(&rx->sk);
|
||||
rxrpc_put_call(call);
|
||||
if (continue_call)
|
||||
rxrpc_put_call(continue_call);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
|
||||
copy_error:
|
||||
_debug("copy error");
|
||||
release_sock(&rx->sk);
|
||||
rxrpc_put_call(call);
|
||||
if (continue_call)
|
||||
rxrpc_put_call(continue_call);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
|
||||
wait_interrupted:
|
||||
ret = sock_intr_errno(timeo);
|
||||
wait_error:
|
||||
finish_wait(sk_sleep(&rx->sk), &wait);
|
||||
if (continue_call)
|
||||
rxrpc_put_call(continue_call);
|
||||
if (copied)
|
||||
copied = ret;
|
||||
_leave(" = %d [waitfail %d]", copied, ret);
|
||||
return copied;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_data_delivered - Record delivery of data message
|
||||
* @skb: Message holding data
|
||||
*
|
||||
* Record the delivery of a data message. This permits RxRPC to keep its
|
||||
* tracking correct. The socket buffer will be deleted.
|
||||
*/
|
||||
void rxrpc_kernel_data_delivered(struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
struct rxrpc_call *call = sp->call;
|
||||
|
||||
ASSERTCMP(ntohl(sp->hdr.seq), >=, call->rx_data_recv);
|
||||
ASSERTCMP(ntohl(sp->hdr.seq), <=, call->rx_data_recv + 1);
|
||||
call->rx_data_recv = ntohl(sp->hdr.seq);
|
||||
|
||||
ASSERTCMP(ntohl(sp->hdr.seq), >, call->rx_data_eaten);
|
||||
rxrpc_free_skb(skb);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_data_delivered);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_is_data_last - Determine if data message is last one
|
||||
* @skb: Message holding data
|
||||
*
|
||||
* Determine if data message is last one for the parent call.
|
||||
*/
|
||||
bool rxrpc_kernel_is_data_last(struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_DATA);
|
||||
|
||||
return sp->hdr.flags & RXRPC_LAST_PACKET;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_is_data_last);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_get_abort_code - Get the abort code from an RxRPC abort message
|
||||
* @skb: Message indicating an abort
|
||||
*
|
||||
* Get the abort code from an RxRPC abort message.
|
||||
*/
|
||||
u32 rxrpc_kernel_get_abort_code(struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
ASSERTCMP(skb->mark, ==, RXRPC_SKB_MARK_REMOTE_ABORT);
|
||||
|
||||
return sp->call->abort_code;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_get_abort_code);
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_get_error - Get the error number from an RxRPC error message
|
||||
* @skb: Message indicating an error
|
||||
*
|
||||
* Get the error number from an RxRPC error message.
|
||||
*/
|
||||
int rxrpc_kernel_get_error_number(struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
|
||||
return sp->error;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_get_error_number);
|
264
net/rxrpc/ar-security.c
Normal file
264
net/rxrpc/ar-security.c
Normal file
|
@ -0,0 +1,264 @@
|
|||
/* RxRPC security handling
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include <keys/rxrpc-type.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
static LIST_HEAD(rxrpc_security_methods);
|
||||
static DECLARE_RWSEM(rxrpc_security_sem);
|
||||
|
||||
/*
|
||||
* get an RxRPC security module
|
||||
*/
|
||||
static struct rxrpc_security *rxrpc_security_get(struct rxrpc_security *sec)
|
||||
{
|
||||
return try_module_get(sec->owner) ? sec : NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* release an RxRPC security module
|
||||
*/
|
||||
static void rxrpc_security_put(struct rxrpc_security *sec)
|
||||
{
|
||||
module_put(sec->owner);
|
||||
}
|
||||
|
||||
/*
|
||||
* look up an rxrpc security module
|
||||
*/
|
||||
static struct rxrpc_security *rxrpc_security_lookup(u8 security_index)
|
||||
{
|
||||
struct rxrpc_security *sec = NULL;
|
||||
|
||||
_enter("");
|
||||
|
||||
down_read(&rxrpc_security_sem);
|
||||
|
||||
list_for_each_entry(sec, &rxrpc_security_methods, link) {
|
||||
if (sec->security_index == security_index) {
|
||||
if (unlikely(!rxrpc_security_get(sec)))
|
||||
break;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
sec = NULL;
|
||||
out:
|
||||
up_read(&rxrpc_security_sem);
|
||||
_leave(" = %p [%s]", sec, sec ? sec->name : "");
|
||||
return sec;
|
||||
}
|
||||
|
||||
/**
|
||||
* rxrpc_register_security - register an RxRPC security handler
|
||||
* @sec: security module
|
||||
*
|
||||
* register an RxRPC security handler for use by RxRPC
|
||||
*/
|
||||
int rxrpc_register_security(struct rxrpc_security *sec)
|
||||
{
|
||||
struct rxrpc_security *psec;
|
||||
int ret;
|
||||
|
||||
_enter("");
|
||||
down_write(&rxrpc_security_sem);
|
||||
|
||||
ret = -EEXIST;
|
||||
list_for_each_entry(psec, &rxrpc_security_methods, link) {
|
||||
if (psec->security_index == sec->security_index)
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_add(&sec->link, &rxrpc_security_methods);
|
||||
|
||||
printk(KERN_NOTICE "RxRPC: Registered security type %d '%s'\n",
|
||||
sec->security_index, sec->name);
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
up_write(&rxrpc_security_sem);
|
||||
_leave(" = %d", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(rxrpc_register_security);
|
||||
|
||||
/**
|
||||
* rxrpc_unregister_security - unregister an RxRPC security handler
|
||||
* @sec: security module
|
||||
*
|
||||
* unregister an RxRPC security handler
|
||||
*/
|
||||
void rxrpc_unregister_security(struct rxrpc_security *sec)
|
||||
{
|
||||
|
||||
_enter("");
|
||||
down_write(&rxrpc_security_sem);
|
||||
list_del_init(&sec->link);
|
||||
up_write(&rxrpc_security_sem);
|
||||
|
||||
printk(KERN_NOTICE "RxRPC: Unregistered security type %d '%s'\n",
|
||||
sec->security_index, sec->name);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
|
||||
|
||||
/*
|
||||
* initialise the security on a client connection
|
||||
*/
|
||||
int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
|
||||
{
|
||||
struct rxrpc_key_token *token;
|
||||
struct rxrpc_security *sec;
|
||||
struct key *key = conn->key;
|
||||
int ret;
|
||||
|
||||
_enter("{%d},{%x}", conn->debug_id, key_serial(key));
|
||||
|
||||
if (!key)
|
||||
return 0;
|
||||
|
||||
ret = key_validate(key);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (!key->payload.data)
|
||||
return -EKEYREJECTED;
|
||||
token = key->payload.data;
|
||||
|
||||
sec = rxrpc_security_lookup(token->security_index);
|
||||
if (!sec)
|
||||
return -EKEYREJECTED;
|
||||
conn->security = sec;
|
||||
|
||||
ret = conn->security->init_connection_security(conn);
|
||||
if (ret < 0) {
|
||||
rxrpc_security_put(conn->security);
|
||||
conn->security = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* initialise the security on a server connection
|
||||
*/
|
||||
int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
|
||||
{
|
||||
struct rxrpc_security *sec;
|
||||
struct rxrpc_local *local = conn->trans->local;
|
||||
struct rxrpc_sock *rx;
|
||||
struct key *key;
|
||||
key_ref_t kref;
|
||||
char kdesc[5+1+3+1];
|
||||
|
||||
_enter("");
|
||||
|
||||
sprintf(kdesc, "%u:%u", ntohs(conn->service_id), conn->security_ix);
|
||||
|
||||
sec = rxrpc_security_lookup(conn->security_ix);
|
||||
if (!sec) {
|
||||
_leave(" = -ENOKEY [lookup]");
|
||||
return -ENOKEY;
|
||||
}
|
||||
|
||||
/* find the service */
|
||||
read_lock_bh(&local->services_lock);
|
||||
list_for_each_entry(rx, &local->services, listen_link) {
|
||||
if (rx->service_id == conn->service_id)
|
||||
goto found_service;
|
||||
}
|
||||
|
||||
/* the service appears to have died */
|
||||
read_unlock_bh(&local->services_lock);
|
||||
rxrpc_security_put(sec);
|
||||
_leave(" = -ENOENT");
|
||||
return -ENOENT;
|
||||
|
||||
found_service:
|
||||
if (!rx->securities) {
|
||||
read_unlock_bh(&local->services_lock);
|
||||
rxrpc_security_put(sec);
|
||||
_leave(" = -ENOKEY");
|
||||
return -ENOKEY;
|
||||
}
|
||||
|
||||
/* look through the service's keyring */
|
||||
kref = keyring_search(make_key_ref(rx->securities, 1UL),
|
||||
&key_type_rxrpc_s, kdesc);
|
||||
if (IS_ERR(kref)) {
|
||||
read_unlock_bh(&local->services_lock);
|
||||
rxrpc_security_put(sec);
|
||||
_leave(" = %ld [search]", PTR_ERR(kref));
|
||||
return PTR_ERR(kref);
|
||||
}
|
||||
|
||||
key = key_ref_to_ptr(kref);
|
||||
read_unlock_bh(&local->services_lock);
|
||||
|
||||
conn->server_key = key;
|
||||
conn->security = sec;
|
||||
|
||||
_leave(" = 0");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* secure a packet prior to transmission
|
||||
*/
|
||||
int rxrpc_secure_packet(const struct rxrpc_call *call,
|
||||
struct sk_buff *skb,
|
||||
size_t data_size,
|
||||
void *sechdr)
|
||||
{
|
||||
if (call->conn->security)
|
||||
return call->conn->security->secure_packet(
|
||||
call, skb, data_size, sechdr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* secure a packet prior to transmission
|
||||
*/
|
||||
int rxrpc_verify_packet(const struct rxrpc_call *call, struct sk_buff *skb,
|
||||
u32 *_abort_code)
|
||||
{
|
||||
if (call->conn->security)
|
||||
return call->conn->security->verify_packet(
|
||||
call, skb, _abort_code);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* clear connection security
|
||||
*/
|
||||
void rxrpc_clear_conn_security(struct rxrpc_connection *conn)
|
||||
{
|
||||
_enter("{%d}", conn->debug_id);
|
||||
|
||||
if (conn->security) {
|
||||
conn->security->clear(conn);
|
||||
rxrpc_security_put(conn->security);
|
||||
conn->security = NULL;
|
||||
}
|
||||
|
||||
key_put(conn->key);
|
||||
key_put(conn->server_key);
|
||||
}
|
137
net/rxrpc/ar-skbuff.c
Normal file
137
net/rxrpc/ar-skbuff.c
Normal file
|
@ -0,0 +1,137 @@
|
|||
/* ar-skbuff.c: socket buffer destruction handling
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
|
||||
* set up for the ACK at the end of the receive phase when we discard the final
|
||||
* receive phase data packet
|
||||
* - called with softirqs disabled
|
||||
*/
|
||||
static void rxrpc_request_final_ACK(struct rxrpc_call *call)
|
||||
{
|
||||
/* the call may be aborted before we have a chance to ACK it */
|
||||
write_lock(&call->state_lock);
|
||||
|
||||
switch (call->state) {
|
||||
case RXRPC_CALL_CLIENT_RECV_REPLY:
|
||||
call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
|
||||
_debug("request final ACK");
|
||||
|
||||
/* get an extra ref on the call for the final-ACK generator to
|
||||
* release */
|
||||
rxrpc_get_call(call);
|
||||
set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
|
||||
if (try_to_del_timer_sync(&call->ack_timer) >= 0)
|
||||
rxrpc_queue_call(call);
|
||||
break;
|
||||
|
||||
case RXRPC_CALL_SERVER_RECV_REQUEST:
|
||||
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
write_unlock(&call->state_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* drop the bottom ACK off of the call ACK window and advance the window
|
||||
*/
|
||||
static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
|
||||
struct rxrpc_skb_priv *sp)
|
||||
{
|
||||
int loop;
|
||||
u32 seq;
|
||||
|
||||
spin_lock_bh(&call->lock);
|
||||
|
||||
_debug("hard ACK #%u", ntohl(sp->hdr.seq));
|
||||
|
||||
for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
|
||||
call->ackr_window[loop] >>= 1;
|
||||
call->ackr_window[loop] |=
|
||||
call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
|
||||
}
|
||||
|
||||
seq = ntohl(sp->hdr.seq);
|
||||
ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
|
||||
call->rx_data_eaten = seq;
|
||||
|
||||
if (call->ackr_win_top < UINT_MAX)
|
||||
call->ackr_win_top++;
|
||||
|
||||
ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
|
||||
call->rx_data_post, >=, call->rx_data_recv);
|
||||
ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
|
||||
call->rx_data_recv, >=, call->rx_data_eaten);
|
||||
|
||||
if (sp->hdr.flags & RXRPC_LAST_PACKET) {
|
||||
rxrpc_request_final_ACK(call);
|
||||
} else if (atomic_dec_and_test(&call->ackr_not_idle) &&
|
||||
test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
|
||||
/* We previously soft-ACK'd some received packets that have now
|
||||
* been consumed, so send a hard-ACK if no more packets are
|
||||
* immediately forthcoming to allow the transmitter to free up
|
||||
* its Tx bufferage.
|
||||
*/
|
||||
_debug("send Rx idle ACK");
|
||||
__rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
|
||||
false);
|
||||
}
|
||||
|
||||
spin_unlock_bh(&call->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* destroy a packet that has an RxRPC control buffer
|
||||
* - advance the hard-ACK state of the parent call (done here in case something
|
||||
* in the kernel bypasses recvmsg() and steals the packet directly off of the
|
||||
* socket receive queue)
|
||||
*/
|
||||
void rxrpc_packet_destructor(struct sk_buff *skb)
|
||||
{
|
||||
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
|
||||
struct rxrpc_call *call = sp->call;
|
||||
|
||||
_enter("%p{%p}", skb, call);
|
||||
|
||||
if (call) {
|
||||
/* send the final ACK on a client call */
|
||||
if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
|
||||
rxrpc_hard_ACK_data(call, sp);
|
||||
rxrpc_put_call(call);
|
||||
sp->call = NULL;
|
||||
}
|
||||
|
||||
if (skb->sk)
|
||||
sock_rfree(skb);
|
||||
_leave("");
|
||||
}
|
||||
|
||||
/**
|
||||
* rxrpc_kernel_free_skb - Free an RxRPC socket buffer
|
||||
* @skb: The socket buffer to be freed
|
||||
*
|
||||
* Let RxRPC free its own socket buffer, permitting it to maintain debug
|
||||
* accounting.
|
||||
*/
|
||||
void rxrpc_kernel_free_skb(struct sk_buff *skb)
|
||||
{
|
||||
rxrpc_free_skb(skb);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(rxrpc_kernel_free_skb);
|
283
net/rxrpc/ar-transport.c
Normal file
283
net/rxrpc/ar-transport.c
Normal file
|
@ -0,0 +1,283 @@
|
|||
/* RxRPC point-to-point transport session management
|
||||
*
|
||||
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/slab.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/*
|
||||
* Time after last use at which transport record is cleaned up.
|
||||
*/
|
||||
unsigned rxrpc_transport_expiry = 3600 * 24;
|
||||
|
||||
static void rxrpc_transport_reaper(struct work_struct *work);
|
||||
|
||||
static LIST_HEAD(rxrpc_transports);
|
||||
static DEFINE_RWLOCK(rxrpc_transport_lock);
|
||||
static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper);
|
||||
|
||||
/*
|
||||
* allocate a new transport session manager
|
||||
*/
|
||||
static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
|
||||
struct rxrpc_peer *peer,
|
||||
gfp_t gfp)
|
||||
{
|
||||
struct rxrpc_transport *trans;
|
||||
|
||||
_enter("");
|
||||
|
||||
trans = kzalloc(sizeof(struct rxrpc_transport), gfp);
|
||||
if (trans) {
|
||||
trans->local = local;
|
||||
trans->peer = peer;
|
||||
INIT_LIST_HEAD(&trans->link);
|
||||
trans->bundles = RB_ROOT;
|
||||
trans->client_conns = RB_ROOT;
|
||||
trans->server_conns = RB_ROOT;
|
||||
skb_queue_head_init(&trans->error_queue);
|
||||
spin_lock_init(&trans->client_lock);
|
||||
rwlock_init(&trans->conn_lock);
|
||||
atomic_set(&trans->usage, 1);
|
||||
trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
|
||||
|
||||
if (peer->srx.transport.family == AF_INET) {
|
||||
switch (peer->srx.transport_type) {
|
||||
case SOCK_DGRAM:
|
||||
INIT_WORK(&trans->error_handler,
|
||||
rxrpc_UDP_error_handler);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
_leave(" = %p", trans);
|
||||
return trans;
|
||||
}
|
||||
|
||||
/*
 * obtain a transport session for the nominated endpoints
 *
 * Looks up an existing (local, peer) transport and takes a reference on
 * it, or allocates and installs a new one.  Returns the transport or
 * ERR_PTR(-ENOMEM).  Uses an optimistic read-locked search first, then a
 * second search under the write lock to resolve races with concurrent
 * creators of the same transport.
 */
struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
					    struct rxrpc_peer *peer,
					    gfp_t gfp)
{
	struct rxrpc_transport *trans, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%pI4+%hu},{%pI4+%hu},",
	       &local->srx.transport.sin.sin_addr,
	       ntohs(local->srx.transport.sin.sin_port),
	       &peer->srx.transport.sin.sin_addr,
	       ntohs(peer->srx.transport.sin.sin_port));

	/* search the transport list first */
	read_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_transport;
	}
	read_unlock_bh(&rxrpc_transport_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_transport(local, peer, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_transport_lock);

	/* someone may have beaten us to it whilst we were allocating */
	list_for_each_entry(trans, &rxrpc_transports, link) {
		if (trans->local == local && trans->peer == peer)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	trans = candidate;
	candidate = NULL;
	usage = atomic_read(&trans->usage);

	/* the installed transport pins its endpoints */
	rxrpc_get_local(trans->local);
	atomic_inc(&trans->peer->usage);
	list_add_tail(&trans->link, &rxrpc_transports);
	write_unlock_bh(&rxrpc_transport_lock);
	new = "new";

success:
	_net("TRANSPORT %s %d local %d -> peer %d",
	     new,
	     trans->debug_id,
	     trans->local->debug_id,
	     trans->peer->debug_id);

	_leave(" = %p {u=%d}", trans, usage);
	return trans;

	/* we found the transport in the list immediately */
found_extant_transport:
	usage = atomic_inc_return(&trans->usage);
	read_unlock_bh(&rxrpc_transport_lock);
	goto success;

	/* we found the transport on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&trans->usage);
	write_unlock_bh(&rxrpc_transport_lock);
	kfree(candidate);	/* lost the race; discard our candidate */
	goto success;
}
|
||||
|
||||
/*
|
||||
* find the transport connecting two endpoints
|
||||
*/
|
||||
struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local,
|
||||
struct rxrpc_peer *peer)
|
||||
{
|
||||
struct rxrpc_transport *trans;
|
||||
|
||||
_enter("{%pI4+%hu},{%pI4+%hu},",
|
||||
&local->srx.transport.sin.sin_addr,
|
||||
ntohs(local->srx.transport.sin.sin_port),
|
||||
&peer->srx.transport.sin.sin_addr,
|
||||
ntohs(peer->srx.transport.sin.sin_port));
|
||||
|
||||
/* search the transport list */
|
||||
read_lock_bh(&rxrpc_transport_lock);
|
||||
|
||||
list_for_each_entry(trans, &rxrpc_transports, link) {
|
||||
if (trans->local == local && trans->peer == peer)
|
||||
goto found_extant_transport;
|
||||
}
|
||||
|
||||
read_unlock_bh(&rxrpc_transport_lock);
|
||||
_leave(" = NULL");
|
||||
return NULL;
|
||||
|
||||
found_extant_transport:
|
||||
atomic_inc(&trans->usage);
|
||||
read_unlock_bh(&rxrpc_transport_lock);
|
||||
_leave(" = %p", trans);
|
||||
return trans;
|
||||
}
|
||||
|
||||
/*
 * release a transport session
 *
 * Drops one reference.  When the count reaches zero the record is not
 * freed here; it is left on the list for the reaper to time out and
 * destroy, so a dying transport can still be resurrected by
 * rxrpc_get_transport() until it expires.
 */
void rxrpc_put_transport(struct rxrpc_transport *trans)
{
	_enter("%p{u=%d}", trans, atomic_read(&trans->usage));

	ASSERTCMP(atomic_read(&trans->usage), >, 0);

	/* stamp last-use time before the decrement so the reaper never
	 * sees a zero-usage transport with a stale put_time */
	trans->put_time = get_seconds();
	if (unlikely(atomic_dec_and_test(&trans->usage))) {
		_debug("zombie");
		/* let the reaper determine the timeout to avoid a race with
		 * overextending the timeout if the reaper is running at the
		 * same time */
		rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);
	}
	_leave("");
}
|
||||
|
||||
/*
 * clean up a transport session
 *
 * Final destruction, called only from the reaper once the usage count is
 * zero and the record has been unlinked: discard queued error skbs, drop
 * the references on the local and peer endpoints, then free the record.
 */
static void rxrpc_cleanup_transport(struct rxrpc_transport *trans)
{
	_net("DESTROY TRANS %d", trans->debug_id);

	rxrpc_purge_queue(&trans->error_queue);

	rxrpc_put_local(trans->local);
	rxrpc_put_peer(trans->peer);
	kfree(trans);
}
|
||||
|
||||
/*
 * reap dead transports that have passed their expiry date
 *
 * Runs from the rxrpc_transport_reap delayed work.  Moves every
 * zero-usage transport whose put_time + rxrpc_transport_expiry has
 * passed onto a private graveyard list (under the write lock), then
 * destroys them outside the lock.  If any dead-but-not-yet-expired
 * transports remain, the work item is rescheduled for the earliest of
 * their expiry times.
 */
static void rxrpc_transport_reaper(struct work_struct *work)
{
	struct rxrpc_transport *trans, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;	/* i.e. nothing to reschedule for yet */

	/* extract all the transports that have been dead too long */
	write_lock_bh(&rxrpc_transport_lock);
	list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) {
		_debug("reap TRANS %d { u=%d t=%ld }",
		       trans->debug_id, atomic_read(&trans->usage),
		       (long) now - (long) trans->put_time);

		/* still in use - leave it alone */
		if (likely(atomic_read(&trans->usage) > 0))
			continue;

		reap_time = trans->put_time + rxrpc_transport_expiry;
		if (reap_time <= now)
			list_move_tail(&trans->link, &graveyard);
		else if (reap_time < earliest)
			earliest = reap_time;
	}
	write_unlock_bh(&rxrpc_transport_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_transport_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		trans = list_entry(graveyard.next, struct rxrpc_transport,
				   link);
		list_del_init(&trans->link);

		ASSERTCMP(atomic_read(&trans->usage), ==, 0);
		rxrpc_cleanup_transport(trans);
	}

	_leave("");
}
|
||||
|
||||
/*
 * preemptively destroy all the transport session records rather than waiting
 * for them to time out
 *
 * Module-exit path: zero the expiry interval so every dead transport is
 * immediately past its reap time, cancel any pending delayed run, then
 * queue the reaper to fire at once.
 */
void __exit rxrpc_destroy_all_transports(void)
{
	_enter("");

	/* must zero the expiry before (re)queuing the reaper so that the
	 * run we queue sees everything as already expired */
	rxrpc_transport_expiry = 0;
	cancel_delayed_work(&rxrpc_transport_reap);
	rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0);

	_leave("");
}
|
1161
net/rxrpc/rxkad.c
Normal file
1161
net/rxrpc/rxkad.c
Normal file
File diff suppressed because it is too large
Load diff
146
net/rxrpc/sysctl.c
Normal file
146
net/rxrpc/sysctl.c
Normal file
|
@ -0,0 +1,146 @@
|
|||
/* sysctls for configuring RxRPC operating parameters
|
||||
*
|
||||
* Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public Licence
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the Licence, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/sysctl.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/af_rxrpc.h>
|
||||
#include "ar-internal.h"
|
||||
|
||||
/* Handle returned by register_net_sysctl(); NULL if registration failed
 * or has not happened yet. */
static struct ctl_table_header *rxrpc_sysctl_reg_table;

/* Bounds handed to the proc handlers via .extra1 (min) / .extra2 (max);
 * they must outlive the table, hence file scope. */
static const unsigned zero = 0;
static const unsigned one = 1;
static const unsigned four = 4;
static const unsigned n_65535 = 65535;
static const unsigned n_max_acks = RXRPC_MAXACKS;
||||
|
||||
/*
|
||||
* RxRPC operating parameters.
|
||||
*
|
||||
* See Documentation/networking/rxrpc.txt and the variable definitions for more
|
||||
* information on the individual parameters.
|
||||
*/
|
||||
static struct ctl_table rxrpc_sysctl_table[] = {
|
||||
/* Values measured in milliseconds */
|
||||
{
|
||||
.procname = "req_ack_delay",
|
||||
.data = &rxrpc_requested_ack_delay,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_ms_jiffies,
|
||||
.extra1 = (void *)&zero,
|
||||
},
|
||||
{
|
||||
.procname = "soft_ack_delay",
|
||||
.data = &rxrpc_soft_ack_delay,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_ms_jiffies,
|
||||
.extra1 = (void *)&one,
|
||||
},
|
||||
{
|
||||
.procname = "idle_ack_delay",
|
||||
.data = &rxrpc_idle_ack_delay,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_ms_jiffies,
|
||||
.extra1 = (void *)&one,
|
||||
},
|
||||
{
|
||||
.procname = "resend_timeout",
|
||||
.data = &rxrpc_resend_timeout,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_ms_jiffies,
|
||||
.extra1 = (void *)&one,
|
||||
},
|
||||
|
||||
/* Values measured in seconds but used in jiffies */
|
||||
{
|
||||
.procname = "max_call_lifetime",
|
||||
.data = &rxrpc_max_call_lifetime,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
.extra1 = (void *)&one,
|
||||
},
|
||||
{
|
||||
.procname = "dead_call_expiry",
|
||||
.data = &rxrpc_dead_call_expiry,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_jiffies,
|
||||
.extra1 = (void *)&one,
|
||||
},
|
||||
|
||||
/* Values measured in seconds */
|
||||
{
|
||||
.procname = "connection_expiry",
|
||||
.data = &rxrpc_connection_expiry,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = (void *)&one,
|
||||
},
|
||||
{
|
||||
.procname = "transport_expiry",
|
||||
.data = &rxrpc_transport_expiry,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = (void *)&one,
|
||||
},
|
||||
|
||||
/* Non-time values */
|
||||
{
|
||||
.procname = "rx_window_size",
|
||||
.data = &rxrpc_rx_window_size,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = (void *)&one,
|
||||
.extra2 = (void *)&n_max_acks,
|
||||
},
|
||||
{
|
||||
.procname = "rx_mtu",
|
||||
.data = &rxrpc_rx_mtu,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = (void *)&one,
|
||||
.extra1 = (void *)&n_65535,
|
||||
},
|
||||
{
|
||||
.procname = "rx_jumbo_max",
|
||||
.data = &rxrpc_rx_jumbo_max,
|
||||
.maxlen = sizeof(unsigned),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec_minmax,
|
||||
.extra1 = (void *)&one,
|
||||
.extra2 = (void *)&four,
|
||||
},
|
||||
|
||||
{ }
|
||||
};
|
||||
|
||||
/*
 * Register the RxRPC sysctl table under net/rxrpc in the initial network
 * namespace.  Returns 0 on success or -ENOMEM if registration failed.
 */
int __init rxrpc_sysctl_init(void)
{
	rxrpc_sysctl_reg_table = register_net_sysctl(&init_net, "net/rxrpc",
						     rxrpc_sysctl_table);
	return rxrpc_sysctl_reg_table ? 0 : -ENOMEM;
}
|
||||
|
||||
void rxrpc_sysctl_exit(void)
|
||||
{
|
||||
if (rxrpc_sysctl_reg_table)
|
||||
unregister_net_sysctl_table(rxrpc_sysctl_reg_table);
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue