mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-07 16:58:04 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
28
net/unix/Kconfig
Normal file
28
net/unix/Kconfig
Normal file
|
@ -0,0 +1,28 @@
|
|||
#
# Unix Domain Sockets
#

config UNIX
	tristate "Unix domain sockets"
	---help---
	  If you say Y here, you will include support for Unix domain sockets;
	  sockets are the standard Unix mechanism for establishing and
	  accessing network connections. Many commonly used programs such as
	  the X Window system and syslog use these sockets even if your
	  machine is not connected to any network. Unless you are working on
	  an embedded system or something similar, you therefore definitely
	  want to say Y here.

	  To compile this driver as a module, choose M here: the module will be
	  called unix. Note that several important services won't work
	  correctly if you say M here and then neglect to load the module.

	  Say Y unless you know what you are doing.

config UNIX_DIAG
	tristate "UNIX: socket monitoring interface"
	depends on UNIX
	default n
	---help---
	  Support for UNIX socket monitoring interface used by the ss tool.
	  If unsure, say Y.
|
11
net/unix/Makefile
Normal file
11
net/unix/Makefile
Normal file
|
@ -0,0 +1,11 @@
|
|||
#
# Makefile for the Linux unix domain socket layer.
#

obj-$(CONFIG_UNIX) += unix.o

unix-y := af_unix.o garbage.o
unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o

obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
unix_diag-y := diag.o
|
2636
net/unix/af_unix.c
Normal file
2636
net/unix/af_unix.c
Normal file
File diff suppressed because it is too large
Load diff
327
net/unix/diag.c
Normal file
327
net/unix/diag.c
Normal file
|
@ -0,0 +1,327 @@
|
|||
#include <linux/types.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/sock_diag.h>
|
||||
#include <linux/unix_diag.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/module.h>
|
||||
#include <net/netlink.h>
|
||||
#include <net/af_unix.h>
|
||||
#include <net/tcp_states.h>
|
||||
|
||||
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
|
||||
{
|
||||
struct unix_address *addr = unix_sk(sk)->addr;
|
||||
|
||||
if (!addr)
|
||||
return 0;
|
||||
|
||||
return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
|
||||
addr->name->sun_path);
|
||||
}
|
||||
|
||||
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
|
||||
{
|
||||
struct dentry *dentry = unix_sk(sk)->path.dentry;
|
||||
|
||||
if (dentry) {
|
||||
struct unix_diag_vfs uv = {
|
||||
.udiag_vfs_ino = dentry->d_inode->i_ino,
|
||||
.udiag_vfs_dev = dentry->d_sb->s_dev,
|
||||
};
|
||||
|
||||
return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
|
||||
{
|
||||
struct sock *peer;
|
||||
int ino;
|
||||
|
||||
peer = unix_peer_get(sk);
|
||||
if (peer) {
|
||||
unix_state_lock(peer);
|
||||
ino = sock_i_ino(peer);
|
||||
unix_state_unlock(peer);
|
||||
sock_put(peer);
|
||||
|
||||
return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct nlattr *attr;
|
||||
u32 *buf;
|
||||
int i;
|
||||
|
||||
if (sk->sk_state == TCP_LISTEN) {
|
||||
spin_lock(&sk->sk_receive_queue.lock);
|
||||
|
||||
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
|
||||
sk->sk_receive_queue.qlen * sizeof(u32));
|
||||
if (!attr)
|
||||
goto errout;
|
||||
|
||||
buf = nla_data(attr);
|
||||
i = 0;
|
||||
skb_queue_walk(&sk->sk_receive_queue, skb) {
|
||||
struct sock *req, *peer;
|
||||
|
||||
req = skb->sk;
|
||||
/*
|
||||
* The state lock is outer for the same sk's
|
||||
* queue lock. With the other's queue locked it's
|
||||
* OK to lock the state.
|
||||
*/
|
||||
unix_state_lock_nested(req);
|
||||
peer = unix_sk(req)->peer;
|
||||
buf[i++] = (peer ? sock_i_ino(peer) : 0);
|
||||
unix_state_unlock(req);
|
||||
}
|
||||
spin_unlock(&sk->sk_receive_queue.lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
errout:
|
||||
spin_unlock(&sk->sk_receive_queue.lock);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
|
||||
{
|
||||
struct unix_diag_rqlen rql;
|
||||
|
||||
if (sk->sk_state == TCP_LISTEN) {
|
||||
rql.udiag_rqueue = sk->sk_receive_queue.qlen;
|
||||
rql.udiag_wqueue = sk->sk_max_ack_backlog;
|
||||
} else {
|
||||
rql.udiag_rqueue = (u32) unix_inq_len(sk);
|
||||
rql.udiag_wqueue = (u32) unix_outq_len(sk);
|
||||
}
|
||||
|
||||
return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
|
||||
}
|
||||
|
||||
/*
 * Build one unix_diag reply message for @sk into @skb, honouring the
 * attribute selection mask in req->udiag_show.  Returns the result of
 * nlmsg_end() on success or -EMSGSIZE when the message does not fit,
 * in which case the partially built message is cancelled.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Fixed part of the reply. */
	rep = nlmsg_data(nlh);
	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	/* Optional attributes, each gated by the request's show mask. */
	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto out_nlmsg_trim;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	/* Shutdown state is always reported. */
	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto out_nlmsg_trim;

	return nlmsg_end(skb, nlh);

out_nlmsg_trim:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
|
||||
|
||||
/*
 * Dump-path wrapper around sk_diag_fill(): sample the inode number
 * under the state lock and skip sockets that do not have one.
 */
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 portid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
}
|
||||
|
||||
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
{
|
||||
struct unix_diag_req *req;
|
||||
int num, s_num, slot, s_slot;
|
||||
struct net *net = sock_net(skb->sk);
|
||||
|
||||
req = nlmsg_data(cb->nlh);
|
||||
|
||||
s_slot = cb->args[0];
|
||||
num = s_num = cb->args[1];
|
||||
|
||||
spin_lock(&unix_table_lock);
|
||||
for (slot = s_slot;
|
||||
slot < ARRAY_SIZE(unix_socket_table);
|
||||
s_num = 0, slot++) {
|
||||
struct sock *sk;
|
||||
|
||||
num = 0;
|
||||
sk_for_each(sk, &unix_socket_table[slot]) {
|
||||
if (!net_eq(sock_net(sk), net))
|
||||
continue;
|
||||
if (num < s_num)
|
||||
goto next;
|
||||
if (!(req->udiag_states & (1 << sk->sk_state)))
|
||||
goto next;
|
||||
if (sk_diag_dump(sk, skb, req,
|
||||
NETLINK_CB(cb->skb).portid,
|
||||
cb->nlh->nlmsg_seq,
|
||||
NLM_F_MULTI) < 0)
|
||||
goto done;
|
||||
next:
|
||||
num++;
|
||||
}
|
||||
}
|
||||
done:
|
||||
spin_unlock(&unix_table_lock);
|
||||
cb->args[0] = slot;
|
||||
cb->args[1] = num;
|
||||
|
||||
return skb->len;
|
||||
}
|
||||
|
||||
static struct sock *unix_lookup_by_ino(int ino)
|
||||
{
|
||||
int i;
|
||||
struct sock *sk;
|
||||
|
||||
spin_lock(&unix_table_lock);
|
||||
for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
|
||||
sk_for_each(sk, &unix_socket_table[i])
|
||||
if (ino == sock_i_ino(sk)) {
|
||||
sock_hold(sk);
|
||||
spin_unlock(&unix_table_lock);
|
||||
|
||||
return sk;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&unix_table_lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int unix_diag_get_exact(struct sk_buff *in_skb,
|
||||
const struct nlmsghdr *nlh,
|
||||
struct unix_diag_req *req)
|
||||
{
|
||||
int err = -EINVAL;
|
||||
struct sock *sk;
|
||||
struct sk_buff *rep;
|
||||
unsigned int extra_len;
|
||||
struct net *net = sock_net(in_skb->sk);
|
||||
|
||||
if (req->udiag_ino == 0)
|
||||
goto out_nosk;
|
||||
|
||||
sk = unix_lookup_by_ino(req->udiag_ino);
|
||||
err = -ENOENT;
|
||||
if (sk == NULL)
|
||||
goto out_nosk;
|
||||
|
||||
err = sock_diag_check_cookie(sk, req->udiag_cookie);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
extra_len = 256;
|
||||
again:
|
||||
err = -ENOMEM;
|
||||
rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
|
||||
if (!rep)
|
||||
goto out;
|
||||
|
||||
err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
|
||||
nlh->nlmsg_seq, 0, req->udiag_ino);
|
||||
if (err < 0) {
|
||||
nlmsg_free(rep);
|
||||
extra_len += 256;
|
||||
if (extra_len >= PAGE_SIZE)
|
||||
goto out;
|
||||
|
||||
goto again;
|
||||
}
|
||||
err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
|
||||
MSG_DONTWAIT);
|
||||
if (err > 0)
|
||||
err = 0;
|
||||
out:
|
||||
if (sk)
|
||||
sock_put(sk);
|
||||
out_nosk:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
|
||||
{
|
||||
int hdrlen = sizeof(struct unix_diag_req);
|
||||
struct net *net = sock_net(skb->sk);
|
||||
|
||||
if (nlmsg_len(h) < hdrlen)
|
||||
return -EINVAL;
|
||||
|
||||
if (h->nlmsg_flags & NLM_F_DUMP) {
|
||||
struct netlink_dump_control c = {
|
||||
.dump = unix_diag_dump,
|
||||
};
|
||||
return netlink_dump_start(net->diag_nlsk, skb, h, &c);
|
||||
} else
|
||||
return unix_diag_get_exact(skb, h, nlmsg_data(h));
|
||||
}
|
||||
|
||||
static const struct sock_diag_handler unix_diag_handler = {
|
||||
.family = AF_UNIX,
|
||||
.dump = unix_diag_handler_dump,
|
||||
};
|
||||
|
||||
static int __init unix_diag_init(void)
|
||||
{
|
||||
return sock_diag_register(&unix_diag_handler);
|
||||
}
|
||||
|
||||
static void __exit unix_diag_exit(void)
|
||||
{
|
||||
sock_diag_unregister(&unix_diag_handler);
|
||||
}
|
||||
|
||||
module_init(unix_diag_init);
|
||||
module_exit(unix_diag_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);
|
386
net/unix/garbage.c
Normal file
386
net/unix/garbage.c
Normal file
|
@ -0,0 +1,386 @@
|
|||
/*
|
||||
* NET3: Garbage Collector For AF_UNIX sockets
|
||||
*
|
||||
* Garbage Collector:
|
||||
* Copyright (C) Barak A. Pearlmutter.
|
||||
* Released under the GPL version 2 or later.
|
||||
*
|
||||
* Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
|
||||
* If it doesn't work blame me, it worked when Barak sent it.
|
||||
*
|
||||
* Assumptions:
|
||||
*
|
||||
* - object w/ a bit
|
||||
* - free list
|
||||
*
|
||||
* Current optimizations:
|
||||
*
|
||||
* - explicit stack instead of recursion
|
||||
* - tail recurse on first born instead of immediate push/pop
|
||||
* - we gather the stuff that should not be killed into tree
|
||||
* and stack is just a path from root to the current pointer.
|
||||
*
|
||||
* Future optimizations:
|
||||
*
|
||||
* - don't just push entire root set; process in place
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*
|
||||
* Fixes:
|
||||
* Alan Cox 07 Sept 1997 Vmalloc internal stack as needed.
|
||||
* Cope with changing max_files.
|
||||
* Al Viro 11 Oct 1998
|
||||
* Graph may have cycles. That is, we can send the descriptor
|
||||
* of foo to bar and vice versa. Current code chokes on that.
|
||||
* Fix: move SCM_RIGHTS ones into the separate list and then
|
||||
* skb_free() them all instead of doing explicit fput's.
|
||||
* Another problem: since fput() may block somebody may
|
||||
* create a new unix_socket when we are in the middle of sweep
|
||||
* phase. Fix: revert the logic wrt MARKED. Mark everything
|
||||
* upon the beginning and unmark non-junk ones.
|
||||
*
|
||||
* [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
|
||||
* sent to connect()'ed but still not accept()'ed sockets.
|
||||
* Fixed. Old code had slightly different problem here:
|
||||
* extra fput() in situation when we passed the descriptor via
|
||||
* such socket and closed it (descriptor). That would happen on
|
||||
* each unix_gc() until the accept(). Since the struct file in
|
||||
* question would go to the free list and might be reused...
|
||||
* That might be the reason of random oopses on filp_close()
|
||||
* in unrelated processes.
|
||||
*
|
||||
* AV 28 Feb 1999
|
||||
* Kill the explicit allocation of stack. Now we keep the tree
|
||||
* with root in dummy + pointer (gc_current) to one of the nodes.
|
||||
* Stack is represented as path from gc_current to dummy. Unmark
|
||||
* now means "add to tree". Push == "make it a son of gc_current".
|
||||
* Pop == "move gc_current to parent". We keep only pointers to
|
||||
* parents (->gc_tree).
|
||||
* AV 1 Mar 1999
|
||||
* Damn. Added missing check for ->dead in listen queues scanning.
|
||||
*
|
||||
* Miklos Szeredi 25 Jun 2007
|
||||
* Reimplement with a cycle collecting algorithm. This should
|
||||
* solve several problems with the previous code, like being racy
|
||||
* wrt receive and holding up unrelated socket operations.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/socket.h>
|
||||
#include <linux/un.h>
|
||||
#include <linux/net.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include <net/sock.h>
|
||||
#include <net/af_unix.h>
|
||||
#include <net/scm.h>
|
||||
#include <net/tcp_states.h>
|
||||
|
||||
/* Internal data structures and random procedures: */
|
||||
|
||||
/* Sockets with a non-zero inflight count; protected by unix_gc_lock. */
static LIST_HEAD(gc_inflight_list);
/* Scratch list holding GC candidates during a collection run. */
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
/* Woken when a collection cycle completes (see wait_for_unix_gc()). */
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

/* Total number of AF_UNIX fds in flight; updated under unix_gc_lock. */
unsigned int unix_tot_inflight;
|
||||
|
||||
|
||||
struct sock *unix_get_socket(struct file *filp)
|
||||
{
|
||||
struct sock *u_sock = NULL;
|
||||
struct inode *inode = file_inode(filp);
|
||||
|
||||
/*
|
||||
* Socket ?
|
||||
*/
|
||||
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
|
||||
struct socket *sock = SOCKET_I(inode);
|
||||
struct sock *s = sock->sk;
|
||||
|
||||
/*
|
||||
* PF_UNIX ?
|
||||
*/
|
||||
if (s && sock->ops && sock->ops->family == PF_UNIX)
|
||||
u_sock = s;
|
||||
}
|
||||
return u_sock;
|
||||
}
|
||||
|
||||
/*
|
||||
* Keep the number of times in flight count for the file
|
||||
* descriptor if it is for an AF_UNIX socket.
|
||||
*/
|
||||
|
||||
void unix_inflight(struct file *fp)
|
||||
{
|
||||
struct sock *s = unix_get_socket(fp);
|
||||
if (s) {
|
||||
struct unix_sock *u = unix_sk(s);
|
||||
spin_lock(&unix_gc_lock);
|
||||
if (atomic_long_inc_return(&u->inflight) == 1) {
|
||||
BUG_ON(!list_empty(&u->link));
|
||||
list_add_tail(&u->link, &gc_inflight_list);
|
||||
} else {
|
||||
BUG_ON(list_empty(&u->link));
|
||||
}
|
||||
unix_tot_inflight++;
|
||||
spin_unlock(&unix_gc_lock);
|
||||
}
|
||||
}
|
||||
|
||||
void unix_notinflight(struct file *fp)
|
||||
{
|
||||
struct sock *s = unix_get_socket(fp);
|
||||
if (s) {
|
||||
struct unix_sock *u = unix_sk(s);
|
||||
spin_lock(&unix_gc_lock);
|
||||
BUG_ON(list_empty(&u->link));
|
||||
if (atomic_long_dec_and_test(&u->inflight))
|
||||
list_del_init(&u->link);
|
||||
unix_tot_inflight--;
|
||||
spin_unlock(&unix_gc_lock);
|
||||
}
|
||||
}
|
||||
|
||||
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
|
||||
struct sk_buff_head *hitlist)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
struct sk_buff *next;
|
||||
|
||||
spin_lock(&x->sk_receive_queue.lock);
|
||||
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
|
||||
/*
|
||||
* Do we have file descriptors ?
|
||||
*/
|
||||
if (UNIXCB(skb).fp) {
|
||||
bool hit = false;
|
||||
/*
|
||||
* Process the descriptors of this socket
|
||||
*/
|
||||
int nfd = UNIXCB(skb).fp->count;
|
||||
struct file **fp = UNIXCB(skb).fp->fp;
|
||||
while (nfd--) {
|
||||
/*
|
||||
* Get the socket the fd matches
|
||||
* if it indeed does so
|
||||
*/
|
||||
struct sock *sk = unix_get_socket(*fp++);
|
||||
if (sk) {
|
||||
struct unix_sock *u = unix_sk(sk);
|
||||
|
||||
/*
|
||||
* Ignore non-candidates, they could
|
||||
* have been added to the queues after
|
||||
* starting the garbage collection
|
||||
*/
|
||||
if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
|
||||
hit = true;
|
||||
func(u);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (hit && hitlist != NULL) {
|
||||
__skb_unlink(skb, &x->sk_receive_queue);
|
||||
__skb_queue_tail(hitlist, skb);
|
||||
}
|
||||
}
|
||||
}
|
||||
spin_unlock(&x->sk_receive_queue.lock);
|
||||
}
|
||||
|
||||
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
|
||||
struct sk_buff_head *hitlist)
|
||||
{
|
||||
if (x->sk_state != TCP_LISTEN)
|
||||
scan_inflight(x, func, hitlist);
|
||||
else {
|
||||
struct sk_buff *skb;
|
||||
struct sk_buff *next;
|
||||
struct unix_sock *u;
|
||||
LIST_HEAD(embryos);
|
||||
|
||||
/*
|
||||
* For a listening socket collect the queued embryos
|
||||
* and perform a scan on them as well.
|
||||
*/
|
||||
spin_lock(&x->sk_receive_queue.lock);
|
||||
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
|
||||
u = unix_sk(skb->sk);
|
||||
|
||||
/*
|
||||
* An embryo cannot be in-flight, so it's safe
|
||||
* to use the list link.
|
||||
*/
|
||||
BUG_ON(!list_empty(&u->link));
|
||||
list_add_tail(&u->link, &embryos);
|
||||
}
|
||||
spin_unlock(&x->sk_receive_queue.lock);
|
||||
|
||||
while (!list_empty(&embryos)) {
|
||||
u = list_entry(embryos.next, struct unix_sock, link);
|
||||
scan_inflight(&u->sk, func, hitlist);
|
||||
list_del_init(&u->link);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void dec_inflight(struct unix_sock *usk)
|
||||
{
|
||||
atomic_long_dec(&usk->inflight);
|
||||
}
|
||||
|
||||
static void inc_inflight(struct unix_sock *usk)
|
||||
{
|
||||
atomic_long_inc(&usk->inflight);
|
||||
}
|
||||
|
||||
static void inc_inflight_move_tail(struct unix_sock *u)
|
||||
{
|
||||
atomic_long_inc(&u->inflight);
|
||||
/*
|
||||
* If this still might be part of a cycle, move it to the end
|
||||
* of the list, so that it's checked even if it was already
|
||||
* passed over
|
||||
*/
|
||||
if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
|
||||
list_move_tail(&u->link, &gc_candidates);
|
||||
}
|
||||
|
||||
/* Set while a collection cycle runs; written under unix_gc_lock. */
static bool gc_in_progress;
/* In-flight fd count above which senders force a synchronous GC. */
#define UNIX_INFLIGHT_TRIGGER_GC 16000
|
||||
|
||||
void wait_for_unix_gc(void)
|
||||
{
|
||||
/*
|
||||
* If number of inflight sockets is insane,
|
||||
* force a garbage collect right now.
|
||||
*/
|
||||
if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
|
||||
unix_gc();
|
||||
wait_event(unix_gc_wait, gc_in_progress == false);
|
||||
}
|
||||
|
||||
/* The external entry point: unix_gc() */
|
||||
void unix_gc(void)
|
||||
{
|
||||
struct unix_sock *u;
|
||||
struct unix_sock *next;
|
||||
struct sk_buff_head hitlist;
|
||||
struct list_head cursor;
|
||||
LIST_HEAD(not_cycle_list);
|
||||
|
||||
spin_lock(&unix_gc_lock);
|
||||
|
||||
/* Avoid a recursive GC. */
|
||||
if (gc_in_progress)
|
||||
goto out;
|
||||
|
||||
gc_in_progress = true;
|
||||
/*
|
||||
* First, select candidates for garbage collection. Only
|
||||
* in-flight sockets are considered, and from those only ones
|
||||
* which don't have any external reference.
|
||||
*
|
||||
* Holding unix_gc_lock will protect these candidates from
|
||||
* being detached, and hence from gaining an external
|
||||
* reference. Since there are no possible receivers, all
|
||||
* buffers currently on the candidates' queues stay there
|
||||
* during the garbage collection.
|
||||
*
|
||||
* We also know that no new candidate can be added onto the
|
||||
* receive queues. Other, non candidate sockets _can_ be
|
||||
* added to queue, so we must make sure only to touch
|
||||
* candidates.
|
||||
*/
|
||||
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
|
||||
long total_refs;
|
||||
long inflight_refs;
|
||||
|
||||
total_refs = file_count(u->sk.sk_socket->file);
|
||||
inflight_refs = atomic_long_read(&u->inflight);
|
||||
|
||||
BUG_ON(inflight_refs < 1);
|
||||
BUG_ON(total_refs < inflight_refs);
|
||||
if (total_refs == inflight_refs) {
|
||||
list_move_tail(&u->link, &gc_candidates);
|
||||
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
|
||||
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Now remove all internal in-flight reference to children of
|
||||
* the candidates.
|
||||
*/
|
||||
list_for_each_entry(u, &gc_candidates, link)
|
||||
scan_children(&u->sk, dec_inflight, NULL);
|
||||
|
||||
/*
|
||||
* Restore the references for children of all candidates,
|
||||
* which have remaining references. Do this recursively, so
|
||||
* only those remain, which form cyclic references.
|
||||
*
|
||||
* Use a "cursor" link, to make the list traversal safe, even
|
||||
* though elements might be moved about.
|
||||
*/
|
||||
list_add(&cursor, &gc_candidates);
|
||||
while (cursor.next != &gc_candidates) {
|
||||
u = list_entry(cursor.next, struct unix_sock, link);
|
||||
|
||||
/* Move cursor to after the current position. */
|
||||
list_move(&cursor, &u->link);
|
||||
|
||||
if (atomic_long_read(&u->inflight) > 0) {
|
||||
list_move_tail(&u->link, ¬_cycle_list);
|
||||
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
|
||||
scan_children(&u->sk, inc_inflight_move_tail, NULL);
|
||||
}
|
||||
}
|
||||
list_del(&cursor);
|
||||
|
||||
/*
|
||||
* not_cycle_list contains those sockets which do not make up a
|
||||
* cycle. Restore these to the inflight list.
|
||||
*/
|
||||
while (!list_empty(¬_cycle_list)) {
|
||||
u = list_entry(not_cycle_list.next, struct unix_sock, link);
|
||||
__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
|
||||
list_move_tail(&u->link, &gc_inflight_list);
|
||||
}
|
||||
|
||||
/*
|
||||
* Now gc_candidates contains only garbage. Restore original
|
||||
* inflight counters for these as well, and remove the skbuffs
|
||||
* which are creating the cycle(s).
|
||||
*/
|
||||
skb_queue_head_init(&hitlist);
|
||||
list_for_each_entry(u, &gc_candidates, link)
|
||||
scan_children(&u->sk, inc_inflight, &hitlist);
|
||||
|
||||
spin_unlock(&unix_gc_lock);
|
||||
|
||||
/* Here we are. Hitlist is filled. Die. */
|
||||
__skb_queue_purge(&hitlist);
|
||||
|
||||
spin_lock(&unix_gc_lock);
|
||||
|
||||
/* All candidates should have been detached by now. */
|
||||
BUG_ON(!list_empty(&gc_candidates));
|
||||
gc_in_progress = false;
|
||||
wake_up(&unix_gc_wait);
|
||||
|
||||
out:
|
||||
spin_unlock(&unix_gc_lock);
|
||||
}
|
61
net/unix/sysctl_net_unix.c
Normal file
61
net/unix/sysctl_net_unix.c
Normal file
|
@ -0,0 +1,61 @@
|
|||
/*
|
||||
* NET4: Sysctl interface to net af_unix subsystem.
|
||||
*
|
||||
* Authors: Mike Shaver.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the License, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sysctl.h>
|
||||
|
||||
#include <net/af_unix.h>
|
||||
|
||||
static struct ctl_table unix_table[] = {
|
||||
{
|
||||
.procname = "max_dgram_qlen",
|
||||
.data = &init_net.unx.sysctl_max_dgram_qlen,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
/*
 * Register the net.unix sysctls for @net.  The template table is
 * duplicated so every namespace gets its own ->data pointer.
 * Returns 0 on success, -ENOMEM on allocation or registration failure.
 */
int __net_init unix_sysctl_register(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
	if (table == NULL)
		goto err_alloc;

	/* Don't export sysctls to unprivileged users */
	if (net->user_ns != &init_user_ns)
		table[0].procname = NULL;

	table[0].data = &net->unx.sysctl_max_dgram_qlen;
	net->unx.ctl = register_net_sysctl(net, "net/unix", table);
	if (net->unx.ctl == NULL)
		goto err_reg;

	return 0;

err_reg:
	kfree(table);
err_alloc:
	return -ENOMEM;
}
|
||||
|
||||
void unix_sysctl_unregister(struct net *net)
|
||||
{
|
||||
struct ctl_table *table;
|
||||
|
||||
table = net->unx.ctl->ctl_table_arg;
|
||||
unregister_net_sysctl_table(net->unx.ctl);
|
||||
kfree(table);
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue