Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 09:08:05 -04:00)
Fixed MTP to work with TWRP
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
22	drivers/connector/Kconfig	Normal file
@@ -0,0 +1,22 @@
menuconfig CONNECTOR
	tristate "Connector - unified userspace <-> kernelspace linker"
	depends on NET
	---help---
	  This is unified userspace <-> kernelspace connector working on top
	  of the netlink socket protocol.

	  Connector support can also be built as a module. If so, the module
	  will be called cn.

if CONNECTOR

config PROC_EVENTS
	boolean "Report process events to userspace"
	depends on CONNECTOR=y
	default y
	---help---
	  Provide a connector that reports process events to userspace. Send
	  events such as fork, exec, id change (uid, gid, suid, etc), and exit.

endif # CONNECTOR
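For context on how PROC_EVENTS is consumed: userspace opens a NETLINK_CONNECTOR socket, binds to the CN_IDX_PROC multicast group, and sends a PROC_CN_MCAST_LISTEN control message, which is handled by cn_proc_mcast_ctl() in cn_proc.c below. The following is a minimal userspace sketch, not part of this commit; it assumes the stock <linux/connector.h> and <linux/cn_proc.h> UAPI headers, and the helper name proc_events_subscribe() is invented for illustration.

/*
 * Minimal userspace sketch (not part of this commit): open a connector
 * socket and subscribe to process events.  Assumes the stock
 * <linux/connector.h> and <linux/cn_proc.h> UAPI headers.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

static int proc_events_subscribe(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_pid    = getpid(),
		.nl_groups = CN_IDX_PROC,	/* multicast group used by cn_proc.c */
	};
	struct {
		struct nlmsghdr nlh;
		struct cn_msg cn;
		enum proc_cn_mcast_op op;
	} __attribute__((packed)) req;
	int sk;

	sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (sk < 0 || bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("netlink connector");
		return -1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len  = sizeof(req);
	req.nlh.nlmsg_type = NLMSG_DONE;
	req.nlh.nlmsg_pid  = getpid();
	req.cn.id.idx = CN_IDX_PROC;	/* matches cn_proc_event_id */
	req.cn.id.val = CN_VAL_PROC;
	req.cn.len    = sizeof(enum proc_cn_mcast_op);
	req.op        = PROC_CN_MCAST_LISTEN;	/* handled by cn_proc_mcast_ctl() */

	if (send(sk, &req, sizeof(req), 0) < 0) {
		perror("PROC_CN_MCAST_LISTEN");
		close(sk);
		return -1;
	}
	return sk;	/* caller reads proc_event messages from this socket */
}

Per the checks in cn_proc_mcast_ctl(), the subscribing process must hold CAP_NET_ADMIN and run in the initial pid and user namespaces.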
4	drivers/connector/Makefile	Normal file
@@ -0,0 +1,4 @@
obj-$(CONFIG_CONNECTOR)		+= cn.o
obj-$(CONFIG_PROC_EVENTS)	+= cn_proc.o

cn-y += cn_queue.o connector.o
393	drivers/connector/cn_proc.c	Normal file
@@ -0,0 +1,393 @@
/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>
#include <linux/pid_namespace.h>

#include <linux/cn_proc.h>

/*
 * Size of a cn_msg followed by a proc_event structure. Since the
 * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
 * add one 4-byte word to the size here, and then start the actual
 * cn_msg structure 4 bytes into the stack buffer. The result is that
 * the immediately following proc_event structure is aligned to 8 bytes.
 */
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)

/* See comment above; we test our assumption about sizeof struct cn_msg here. */
static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
{
	BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
	return (struct cn_msg *)(buffer + 4);
}

static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };

static inline void get_seq(__u32 *ts, int *cpu)
{
	preempt_disable();
	*ts = __this_cpu_inc_return(proc_event_counts) - 1;
	*cpu = smp_processor_id();
	preempt_enable();
}

void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	struct task_struct *parent;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_FORK;
	rcu_read_lock();
	parent = rcu_dereference(task->real_parent);
	ev->event_data.fork.parent_pid = parent->pid;
	ev->event_data.fork.parent_tgid = parent->tgid;
	rcu_read_unlock();
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	/* If cn_netlink_send() failed, the data is not sent */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exec_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_EXEC;
	ev->event_data.exec.process_pid = task->pid;
	ev->event_data.exec.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
	} else {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

void proc_sid_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_SID;
	ev->event_data.sid.process_pid = task->pid;
	ev->event_data.sid.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_PTRACE;
	ev->event_data.ptrace.process_pid = task->pid;
	ev->event_data.ptrace.process_tgid = task->tgid;
	if (ptrace_id == PTRACE_ATTACH) {
		ev->event_data.ptrace.tracer_pid = current->pid;
		ev->event_data.ptrace.tracer_tgid = current->tgid;
	} else if (ptrace_id == PTRACE_DETACH) {
		ev->event_data.ptrace.tracer_pid = 0;
		ev->event_data.ptrace.tracer_tgid = 0;
	} else
		return;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

void proc_comm_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_COMM;
	ev->event_data.comm.process_pid = task->pid;
	ev->event_data.comm.process_tgid = task->tgid;
	get_task_comm(ev->event_data.comm.comm, task);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

void proc_coredump_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_COREDUMP;
	ev->event_data.coredump.process_pid = task->pid;
	ev->event_data.coredump.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ev->timestamp_ns = ktime_get_ns();
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = buffer_to_cn_msg(buffer);
	ev = (struct proc_event *)msg->data;
	memset(&ev->event_data, 0, sizeof(ev->event_data));
	msg->seq = rcvd_seq;
	ev->timestamp_ns = ktime_get_ns();
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	msg->flags = 0; /* not used */
	cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
}

/**
 * cn_proc_mcast_ctl
 * @data: message sent from userspace via the connector
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
			      struct netlink_skb_parms *nsp)
{
	enum proc_cn_mcast_op *mc_op = NULL;
	int err = 0;

	if (msg->len != sizeof(*mc_op))
		return;

	/*
	 * Events are reported with respect to the initial pid
	 * and user namespaces so ignore requestors from
	 * other namespaces.
	 */
	if ((current_user_ns() != &init_user_ns) ||
	    (task_active_pid_ns(current) != &init_pid_ns))
		return;

	/* Can only change if privileged. */
	if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
		err = EPERM;
		goto out;
	}

	mc_op = (enum proc_cn_mcast_op *)msg->data;
	switch (*mc_op) {
	case PROC_CN_MCAST_LISTEN:
		atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		atomic_dec(&proc_event_num_listeners);
		break;
	default:
		err = EINVAL;
		break;
	}

out:
	cn_proc_ack(err, msg->seq, msg->ack);
}

/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
	int err = cn_add_callback(&cn_proc_event_id,
				  "cn_proc",
				  &cn_proc_mcast_ctl);
	if (err) {
		pr_warn("cn_proc failed to register\n");
		return err;
	}
	return 0;
}

module_init(cn_proc_init);
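Complementing the subscription sketch after the Kconfig above, a listener then reads struct proc_event payloads (an nlmsghdr wrapping a cn_msg, exactly as proc_fork_connector() and friends build them) from the same socket. Again a hedged sketch rather than part of this commit, assuming the stock UAPI headers; proc_events_loop() and its socket argument are illustrative names.

/*
 * Illustrative userspace receive loop for the events emitted by cn_proc.c
 * above.  Not part of this commit; assumes the socket returned by the
 * earlier proc_events_subscribe() sketch and the stock <linux/cn_proc.h>
 * header.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

static void proc_events_loop(int sk)
{
	char buf[4096];

	for (;;) {
		ssize_t len = recv(sk, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (len <= 0)
			break;

		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			struct cn_msg *cn = NLMSG_DATA(nlh);
			struct proc_event *ev = (struct proc_event *)cn->data;

			switch (ev->what) {
			case PROC_EVENT_FORK:
				printf("fork: parent %d -> child %d\n",
				       ev->event_data.fork.parent_pid,
				       ev->event_data.fork.child_pid);
				break;
			case PROC_EVENT_EXEC:
				printf("exec: pid %d\n",
				       ev->event_data.exec.process_pid);
				break;
			case PROC_EVENT_EXIT:
				printf("exit: pid %d code %u\n",
				       ev->event_data.exit.process_pid,
				       ev->event_data.exit.exit_code);
				break;
			default:	/* uid/gid/sid/comm/coredump/ack ... */
				break;
			}
		}
	}
}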
161	drivers/connector/cn_queue.c	Normal file
@@ -0,0 +1,161 @@
/*
 * cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
			      struct cb_id *id,
			      void (*callback)(struct cn_msg *,
					       struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		pr_err("Failed to create new callback queue.\n");
		return NULL;
	}

	atomic_set(&cbq->refcnt, 1);

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->callback = callback;
	return cbq;
}

void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
	if (!atomic_dec_and_test(&cbq->refcnt))
		return;

	atomic_dec(&cbq->pdev->refcnt);
	kfree(cbq);
}

int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
			  struct cb_id *id,
			  void (*callback)(struct cn_msg *,
					   struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
	if (!cbq)
		return -ENOMEM;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_release_callback(cbq);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}

void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found)
		cn_queue_release_callback(cbq);
}

struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);

	dev->nls = nls;

	return dev;
}

void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	while (atomic_read(&dev->refcnt)) {
		pr_info("Waiting for %s to become free: refcnt=%d.\n",
			dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
	dev = NULL;
}
316	drivers/connector/connector.c	Normal file
@@ -0,0 +1,316 @@
/*
 * connector.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * Sends mult (multiple) cn_msg at a time.
 *
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message it puts a locally unique sequence
 * number and a random acknowledge number there. The sequence number
 * may be copied into nlmsghdr->nlmsg_seq too.
 *
 * The sequence number is incremented with each message to be sent.
 *
 * If we expect a reply to our message then the sequence number in the
 * received message MUST be the same as in the original message, and
 * the acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting then it is a new message.
 *
 * If we receive a message and its sequence number is the same as one
 * we are expecting but its acknowledgement number is not equal to
 * the acknowledgement number in the original message + 1, then it is
 * a new message.
 *
 * If msg->len != len, then additional cn_msg messages are expected following
 * the first msg.
 *
 * The message is sent to the portid if given, to the group if given, to
 * both if both are given; if both are zero then the group is looked up
 * from the registered callbacks and the message is sent there.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast(dev->nls, skb, portid, group,
					 gfp_mask);
	return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);

/* same as cn_netlink_send_mult except msg->len is used for len */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);

/*
 * Callback helper - queues work and setup destructor for given data.
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			atomic_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

/*
 * Main netlink receiving function.
 *
 * It checks skb, netlink header and msg sizes, and calls callback helper.
 */
static void cn_rx_skb(struct sk_buff *__skb)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len, err;

	skb = skb_get(__skb);

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
		len = nlmsg_len(nlh);

		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    len > CONNECTOR_MAX_MSG_SIZE) {
			kfree_skb(skb);
			return;
		}

		err = cn_call_callback(skb);
		if (err < 0)
			kfree_skb(skb);
	}
}

/*
 * Callback add routine - adds callback with given ID and name.
 * If there is a registered callback with the same ID it will not be added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *,
				     struct netlink_skb_parms *))
{
	int err;
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
	if (err)
		return err;

	return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);

/*
 * Callback remove routine - removes callback
 * with given ID.
 * If there is no registered callback with given
 * ID nothing happens.
 *
 * May sleep while waiting for reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);

static int cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}

static int cn_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cn_proc_show, NULL);
}

static const struct file_operations cn_file_ops = {
	.owner   = THIS_MODULE,
	.open    = cn_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release
};

static struct cn_dev cdev = {
	.input = cn_rx_skb,
};

static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		.groups = CN_NETLINK_USERS + 0xf,
		.input  = dev->input,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_create("connector", S_IRUGO, init_net.proc_net, &cn_file_ops);

	return 0;
}

static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);
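For reference, a kernel-side client of this driver registers a callback for its own cb_id with cn_add_callback() and pushes replies with cn_netlink_send(), the same pattern cn_proc.c follows. The module below is a hypothetical sketch only: the example_cn name and the cb_id value are invented for illustration and are not defined anywhere in this commit.

/*
 * Hypothetical example module exercising the connector API added by this
 * commit (cn_add_callback / cn_netlink_send / cn_del_callback).  The id
 * value and names below are invented for illustration only.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/connector.h>

static struct cb_id example_id = { .idx = CN_NETLINK_USERS + 3, .val = 0x1 };

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("example_cn: seq %u ack %u len %u from port %u\n",
		msg->seq, msg->ack, msg->len, nsp->portid);

	/* Echo an empty reply; receivers match it by seq and ack + 1. */
	msg->ack += 1;
	msg->len = 0;
	cn_netlink_send(msg, 0, example_id.idx, GFP_ATOMIC);
}

static int __init example_cn_init(void)
{
	/* Returns -EAGAIN until cn_init() has run (subsys_initcall). */
	return cn_add_callback(&example_id, "example_cn", example_callback);
}

static void __exit example_cn_exit(void)
{
	cn_del_callback(&example_id);
}

module_init(example_cn_init);
module_exit(example_cn_exit);
MODULE_LICENSE("GPL");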