Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 01:08:03 -04:00)

Commit f6dfaef42e: "Fixed MTP to work with TWRP"
50820 changed files with 20846062 additions and 0 deletions
kernel/bpf/Makefile (new file, 5 lines)
@@ -0,0 +1,5 @@
obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o
ifdef CONFIG_TEST_BPF
obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
endif
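
The conditionals compose as follows: core.o is always built once this directory is entered, while syscall.o, verifier.o and (when the BPF test option is also set) test_stub.o all hang off CONFIG_BPF_SYSCALL. A hypothetical .config fragment illustrating the combination; the option names come from the Makefile above, everything else is an assumption, not part of this commit:

# assumed .config fragment
CONFIG_BPF_SYSCALL=y   # builds syscall.o and verifier.o
CONFIG_TEST_BPF=y      # additionally pulls test_stub.o into the build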
kernel/bpf/core.c (new file, 666 lines)
@@ -0,0 +1,666 @@
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <asm/unaligned.h>
#include <linux/bpf.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define IMM	insn->imm

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
			  gfp_extra_flags;
	struct bpf_prog *fp;

	BUG_ON(fp_old == NULL);

	size = round_up(size, PAGE_SIZE);
	if (size <= fp_old->pages * PAGE_SIZE)
		return fp_old;

	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
	if (fp != NULL) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = size / PAGE_SIZE;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_realloc);

void __bpf_prog_free(struct bpf_prog *fp)
{
	kfree(fp->aux);
	vfree(fp);
}
EXPORT_SYMBOL_GPL(__bpf_prog_free);

#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_binary_header *hdr;
	unsigned int size, hole, start;

	/* Most of BPF filters are really small, but if some of them
	 * fill a page, allow at least 128 extra bytes to insert a
	 * random section of illegal instructions.
	 */
	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
	hdr = module_alloc(size);
	if (hdr == NULL)
		return NULL;

	/* Fill space with illegal/arch-dep instructions. */
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (prandom_u32() % hole) & ~(alignment - 1);

	/* Leave a random number of instructions before BPF code. */
	*image_ptr = &hdr->image[start];

	return hdr;
}

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
	module_free(NULL, hdr);
}
#endif /* CONFIG_BPF_JIT */

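/* Illustrative sketch (not part of this commit): how an architecture's
 * bpf_int_jit_compile() would typically drive the two helpers above.
 * The example_* names and the int3-style padding are assumptions.
 */
static void example_fill_ill_insns(void *area, unsigned int size)
{
	memset(area, 0xcc, size);	/* e.g. x86 int3 bytes as trap padding */
}

static void example_jit(struct bpf_prog *prog, unsigned int proglen)
{
	struct bpf_binary_header *hdr;
	u8 *image;

	/* reserve proglen bytes at a randomized, aligned offset inside a
	 * module_alloc'ed area that is otherwise filled with traps
	 */
	hdr = bpf_jit_binary_alloc(proglen, &image, 4, example_fill_ill_insns);
	if (hdr == NULL)
		return;		/* leave the program on the interpreter */

	/* ... emit native code into image[0..proglen) here ... */

	prog->bpf_func = (void *) image;
	prog->jited = true;
	/* the arch's bpf_jit_free() later recovers hdr from this image */
}
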
/* Base function for offset calculation. Needs to go into .text section,
 * therefore keeping it non-static as well; will also be used by JITs
 * anyway later on, so do not let the compiler omit it.
 */
noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}

/**
 * __bpf_prog_run - run eBPF program on a given context
 * @ctx: is the data we are operating on
 * @insn: is the array of eBPF instructions
 *
 * Decode and execute eBPF instructions.
 */
static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
{
	u64 stack[MAX_BPF_STACK / sizeof(u64)];
	u64 regs[MAX_BPF_REG], tmp;
	static const void *jumptable[256] = {
		[0 ... 255] = &&default_label,
		/* Now overwrite non-defaults ... */
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
		/* 64 bit ALU operations */
		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
		/* Call instruction */
		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
		/* Jumps */
		[BPF_JMP | BPF_JA] = &&JMP_JA,
		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
		/* Program return */
		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
		/* Store instructions */
		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
		/* Load instructions */
		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
		[BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
	};
	void *ptr;
	int off;

#define CONT	 ({ insn++; goto select_insn; })
#define CONT_JMP ({ insn++; goto select_insn; })

	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
	ARG1 = (u64) (unsigned long) ctx;

	/* Registers used in classic BPF programs need to be reset first. */
	regs[BPF_REG_A] = 0;
	regs[BPF_REG_X] = 0;

select_insn:
	goto *jumptable[insn->code];

	/* ALU */
#define ALU(OPCODE, OP)			\
	ALU64_##OPCODE##_X:		\
		DST = DST OP SRC;	\
		CONT;			\
	ALU_##OPCODE##_X:		\
		DST = (u32) DST OP (u32) SRC;	\
		CONT;			\
	ALU64_##OPCODE##_K:		\
		DST = DST OP IMM;	\
		CONT;			\
	ALU_##OPCODE##_K:		\
		DST = (u32) DST OP (u32) IMM;	\
		CONT;

	ALU(ADD, +)
	ALU(SUB, -)
	ALU(AND, &)
	ALU(OR, |)
	ALU(LSH, <<)
	ALU(RSH, >>)
	ALU(XOR, ^)
	ALU(MUL, *)
#undef ALU
	ALU_NEG:
		DST = (u32) -DST;
		CONT;
	ALU64_NEG:
		DST = -DST;
		CONT;
	ALU_MOV_X:
		DST = (u32) SRC;
		CONT;
	ALU_MOV_K:
		DST = (u32) IMM;
		CONT;
	ALU64_MOV_X:
		DST = SRC;
		CONT;
	ALU64_MOV_K:
		DST = IMM;
		CONT;
	LD_IMM_DW:
		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
		insn++;
		CONT;
	ALU64_ARSH_X:
		(*(s64 *) &DST) >>= SRC;
		CONT;
	ALU64_ARSH_K:
		(*(s64 *) &DST) >>= IMM;
		CONT;
	ALU64_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		div64_u64_rem(DST, SRC, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) SRC);
		CONT;
	ALU64_MOD_K:
		div64_u64_rem(DST, IMM, &tmp);
		DST = tmp;
		CONT;
	ALU_MOD_K:
		tmp = (u32) DST;
		DST = do_div(tmp, (u32) IMM);
		CONT;
	ALU64_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		DST = div64_u64(DST, SRC);
		CONT;
	ALU_DIV_X:
		if (unlikely(SRC == 0))
			return 0;
		tmp = (u32) DST;
		do_div(tmp, (u32) SRC);
		DST = (u32) tmp;
		CONT;
	ALU64_DIV_K:
		DST = div64_u64(DST, IMM);
		CONT;
	ALU_DIV_K:
		tmp = (u32) DST;
		do_div(tmp, (u32) IMM);
		DST = (u32) tmp;
		CONT;
	ALU_END_TO_BE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_be16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_be32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_be64(DST);
			break;
		}
		CONT;
	ALU_END_TO_LE:
		switch (IMM) {
		case 16:
			DST = (__force u16) cpu_to_le16(DST);
			break;
		case 32:
			DST = (__force u32) cpu_to_le32(DST);
			break;
		case 64:
			DST = (__force u64) cpu_to_le64(DST);
			break;
		}
		CONT;

	/* CALL */
	JMP_CALL:
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;

	/* JMP */
	JMP_JA:
		insn += insn->off;
		CONT;
	JMP_JEQ_X:
		if (DST == SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JEQ_K:
		if (DST == IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_X:
		if (DST != SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JNE_K:
		if (DST != IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_X:
		if (DST > SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGT_K:
		if (DST > IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_X:
		if (DST >= SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JGE_K:
		if (DST >= IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_X:
		if (((s64) DST) > ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGT_K:
		if (((s64) DST) > ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_X:
		if (((s64) DST) >= ((s64) SRC)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSGE_K:
		if (((s64) DST) >= ((s64) IMM)) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_X:
		if (DST & SRC) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_JSET_K:
		if (DST & IMM) {
			insn += insn->off;
			CONT_JMP;
		}
		CONT;
	JMP_EXIT:
		return BPF_R0;

	/* STX and ST and LDX */
#define LDST(SIZEOP, SIZE)						\
	STX_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
		CONT;							\
	ST_MEM_##SIZEOP:						\
		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
		CONT;							\
	LDX_MEM_##SIZEOP:						\
		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
		CONT;

	LDST(B, u8)
	LDST(H, u16)
	LDST(W, u32)
	LDST(DW, u64)
#undef LDST
	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
			   (DST + insn->off));
		CONT;
	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
			     (DST + insn->off));
		CONT;
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
		off = IMM;
	load_word:
		/* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are
		 * only appearing in the programs where ctx ==
		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 * Implicit input:
		 *   ctx == skb == BPF_R6 == CTX
		 *
		 * Explicit input:
		 *   SRC == any register
		 *   IMM == 32-bit immediate
		 *
		 * Output:
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */

		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
		}

		return 0;
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
		off = IMM;
	load_half:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
		}

		return 0;
	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
		off = IMM;
	load_byte:
		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
		if (likely(ptr != NULL)) {
			BPF_R0 = *(u8 *)ptr;
			CONT;
		}

		return 0;
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_word;
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
		off = IMM + SRC;
		goto load_half;
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
		off = IMM + SRC;
		goto load_byte;

	default_label:
		/* If we ever reach this, we have a bug somewhere. */
		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
		return 0;
}

void __weak bpf_int_jit_compile(struct bpf_prog *prog)
{
}

/**
 * bpf_prog_select_runtime - select execution runtime for BPF program
 * @fp: bpf_prog populated with internal BPF program
 *
 * try to JIT internal BPF program, if JIT is not available select interpreter
 * BPF program will be executed via BPF_PROG_RUN() macro
 */
void bpf_prog_select_runtime(struct bpf_prog *fp)
{
	fp->bpf_func = (void *) __bpf_prog_run;

	/* Probe if internal BPF can be JITed */
	bpf_int_jit_compile(fp);
	/* Lock whole bpf_prog as read-only */
	bpf_prog_lock_ro(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);

static void bpf_prog_free_deferred(struct work_struct *work)
{
	struct bpf_prog_aux *aux;

	aux = container_of(work, struct bpf_prog_aux, work);
	bpf_jit_free(aux->prog);
}

/* Free internal BPF program */
void bpf_prog_free(struct bpf_prog *fp)
{
	struct bpf_prog_aux *aux = fp->aux;

	INIT_WORK(&aux->work, bpf_prog_free_deferred);
	aux->prog = fp;
	schedule_work(&aux->work);
}
EXPORT_SYMBOL_GPL(bpf_prog_free);

/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
 */
int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
			 int len)
{
	return -EFAULT;
}
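
To make the dispatch above concrete, here is a hypothetical kernel-side sketch of feeding the two-instruction program "r0 = 42; exit" through bpf_prog_alloc(), bpf_prog_select_runtime() and the BPF_PROG_RUN() macro; the insn-building helpers are assumed to be the BPF_MOV64_IMM()/BPF_EXIT_INSN() macros from <linux/filter.h> of this tree, and the function itself is illustrative, not part of the commit:

static int example_run_trivial_prog(void)
{
	struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 42),	/* r0 = 42 */
		BPF_EXIT_INSN(),		/* return r0 */
	};
	struct bpf_prog *fp;
	unsigned int ret;

	fp = bpf_prog_alloc(bpf_prog_size(ARRAY_SIZE(insns)), 0);
	if (!fp)
		return -ENOMEM;

	fp->len = ARRAY_SIZE(insns);
	memcpy(fp->insnsi, insns, sizeof(insns));

	/* points fp->bpf_func at __bpf_prog_run(), or at a JIT image */
	bpf_prog_select_runtime(fp);

	ret = BPF_PROG_RUN(fp, NULL);	/* ctx is unused by this program */
	bpf_prog_free(fp);

	return ret == 42 ? 0 : -EINVAL;
}

Note that a path like this bypasses the verifier; checking is only imposed on programs arriving through the bpf(2) syscall in syscall.c below.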
kernel/bpf/syscall.c (new file, 606 lines)
@@ -0,0 +1,606 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	/* implementation dependent freeing */
	map->ops->map_free(map);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	bpf_map_put(map);
	return 0;
}

static const struct file_operations bpf_map_fops = {
	.release = bpf_map_release,
};

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);

	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);

	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *bpf_map_get(struct fd f)
{
	struct bpf_map *map;

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	map = f.file->private_data;

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ESRCH;
	rcu_read_lock();
	value = map->ops->map_lookup_elem(map, key);
	if (!value)
		goto err_unlock;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, map->value_size) != 0)
		goto err_unlock;

	err = 0;

err_unlock:
	rcu_read_unlock();
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD value

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, map->value_size) != 0)
		goto free_value;

	/* eBPF programs that use maps are running under rcu_read_lock(),
	 * therefore all map accessors rely on this fact, so do the same here
	 */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value);
	rcu_read_unlock();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *next_key;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

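/* Illustrative sketch (not part of this commit): seen from user space,
 * the map commands above reduce to filling the leading fields of
 * union bpf_attr and invoking the multiplexing syscall. The bpf()
 * wrapper and __NR_bpf are assumptions; also note that the only
 * in-tree map type at this commit is the stub in test_stub.c, which
 * supports create/free but implements no element ops.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
 *	{
 *		return syscall(__NR_bpf, cmd, attr, size);
 *	}
 *
 *	int example_map_create(void)
 *	{
 *		union bpf_attr attr;
 *
 *		// CHECK_ATTR() demands that the fields past max_entries
 *		// are zero, so clear the whole union before filling it
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_type    = BPF_MAP_TYPE_UNSPEC;
 *		attr.key_size    = 8;
 *		attr.value_size  = 8;
 *		attr.max_entries = 16;
 *
 *		// on success: an anon-inode fd whose release puts the map
 *		return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 *
 * Element access follows the same pattern: key/value are user pointers
 * carried through the __aligned_u64 fields and recovered kernel-side
 * by u64_to_ptr().
 */
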
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->aux->prog_type = type;
			return 0;
		}
	}
	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		free_used_maps(prog->aux);
		bpf_prog_free(prog);
	}
}

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

static struct bpf_prog *get_prog(struct fd f)
{
	struct bpf_prog *prog;

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	prog = f.file->private_data;

	return prog;
}

/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = get_prog(f);

	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);
	return prog;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD log_buf

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = false;

	atomic_set(&prog->aux->refcnt, 1);
	prog->aux->is_gpl_compatible = is_gpl;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(prog, attr);

	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	bpf_prog_select_runtime(prog);

	err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);

	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_free(prog);
	return err;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	/* the syscall is limited to root temporarily. This restriction will be
	 * lifted when security audit is clean. Note that eBPF+tracing must have
	 * this restriction, since it may pass kernel data to user space
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
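
The program-load path admits the same user-space treatment. A hypothetical sketch loading the minimal program "r0 = 0; exit" as the stub program type registered by test_stub.c; the instruction encodings are written out by hand, the __NR_bpf number is assumed, and whether this tree's verifier accepts the program is not claimed here:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int example_prog_load(void)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },	/* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },		/* exit */
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));	/* CHECK_ATTR: unused tail must be zero */
	attr.prog_type = BPF_PROG_TYPE_UNSPEC;
	attr.insns     = (unsigned long) insns;
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = (unsigned long) "GPL";	/* enables gpl_only helpers */

	/* on success: a bpf-prog fd; bpf_prog_get() resolves it kernel-side */
	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}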
kernel/bpf/test_stub.c (new file, 116 lines)
@@ -0,0 +1,116 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/bpf.h>

/* test stubs for BPF_MAP_TYPE_UNSPEC and for BPF_PROG_TYPE_UNSPEC
 * to be used by user space verifier testsuite
 */
struct bpf_context {
	u64 arg1;
	u64 arg2;
};

static u64 test_func(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}

static struct bpf_func_proto test_funcs[] = {
	[BPF_FUNC_unspec] = {
		.func = test_func,
		.gpl_only = true,
		.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
		.arg1_type = ARG_CONST_MAP_PTR,
		.arg2_type = ARG_PTR_TO_MAP_KEY,
	},
};

static const struct bpf_func_proto *test_func_proto(enum bpf_func_id func_id)
{
	if (func_id < 0 || func_id >= ARRAY_SIZE(test_funcs))
		return NULL;
	return &test_funcs[func_id];
}

static const struct bpf_context_access {
	int size;
	enum bpf_access_type type;
} test_ctx_access[] = {
	[offsetof(struct bpf_context, arg1)] = {
		FIELD_SIZEOF(struct bpf_context, arg1),
		BPF_READ
	},
	[offsetof(struct bpf_context, arg2)] = {
		FIELD_SIZEOF(struct bpf_context, arg2),
		BPF_READ
	},
};

static bool test_is_valid_access(int off, int size, enum bpf_access_type type)
{
	const struct bpf_context_access *access;

	if (off < 0 || off >= ARRAY_SIZE(test_ctx_access))
		return false;

	access = &test_ctx_access[off];
	if (access->size == size && (access->type & type))
		return true;

	return false;
}

static struct bpf_verifier_ops test_ops = {
	.get_func_proto = test_func_proto,
	.is_valid_access = test_is_valid_access,
};

static struct bpf_prog_type_list tl_prog = {
	.ops = &test_ops,
	.type = BPF_PROG_TYPE_UNSPEC,
};

static struct bpf_map *test_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map;

	map = kzalloc(sizeof(*map), GFP_USER);
	if (!map)
		return ERR_PTR(-ENOMEM);

	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	return map;
}

static void test_map_free(struct bpf_map *map)
{
	kfree(map);
}

static struct bpf_map_ops test_map_ops = {
	.map_alloc = test_map_alloc,
	.map_free = test_map_free,
};

static struct bpf_map_type_list tl_map = {
	.ops = &test_map_ops,
	.type = BPF_MAP_TYPE_UNSPEC,
};

static int __init register_test_ops(void)
{
	bpf_register_map_type(&tl_map);
	bpf_register_prog_type(&tl_prog);
	return 0;
}
late_initcall(register_test_ops);
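
The access table above only whitelists full-width reads that land exactly on the two context field offsets. A small illustrative sketch of the outcomes test_is_valid_access() produces; the example function is hypothetical (the checked function is static to the file above):

static void example_ctx_access_checks(void)
{
	/* 8-byte read at offsetof(struct bpf_context, arg1) == 0: allowed */
	WARN_ON(!test_is_valid_access(0, 8, BPF_READ));

	/* writes are rejected: both table entries carry only BPF_READ */
	WARN_ON(test_is_valid_access(0, 8, BPF_WRITE));

	/* a narrower or misaligned read misses the indexed table entries */
	WARN_ON(test_is_valid_access(4, 4, BPF_READ));
}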
kernel/bpf/verifier.c (new file, 1927 lines)
File diff suppressed because it is too large.