Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

security/selinux/ss/avtab.c Normal file

@@ -0,0 +1,620 @@
/*
* Implementation of the access vector table type.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
*
* Added conditional policy language extensions
*
* Copyright (C) 2003 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*
* Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
* Tuned number of hash slots for avtab to reduce memory usage
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include "avtab.h"
#include "policydb.h"
static struct kmem_cache *avtab_node_cachep;
static struct kmem_cache *avtab_operation_cachep;
static inline int avtab_hash(struct avtab_key *keyp, u16 mask)
{
return ((keyp->target_class + (keyp->target_type << 2) +
(keyp->source_type << 9)) & mask);
}
static struct avtab_node*
avtab_insert_node(struct avtab *h, int hvalue,
struct avtab_node *prev, struct avtab_node *cur,
struct avtab_key *key, struct avtab_datum *datum)
{
struct avtab_node *newnode;
struct avtab_operation *ops;
newnode = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
if (newnode == NULL)
return NULL;
newnode->key = *key;
if (key->specified & AVTAB_OP) {
ops = kmem_cache_zalloc(avtab_operation_cachep, GFP_KERNEL);
if (ops == NULL) {
kmem_cache_free(avtab_node_cachep, newnode);
return NULL;
}
*ops = *(datum->u.ops);
newnode->datum.u.ops = ops;
} else {
newnode->datum.u.data = datum->u.data;
}
if (prev) {
newnode->next = prev->next;
prev->next = newnode;
} else {
newnode->next = h->htable[hvalue];
h->htable[hvalue] = newnode;
}
h->nel++;
return newnode;
}
static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum)
{
int hvalue;
struct avtab_node *prev, *cur, *newnode;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
if (!h || !h->htable)
return -EINVAL;
hvalue = avtab_hash(key, h->mask);
for (prev = NULL, cur = h->htable[hvalue];
cur;
prev = cur, cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class == cur->key.target_class &&
(specified & cur->key.specified)) {
if (specified & AVTAB_OPNUM)
break;
return -EEXIST;
}
if (key->source_type < cur->key.source_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type < cur->key.target_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class < cur->key.target_class)
break;
}
newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum);
if (!newnode)
return -ENOMEM;
return 0;
}
/* Unlike avtab_insert(), this function allows multiple insertions of the same
* key/specified mask into the table, as needed by the conditional avtab.
* It also returns a pointer to the node inserted.
*/
struct avtab_node *
avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum)
{
int hvalue;
struct avtab_node *prev, *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
if (!h || !h->htable)
return NULL;
hvalue = avtab_hash(key, h->mask);
for (prev = NULL, cur = h->htable[hvalue];
cur;
prev = cur, cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class == cur->key.target_class &&
(specified & cur->key.specified))
break;
if (key->source_type < cur->key.source_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type < cur->key.target_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class < cur->key.target_class)
break;
}
return avtab_insert_node(h, hvalue, prev, cur, key, datum);
}
struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
{
int hvalue;
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
if (!h || !h->htable)
return NULL;
hvalue = avtab_hash(key, h->mask);
for (cur = h->htable[hvalue]; cur; cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class == cur->key.target_class &&
(specified & cur->key.specified))
return &cur->datum;
if (key->source_type < cur->key.source_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type < cur->key.target_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class < cur->key.target_class)
break;
}
return NULL;
}
/* This search function returns a node pointer, and can be used in
* conjunction with avtab_search_node_next()
*/
struct avtab_node*
avtab_search_node(struct avtab *h, struct avtab_key *key)
{
int hvalue;
struct avtab_node *cur;
u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
if (!h || !h->htable)
return NULL;
hvalue = avtab_hash(key, h->mask);
for (cur = h->htable[hvalue]; cur; cur = cur->next) {
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class == cur->key.target_class &&
(specified & cur->key.specified))
return cur;
if (key->source_type < cur->key.source_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type < cur->key.target_type)
break;
if (key->source_type == cur->key.source_type &&
key->target_type == cur->key.target_type &&
key->target_class < cur->key.target_class)
break;
}
return NULL;
}
struct avtab_node*
avtab_search_node_next(struct avtab_node *node, int specified)
{
struct avtab_node *cur;
if (!node)
return NULL;
specified &= ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
for (cur = node->next; cur; cur = cur->next) {
if (node->key.source_type == cur->key.source_type &&
node->key.target_type == cur->key.target_type &&
node->key.target_class == cur->key.target_class &&
(specified & cur->key.specified))
return cur;
if (node->key.source_type < cur->key.source_type)
break;
if (node->key.source_type == cur->key.source_type &&
node->key.target_type < cur->key.target_type)
break;
if (node->key.source_type == cur->key.source_type &&
node->key.target_type == cur->key.target_type &&
node->key.target_class < cur->key.target_class)
break;
}
return NULL;
}
void avtab_destroy(struct avtab *h)
{
int i;
struct avtab_node *cur, *temp;
if (!h || !h->htable)
return;
for (i = 0; i < h->nslot; i++) {
cur = h->htable[i];
while (cur) {
temp = cur;
cur = cur->next;
if (temp->key.specified & AVTAB_OP)
kmem_cache_free(avtab_operation_cachep,
temp->datum.u.ops);
kmem_cache_free(avtab_node_cachep, temp);
}
h->htable[i] = NULL;
}
kfree(h->htable);
h->htable = NULL;
h->nslot = 0;
h->mask = 0;
}
int avtab_init(struct avtab *h)
{
h->htable = NULL;
h->nel = 0;
return 0;
}
int avtab_alloc(struct avtab *h, u32 nrules)
{
u16 mask = 0;
u32 shift = 0;
u32 work = nrules;
u32 nslot = 0;
if (nrules == 0)
goto avtab_alloc_out;
while (work) {
work = work >> 1;
shift++;
}
if (shift > 2)
shift = shift - 2;
nslot = 1 << shift;
if (nslot > MAX_AVTAB_HASH_BUCKETS)
nslot = MAX_AVTAB_HASH_BUCKETS;
mask = nslot - 1;
h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
if (!h->htable)
return -ENOMEM;
avtab_alloc_out:
h->nel = 0;
h->nslot = nslot;
h->mask = mask;
printk(KERN_DEBUG "SELinux: %d avtab hash slots, %d rules.\n",
h->nslot, nrules);
return 0;
}
void avtab_hash_eval(struct avtab *h, char *tag)
{
int i, chain_len, slots_used, max_chain_len;
unsigned long long chain2_len_sum;
struct avtab_node *cur;
slots_used = 0;
max_chain_len = 0;
chain2_len_sum = 0;
for (i = 0; i < h->nslot; i++) {
cur = h->htable[i];
if (cur) {
slots_used++;
chain_len = 0;
while (cur) {
chain_len++;
cur = cur->next;
}
if (chain_len > max_chain_len)
max_chain_len = chain_len;
chain2_len_sum += chain_len * chain_len;
}
}
printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, "
"longest chain length %d sum of chain length^2 %llu\n",
tag, h->nel, slots_used, h->nslot, max_chain_len,
chain2_len_sum);
}
static uint16_t spec_order[] = {
AVTAB_ALLOWED,
AVTAB_AUDITDENY,
AVTAB_AUDITALLOW,
AVTAB_TRANSITION,
AVTAB_CHANGE,
AVTAB_MEMBER,
AVTAB_OPNUM_ALLOWED,
AVTAB_OPNUM_AUDITALLOW,
AVTAB_OPNUM_DONTAUDIT,
AVTAB_OPTYPE_ALLOWED,
AVTAB_OPTYPE_AUDITALLOW,
AVTAB_OPTYPE_DONTAUDIT
};
int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
int (*insertf)(struct avtab *a, struct avtab_key *k,
struct avtab_datum *d, void *p),
void *p)
{
__le16 buf16[4];
u16 enabled;
u32 items, items2, val, vers = pol->policyvers;
struct avtab_key key;
struct avtab_datum datum;
struct avtab_operation ops;
__le32 buf32[ARRAY_SIZE(ops.op.perms)];
int i, rc;
unsigned set;
memset(&key, 0, sizeof(struct avtab_key));
memset(&datum, 0, sizeof(struct avtab_datum));
if (vers < POLICYDB_VERSION_AVTAB) {
rc = next_entry(buf32, fp, sizeof(u32));
if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
return rc;
}
items2 = le32_to_cpu(buf32[0]);
if (items2 > ARRAY_SIZE(buf32)) {
printk(KERN_ERR "SELinux: avtab: entry overflow\n");
return -EINVAL;
}
rc = next_entry(buf32, fp, sizeof(u32)*items2);
if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
return rc;
}
items = 0;
val = le32_to_cpu(buf32[items++]);
key.source_type = (u16)val;
if (key.source_type != val) {
printk(KERN_ERR "SELinux: avtab: truncated source type\n");
return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
key.target_type = (u16)val;
if (key.target_type != val) {
printk(KERN_ERR "SELinux: avtab: truncated target type\n");
return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
key.target_class = (u16)val;
if (key.target_class != val) {
printk(KERN_ERR "SELinux: avtab: truncated target class\n");
return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
enabled = (val & AVTAB_ENABLED_OLD) ? AVTAB_ENABLED : 0;
if (!(val & (AVTAB_AV | AVTAB_TYPE))) {
printk(KERN_ERR "SELinux: avtab: null entry\n");
return -EINVAL;
}
if ((val & AVTAB_AV) &&
(val & AVTAB_TYPE)) {
printk(KERN_ERR "SELinux: avtab: entry has both access vectors and types\n");
return -EINVAL;
}
if (val & AVTAB_OP) {
printk(KERN_ERR "SELinux: avtab: entry has operations\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
if (val & spec_order[i]) {
key.specified = spec_order[i] | enabled;
datum.u.data = le32_to_cpu(buf32[items++]);
rc = insertf(a, &key, &datum, p);
if (rc)
return rc;
}
}
if (items != items2) {
printk(KERN_ERR "SELinux: avtab: entry only had %d items, expected %d\n", items2, items);
return -EINVAL;
}
return 0;
}
rc = next_entry(buf16, fp, sizeof(u16)*4);
if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
return rc;
}
items = 0;
key.source_type = le16_to_cpu(buf16[items++]);
key.target_type = le16_to_cpu(buf16[items++]);
key.target_class = le16_to_cpu(buf16[items++]);
key.specified = le16_to_cpu(buf16[items++]);
if (!policydb_type_isvalid(pol, key.source_type) ||
!policydb_type_isvalid(pol, key.target_type) ||
!policydb_class_isvalid(pol, key.target_class)) {
printk(KERN_ERR "SELinux: avtab: invalid type or class\n");
return -EINVAL;
}
set = 0;
for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
if (key.specified & spec_order[i])
set++;
}
if (!set || set > 1) {
printk(KERN_ERR "SELinux: avtab: more than one specifier\n");
return -EINVAL;
}
if ((vers < POLICYDB_VERSION_IOCTL_OPERATIONS)
|| !(key.specified & AVTAB_OP)) {
rc = next_entry(buf32, fp, sizeof(u32));
if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
return rc;
}
datum.u.data = le32_to_cpu(*buf32);
} else {
memset(&ops, 0, sizeof(struct avtab_operation));
rc = next_entry(&ops.type, fp, sizeof(u8));
if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
return rc;
}
rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(ops.op.perms));
if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
return rc;
}
for (i = 0; i < ARRAY_SIZE(ops.op.perms); i++)
ops.op.perms[i] = le32_to_cpu(buf32[i]);
datum.u.ops = &ops;
}
if ((key.specified & AVTAB_TYPE) &&
!policydb_type_isvalid(pol, datum.u.data)) {
printk(KERN_ERR "SELinux: avtab: invalid type\n");
return -EINVAL;
}
return insertf(a, &key, &datum, p);
}
static int avtab_insertf(struct avtab *a, struct avtab_key *k,
struct avtab_datum *d, void *p)
{
return avtab_insert(a, k, d);
}
int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
{
int rc;
__le32 buf[1];
u32 nel, i;
rc = next_entry(buf, fp, sizeof(u32));
if (rc < 0) {
printk(KERN_ERR "SELinux: avtab: truncated table\n");
goto bad;
}
nel = le32_to_cpu(buf[0]);
if (!nel) {
printk(KERN_ERR "SELinux: avtab: table is empty\n");
rc = -EINVAL;
goto bad;
}
rc = avtab_alloc(a, nel);
if (rc)
goto bad;
for (i = 0; i < nel; i++) {
rc = avtab_read_item(a, fp, pol, avtab_insertf, NULL);
if (rc) {
if (rc == -ENOMEM)
printk(KERN_ERR "SELinux: avtab: out of memory\n");
else if (rc == -EEXIST)
printk(KERN_ERR "SELinux: avtab: duplicate entry\n");
goto bad;
}
}
rc = 0;
out:
return rc;
bad:
avtab_destroy(a);
goto out;
}
int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
{
__le16 buf16[4];
__le32 buf32[ARRAY_SIZE(cur->datum.u.ops->op.perms)];
int rc;
unsigned int i;
buf16[0] = cpu_to_le16(cur->key.source_type);
buf16[1] = cpu_to_le16(cur->key.target_type);
buf16[2] = cpu_to_le16(cur->key.target_class);
buf16[3] = cpu_to_le16(cur->key.specified);
rc = put_entry(buf16, sizeof(u16), 4, fp);
if (rc)
return rc;
if (cur->key.specified & AVTAB_OP) {
rc = put_entry(&cur->datum.u.ops->type, sizeof(u8), 1, fp);
if (rc)
return rc;
for (i = 0; i < ARRAY_SIZE(cur->datum.u.ops->op.perms); i++)
buf32[i] = cpu_to_le32(cur->datum.u.ops->op.perms[i]);
rc = put_entry(buf32, sizeof(u32),
ARRAY_SIZE(cur->datum.u.ops->op.perms), fp);
} else {
buf32[0] = cpu_to_le32(cur->datum.u.data);
rc = put_entry(buf32, sizeof(u32), 1, fp);
}
if (rc)
return rc;
return 0;
}
int avtab_write(struct policydb *p, struct avtab *a, void *fp)
{
unsigned int i;
int rc = 0;
struct avtab_node *cur;
__le32 buf[1];
buf[0] = cpu_to_le32(a->nel);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (i = 0; i < a->nslot; i++) {
for (cur = a->htable[i]; cur; cur = cur->next) {
rc = avtab_write_item(p, cur, fp);
if (rc)
return rc;
}
}
return rc;
}
void avtab_cache_init(void)
{
avtab_node_cachep = kmem_cache_create("avtab_node",
sizeof(struct avtab_node),
0, SLAB_PANIC, NULL);
avtab_operation_cachep = kmem_cache_create("avtab_operation",
sizeof(struct avtab_operation),
0, SLAB_PANIC, NULL);
}
void avtab_cache_destroy(void)
{
kmem_cache_destroy(avtab_node_cachep);
kmem_cache_destroy(avtab_operation_cachep);
}
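
As a point of reference for the search helpers above: avtab_search_node() and avtab_search_node_next() are meant to be used as a pair to visit every datum stored under one (source, target, class) triple for a given specifier. The fragment below is an illustrative caller only, not part of this commit; the example_ name is hypothetical and AVTAB_ALLOWED is just one possible specifier. It mirrors the iteration pattern that cond_compute_av() in conditional.c follows further down.

#include "avtab.h"

/* Illustrative sketch: OR together every enabled allow vector for one key. */
static u32 example_collect_allowed(struct avtab *tab, struct avtab_key *key)
{
        struct avtab_node *node;
        u32 allowed = 0;

        key->specified = AVTAB_ALLOWED;
        for (node = avtab_search_node(tab, key); node;
             node = avtab_search_node_next(node, key->specified)) {
                /* skip rules a conditional block has currently disabled */
                if (node->key.specified & AVTAB_ENABLED)
                        allowed |= node->datum.u.data;
        }
        return allowed;
}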

security/selinux/ss/avtab.h Normal file

@@ -0,0 +1,114 @@
/*
* An access vector table (avtab) is a hash table
* of access vectors and transition types indexed
* by a type pair and a class. An access vector
* table is used to represent the type enforcement
* tables.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
*
* Added conditional policy language extensions
*
* Copyright (C) 2003 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*
* Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
* Tuned number of hash slots for avtab to reduce memory usage
*/
#ifndef _SS_AVTAB_H_
#define _SS_AVTAB_H_
#include "security.h"
struct avtab_key {
u16 source_type; /* source type */
u16 target_type; /* target type */
u16 target_class; /* target object class */
#define AVTAB_ALLOWED 0x0001
#define AVTAB_AUDITALLOW 0x0002
#define AVTAB_AUDITDENY 0x0004
#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY)
#define AVTAB_TRANSITION 0x0010
#define AVTAB_MEMBER 0x0020
#define AVTAB_CHANGE 0x0040
#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
#define AVTAB_OPNUM_ALLOWED 0x0100
#define AVTAB_OPNUM_AUDITALLOW 0x0200
#define AVTAB_OPNUM_DONTAUDIT 0x0400
#define AVTAB_OPNUM (AVTAB_OPNUM_ALLOWED | \
AVTAB_OPNUM_AUDITALLOW | \
AVTAB_OPNUM_DONTAUDIT)
#define AVTAB_OPTYPE_ALLOWED 0x1000
#define AVTAB_OPTYPE_AUDITALLOW 0x2000
#define AVTAB_OPTYPE_DONTAUDIT 0x4000
#define AVTAB_OPTYPE (AVTAB_OPTYPE_ALLOWED | \
AVTAB_OPTYPE_AUDITALLOW | \
AVTAB_OPTYPE_DONTAUDIT)
#define AVTAB_OP (AVTAB_OPNUM | AVTAB_OPTYPE)
#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for use in cond_avtab */
#define AVTAB_ENABLED 0x8000 /* reserved for use in cond_avtab */
u16 specified; /* what field is specified */
};
struct avtab_operation {
u8 type;
struct operation_perm op;
};
struct avtab_datum {
union {
u32 data; /* access vector or type value */
struct avtab_operation *ops; /* ioctl operations */
} u;
};
struct avtab_node {
struct avtab_key key;
struct avtab_datum datum;
struct avtab_node *next;
};
struct avtab {
struct avtab_node **htable;
u32 nel; /* number of elements */
u32 nslot; /* number of hash slots */
u16 mask; /* mask to compute hash func */
};
int avtab_init(struct avtab *);
int avtab_alloc(struct avtab *, u32);
struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
void avtab_destroy(struct avtab *h);
void avtab_hash_eval(struct avtab *h, char *tag);
struct policydb;
int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
int (*insert)(struct avtab *a, struct avtab_key *k,
struct avtab_datum *d, void *p),
void *p);
int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
int avtab_write(struct policydb *p, struct avtab *a, void *fp);
struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
struct avtab_datum *datum);
struct avtab_node *avtab_search_node(struct avtab *h, struct avtab_key *key);
struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified);
void avtab_cache_init(void);
void avtab_cache_destroy(void);
#define MAX_AVTAB_HASH_BITS 11
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
#endif /* _SS_AVTAB_H_ */
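
A note on usage and sizing, with a hypothetical caller (not part of this commit): a lookup fills in all four key fields, picks exactly one specifier bit, and calls avtab_search(). For sizing, avtab_alloc() in avtab.c chooses roughly nrules/4 slots rounded to a power of two and capped at MAX_AVTAB_HASH_BUCKETS (2048); for example, 1000 rules yields 256 slots and a hash mask of 0xff.

#include "avtab.h"

/* Illustrative sketch: fetch the unconditional allow vector for one triple.
 * The example_ name is hypothetical; type/class values come from the policy. */
static u32 example_lookup_allowed(struct avtab *tab, u16 stype, u16 ttype, u16 tclass)
{
        struct avtab_key key;
        struct avtab_datum *datum;

        key.source_type  = stype;
        key.target_type  = ttype;
        key.target_class = tclass;
        key.specified    = AVTAB_ALLOWED;

        datum = avtab_search(tab, &key);
        return datum ? datum->u.data : 0;
}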

security/selinux/ss/conditional.c Normal file

@@ -0,0 +1,665 @@
/* Authors: Karl MacMillan <kmacmillan@tresys.com>
* Frank Mayer <mayerf@tresys.com>
*
* Copyright (C) 2003 - 2004 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include "security.h"
#include "conditional.h"
#include "services.h"
/*
* cond_evaluate_expr evaluates a conditional expr
* in reverse polish notation. It returns true (1), false (0),
* or undefined (-1). Undefined occurs when the expression
* exceeds the stack depth of COND_EXPR_MAXDEPTH.
*/
static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
{
struct cond_expr *cur;
int s[COND_EXPR_MAXDEPTH];
int sp = -1;
for (cur = expr; cur; cur = cur->next) {
switch (cur->expr_type) {
case COND_BOOL:
if (sp == (COND_EXPR_MAXDEPTH - 1))
return -1;
sp++;
s[sp] = p->bool_val_to_struct[cur->bool - 1]->state;
break;
case COND_NOT:
if (sp < 0)
return -1;
s[sp] = !s[sp];
break;
case COND_OR:
if (sp < 1)
return -1;
sp--;
s[sp] |= s[sp + 1];
break;
case COND_AND:
if (sp < 1)
return -1;
sp--;
s[sp] &= s[sp + 1];
break;
case COND_XOR:
if (sp < 1)
return -1;
sp--;
s[sp] ^= s[sp + 1];
break;
case COND_EQ:
if (sp < 1)
return -1;
sp--;
s[sp] = (s[sp] == s[sp + 1]);
break;
case COND_NEQ:
if (sp < 1)
return -1;
sp--;
s[sp] = (s[sp] != s[sp + 1]);
break;
default:
return -1;
}
}
return s[0];
}
/*
* evaluate_cond_node evaluates the conditional stored in
* a struct cond_node and if the result is different than the
* current state of the node it sets the rules in the true/false
* list appropriately. If the result of the expression is undefined
* all of the rules are disabled for safety.
*/
int evaluate_cond_node(struct policydb *p, struct cond_node *node)
{
int new_state;
struct cond_av_list *cur;
new_state = cond_evaluate_expr(p, node->expr);
if (new_state != node->cur_state) {
node->cur_state = new_state;
if (new_state == -1)
printk(KERN_ERR "SELinux: expression result was undefined - disabling all rules.\n");
/* turn the rules on or off */
for (cur = node->true_list; cur; cur = cur->next) {
if (new_state <= 0)
cur->node->key.specified &= ~AVTAB_ENABLED;
else
cur->node->key.specified |= AVTAB_ENABLED;
}
for (cur = node->false_list; cur; cur = cur->next) {
/* -1 or 1 */
if (new_state)
cur->node->key.specified &= ~AVTAB_ENABLED;
else
cur->node->key.specified |= AVTAB_ENABLED;
}
}
return 0;
}
int cond_policydb_init(struct policydb *p)
{
int rc;
p->bool_val_to_struct = NULL;
p->cond_list = NULL;
rc = avtab_init(&p->te_cond_avtab);
if (rc)
return rc;
return 0;
}
static void cond_av_list_destroy(struct cond_av_list *list)
{
struct cond_av_list *cur, *next;
for (cur = list; cur; cur = next) {
next = cur->next;
/* the avtab_ptr_t node is destroyed by the avtab */
kfree(cur);
}
}
static void cond_node_destroy(struct cond_node *node)
{
struct cond_expr *cur_expr, *next_expr;
for (cur_expr = node->expr; cur_expr; cur_expr = next_expr) {
next_expr = cur_expr->next;
kfree(cur_expr);
}
cond_av_list_destroy(node->true_list);
cond_av_list_destroy(node->false_list);
kfree(node);
}
static void cond_list_destroy(struct cond_node *list)
{
struct cond_node *next, *cur;
if (list == NULL)
return;
for (cur = list; cur; cur = next) {
next = cur->next;
cond_node_destroy(cur);
}
}
void cond_policydb_destroy(struct policydb *p)
{
kfree(p->bool_val_to_struct);
avtab_destroy(&p->te_cond_avtab);
cond_list_destroy(p->cond_list);
}
int cond_init_bool_indexes(struct policydb *p)
{
kfree(p->bool_val_to_struct);
p->bool_val_to_struct =
kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
if (!p->bool_val_to_struct)
return -ENOMEM;
return 0;
}
int cond_destroy_bool(void *key, void *datum, void *p)
{
kfree(key);
kfree(datum);
return 0;
}
int cond_index_bool(void *key, void *datum, void *datap)
{
struct policydb *p;
struct cond_bool_datum *booldatum;
struct flex_array *fa;
booldatum = datum;
p = datap;
if (!booldatum->value || booldatum->value > p->p_bools.nprim)
return -EINVAL;
fa = p->sym_val_to_name[SYM_BOOLS];
if (flex_array_put_ptr(fa, booldatum->value - 1, key,
GFP_KERNEL | __GFP_ZERO))
BUG();
p->bool_val_to_struct[booldatum->value - 1] = booldatum;
return 0;
}
static int bool_isvalid(struct cond_bool_datum *b)
{
if (!(b->state == 0 || b->state == 1))
return 0;
return 1;
}
int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
{
char *key = NULL;
struct cond_bool_datum *booldatum;
__le32 buf[3];
u32 len;
int rc;
booldatum = kzalloc(sizeof(struct cond_bool_datum), GFP_KERNEL);
if (!booldatum)
return -ENOMEM;
rc = next_entry(buf, fp, sizeof buf);
if (rc)
goto err;
booldatum->value = le32_to_cpu(buf[0]);
booldatum->state = le32_to_cpu(buf[1]);
rc = -EINVAL;
if (!bool_isvalid(booldatum))
goto err;
len = le32_to_cpu(buf[2]);
rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
if (!key)
goto err;
rc = next_entry(key, fp, len);
if (rc)
goto err;
key[len] = '\0';
rc = hashtab_insert(h, key, booldatum);
if (rc)
goto err;
return 0;
err:
cond_destroy_bool(key, booldatum, NULL);
return rc;
}
struct cond_insertf_data {
struct policydb *p;
struct cond_av_list *other;
struct cond_av_list *head;
struct cond_av_list *tail;
};
static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum *d, void *ptr)
{
struct cond_insertf_data *data = ptr;
struct policydb *p = data->p;
struct cond_av_list *other = data->other, *list, *cur;
struct avtab_node *node_ptr;
u8 found;
int rc = -EINVAL;
/*
* For type rules we have to make certain there aren't any
* conflicting rules by searching the te_avtab and the
* cond_te_avtab.
*/
if (k->specified & AVTAB_TYPE) {
if (avtab_search(&p->te_avtab, k)) {
printk(KERN_ERR "SELinux: type rule already exists outside of a conditional.\n");
goto err;
}
/*
* If we are reading the false list other will be a pointer to
* the true list. We can have duplicate entries if there is only
* 1 other entry and it is in our true list.
*
* If we are reading the true list (other == NULL) there shouldn't
* be any other entries.
*/
if (other) {
node_ptr = avtab_search_node(&p->te_cond_avtab, k);
if (node_ptr) {
if (avtab_search_node_next(node_ptr, k->specified)) {
printk(KERN_ERR "SELinux: too many conflicting type rules.\n");
goto err;
}
found = 0;
for (cur = other; cur; cur = cur->next) {
if (cur->node == node_ptr) {
found = 1;
break;
}
}
if (!found) {
printk(KERN_ERR "SELinux: conflicting type rules.\n");
goto err;
}
}
} else {
if (avtab_search(&p->te_cond_avtab, k)) {
printk(KERN_ERR "SELinux: conflicting type rules when adding type rule for true.\n");
goto err;
}
}
}
node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
if (!node_ptr) {
printk(KERN_ERR "SELinux: could not insert rule.\n");
rc = -ENOMEM;
goto err;
}
list = kzalloc(sizeof(struct cond_av_list), GFP_KERNEL);
if (!list) {
rc = -ENOMEM;
goto err;
}
list->node = node_ptr;
if (!data->head)
data->head = list;
else
data->tail->next = list;
data->tail = list;
return 0;
err:
cond_av_list_destroy(data->head);
data->head = NULL;
return rc;
}
static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other)
{
int i, rc;
__le32 buf[1];
u32 len;
struct cond_insertf_data data;
*ret_list = NULL;
len = 0;
rc = next_entry(buf, fp, sizeof(u32));
if (rc)
return rc;
len = le32_to_cpu(buf[0]);
if (len == 0)
return 0;
data.p = p;
data.other = other;
data.head = NULL;
data.tail = NULL;
for (i = 0; i < len; i++) {
rc = avtab_read_item(&p->te_cond_avtab, fp, p, cond_insertf,
&data);
if (rc)
return rc;
}
*ret_list = data.head;
return 0;
}
static int expr_isvalid(struct policydb *p, struct cond_expr *expr)
{
if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) {
printk(KERN_ERR "SELinux: conditional expressions uses unknown operator.\n");
return 0;
}
if (expr->bool > p->p_bools.nprim) {
printk(KERN_ERR "SELinux: conditional expressions uses unknown bool.\n");
return 0;
}
return 1;
}
static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
{
__le32 buf[2];
u32 len, i;
int rc;
struct cond_expr *expr = NULL, *last = NULL;
rc = next_entry(buf, fp, sizeof(u32) * 2);
if (rc)
goto err;
node->cur_state = le32_to_cpu(buf[0]);
/* expr */
len = le32_to_cpu(buf[1]);
for (i = 0; i < len; i++) {
rc = next_entry(buf, fp, sizeof(u32) * 2);
if (rc)
goto err;
rc = -ENOMEM;
expr = kzalloc(sizeof(struct cond_expr), GFP_KERNEL);
if (!expr)
goto err;
expr->expr_type = le32_to_cpu(buf[0]);
expr->bool = le32_to_cpu(buf[1]);
if (!expr_isvalid(p, expr)) {
rc = -EINVAL;
kfree(expr);
goto err;
}
if (i == 0)
node->expr = expr;
else
last->next = expr;
last = expr;
}
rc = cond_read_av_list(p, fp, &node->true_list, NULL);
if (rc)
goto err;
rc = cond_read_av_list(p, fp, &node->false_list, node->true_list);
if (rc)
goto err;
return 0;
err:
cond_node_destroy(node);
return rc;
}
int cond_read_list(struct policydb *p, void *fp)
{
struct cond_node *node, *last = NULL;
__le32 buf[1];
u32 i, len;
int rc;
rc = next_entry(buf, fp, sizeof buf);
if (rc)
return rc;
len = le32_to_cpu(buf[0]);
rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
if (rc)
goto err;
for (i = 0; i < len; i++) {
rc = -ENOMEM;
node = kzalloc(sizeof(struct cond_node), GFP_KERNEL);
if (!node)
goto err;
rc = cond_read_node(p, node, fp);
if (rc)
goto err;
if (i == 0)
p->cond_list = node;
else
last->next = node;
last = node;
}
return 0;
err:
cond_list_destroy(p->cond_list);
p->cond_list = NULL;
return rc;
}
int cond_write_bool(void *vkey, void *datum, void *ptr)
{
char *key = vkey;
struct cond_bool_datum *booldatum = datum;
struct policy_data *pd = ptr;
void *fp = pd->fp;
__le32 buf[3];
u32 len;
int rc;
len = strlen(key);
buf[0] = cpu_to_le32(booldatum->value);
buf[1] = cpu_to_le32(booldatum->state);
buf[2] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
rc = put_entry(key, 1, len, fp);
if (rc)
return rc;
return 0;
}
/*
* cond_write_av_list doesn't write out the av_list nodes.
* Instead it writes out the key/value pairs from the avtab. This
* is necessary because there is no way to uniquely identify rules
* in the avtab so it is not possible to associate individual rules
* in the avtab with a conditional without saving them as part of
* the conditional. This means that the avtab with the conditional
* rules will not be saved but will be rebuilt on policy load.
*/
static int cond_write_av_list(struct policydb *p,
struct cond_av_list *list, struct policy_file *fp)
{
__le32 buf[1];
struct cond_av_list *cur_list;
u32 len;
int rc;
len = 0;
for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
if (len == 0)
return 0;
for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
rc = avtab_write_item(p, cur_list->node, fp);
if (rc)
return rc;
}
return 0;
}
static int cond_write_node(struct policydb *p, struct cond_node *node,
struct policy_file *fp)
{
struct cond_expr *cur_expr;
__le32 buf[2];
int rc;
u32 len = 0;
buf[0] = cpu_to_le32(node->cur_state);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
buf[0] = cpu_to_le32(cur_expr->expr_type);
buf[1] = cpu_to_le32(cur_expr->bool);
rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
}
rc = cond_write_av_list(p, node->true_list, fp);
if (rc)
return rc;
rc = cond_write_av_list(p, node->false_list, fp);
if (rc)
return rc;
return 0;
}
int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
{
struct cond_node *cur;
u32 len;
__le32 buf[1];
int rc;
len = 0;
for (cur = list; cur != NULL; cur = cur->next)
len++;
buf[0] = cpu_to_le32(len);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
for (cur = list; cur != NULL; cur = cur->next) {
rc = cond_write_node(p, cur, fp);
if (rc)
return rc;
}
return 0;
}
void cond_compute_operation(struct avtab *ctab, struct avtab_key *key,
struct operation_decision *od)
{
struct avtab_node *node;
if (!ctab || !key || !od)
return;
for (node = avtab_search_node(ctab, key); node;
node = avtab_search_node_next(node, key->specified)) {
if (node->key.specified & AVTAB_ENABLED)
services_compute_operation_num(od, node);
}
return;
}
/* Determine whether additional permissions are granted by the conditional
* av table, and if so, add them to the result
*/
void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
struct av_decision *avd, struct operation *ops)
{
struct avtab_node *node;
if (!ctab || !key || !avd || !ops)
return;
for (node = avtab_search_node(ctab, key); node;
node = avtab_search_node_next(node, key->specified)) {
if ((u16)(AVTAB_ALLOWED|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED)))
avd->allowed |= node->datum.u.data;
if ((u16)(AVTAB_AUDITDENY|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED)))
/* Since a '0' in an auditdeny mask represents a
* permission we do NOT want to audit (dontaudit), we use
* the '&' operand to ensure that all '0's in the mask
* are retained (much unlike the allow and auditallow cases).
*/
avd->auditdeny &= node->datum.u.data;
if ((u16)(AVTAB_AUDITALLOW|AVTAB_ENABLED) ==
(node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED)))
avd->auditallow |= node->datum.u.data;
if ((node->key.specified & AVTAB_ENABLED) &&
(node->key.specified & AVTAB_OP))
services_compute_operation_type(ops, node);
}
return;
}
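
To make the reverse polish notation handled by cond_evaluate_expr() concrete, the sketch below (illustrative only, not part of this commit; boolean value numbers 1 and 2 are placeholders) encodes the policy expression (A && !B) as the node list A, B, NOT, AND. Evaluation pushes the current state of A, pushes B, negates the top of the stack, then pops both operands and ANDs them, leaving the result in s[0].

#include "conditional.h"

/* Illustrative sketch of the RPN encoding consumed by cond_evaluate_expr(). */
static struct cond_expr example_expr[4] = {
        { .expr_type = COND_BOOL, .bool = 1, .next = &example_expr[1] }, /* push A  */
        { .expr_type = COND_BOOL, .bool = 2, .next = &example_expr[2] }, /* push B  */
        { .expr_type = COND_NOT,             .next = &example_expr[3] }, /* !B      */
        { .expr_type = COND_AND,             .next = NULL },             /* A && !B */
};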

security/selinux/ss/conditional.h Normal file

@@ -0,0 +1,82 @@
/* Authors: Karl MacMillan <kmacmillan@tresys.com>
* Frank Mayer <mayerf@tresys.com>
*
* Copyright (C) 2003 - 2004 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*/
#ifndef _CONDITIONAL_H_
#define _CONDITIONAL_H_
#include "avtab.h"
#include "symtab.h"
#include "policydb.h"
#include "../include/conditional.h"
#define COND_EXPR_MAXDEPTH 10
/*
* A conditional expression is a list of operators and operands
* in reverse polish notation.
*/
struct cond_expr {
#define COND_BOOL 1 /* plain bool */
#define COND_NOT 2 /* !bool */
#define COND_OR 3 /* bool || bool */
#define COND_AND 4 /* bool && bool */
#define COND_XOR 5 /* bool ^ bool */
#define COND_EQ 6 /* bool == bool */
#define COND_NEQ 7 /* bool != bool */
#define COND_LAST COND_NEQ
__u32 expr_type;
__u32 bool;
struct cond_expr *next;
};
/*
* Each cond_node contains a list of rules to be enabled/disabled
* depending on the current value of the conditional expression. This
* struct is for that list.
*/
struct cond_av_list {
struct avtab_node *node;
struct cond_av_list *next;
};
/*
* A cond node represents a conditional block in a policy. It
* contains a conditional expression, the current state of the expression,
* two lists of rules to enable/disable depending on the value of the
* expression (the true list corresponds to if and the false list corresponds
* to else).
*/
struct cond_node {
int cur_state;
struct cond_expr *expr;
struct cond_av_list *true_list;
struct cond_av_list *false_list;
struct cond_node *next;
};
int cond_policydb_init(struct policydb *p);
void cond_policydb_destroy(struct policydb *p);
int cond_init_bool_indexes(struct policydb *p);
int cond_destroy_bool(void *key, void *datum, void *p);
int cond_index_bool(void *key, void *datum, void *datap);
int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
int cond_read_list(struct policydb *p, void *fp);
int cond_write_bool(void *key, void *datum, void *ptr);
int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
void cond_compute_av(struct avtab *ctab, struct avtab_key *key,
struct av_decision *avd, struct operation *ops);
void cond_compute_operation(struct avtab *ctab, struct avtab_key *key,
struct operation_decision *od);
int evaluate_cond_node(struct policydb *p, struct cond_node *node);
#endif /* _CONDITIONAL_H_ */
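
As a usage sketch for the declarations above (hypothetical caller, not part of this commit): after a boolean's value changes, every conditional block in the policy is re-evaluated by walking the policydb's cond_list, which is what the boolean-update path in services.c effectively does.

#include "conditional.h"

/* Illustrative sketch: re-evaluate all conditional blocks after a bool flips. */
static void example_reevaluate_all(struct policydb *p)
{
        struct cond_node *cur;

        for (cur = p->cond_list; cur; cur = cur->next)
                evaluate_cond_node(p, cur);   /* enables/disables each rule list */
}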

security/selinux/ss/constraint.h Normal file

@@ -0,0 +1,62 @@
/*
* A constraint is a condition that must be satisfied in
* order for one or more permissions to be granted.
* Constraints are used to impose additional restrictions
* beyond the type-based rules in `te' or the role-based
* transition rules in `rbac'. Constraints are typically
* used to prevent a process from transitioning to a new user
* identity or role unless it is in a privileged type.
* Constraints are likewise typically used to prevent a
* process from labeling an object with a different user
* identity.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#ifndef _SS_CONSTRAINT_H_
#define _SS_CONSTRAINT_H_
#include "ebitmap.h"
#define CEXPR_MAXDEPTH 5
struct constraint_expr {
#define CEXPR_NOT 1 /* not expr */
#define CEXPR_AND 2 /* expr and expr */
#define CEXPR_OR 3 /* expr or expr */
#define CEXPR_ATTR 4 /* attr op attr */
#define CEXPR_NAMES 5 /* attr op names */
u32 expr_type; /* expression type */
#define CEXPR_USER 1 /* user */
#define CEXPR_ROLE 2 /* role */
#define CEXPR_TYPE 4 /* type */
#define CEXPR_TARGET 8 /* target if set, source otherwise */
#define CEXPR_XTARGET 16 /* special 3rd target for validatetrans rule */
#define CEXPR_L1L2 32 /* low level 1 vs. low level 2 */
#define CEXPR_L1H2 64 /* low level 1 vs. high level 2 */
#define CEXPR_H1L2 128 /* high level 1 vs. low level 2 */
#define CEXPR_H1H2 256 /* high level 1 vs. high level 2 */
#define CEXPR_L1H1 512 /* low level 1 vs. high level 1 */
#define CEXPR_L2H2 1024 /* low level 2 vs. high level 2 */
u32 attr; /* attribute */
#define CEXPR_EQ 1 /* == or eq */
#define CEXPR_NEQ 2 /* != */
#define CEXPR_DOM 3 /* dom */
#define CEXPR_DOMBY 4 /* domby */
#define CEXPR_INCOMP 5 /* incomp */
u32 op; /* operator */
struct ebitmap names; /* names */
struct type_set *type_names;
struct constraint_expr *next; /* next expression */
};
struct constraint_node {
u32 permissions; /* constrained permissions */
struct constraint_expr *expr; /* constraint on permissions */
struct constraint_node *next; /* next constraint */
};
#endif /* _SS_CONSTRAINT_H_ */
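
For illustration (not part of this commit): constraint expressions are stored in the same reverse polish form as conditional expressions. A policy statement along the lines of constrain { class } { perms } ( u1 == u2 or t1 == privileged_types ); would be encoded roughly as the three nodes below; the empty names ebitmap is a placeholder where the privileged types' bits would normally be set.

#include "constraint.h"

/* Illustrative sketch of an RPN-encoded constraint: (u1 == u2) OR (t1 in names). */
static struct constraint_expr example_cexpr[3] = {
        { .expr_type = CEXPR_ATTR,  .attr = CEXPR_USER, .op = CEXPR_EQ,
          .next = &example_cexpr[1] },                   /* u1 == u2      */
        { .expr_type = CEXPR_NAMES, .attr = CEXPR_TYPE, .op = CEXPR_EQ,
          .next = &example_cexpr[2] },                   /* t1 in names   */
        { .expr_type = CEXPR_OR,    .next = NULL },      /* OR of the two */
};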

security/selinux/ss/context.h Normal file

@@ -0,0 +1,163 @@
/*
* A security context is a set of security attributes
* associated with each subject and object controlled
* by the security policy. Security contexts are
* externally represented as variable-length strings
* that can be interpreted by a user or application
* with an understanding of the security policy.
* Internally, the security server uses a simple
* structure. This structure is private to the
* security server and can be changed without affecting
* clients of the security server.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#ifndef _SS_CONTEXT_H_
#define _SS_CONTEXT_H_
#include "ebitmap.h"
#include "mls_types.h"
#include "security.h"
/*
* A security context consists of an authenticated user
* identity, a role, a type and a MLS range.
*/
struct context {
u32 user;
u32 role;
u32 type;
u32 len; /* length of string in bytes */
struct mls_range range;
char *str; /* string representation if context cannot be mapped. */
};
static inline void mls_context_init(struct context *c)
{
memset(&c->range, 0, sizeof(c->range));
}
static inline int mls_context_cpy(struct context *dst, struct context *src)
{
int rc;
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
goto out;
dst->range.level[1].sens = src->range.level[1].sens;
rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
if (rc)
ebitmap_destroy(&dst->range.level[0].cat);
out:
return rc;
}
/*
* Sets both levels in the MLS range of 'dst' to the low level of 'src'.
*/
static inline int mls_context_cpy_low(struct context *dst, struct context *src)
{
int rc;
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
goto out;
dst->range.level[1].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[0].cat);
if (rc)
ebitmap_destroy(&dst->range.level[0].cat);
out:
return rc;
}
/*
* Sets both levels in the MLS range of 'dst' to the high level of 'src'.
*/
static inline int mls_context_cpy_high(struct context *dst, struct context *src)
{
int rc;
dst->range.level[0].sens = src->range.level[1].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
if (rc)
goto out;
dst->range.level[1].sens = src->range.level[1].sens;
rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
if (rc)
ebitmap_destroy(&dst->range.level[0].cat);
out:
return rc;
}
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
(c1->range.level[1].sens == c2->range.level[1].sens) &&
ebitmap_cmp(&c1->range.level[1].cat, &c2->range.level[1].cat));
}
static inline void mls_context_destroy(struct context *c)
{
ebitmap_destroy(&c->range.level[0].cat);
ebitmap_destroy(&c->range.level[1].cat);
mls_context_init(c);
}
static inline void context_init(struct context *c)
{
memset(c, 0, sizeof(*c));
}
static inline int context_cpy(struct context *dst, struct context *src)
{
int rc;
dst->user = src->user;
dst->role = src->role;
dst->type = src->type;
if (src->str) {
dst->str = kstrdup(src->str, GFP_ATOMIC);
if (!dst->str)
return -ENOMEM;
dst->len = src->len;
} else {
dst->str = NULL;
dst->len = 0;
}
rc = mls_context_cpy(dst, src);
if (rc) {
kfree(dst->str);
return rc;
}
return 0;
}
static inline void context_destroy(struct context *c)
{
c->user = c->role = c->type = 0;
kfree(c->str);
c->str = NULL;
c->len = 0;
mls_context_destroy(c);
}
static inline int context_cmp(struct context *c1, struct context *c2)
{
if (c1->len && c2->len)
return (c1->len == c2->len && !strcmp(c1->str, c2->str));
if (c1->len || c2->len)
return 0;
return ((c1->user == c2->user) &&
(c1->role == c2->role) &&
(c1->type == c2->type) &&
mls_context_cmp(c1, c2));
}
#endif /* _SS_CONTEXT_H_ */
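
A small usage sketch for the inline helpers above (hypothetical caller, not part of this commit): a context is duplicated with context_cpy(), compared with context_cmp(), and released with context_destroy(); the copy owns its own string and category bitmaps, so destroying it does not affect the original.

#include <linux/slab.h>
#include "context.h"

/* Illustrative sketch: copy, compare and release a security context. */
static int example_context_roundtrip(struct context *orig)
{
        struct context copy;
        int rc, same;

        context_init(&copy);
        rc = context_cpy(&copy, orig);
        if (rc)
                return rc;                      /* -ENOMEM on allocation failure */

        same = context_cmp(&copy, orig);        /* expected to return 1 */
        context_destroy(&copy);
        return same ? 0 : -EINVAL;
}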

security/selinux/ss/ebitmap.c Normal file

@@ -0,0 +1,512 @@
/*
* Implementation of the extensible bitmap type.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/*
* Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the NetLabel category bitmap
*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2006
*/
/*
* Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
* Applied standard bit operations to improve bitmap scanning.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <net/netlabel.h>
#include "ebitmap.h"
#include "policydb.h"
#define BITS_PER_U64 (sizeof(u64) * 8)
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
{
struct ebitmap_node *n1, *n2;
if (e1->highbit != e2->highbit)
return 0;
n1 = e1->node;
n2 = e2->node;
while (n1 && n2 &&
(n1->startbit == n2->startbit) &&
!memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
n1 = n1->next;
n2 = n2->next;
}
if (n1 || n2)
return 0;
return 1;
}
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
{
struct ebitmap_node *n, *new, *prev;
ebitmap_init(dst);
n = src->node;
prev = NULL;
while (n) {
new = kzalloc(sizeof(*new), GFP_ATOMIC);
if (!new) {
ebitmap_destroy(dst);
return -ENOMEM;
}
new->startbit = n->startbit;
memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
new->next = NULL;
if (prev)
prev->next = new;
else
dst->node = new;
prev = new;
n = n->next;
}
dst->highbit = src->highbit;
return 0;
}
#ifdef CONFIG_NETLABEL
/**
* ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
* @ebmap: the ebitmap to export
* @catmap: the NetLabel category bitmap
*
* Description:
* Export a SELinux extensible bitmap into a NetLabel category bitmap.
* Returns zero on success, negative values on error.
*
*/
int ebitmap_netlbl_export(struct ebitmap *ebmap,
struct netlbl_lsm_catmap **catmap)
{
struct ebitmap_node *e_iter = ebmap->node;
unsigned long e_map;
u32 offset;
unsigned int iter;
int rc;
if (e_iter == NULL) {
*catmap = NULL;
return 0;
}
if (*catmap != NULL)
netlbl_catmap_free(*catmap);
*catmap = NULL;
while (e_iter) {
offset = e_iter->startbit;
for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
e_map = e_iter->maps[iter];
if (e_map != 0) {
rc = netlbl_catmap_setlong(catmap,
offset,
e_map,
GFP_ATOMIC);
if (rc != 0)
goto netlbl_export_failure;
}
offset += EBITMAP_UNIT_SIZE;
}
e_iter = e_iter->next;
}
return 0;
netlbl_export_failure:
netlbl_catmap_free(*catmap);
return -ENOMEM;
}
/**
* ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
* @ebmap: the ebitmap to import
* @catmap: the NetLabel category bitmap
*
* Description:
* Import a NetLabel category bitmap into a SELinux extensible bitmap.
* Returns zero on success, negative values on error.
*
*/
int ebitmap_netlbl_import(struct ebitmap *ebmap,
struct netlbl_lsm_catmap *catmap)
{
int rc;
struct ebitmap_node *e_iter = NULL;
struct ebitmap_node *e_prev = NULL;
u32 offset = 0, idx;
unsigned long bitmap;
for (;;) {
rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
if (rc < 0)
goto netlbl_import_failure;
if (offset == (u32)-1)
return 0;
if (e_iter == NULL ||
offset >= e_iter->startbit + EBITMAP_SIZE) {
e_prev = e_iter;
e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC);
if (e_iter == NULL)
goto netlbl_import_failure;
e_iter->startbit = offset & ~(EBITMAP_SIZE - 1);
if (e_prev == NULL)
ebmap->node = e_iter;
else
e_prev->next = e_iter;
ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
}
/* offset will always be aligned to an unsigned long */
idx = EBITMAP_NODE_INDEX(e_iter, offset);
e_iter->maps[idx] = bitmap;
/* next */
offset += EBITMAP_UNIT_SIZE;
}
/* NOTE: we should never reach this return */
return 0;
netlbl_import_failure:
ebitmap_destroy(ebmap);
return -ENOMEM;
}
#endif /* CONFIG_NETLABEL */
/*
* Check to see if all the bits set in e2 are also set in e1. Optionally,
* if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
* last_e2bit.
*/
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
struct ebitmap_node *n1, *n2;
int i;
if (e1->highbit < e2->highbit)
return 0;
n1 = e1->node;
n2 = e2->node;
while (n1 && n2 && (n1->startbit <= n2->startbit)) {
if (n1->startbit < n2->startbit) {
n1 = n1->next;
continue;
}
for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
i--; /* Skip trailing NULL map entries */
if (last_e2bit && (i >= 0)) {
u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
__fls(n2->maps[i]);
if (lastsetbit > last_e2bit)
return 0;
}
while (i >= 0) {
if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
return 0;
i--;
}
n1 = n1->next;
n2 = n2->next;
}
if (n2)
return 0;
return 1;
}
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
{
struct ebitmap_node *n;
if (e->highbit < bit)
return 0;
n = e->node;
while (n && (n->startbit <= bit)) {
if ((n->startbit + EBITMAP_SIZE) > bit)
return ebitmap_node_get_bit(n, bit);
n = n->next;
}
return 0;
}
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
{
struct ebitmap_node *n, *prev, *new;
prev = NULL;
n = e->node;
while (n && n->startbit <= bit) {
if ((n->startbit + EBITMAP_SIZE) > bit) {
if (value) {
ebitmap_node_set_bit(n, bit);
} else {
unsigned int s;
ebitmap_node_clr_bit(n, bit);
s = find_first_bit(n->maps, EBITMAP_SIZE);
if (s < EBITMAP_SIZE)
return 0;
/* drop this node from the bitmap */
if (!n->next) {
/*
* this was the highest map
* within the bitmap
*/
if (prev)
e->highbit = prev->startbit
+ EBITMAP_SIZE;
else
e->highbit = 0;
}
if (prev)
prev->next = n->next;
else
e->node = n->next;
kfree(n);
}
return 0;
}
prev = n;
n = n->next;
}
if (!value)
return 0;
new = kzalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
return -ENOMEM;
new->startbit = bit - (bit % EBITMAP_SIZE);
ebitmap_node_set_bit(new, bit);
if (!n)
/* this node will be the highest map within the bitmap */
e->highbit = new->startbit + EBITMAP_SIZE;
if (prev) {
new->next = prev->next;
prev->next = new;
} else {
new->next = e->node;
e->node = new;
}
return 0;
}
void ebitmap_destroy(struct ebitmap *e)
{
struct ebitmap_node *n, *temp;
if (!e)
return;
n = e->node;
while (n) {
temp = n;
n = n->next;
kfree(temp);
}
e->highbit = 0;
e->node = NULL;
return;
}
int ebitmap_read(struct ebitmap *e, void *fp)
{
struct ebitmap_node *n = NULL;
u32 mapunit, count, startbit, index;
u64 map;
__le32 buf[3];
int rc, i;
ebitmap_init(e);
rc = next_entry(buf, fp, sizeof buf);
if (rc < 0)
goto out;
mapunit = le32_to_cpu(buf[0]);
e->highbit = le32_to_cpu(buf[1]);
count = le32_to_cpu(buf[2]);
if (mapunit != BITS_PER_U64) {
printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
"match my size %Zd (high bit was %d)\n",
mapunit, BITS_PER_U64, e->highbit);
goto bad;
}
/* round up e->highbit */
e->highbit += EBITMAP_SIZE - 1;
e->highbit -= (e->highbit % EBITMAP_SIZE);
if (!e->highbit) {
e->node = NULL;
goto ok;
}
for (i = 0; i < count; i++) {
rc = next_entry(&startbit, fp, sizeof(u32));
if (rc < 0) {
printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
goto bad;
}
startbit = le32_to_cpu(startbit);
if (startbit & (mapunit - 1)) {
printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
"not a multiple of the map unit size (%u)\n",
startbit, mapunit);
goto bad;
}
if (startbit > e->highbit - mapunit) {
printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
"beyond the end of the bitmap (%u)\n",
startbit, (e->highbit - mapunit));
goto bad;
}
if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
struct ebitmap_node *tmp;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp) {
printk(KERN_ERR
"SELinux: ebitmap: out of memory\n");
rc = -ENOMEM;
goto bad;
}
/* round down */
tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
if (n)
n->next = tmp;
else
e->node = tmp;
n = tmp;
} else if (startbit <= n->startbit) {
printk(KERN_ERR "SELinux: ebitmap: start bit %d"
" comes after start bit %d\n",
startbit, n->startbit);
goto bad;
}
rc = next_entry(&map, fp, sizeof(u64));
if (rc < 0) {
printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
goto bad;
}
map = le64_to_cpu(map);
index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
while (map) {
n->maps[index++] = map & (-1UL);
map = EBITMAP_SHIFT_UNIT_SIZE(map);
}
}
ok:
rc = 0;
out:
return rc;
bad:
if (!rc)
rc = -EINVAL;
ebitmap_destroy(e);
goto out;
}
int ebitmap_write(struct ebitmap *e, void *fp)
{
struct ebitmap_node *n;
u32 count;
__le32 buf[3];
u64 map;
int bit, last_bit, last_startbit, rc;
buf[0] = cpu_to_le32(BITS_PER_U64);
count = 0;
last_bit = 0;
last_startbit = -1;
ebitmap_for_each_positive_bit(e, n, bit) {
if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
count++;
last_startbit = rounddown(bit, BITS_PER_U64);
}
last_bit = roundup(bit + 1, BITS_PER_U64);
}
buf[1] = cpu_to_le32(last_bit);
buf[2] = cpu_to_le32(count);
rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
return rc;
map = 0;
last_startbit = INT_MIN;
ebitmap_for_each_positive_bit(e, n, bit) {
if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
__le64 buf64[1];
/* this is the very first bit */
if (!map) {
last_startbit = rounddown(bit, BITS_PER_U64);
map = (u64)1 << (bit - last_startbit);
continue;
}
/* write the last node */
buf[0] = cpu_to_le32(last_startbit);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
buf64[0] = cpu_to_le64(map);
rc = put_entry(buf64, sizeof(u64), 1, fp);
if (rc)
return rc;
/* set up for the next node */
map = 0;
last_startbit = rounddown(bit, BITS_PER_U64);
}
map |= (u64)1 << (bit - last_startbit);
}
/* write the last node */
if (map) {
__le64 buf64[1];
/* write the last node */
buf[0] = cpu_to_le32(last_startbit);
rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
return rc;
buf64[0] = cpu_to_le64(map);
rc = put_entry(buf64, sizeof(u64), 1, fp);
if (rc)
return rc;
}
return 0;
}
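
To illustrate the node layout that the functions above maintain (hypothetical caller, not part of this commit): each ebitmap_node covers EBITMAP_SIZE bits starting at its startbit, so bits that are far apart end up in separate nodes, and bits that were never set simply read back as zero.

#include <linux/kernel.h>
#include "ebitmap.h"

/* Illustrative sketch: two distant bits land in two separate nodes. */
static int example_ebitmap_usage(void)
{
        struct ebitmap e;
        int rc;

        ebitmap_init(&e);
        rc = ebitmap_set_bit(&e, 3, 1);            /* first node, startbit 0  */
        if (!rc)
                rc = ebitmap_set_bit(&e, 1000, 1); /* allocates a second node */
        if (rc) {
                ebitmap_destroy(&e);
                return rc;
        }
        WARN_ON(!ebitmap_get_bit(&e, 3) || !ebitmap_get_bit(&e, 1000));
        WARN_ON(ebitmap_get_bit(&e, 4));           /* never set, reads as 0   */
        ebitmap_destroy(&e);
        return 0;
}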

security/selinux/ss/ebitmap.h Normal file

@@ -0,0 +1,151 @@
/*
* An extensible bitmap is a bitmap that supports an
* arbitrary number of bits. Extensible bitmaps are
* used to represent sets of values, such as types,
* roles, categories, and classes.
*
* Each extensible bitmap is implemented as a linked
* list of bitmap nodes, where each bitmap node has
* an explicitly specified starting bit position within
* the total bitmap.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#ifndef _SS_EBITMAP_H_
#define _SS_EBITMAP_H_
#include <net/netlabel.h>
#ifdef CONFIG_64BIT
#define EBITMAP_NODE_SIZE 64
#else
#define EBITMAP_NODE_SIZE 32
#endif
#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
/ sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE BITS_PER_LONG
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
#define EBITMAP_BIT 1ULL
#define EBITMAP_SHIFT_UNIT_SIZE(x) \
(((x) >> EBITMAP_UNIT_SIZE / 2) >> EBITMAP_UNIT_SIZE / 2)
struct ebitmap_node {
struct ebitmap_node *next;
unsigned long maps[EBITMAP_UNIT_NUMS];
u32 startbit;
};
struct ebitmap {
struct ebitmap_node *node; /* first node in the bitmap */
u32 highbit; /* highest position in the total bitmap */
};
#define ebitmap_length(e) ((e)->highbit)
static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
struct ebitmap_node **n)
{
unsigned int ofs;
for (*n = e->node; *n; *n = (*n)->next) {
ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
if (ofs < EBITMAP_SIZE)
return (*n)->startbit + ofs;
}
return ebitmap_length(e);
}
static inline void ebitmap_init(struct ebitmap *e)
{
memset(e, 0, sizeof(*e));
}
static inline unsigned int ebitmap_next_positive(struct ebitmap *e,
struct ebitmap_node **n,
unsigned int bit)
{
unsigned int ofs;
ofs = find_next_bit((*n)->maps, EBITMAP_SIZE, bit - (*n)->startbit + 1);
if (ofs < EBITMAP_SIZE)
return ofs + (*n)->startbit;
for (*n = (*n)->next; *n; *n = (*n)->next) {
ofs = find_first_bit((*n)->maps, EBITMAP_SIZE);
if (ofs < EBITMAP_SIZE)
return ofs + (*n)->startbit;
}
return ebitmap_length(e);
}
#define EBITMAP_NODE_INDEX(node, bit) \
(((bit) - (node)->startbit) / EBITMAP_UNIT_SIZE)
#define EBITMAP_NODE_OFFSET(node, bit) \
(((bit) - (node)->startbit) % EBITMAP_UNIT_SIZE)
static inline int ebitmap_node_get_bit(struct ebitmap_node *n,
unsigned int bit)
{
unsigned int index = EBITMAP_NODE_INDEX(n, bit);
unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
BUG_ON(index >= EBITMAP_UNIT_NUMS);
if ((n->maps[index] & (EBITMAP_BIT << ofs)))
return 1;
return 0;
}
static inline void ebitmap_node_set_bit(struct ebitmap_node *n,
unsigned int bit)
{
unsigned int index = EBITMAP_NODE_INDEX(n, bit);
unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
BUG_ON(index >= EBITMAP_UNIT_NUMS);
n->maps[index] |= (EBITMAP_BIT << ofs);
}
static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
unsigned int bit)
{
unsigned int index = EBITMAP_NODE_INDEX(n, bit);
unsigned int ofs = EBITMAP_NODE_OFFSET(n, bit);
BUG_ON(index >= EBITMAP_UNIT_NUMS);
n->maps[index] &= ~(EBITMAP_BIT << ofs);
}
#define ebitmap_for_each_positive_bit(e, n, bit) \
for (bit = ebitmap_start_positive(e, &n); \
bit < ebitmap_length(e); \
bit = ebitmap_next_positive(e, &n, bit)) \

int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
int ebitmap_read(struct ebitmap *e, void *fp);
int ebitmap_write(struct ebitmap *e, void *fp);
#ifdef CONFIG_NETLABEL
int ebitmap_netlbl_export(struct ebitmap *ebmap,
struct netlbl_lsm_catmap **catmap);
int ebitmap_netlbl_import(struct ebitmap *ebmap,
struct netlbl_lsm_catmap *catmap);
#else
static inline int ebitmap_netlbl_export(struct ebitmap *ebmap,
struct netlbl_lsm_catmap **catmap)
{
return -ENOMEM;
}
static inline int ebitmap_netlbl_import(struct ebitmap *ebmap,
struct netlbl_lsm_catmap *catmap)
{
return -ENOMEM;
}
#endif
#endif /* _SS_EBITMAP_H_ */
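
As a usage sketch for the iterator macro above (hypothetical caller, not part of this commit), ebitmap_for_each_positive_bit() visits only the bits that are set, skipping empty map words, which is how the MLS code walks category sets.

#include "ebitmap.h"

/* Illustrative sketch: count the set bits in an extensible bitmap. */
static u32 example_count_bits(struct ebitmap *e)
{
        struct ebitmap_node *n;
        unsigned int bit;
        u32 count = 0;

        ebitmap_for_each_positive_bit(e, n, bit)
                count++;
        return count;
}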

security/selinux/ss/hashtab.c Normal file

@@ -0,0 +1,168 @@
/*
* Implementation of the hash table type.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include "hashtab.h"
struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
u32 size)
{
struct hashtab *p;
u32 i;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return p;
p->size = size;
p->nel = 0;
p->hash_value = hash_value;
p->keycmp = keycmp;
p->htable = kmalloc(sizeof(*(p->htable)) * size, GFP_KERNEL);
if (p->htable == NULL) {
kfree(p);
return NULL;
}
for (i = 0; i < size; i++)
p->htable[i] = NULL;
return p;
}
int hashtab_insert(struct hashtab *h, void *key, void *datum)
{
u32 hvalue;
struct hashtab_node *prev, *cur, *newnode;
cond_resched();
if (!h || h->nel == HASHTAB_MAX_NODES)
return -EINVAL;
hvalue = h->hash_value(h, key);
prev = NULL;
cur = h->htable[hvalue];
while (cur && h->keycmp(h, key, cur->key) > 0) {
prev = cur;
cur = cur->next;
}
if (cur && (h->keycmp(h, key, cur->key) == 0))
return -EEXIST;
newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
if (newnode == NULL)
return -ENOMEM;
newnode->key = key;
newnode->datum = datum;
if (prev) {
newnode->next = prev->next;
prev->next = newnode;
} else {
newnode->next = h->htable[hvalue];
h->htable[hvalue] = newnode;
}
h->nel++;
return 0;
}
void *hashtab_search(struct hashtab *h, const void *key)
{
u32 hvalue;
struct hashtab_node *cur;
if (!h)
return NULL;
hvalue = h->hash_value(h, key);
cur = h->htable[hvalue];
while (cur && h->keycmp(h, key, cur->key) > 0)
cur = cur->next;
if (cur == NULL || (h->keycmp(h, key, cur->key) != 0))
return NULL;
return cur->datum;
}
void hashtab_destroy(struct hashtab *h)
{
u32 i;
struct hashtab_node *cur, *temp;
if (!h)
return;
for (i = 0; i < h->size; i++) {
cur = h->htable[i];
while (cur) {
temp = cur;
cur = cur->next;
kfree(temp);
}
h->htable[i] = NULL;
}
kfree(h->htable);
h->htable = NULL;
kfree(h);
}
int hashtab_map(struct hashtab *h,
int (*apply)(void *k, void *d, void *args),
void *args)
{
u32 i;
int ret;
struct hashtab_node *cur;
if (!h)
return 0;
for (i = 0; i < h->size; i++) {
cur = h->htable[i];
while (cur) {
ret = apply(cur->key, cur->datum, args);
if (ret)
return ret;
cur = cur->next;
}
}
return 0;
}
void hashtab_stat(struct hashtab *h, struct hashtab_info *info)
{
u32 i, chain_len, slots_used, max_chain_len;
struct hashtab_node *cur;
slots_used = 0;
max_chain_len = 0;
for (slots_used = max_chain_len = i = 0; i < h->size; i++) {
cur = h->htable[i];
if (cur) {
slots_used++;
chain_len = 0;
while (cur) {
chain_len++;
cur = cur->next;
}
if (chain_len > max_chain_len)
max_chain_len = chain_len;
}
}
info->slots_used = slots_used;
info->max_chain_len = max_chain_len;
}


@ -0,0 +1,87 @@
/*
* A hash table (hashtab) maintains associations between
* key values and datum values. The type of the key values
* and the type of the datum values is arbitrary. The
* functions for hash computation and key comparison are
* provided by the creator of the table.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#ifndef _SS_HASHTAB_H_
#define _SS_HASHTAB_H_
#define HASHTAB_MAX_NODES 0xffffffff
struct hashtab_node {
void *key;
void *datum;
struct hashtab_node *next;
};
struct hashtab {
struct hashtab_node **htable; /* hash table */
u32 size; /* number of slots in hash table */
u32 nel; /* number of elements in hash table */
u32 (*hash_value)(struct hashtab *h, const void *key);
/* hash function */
int (*keycmp)(struct hashtab *h, const void *key1, const void *key2);
/* key comparison function */
};
struct hashtab_info {
u32 slots_used;
u32 max_chain_len;
};
/*
* Creates a new hash table with the specified characteristics.
*
* Returns NULL if insufficient space is available or
* the new hash table otherwise.
*/
struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
u32 size);
/*
* Inserts the specified (key, datum) pair into the specified hash table.
*
* Returns -ENOMEM on memory allocation error,
* -EEXIST if there is already an entry with the same key,
* -EINVAL for general errors or
* 0 otherwise.
*/
int hashtab_insert(struct hashtab *h, void *k, void *d);
/*
* Searches for the entry with the specified key in the hash table.
*
* Returns NULL if no entry has the specified key or
* the datum of the entry otherwise.
*/
void *hashtab_search(struct hashtab *h, const void *k);
/*
* Destroys the specified hash table.
*/
void hashtab_destroy(struct hashtab *h);
/*
* Applies the specified apply function to (key,datum,args)
* for each entry in the specified hash table.
*
* The order in which the function is applied to the entries
* is dependent upon the internal structure of the hash table.
*
* If apply returns a non-zero status, then hashtab_map will cease
* iterating through the hash table and will propagate the error
* return to its caller.
*/
int hashtab_map(struct hashtab *h,
int (*apply)(void *k, void *d, void *args),
void *args);
/* Fill info with some hash table statistics */
void hashtab_stat(struct hashtab *h, struct hashtab_info *info);
#endif /* _SS_HASHTAB_H_ */

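A minimal usage sketch of the hashtab API declared above, assuming linux/string.h, linux/errno.h and "hashtab.h" are included; example_hash(), example_cmp() and example_lookup() are hypothetical names and the key/datum values are made up. The hash callback masks with (h->size - 1), so the table size should be a power of two, as it is throughout this policy code.

static u32 example_hash(struct hashtab *h, const void *key)
{
	const char *p = key;
	u32 val = 0;

	while (*p)
		val = (val << 4) ^ (val >> 28) ^ *p++;
	return val & (h->size - 1);
}

static int example_cmp(struct hashtab *h, const void *key1, const void *key2)
{
	return strcmp(key1, key2);
}

static int example_lookup(void)
{
	struct hashtab *h;
	static char key[] = "unconfined_t";	/* hypothetical key */
	static u32 datum = 42;			/* hypothetical datum */
	int rc;

	h = hashtab_create(example_hash, example_cmp, 64);
	if (!h)
		return -ENOMEM;
	rc = hashtab_insert(h, key, &datum);
	if (!rc && hashtab_search(h, "unconfined_t") != &datum)
		rc = -EINVAL;
	hashtab_destroy(h);	/* frees the nodes; keys and datums stay owned by the caller */
	return rc;
}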
672
security/selinux/ss/mls.c Normal file

@ -0,0 +1,672 @@
/*
* Implementation of the multi-level security (MLS) policy.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/*
* Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
*
* Support for enhanced MLS infrastructure.
*
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
*/
/*
* Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the MLS label from NetLabel
*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2006
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <net/netlabel.h>
#include "sidtab.h"
#include "mls.h"
#include "policydb.h"
#include "services.h"
/*
* Return the length in bytes for the MLS fields of the
* security context string representation of `context'.
*/
int mls_compute_context_len(struct context *context)
{
int i, l, len, head, prev;
char *nm;
struct ebitmap *e;
struct ebitmap_node *node;
if (!policydb.mls_enabled)
return 0;
len = 1; /* for the beginning ":" */
for (l = 0; l < 2; l++) {
int index_sens = context->range.level[l].sens;
len += strlen(sym_name(&policydb, SYM_LEVELS, index_sens - 1));
/* categories */
head = -2;
prev = -2;
e = &context->range.level[l].cat;
ebitmap_for_each_positive_bit(e, node, i) {
if (i - prev > 1) {
/* one or more negative bits are skipped */
if (head != prev) {
nm = sym_name(&policydb, SYM_CATS, prev);
len += strlen(nm) + 1;
}
nm = sym_name(&policydb, SYM_CATS, i);
len += strlen(nm) + 1;
head = i;
}
prev = i;
}
if (prev != head) {
nm = sym_name(&policydb, SYM_CATS, prev);
len += strlen(nm) + 1;
}
if (l == 0) {
if (mls_level_eq(&context->range.level[0],
&context->range.level[1]))
break;
else
len++;
}
}
return len;
}
/*
* Write the security context string representation of
* the MLS fields of `context' into the string `*scontext'.
* Update `*scontext' to point to the end of the MLS fields.
*/
void mls_sid_to_context(struct context *context,
char **scontext)
{
char *scontextp, *nm;
int i, l, head, prev;
struct ebitmap *e;
struct ebitmap_node *node;
if (!policydb.mls_enabled)
return;
scontextp = *scontext;
*scontextp = ':';
scontextp++;
for (l = 0; l < 2; l++) {
strcpy(scontextp, sym_name(&policydb, SYM_LEVELS,
context->range.level[l].sens - 1));
scontextp += strlen(scontextp);
/* categories */
head = -2;
prev = -2;
e = &context->range.level[l].cat;
ebitmap_for_each_positive_bit(e, node, i) {
if (i - prev > 1) {
/* one or more negative bits are skipped */
if (prev != head) {
if (prev - head > 1)
*scontextp++ = '.';
else
*scontextp++ = ',';
nm = sym_name(&policydb, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
if (prev < 0)
*scontextp++ = ':';
else
*scontextp++ = ',';
nm = sym_name(&policydb, SYM_CATS, i);
strcpy(scontextp, nm);
scontextp += strlen(nm);
head = i;
}
prev = i;
}
if (prev != head) {
if (prev - head > 1)
*scontextp++ = '.';
else
*scontextp++ = ',';
nm = sym_name(&policydb, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
if (l == 0) {
if (mls_level_eq(&context->range.level[0],
&context->range.level[1]))
break;
else
*scontextp++ = '-';
}
}
*scontext = scontextp;
return;
}
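/*
 * A worked example of the format produced above, assuming the loaded
 * policy names its levels s0..s15 and its categories c0..c1023: for a
 * context whose low level is s2 with categories {c1,c2,c3,c7} and whose
 * high level is s15 with categories {c0..c1023}, the MLS suffix is
 *
 *     :s2:c1.c3,c7-s15:c0.c1023
 *
 * '.' abbreviates a contiguous run of categories, ',' separates runs,
 * and the "-high" part is omitted when the low and high levels are equal.
 */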
int mls_level_isvalid(struct policydb *p, struct mls_level *l)
{
struct level_datum *levdatum;
if (!l->sens || l->sens > p->p_levels.nprim)
return 0;
levdatum = hashtab_search(p->p_levels.table,
sym_name(p, SYM_LEVELS, l->sens - 1));
if (!levdatum)
return 0;
/*
* Return 1 iff all the bits set in l->cat are also set in
* levdatum->level->cat and no bit in l->cat is larger than
* p->p_cats.nprim.
*/
return ebitmap_contains(&levdatum->level->cat, &l->cat,
p->p_cats.nprim);
}
int mls_range_isvalid(struct policydb *p, struct mls_range *r)
{
return (mls_level_isvalid(p, &r->level[0]) &&
mls_level_isvalid(p, &r->level[1]) &&
mls_level_dom(&r->level[1], &r->level[0]));
}
/*
* Return 1 if the MLS fields in the security context
* structure `c' are valid. Return 0 otherwise.
*/
int mls_context_isvalid(struct policydb *p, struct context *c)
{
struct user_datum *usrdatum;
if (!p->mls_enabled)
return 1;
if (!mls_range_isvalid(p, &c->range))
return 0;
if (c->role == OBJECT_R_VAL)
return 1;
/*
* User must be authorized for the MLS range.
*/
if (!c->user || c->user > p->p_users.nprim)
return 0;
usrdatum = p->user_val_to_struct[c->user - 1];
if (!mls_range_contains(usrdatum->range, c->range))
return 0; /* user may not be associated with range */
return 1;
}
/*
* Set the MLS fields in the security context structure
* `context' based on the string representation in
* the string `*scontext'. Update `*scontext' to
* point to the end of the string representation of
* the MLS fields.
*
* This function modifies the string in place, inserting
* NULL characters to terminate the MLS fields.
*
* If a def_sid is provided and no MLS field is present,
* copy the MLS field of the associated default context.
* Used for systems upgraded to MLS where objects may lack
* MLS fields.
*
* Policy read-lock must be held for sidtab lookup.
*
*/
int mls_context_to_sid(struct policydb *pol,
char oldc,
char **scontext,
struct context *context,
struct sidtab *s,
u32 def_sid)
{
char delim;
char *scontextp, *p, *rngptr;
struct level_datum *levdatum;
struct cat_datum *catdatum, *rngdatum;
int l, rc = -EINVAL;
if (!pol->mls_enabled) {
if (def_sid != SECSID_NULL && oldc)
*scontext += strlen(*scontext) + 1;
return 0;
}
/*
* No MLS component in the security context; try to map to the
* default if one is provided.
*/
if (!oldc) {
struct context *defcon;
if (def_sid == SECSID_NULL)
goto out;
defcon = sidtab_search(s, def_sid);
if (!defcon)
goto out;
rc = mls_context_cpy(context, defcon);
goto out;
}
/* Extract low sensitivity. */
scontextp = p = *scontext;
while (*p && *p != ':' && *p != '-')
p++;
delim = *p;
if (delim != '\0')
*p++ = '\0';
for (l = 0; l < 2; l++) {
levdatum = hashtab_search(pol->p_levels.table, scontextp);
if (!levdatum) {
rc = -EINVAL;
goto out;
}
context->range.level[l].sens = levdatum->level->sens;
if (delim == ':') {
/* Extract category set. */
while (1) {
scontextp = p;
while (*p && *p != ',' && *p != '-')
p++;
delim = *p;
if (delim != '\0')
*p++ = '\0';
/* Separate into a range if one exists */
rngptr = strchr(scontextp, '.');
if (rngptr != NULL) {
/* Remove '.' */
*rngptr++ = '\0';
}
catdatum = hashtab_search(pol->p_cats.table,
scontextp);
if (!catdatum) {
rc = -EINVAL;
goto out;
}
rc = ebitmap_set_bit(&context->range.level[l].cat,
catdatum->value - 1, 1);
if (rc)
goto out;
/* If range, set all categories in range */
if (rngptr) {
int i;
rngdatum = hashtab_search(pol->p_cats.table, rngptr);
if (!rngdatum) {
rc = -EINVAL;
goto out;
}
if (catdatum->value >= rngdatum->value) {
rc = -EINVAL;
goto out;
}
for (i = catdatum->value; i < rngdatum->value; i++) {
rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1);
if (rc)
goto out;
}
}
if (delim != ',')
break;
}
}
if (delim == '-') {
/* Extract high sensitivity. */
scontextp = p;
while (*p && *p != ':')
p++;
delim = *p;
if (delim != '\0')
*p++ = '\0';
} else
break;
}
if (l == 0) {
context->range.level[1].sens = context->range.level[0].sens;
rc = ebitmap_cpy(&context->range.level[1].cat,
&context->range.level[0].cat);
if (rc)
goto out;
}
*scontext = ++p;
rc = 0;
out:
return rc;
}
/*
* Set the MLS fields in the security context structure
* `context' based on the string representation in
* the string `str'. This function will allocate temporary memory with the
* given constraints of gfp_mask.
*/
int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
{
char *tmpstr, *freestr;
int rc;
if (!policydb.mls_enabled)
return -EINVAL;
/* we need freestr because mls_context_to_sid will change
the value of tmpstr */
tmpstr = freestr = kstrdup(str, gfp_mask);
if (!tmpstr) {
rc = -ENOMEM;
} else {
rc = mls_context_to_sid(&policydb, ':', &tmpstr, context,
NULL, SECSID_NULL);
kfree(freestr);
}
return rc;
}
/*
* Copies the MLS range `range' into `context'.
*/
int mls_range_set(struct context *context,
struct mls_range *range)
{
int l, rc = 0;
/* Copy the MLS range into the context */
for (l = 0; l < 2; l++) {
context->range.level[l].sens = range->level[l].sens;
rc = ebitmap_cpy(&context->range.level[l].cat,
&range->level[l].cat);
if (rc)
break;
}
return rc;
}
int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
struct context *usercon)
{
if (policydb.mls_enabled) {
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
struct mls_level *user_low = &(user->range.level[0]);
struct mls_level *user_clr = &(user->range.level[1]);
struct mls_level *user_def = &(user->dfltlevel);
struct mls_level *usercon_sen = &(usercon->range.level[0]);
struct mls_level *usercon_clr = &(usercon->range.level[1]);
/* Honor the user's default level if we can */
if (mls_level_between(user_def, fromcon_sen, fromcon_clr))
*usercon_sen = *user_def;
else if (mls_level_between(fromcon_sen, user_def, user_clr))
*usercon_sen = *fromcon_sen;
else if (mls_level_between(fromcon_clr, user_low, user_def))
*usercon_sen = *user_low;
else
return -EINVAL;
/* Lower the clearance of available contexts
if the clearance of "fromcon" is lower than
that of the user's default clearance (but
only if the "fromcon" clearance dominates
the user's computed sensitivity level) */
if (mls_level_dom(user_clr, fromcon_clr))
*usercon_clr = *fromcon_clr;
else if (mls_level_dom(fromcon_clr, user_clr))
*usercon_clr = *user_clr;
else
return -EINVAL;
}
return 0;
}
/*
* Convert the MLS fields in the security context
* structure `c' from the values specified in the
* policy `oldp' to the values specified in the policy `newp'.
*/
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
struct context *c)
{
struct level_datum *levdatum;
struct cat_datum *catdatum;
struct ebitmap bitmap;
struct ebitmap_node *node;
int l, i;
if (!policydb.mls_enabled)
return 0;
for (l = 0; l < 2; l++) {
levdatum = hashtab_search(newp->p_levels.table,
sym_name(oldp, SYM_LEVELS,
c->range.level[l].sens - 1));
if (!levdatum)
return -EINVAL;
c->range.level[l].sens = levdatum->level->sens;
ebitmap_init(&bitmap);
ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
int rc;
catdatum = hashtab_search(newp->p_cats.table,
sym_name(oldp, SYM_CATS, i));
if (!catdatum)
return -EINVAL;
rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
if (rc)
return rc;
cond_resched();
}
ebitmap_destroy(&c->range.level[l].cat);
c->range.level[l].cat = bitmap;
}
return 0;
}
int mls_compute_sid(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 specified,
struct context *newcontext,
bool sock)
{
struct range_trans rtr;
struct mls_range *r;
struct class_datum *cladatum;
int default_range = 0;
if (!policydb.mls_enabled)
return 0;
switch (specified) {
case AVTAB_TRANSITION:
/* Look for a range transition rule. */
rtr.source_type = scontext->type;
rtr.target_type = tcontext->type;
rtr.target_class = tclass;
r = hashtab_search(policydb.range_tr, &rtr);
if (r)
return mls_range_set(newcontext, r);
if (tclass && tclass <= policydb.p_classes.nprim) {
cladatum = policydb.class_val_to_struct[tclass - 1];
if (cladatum)
default_range = cladatum->default_range;
}
switch (default_range) {
case DEFAULT_SOURCE_LOW:
return mls_context_cpy_low(newcontext, scontext);
case DEFAULT_SOURCE_HIGH:
return mls_context_cpy_high(newcontext, scontext);
case DEFAULT_SOURCE_LOW_HIGH:
return mls_context_cpy(newcontext, scontext);
case DEFAULT_TARGET_LOW:
return mls_context_cpy_low(newcontext, tcontext);
case DEFAULT_TARGET_HIGH:
return mls_context_cpy_high(newcontext, tcontext);
case DEFAULT_TARGET_LOW_HIGH:
return mls_context_cpy(newcontext, tcontext);
}
/* Fallthrough */
case AVTAB_CHANGE:
if ((tclass == policydb.process_class) || (sock == true))
/* Use the process MLS attributes. */
return mls_context_cpy(newcontext, scontext);
else
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
/* fall through */
}
return -EINVAL;
}
#ifdef CONFIG_NETLABEL
/**
* mls_export_netlbl_lvl - Export the MLS sensitivity levels to NetLabel
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Given the security context copy the low MLS sensitivity level into the
* NetLabel MLS sensitivity level field.
*
*/
void mls_export_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
if (!policydb.mls_enabled)
return;
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
secattr->flags |= NETLBL_SECATTR_MLS_LVL;
}
/**
* mls_import_netlbl_lvl - Import the NetLabel MLS sensitivity levels
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Given the security context and the NetLabel security attributes, copy the
* NetLabel MLS sensitivity level into the context.
*
*/
void mls_import_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
if (!policydb.mls_enabled)
return;
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
context->range.level[1].sens = context->range.level[0].sens;
}
/**
* mls_export_netlbl_cat - Export the MLS categories to NetLabel
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Given the security context copy the low MLS categories into the NetLabel
* MLS category field. Returns zero on success, negative values on failure.
*
*/
int mls_export_netlbl_cat(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
int rc;
if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
&secattr->attr.mls.cat);
if (rc == 0 && secattr->attr.mls.cat != NULL)
secattr->flags |= NETLBL_SECATTR_MLS_CAT;
return rc;
}
/**
* mls_import_netlbl_cat - Import the MLS categories from NetLabel
* @context: the security context
* @secattr: the NetLabel security attributes
*
* Description:
* Copy the NetLabel security attributes into the SELinux context; since the
* NetLabel security attribute only contains a single MLS category use it for
* both the low and high categories of the context. Returns zero on success,
* negative values on failure.
*
*/
int mls_import_netlbl_cat(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
int rc;
if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
secattr->attr.mls.cat);
if (rc != 0)
goto import_netlbl_cat_failure;
rc = ebitmap_cpy(&context->range.level[1].cat,
&context->range.level[0].cat);
if (rc != 0)
goto import_netlbl_cat_failure;
return 0;
import_netlbl_cat_failure:
ebitmap_destroy(&context->range.level[0].cat);
ebitmap_destroy(&context->range.level[1].cat);
return rc;
}
#endif /* CONFIG_NETLABEL */

91
security/selinux/ss/mls.h Normal file

@ -0,0 +1,91 @@
/*
* Multi-level security (MLS) policy operations.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/*
* Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
*
* Support for enhanced MLS infrastructure.
*
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
*/
/*
* Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the MLS label from NetLabel
*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2006
*/
#ifndef _SS_MLS_H_
#define _SS_MLS_H_
#include "context.h"
#include "policydb.h"
int mls_compute_context_len(struct context *context);
void mls_sid_to_context(struct context *context, char **scontext);
int mls_context_isvalid(struct policydb *p, struct context *c);
int mls_range_isvalid(struct policydb *p, struct mls_range *r);
int mls_level_isvalid(struct policydb *p, struct mls_level *l);
int mls_context_to_sid(struct policydb *p,
char oldc,
char **scontext,
struct context *context,
struct sidtab *s,
u32 def_sid);
int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
int mls_range_set(struct context *context, struct mls_range *range);
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
struct context *context);
int mls_compute_sid(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 specified,
struct context *newcontext,
bool sock);
int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
struct context *usercon);
#ifdef CONFIG_NETLABEL
void mls_export_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr);
void mls_import_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr);
int mls_export_netlbl_cat(struct context *context,
struct netlbl_lsm_secattr *secattr);
int mls_import_netlbl_cat(struct context *context,
struct netlbl_lsm_secattr *secattr);
#else
static inline void mls_export_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return;
}
static inline void mls_import_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return;
}
static inline int mls_export_netlbl_cat(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return -ENOMEM;
}
static inline int mls_import_netlbl_cat(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
return -ENOMEM;
}
#endif
#endif /* _SS_MLS_H_ */

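mls_from_string(), declared above, parses a writable MLS string into a context under the given gfp mask. A minimal sketch of a caller, assuming the loaded policy defines s0 and c0; the example_set_mls() wrapper and its string are hypothetical.

static int example_set_mls(struct context *ctx)
{
	char mls[] = "s0:c0";	/* parsed from a kstrdup'd copy, so any string works */

	return mls_from_string(mls, ctx, GFP_KERNEL);
}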

@ -0,0 +1,51 @@
/*
* Type definitions for the multi-level security (MLS) policy.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/*
* Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
*
* Support for enhanced MLS infrastructure.
*
* Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
*/
#ifndef _SS_MLS_TYPES_H_
#define _SS_MLS_TYPES_H_
#include "security.h"
#include "ebitmap.h"
struct mls_level {
u32 sens; /* sensitivity */
struct ebitmap cat; /* category set */
};
struct mls_range {
struct mls_level level[2]; /* low == level[0], high == level[1] */
};
static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
{
return ((l1->sens == l2->sens) &&
ebitmap_cmp(&l1->cat, &l2->cat));
}
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
return ((l1->sens >= l2->sens) &&
ebitmap_contains(&l1->cat, &l2->cat, 0));
}
#define mls_level_incomp(l1, l2) \
(!mls_level_dom((l1), (l2)) && !mls_level_dom((l2), (l1)))
#define mls_level_between(l1, l2, l3) \
(mls_level_dom((l1), (l2)) && mls_level_dom((l3), (l1)))
#define mls_range_contains(r1, r2) \
(mls_level_dom(&(r2).level[0], &(r1).level[0]) && \
mls_level_dom(&(r1).level[1], &(r2).level[1]))
#endif /* _SS_MLS_TYPES_H_ */

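The dominance helpers above are the core of every MLS comparison: one level dominates another when its sensitivity is at least as high and its category set is a superset. A minimal sketch, assuming the ebitmap helpers from ebitmap.h; example_dom() is a hypothetical function.

static int example_dom(void)
{
	struct mls_level lo, hi;
	int rc, dom = 0;

	ebitmap_init(&lo.cat);
	ebitmap_init(&hi.cat);
	lo.sens = 1;				/* e.g. s0 */
	hi.sens = 2;				/* e.g. s1 */
	rc = ebitmap_set_bit(&lo.cat, 0, 1);	/* lo categories: {c0} */
	if (!rc)
		rc = ebitmap_set_bit(&hi.cat, 0, 1);
	if (!rc)
		rc = ebitmap_set_bit(&hi.cat, 1, 1);	/* hi categories: {c0,c1} */
	if (!rc)
		dom = mls_level_dom(&hi, &lo);	/* 1: hi dominates lo */
	ebitmap_destroy(&lo.cat);
	ebitmap_destroy(&hi.cat);
	return rc ? rc : !dom;
}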
File diff suppressed because it is too large


@ -0,0 +1,370 @@
/*
* A policy database (policydb) specifies the
* configuration data for the security policy.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/*
* Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
*
* Support for enhanced MLS infrastructure.
*
* Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
*
* Added conditional policy language extensions
*
* Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
* Copyright (C) 2003 - 2004 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*/
#ifndef _SS_POLICYDB_H_
#define _SS_POLICYDB_H_
#include <linux/flex_array.h>
#include "symtab.h"
#include "avtab.h"
#include "sidtab.h"
#include "ebitmap.h"
#include "mls_types.h"
#include "context.h"
#include "constraint.h"
/*
* A datum type is defined for each kind of symbol
* in the configuration data: individual permissions,
* common prefixes for access vectors, classes,
* users, roles, types, sensitivities, categories, etc.
*/
/* Permission attributes */
struct perm_datum {
u32 value; /* permission bit + 1 */
};
/* Attributes of a common prefix for access vectors */
struct common_datum {
u32 value; /* internal common value */
struct symtab permissions; /* common permissions */
};
/* Class attributes */
struct class_datum {
u32 value; /* class value */
char *comkey; /* common name */
struct common_datum *comdatum; /* common datum */
struct symtab permissions; /* class-specific permission symbol table */
struct constraint_node *constraints; /* constraints on class permissions */
struct constraint_node *validatetrans; /* special transition rules */
/* Options for how a new object's user, role, and type should be decided */
#define DEFAULT_SOURCE 1
#define DEFAULT_TARGET 2
char default_user;
char default_role;
char default_type;
/* Options for how a new object's range should be decided */
#define DEFAULT_SOURCE_LOW 1
#define DEFAULT_SOURCE_HIGH 2
#define DEFAULT_SOURCE_LOW_HIGH 3
#define DEFAULT_TARGET_LOW 4
#define DEFAULT_TARGET_HIGH 5
#define DEFAULT_TARGET_LOW_HIGH 6
char default_range;
};
/* Role attributes */
struct role_datum {
u32 value; /* internal role value */
u32 bounds; /* boundary of role */
struct ebitmap dominates; /* set of roles dominated by this role */
struct ebitmap types; /* set of authorized types for role */
};
struct role_trans {
u32 role; /* current role */
u32 type; /* program executable type, or new object type */
u32 tclass; /* process class, or new object class */
u32 new_role; /* new role */
struct role_trans *next;
};
struct filename_trans {
u32 stype; /* current process */
u32 ttype; /* parent dir context */
u16 tclass; /* class of new object */
const char *name; /* last path component */
};
struct filename_trans_datum {
u32 otype; /* expected type of the new object */
};
struct role_allow {
u32 role; /* current role */
u32 new_role; /* new role */
struct role_allow *next;
};
/* Type attributes */
struct type_datum {
u32 value; /* internal type value */
u32 bounds; /* boundary of type */
unsigned char primary; /* primary name? */
unsigned char attribute; /* attribute? */
};
/* User attributes */
struct user_datum {
u32 value; /* internal user value */
u32 bounds; /* bounds of user */
struct ebitmap roles; /* set of authorized roles for user */
struct mls_range range; /* MLS range (min - max) for user */
struct mls_level dfltlevel; /* default login MLS level for user */
};
/* Sensitivity attributes */
struct level_datum {
struct mls_level *level; /* sensitivity and associated categories */
unsigned char isalias; /* is this sensitivity an alias for another? */
};
/* Category attributes */
struct cat_datum {
u32 value; /* internal category bit + 1 */
unsigned char isalias; /* is this category an alias for another? */
};
struct range_trans {
u32 source_type;
u32 target_type;
u32 target_class;
};
/* Boolean data type */
struct cond_bool_datum {
__u32 value; /* internal type value */
int state;
};
struct cond_node;
/*
* A type set preserves the data needed to determine constraint info from
* the policy source. It is not used by the kernel policy but allows
* utilities such as audit2allow to determine constraint denials.
*/
struct type_set {
struct ebitmap types;
struct ebitmap negset;
u32 flags;
};
/*
* The configuration data includes security contexts for
* initial SIDs, unlabeled file systems, TCP and UDP port numbers,
* network interfaces, and nodes. This structure stores the
* relevant data for one such entry. Entries of the same kind
* (e.g. all initial SIDs) are linked together into a list.
*/
struct ocontext {
union {
char *name; /* name of initial SID, fs, netif, fstype, path */
struct {
u8 protocol;
u16 low_port;
u16 high_port;
} port; /* TCP or UDP port information */
struct {
u32 addr;
u32 mask;
} node; /* node information */
struct {
u32 addr[4];
u32 mask[4];
} node6; /* IPv6 node information */
} u;
union {
u32 sclass; /* security class for genfs */
u32 behavior; /* labeling behavior for fs_use */
} v;
struct context context[2]; /* security context(s) */
u32 sid[2]; /* SID(s) */
struct ocontext *next;
};
struct genfs {
char *fstype;
struct ocontext *head;
struct genfs *next;
};
/* symbol table array indices */
#define SYM_COMMONS 0
#define SYM_CLASSES 1
#define SYM_ROLES 2
#define SYM_TYPES 3
#define SYM_USERS 4
#define SYM_BOOLS 5
#define SYM_LEVELS 6
#define SYM_CATS 7
#define SYM_NUM 8
/* object context array indices */
#define OCON_ISID 0 /* initial SIDs */
#define OCON_FS 1 /* unlabeled file systems */
#define OCON_PORT 2 /* TCP and UDP port numbers */
#define OCON_NETIF 3 /* network interfaces */
#define OCON_NODE 4 /* nodes */
#define OCON_FSUSE 5 /* fs_use */
#define OCON_NODE6 6 /* IPv6 nodes */
#define OCON_NUM 7
/* The policy database */
struct policydb {
int mls_enabled;
/* symbol tables */
struct symtab symtab[SYM_NUM];
#define p_commons symtab[SYM_COMMONS]
#define p_classes symtab[SYM_CLASSES]
#define p_roles symtab[SYM_ROLES]
#define p_types symtab[SYM_TYPES]
#define p_users symtab[SYM_USERS]
#define p_bools symtab[SYM_BOOLS]
#define p_levels symtab[SYM_LEVELS]
#define p_cats symtab[SYM_CATS]
/* symbol names indexed by (value - 1) */
struct flex_array *sym_val_to_name[SYM_NUM];
/* class, role, and user attributes indexed by (value - 1) */
struct class_datum **class_val_to_struct;
struct role_datum **role_val_to_struct;
struct user_datum **user_val_to_struct;
struct flex_array *type_val_to_struct_array;
/* type enforcement access vectors and transitions */
struct avtab te_avtab;
/* role transitions */
struct role_trans *role_tr;
/* file transitions with the last path component */
/* quickly exclude lookups when parent ttype has no rules */
struct ebitmap filename_trans_ttypes;
/* actual set of filename_trans rules */
struct hashtab *filename_trans;
/* bools indexed by (value - 1) */
struct cond_bool_datum **bool_val_to_struct;
/* type enforcement conditional access vectors and transitions */
struct avtab te_cond_avtab;
/* linked list indexing te_cond_avtab by conditional */
struct cond_node *cond_list;
/* role allows */
struct role_allow *role_allow;
/* security contexts of initial SIDs, unlabeled file systems,
TCP or UDP port numbers, network interfaces and nodes */
struct ocontext *ocontexts[OCON_NUM];
/* security contexts for files in filesystems that cannot support
a persistent label mapping or use another
fixed labeling behavior. */
struct genfs *genfs;
/* range transitions table (range_trans_key -> mls_range) */
struct hashtab *range_tr;
/* type -> attribute reverse mapping */
struct flex_array *type_attr_map_array;
struct ebitmap policycaps;
struct ebitmap permissive_map;
/* length of this policy when it was loaded */
size_t len;
unsigned int policyvers;
unsigned int reject_unknown : 1;
unsigned int allow_unknown : 1;
u16 process_class;
u32 process_trans_perms;
};
extern void policydb_destroy(struct policydb *p);
extern int policydb_load_isids(struct policydb *p, struct sidtab *s);
extern int policydb_context_isvalid(struct policydb *p, struct context *c);
extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
extern int policydb_read(struct policydb *p, void *fp);
extern int policydb_write(struct policydb *p, void *fp);
#define PERM_SYMTAB_SIZE 32
#define POLICYDB_CONFIG_MLS 1
/* the config flags related to unknown classes/perms are bits 2 and 3 */
#define REJECT_UNKNOWN 0x00000002
#define ALLOW_UNKNOWN 0x00000004
#define OBJECT_R "object_r"
#define OBJECT_R_VAL 1
#define POLICYDB_MAGIC SELINUX_MAGIC
#define POLICYDB_STRING "SE Linux"
struct policy_file {
char *data;
size_t len;
};
struct policy_data {
struct policydb *p;
void *fp;
};
static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
{
if (bytes > fp->len)
return -EINVAL;
memcpy(buf, fp->data, bytes);
fp->data += bytes;
fp->len -= bytes;
return 0;
}
static inline int put_entry(const void *buf, size_t bytes, int num, struct policy_file *fp)
{
size_t len = bytes * num;
memcpy(fp->data, buf, len);
fp->data += len;
fp->len -= len;
return 0;
}
static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr)
{
struct flex_array *fa = p->sym_val_to_name[sym_num];
return flex_array_get_ptr(fa, element_nr);
}
extern u16 string_to_security_class(struct policydb *p, const char *name);
extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
#endif /* _SS_POLICYDB_H_ */

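next_entry() and put_entry() above are the primitive readers and writers for the binary policy image carried in a struct policy_file. A minimal sketch of reading one little-endian 32-bit word, the way the policy parser consumes the image; read_one_word() is a hypothetical helper.

static int read_one_word(struct policy_file *fp, u32 *out)
{
	__le32 buf[1];
	int rc;

	rc = next_entry(buf, fp, sizeof(u32));	/* bounds-checked copy; advances fp */
	if (rc)
		return rc;
	*out = le32_to_cpu(buf[0]);		/* policy images are little-endian */
	return 0;
}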
File diff suppressed because it is too large


@ -0,0 +1,21 @@
/*
* Implementation of the security services.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#ifndef _SS_SERVICES_H_
#define _SS_SERVICES_H_
#include "policydb.h"
#include "sidtab.h"
extern struct policydb policydb;
void services_compute_operation_type(struct operation *ops,
struct avtab_node *node);
void services_compute_operation_num(struct operation_decision *od,
struct avtab_node *node);
#endif /* _SS_SERVICES_H_ */


@ -0,0 +1,313 @@
/*
* Implementation of the SID table type.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#define SIDTAB_HASH(sid) \
(sid & SIDTAB_HASH_MASK)
int sidtab_init(struct sidtab *s)
{
int i;
s->htable = kmalloc(sizeof(*(s->htable)) * SIDTAB_SIZE, GFP_ATOMIC);
if (!s->htable)
return -ENOMEM;
for (i = 0; i < SIDTAB_SIZE; i++)
s->htable[i] = NULL;
s->nel = 0;
s->next_sid = 1;
s->shutdown = 0;
spin_lock_init(&s->lock);
return 0;
}
int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
{
int hvalue, rc = 0;
struct sidtab_node *prev, *cur, *newnode;
if (!s) {
rc = -ENOMEM;
goto out;
}
hvalue = SIDTAB_HASH(sid);
prev = NULL;
cur = s->htable[hvalue];
while (cur && sid > cur->sid) {
prev = cur;
cur = cur->next;
}
if (cur && sid == cur->sid) {
rc = -EEXIST;
goto out;
}
newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
if (newnode == NULL) {
rc = -ENOMEM;
goto out;
}
newnode->sid = sid;
if (context_cpy(&newnode->context, context)) {
kfree(newnode);
rc = -ENOMEM;
goto out;
}
if (prev) {
newnode->next = prev->next;
wmb();
prev->next = newnode;
} else {
newnode->next = s->htable[hvalue];
wmb();
s->htable[hvalue] = newnode;
}
s->nel++;
if (sid >= s->next_sid)
s->next_sid = sid + 1;
out:
return rc;
}
static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
{
int hvalue;
struct sidtab_node *cur;
if (!s)
return NULL;
hvalue = SIDTAB_HASH(sid);
cur = s->htable[hvalue];
while (cur && sid > cur->sid)
cur = cur->next;
if (force && cur && sid == cur->sid && cur->context.len)
return &cur->context;
if (cur == NULL || sid != cur->sid || cur->context.len) {
/* Remap invalid SIDs to the unlabeled SID. */
sid = SECINITSID_UNLABELED;
hvalue = SIDTAB_HASH(sid);
cur = s->htable[hvalue];
while (cur && sid > cur->sid)
cur = cur->next;
if (!cur || sid != cur->sid)
return NULL;
}
return &cur->context;
}
struct context *sidtab_search(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 0);
}
struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 1);
}
int sidtab_map(struct sidtab *s,
int (*apply) (u32 sid,
struct context *context,
void *args),
void *args)
{
int i, rc = 0;
struct sidtab_node *cur;
if (!s)
goto out;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = s->htable[i];
while (cur) {
rc = apply(cur->sid, &cur->context, args);
if (rc)
goto out;
cur = cur->next;
}
}
out:
return rc;
}
static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
{
BUG_ON(loc >= SIDTAB_CACHE_LEN);
while (loc > 0) {
s->cache[loc] = s->cache[loc - 1];
loc--;
}
s->cache[0] = n;
}
static inline u32 sidtab_search_context(struct sidtab *s,
struct context *context)
{
int i;
struct sidtab_node *cur;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = s->htable[i];
while (cur) {
if (context_cmp(&cur->context, context)) {
sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
return cur->sid;
}
cur = cur->next;
}
}
return 0;
}
static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
{
int i;
struct sidtab_node *node;
for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
node = s->cache[i];
if (unlikely(!node))
return 0;
if (context_cmp(&node->context, context)) {
sidtab_update_cache(s, node, i);
return node->sid;
}
}
return 0;
}
int sidtab_context_to_sid(struct sidtab *s,
struct context *context,
u32 *out_sid)
{
u32 sid;
int ret = 0;
unsigned long flags;
*out_sid = SECSID_NULL;
sid = sidtab_search_cache(s, context);
if (!sid)
sid = sidtab_search_context(s, context);
if (!sid) {
spin_lock_irqsave(&s->lock, flags);
/* Rescan now that we hold the lock. */
sid = sidtab_search_context(s, context);
if (sid)
goto unlock_out;
/* No SID exists for the context. Allocate a new one. */
if (s->next_sid == UINT_MAX || s->shutdown) {
ret = -ENOMEM;
goto unlock_out;
}
sid = s->next_sid++;
if (context->len)
printk(KERN_INFO
"SELinux: Context %s is not valid (left unmapped).\n",
context->str);
ret = sidtab_insert(s, sid, context);
if (ret)
s->next_sid--;
unlock_out:
spin_unlock_irqrestore(&s->lock, flags);
}
if (ret)
return ret;
*out_sid = sid;
return 0;
}
void sidtab_hash_eval(struct sidtab *h, char *tag)
{
int i, chain_len, slots_used, max_chain_len;
struct sidtab_node *cur;
slots_used = 0;
max_chain_len = 0;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = h->htable[i];
if (cur) {
slots_used++;
chain_len = 0;
while (cur) {
chain_len++;
cur = cur->next;
}
if (chain_len > max_chain_len)
max_chain_len = chain_len;
}
}
printk(KERN_DEBUG "%s: %d entries and %d/%d buckets used, longest "
"chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
max_chain_len);
}
void sidtab_destroy(struct sidtab *s)
{
int i;
struct sidtab_node *cur, *temp;
if (!s)
return;
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = s->htable[i];
while (cur) {
temp = cur;
cur = cur->next;
context_destroy(&temp->context);
kfree(temp);
}
s->htable[i] = NULL;
}
kfree(s->htable);
s->htable = NULL;
s->nel = 0;
s->next_sid = 1;
}
void sidtab_set(struct sidtab *dst, struct sidtab *src)
{
unsigned long flags;
int i;
spin_lock_irqsave(&src->lock, flags);
dst->htable = src->htable;
dst->nel = src->nel;
dst->next_sid = src->next_sid;
dst->shutdown = 0;
for (i = 0; i < SIDTAB_CACHE_LEN; i++)
dst->cache[i] = NULL;
spin_unlock_irqrestore(&src->lock, flags);
}
void sidtab_shutdown(struct sidtab *s)
{
unsigned long flags;
spin_lock_irqsave(&s->lock, flags);
s->shutdown = 1;
spin_unlock_irqrestore(&s->lock, flags);
}


@ -0,0 +1,56 @@
/*
* A security identifier table (sidtab) is a hash table
* of security context structures indexed by SID value.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#ifndef _SS_SIDTAB_H_
#define _SS_SIDTAB_H_
#include "context.h"
struct sidtab_node {
u32 sid; /* security identifier */
struct context context; /* security context structure */
struct sidtab_node *next;
};
#define SIDTAB_HASH_BITS 7
#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
#define SIDTAB_HASH_MASK (SIDTAB_HASH_BUCKETS-1)
#define SIDTAB_SIZE SIDTAB_HASH_BUCKETS
struct sidtab {
struct sidtab_node **htable;
unsigned int nel; /* number of elements */
unsigned int next_sid; /* next SID to allocate */
unsigned char shutdown;
#define SIDTAB_CACHE_LEN 3
struct sidtab_node *cache[SIDTAB_CACHE_LEN];
spinlock_t lock;
};
int sidtab_init(struct sidtab *s);
int sidtab_insert(struct sidtab *s, u32 sid, struct context *context);
struct context *sidtab_search(struct sidtab *s, u32 sid);
struct context *sidtab_search_force(struct sidtab *s, u32 sid);
int sidtab_map(struct sidtab *s,
int (*apply) (u32 sid,
struct context *context,
void *args),
void *args);
int sidtab_context_to_sid(struct sidtab *s,
struct context *context,
u32 *sid);
void sidtab_hash_eval(struct sidtab *h, char *tag);
void sidtab_destroy(struct sidtab *s);
void sidtab_set(struct sidtab *dst, struct sidtab *src);
void sidtab_shutdown(struct sidtab *s);
#endif /* _SS_SIDTAB_H_ */

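The typical flow through the sidtab declared above: a security context is interned with sidtab_context_to_sid(), which reuses an existing SID or allocates the next one, and later code maps the SID back to its context with sidtab_search(). A minimal sketch; example_intern() is a hypothetical function.

static int example_intern(struct sidtab *s, struct context *ctx)
{
	struct context *stored;
	u32 sid;
	int rc;

	rc = sidtab_context_to_sid(s, ctx, &sid);	/* find or allocate a SID */
	if (rc)
		return rc;

	stored = sidtab_search(s, sid);			/* map the SID back */
	return stored ? 0 : -EINVAL;
}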

@ -0,0 +1,126 @@
/*
* mmap based event notifications for SELinux
*
* Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* Copyright (C) 2010 NEC corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include "avc.h"
#include "services.h"
/*
* The selinux_status_page is exposed to userspace applications through
* the mmap interface on /selinux/status.
* It allows applications to be notified of a few events that require the
* userspace access vector cache to be reset, without a context switch.
*
* The selinux_kernel_status structure at the head of the status page is
* protected against concurrent access using seqlock logic, so userspace
* applications should read the status page according to that seqlock
* protocol.
*
* Typically, an application checks status->sequence at the start of its
* access control routine. If the value is odd, the kernel is updating
* the status, so the application should retry shortly. If the value has
* changed since the last observed sequence number, something happened
* and the application resets its userspace AVC if needed.
* In most cases this lets the application confirm that the kernel status
* is unchanged without making any system calls.
*/
static struct page *selinux_status_page;
static DEFINE_MUTEX(selinux_status_lock);
/*
* selinux_kernel_status_page
*
* It returns a reference to selinux_status_page. If the status page has
* not been allocated yet, it tries to allocate it on first use.
*/
struct page *selinux_kernel_status_page(void)
{
struct selinux_kernel_status *status;
struct page *result = NULL;
mutex_lock(&selinux_status_lock);
if (!selinux_status_page) {
selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (selinux_status_page) {
status = page_address(selinux_status_page);
status->version = SELINUX_KERNEL_STATUS_VERSION;
status->sequence = 0;
status->enforcing = selinux_enforcing;
/*
* NOTE: the next policyload event will store a positive
* value in status->policyload; it may not be 1, but it is
* never zero, so the application can tell that the policy
* was updated.
*/
status->policyload = 0;
status->deny_unknown = !security_get_allow_unknown();
}
}
result = selinux_status_page;
mutex_unlock(&selinux_status_lock);
return result;
}
/*
* selinux_status_update_setenforce
*
* It updates the status page with the current enforcing/permissive mode.
*/
void selinux_status_update_setenforce(int enforcing)
{
struct selinux_kernel_status *status;
mutex_lock(&selinux_status_lock);
if (selinux_status_page) {
status = page_address(selinux_status_page);
status->sequence++;
smp_wmb();
status->enforcing = enforcing;
smp_wmb();
status->sequence++;
}
mutex_unlock(&selinux_status_lock);
}
/*
* selinux_status_update_policyload
*
* It updates the status page with the number of policy reloads and the
* current setting of deny_unknown.
*/
void selinux_status_update_policyload(int seqno)
{
struct selinux_kernel_status *status;
mutex_lock(&selinux_status_lock);
if (selinux_status_page) {
status = page_address(selinux_status_page);
status->sequence++;
smp_wmb();
status->policyload = seqno;
status->deny_unknown = !security_get_allow_unknown();
smp_wmb();
status->sequence++;
}
mutex_unlock(&selinux_status_lock);
}

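The file-head comment above describes a seqlock-style protocol for readers of the mmap'd status page. A minimal userspace-side sketch of that read loop, assuming a struct that mirrors the kernel's selinux_kernel_status layout (the field names here are an assumption) and omitting the read memory barriers a real userspace AVC would add.

#include <stdint.h>

struct status_page {			/* assumed mirror of selinux_kernel_status */
	uint32_t version;
	uint32_t sequence;
	uint32_t enforcing;
	uint32_t policyload;
	uint32_t deny_unknown;
};

static uint32_t read_policyload(const volatile struct status_page *sp)
{
	uint32_t seq, value;

	for (;;) {
		seq = sp->sequence;
		if (seq & 1)			/* odd: kernel is mid-update, retry */
			continue;
		value = sp->policyload;
		if (sp->sequence == seq)	/* unchanged: the value is consistent */
			return value;
	}
}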

@ -0,0 +1,43 @@
/*
* Implementation of the symbol table type.
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "symtab.h"
static unsigned int symhash(struct hashtab *h, const void *key)
{
const char *p, *keyp;
unsigned int size;
unsigned int val;
val = 0;
keyp = key;
size = strlen(keyp);
for (p = keyp; (p - keyp) < size; p++)
val = (val << 4 | (val >> (8*sizeof(unsigned int)-4))) ^ (*p);
return val & (h->size - 1);
}
static int symcmp(struct hashtab *h, const void *key1, const void *key2)
{
const char *keyp1, *keyp2;
keyp1 = key1;
keyp2 = key2;
return strcmp(keyp1, keyp2);
}
int symtab_init(struct symtab *s, unsigned int size)
{
s->table = hashtab_create(symhash, symcmp, size);
if (!s->table)
return -ENOMEM;
s->nprim = 0;
return 0;
}


@ -0,0 +1,23 @@
/*
* A symbol table (symtab) maintains associations between symbol
* strings and datum values. The type of the datum values
* is arbitrary. The symbol table type is implemented
* using the hash table type (hashtab).
*
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#ifndef _SS_SYMTAB_H_
#define _SS_SYMTAB_H_
#include "hashtab.h"
struct symtab {
struct hashtab *table; /* hash table (keyed on a string) */
u32 nprim; /* number of primary names in table */
};
int symtab_init(struct symtab *s, unsigned int size);
#endif /* _SS_SYMTAB_H_ */
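A symtab is simply a hashtab specialised to string keys plus a primary-name counter. A minimal sketch of the usual pattern (init, insert through the underlying hashtab, bump nprim, look up); example_symtab() and its key/datum are hypothetical, and 32 is the same slot count policydb.h uses for permission tables.

static int example_symtab(void)
{
	struct symtab s;
	static char name[] = "file";	/* hypothetical symbol name */
	static u32 value = 7;		/* hypothetical datum */
	int rc;

	rc = symtab_init(&s, 32);
	if (rc)
		return rc;
	rc = hashtab_insert(s.table, name, &value);
	if (!rc) {
		s.nprim++;
		if (hashtab_search(s.table, "file") != &value)
			rc = -EINVAL;
	}
	hashtab_destroy(s.table);	/* frees the nodes; key and datum are static here */
	return rc;
}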