diff -r 65bfdf932c7e -r 12923ba929c8 Config.mk --- a/Config.mk Sat Dec 16 11:53:48 2006 -0500 +++ b/Config.mk Sat Dec 16 11:54:25 2006 -0500 @@ -54,6 +54,14 @@ CFLAGS += $(foreach i, $(EXTRA_INCLUDES) #Enable XSM security module. Enabling XSM requires selection of an #XSM security module. XSM_ENABLE ?= y +ifeq ($(XSM_ENABLE),y) +FLASK_ENABLE ?= y +ifeq ($(FLASK_ENABLE),y) +FLASK_DEVELOP ?= y +FLASK_BOOTPARAM ?= y +FLASK_AVC_STATS ?= y +endif +endif # If ACM_SECURITY = y, then the access control module is compiled # into Xen and the policy type can be set by the boot policy file diff -r 65bfdf932c7e -r 12923ba929c8 xen/Makefile --- a/xen/Makefile Sat Dec 16 11:53:48 2006 -0500 +++ b/xen/Makefile Sat Dec 16 11:54:25 2006 -0500 @@ -46,6 +46,7 @@ _clean: delete-unfresh-files $(MAKE) -f $(BASEDIR)/Rules.mk -C drivers clean $(MAKE) -f $(BASEDIR)/Rules.mk -C acm clean $(MAKE) -f $(BASEDIR)/Rules.mk -C xsm clean + $(MAKE) -f $(BASEDIR)/Rules.mk -C flask clean $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) clean rm -f include/asm *.o $(TARGET)* *~ core rm -f include/asm-*/asm-offsets.h @@ -122,7 +123,7 @@ include/asm-$(TARGET_ARCH)/asm-offsets.h echo ""; \ echo "#endif") <$< >$@ -SUBDIRS = xsm acm arch/$(TARGET_ARCH) common drivers +SUBDIRS = xsm flask acm arch/$(TARGET_ARCH) common drivers define all_sources ( find include/asm-$(TARGET_ARCH) -name '*.h' -print; \ find include -name 'asm-*' -prune -o -name '*.h' -print; \ diff -r 65bfdf932c7e -r 12923ba929c8 xen/Rules.mk --- a/xen/Rules.mk Sat Dec 16 11:53:48 2006 -0500 +++ b/xen/Rules.mk Sat Dec 16 11:54:25 2006 -0500 @@ -47,16 +47,21 @@ ALL_OBJS-y += $(BASEDIR)/c ALL_OBJS-y += $(BASEDIR)/common/built_in.o ALL_OBJS-y += $(BASEDIR)/drivers/built_in.o ALL_OBJS-y += $(BASEDIR)/xsm/built_in.o +ALL_OBJS-$(FLASK_ENABLE) += $(BASEDIR)/flask/built_in.o ALL_OBJS-$(ACM_SECURITY) += $(BASEDIR)/acm/built_in.o ALL_OBJS-y += $(BASEDIR)/arch/$(TARGET_ARCH)/built_in.o -CFLAGS-y += -g -D__XEN__ -CFLAGS-$(XSM_ENABLE) += -DXSM_ENABLE -CFLAGS-$(ACM_SECURITY) += -DACM_SECURITY -CFLAGS-$(verbose) += -DVERBOSE -CFLAGS-$(crash_debug) += -DCRASH_DEBUG -CFLAGS-$(perfc) += -DPERF_COUNTERS -CFLAGS-$(perfc_arrays) += -DPERF_ARRAYS +CFLAGS-y += -g -D__XEN__ +CFLAGS-$(XSM_ENABLE) += -DXSM_ENABLE +CFLAGS-$(FLASK_ENABLE) += -DFLASK_ENABLE -DXSM_MAGIC=0xf97cff8c +CFLAGS-$(FLASK_DEVELOP) += -DFLASK_DEVELOP +CFLAGS-$(FLASK_BOOTPARAM) += -DFLASK_BOOTPARAM +CFLAGS-$(FLASK_AVC_STATS) += -DFLASK_AVC_STATS +CFLAGS-$(ACM_SECURITY) += -DACM_SECURITY +CFLAGS-$(verbose) += -DVERBOSE +CFLAGS-$(crash_debug) += -DCRASH_DEBUG +CFLAGS-$(perfc) += -DPERF_COUNTERS +CFLAGS-$(perfc_arrays) += -DPERF_ARRAYS ifneq ($(max_phys_cpus),) CFLAGS-y += -DMAX_PHYS_CPUS=$(max_phys_cpus) diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/Makefile --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/Makefile Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,7 @@ +obj-y += avc.o +obj-y += hooks.o +obj-y += strutil.o + +CFLAGS += -I./include + +subdir-y += ss diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/avc.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/avc.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,799 @@ +/* + * Implementation of the kernel access vector cache (AVC). + * + * Authors: Stephen Smalley, + * James Morris + * + * Update: KaiGai, Kohei + * Replaced the avc_lock spinlock by RCU. 
+ * + * Copyright (C) 2003 Red Hat, Inc., James Morris + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "avc.h" +#include "avc_ss.h" + +#include "kutil.h" + +static const struct av_perm_to_string +{ + u16 tclass; + u32 value; + const char *name; +} av_perm_to_string[] = { +#define S_(c, v, s) { c, v, s }, +#include "av_perm_to_string.h" +#undef S_ +}; + +static const char *class_to_string[] = { +#define S_(s) s, +#include "class_to_string.h" +#undef S_ +}; + +#define TB_(s) static const char * s [] = { +#define TE_(s) }; +#define S_(s) s, +#include "common_perm_to_string.h" +#undef TB_ +#undef TE_ +#undef S_ + +static const struct av_inherit +{ + u16 tclass; + const char **common_pts; + u32 common_base; +} av_inherit[] = { +#define S_(c, i, b) { c, common_##i##_perm_to_string, b }, +#include "av_inherit.h" +#undef S_ +}; + +#define AVC_CACHE_SLOTS 512 +#define AVC_DEF_CACHE_THRESHOLD 512 +#define AVC_CACHE_RECLAIM 16 + +#ifdef FLASK_AVC_STATS +#define avc_cache_stats_incr(field) \ +do { \ + __get_cpu_var(avc_cache_stats).field++; \ +} while (0) +#else +#define avc_cache_stats_incr(field) do {} while (0) +#endif + +struct avc_entry { + u32 ssid; + u32 tsid; + u16 tclass; + struct av_decision avd; + atomic_t used; /* used recently */ +}; + +struct avc_node { + struct avc_entry ae; + struct list_head list; + struct rcu_head rhead; +}; + +struct avc_cache { + struct list_head slots[AVC_CACHE_SLOTS]; + spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ + atomic_t lru_hint; /* LRU hint for reclaim scan */ + atomic_t active_nodes; + u32 latest_notif; /* latest revocation notification */ +}; + +struct avc_callback_node { + int (*callback) (u32 event, u32 ssid, u32 tsid, + u16 tclass, u32 perms, + u32 *out_retained); + u32 events; + u32 ssid; + u32 tsid; + u16 tclass; + u32 perms; + struct avc_callback_node *next; +}; + +/* Exported via selinufs */ +unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD; + +#ifdef FLASK_AVC_STATS +DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 }; +#endif + +static DEFINE_RWLOCK(avc_rwlock); +#define AVC_RDLOCK read_lock(&avc_rwlock) +#define AVC_RDUNLOCK read_unlock(&avc_rwlock) +#define AVC_WRLOCK write_lock(&avc_rwlock) +#define AVC_WRUNLOCK write_unlock(&avc_rwlock) + +static DEFINE_SPINLOCK(avc_lock); +#define AVC_LOCK spin_lock(&avc_lock); +#define AVC_UNLOCK spin_unlock(&avc_lock); + +static struct avc_cache avc_cache; +static struct avc_callback_node *avc_callbacks; + +static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) +{ + return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); +} + +/** + * avc_dump_av - Display an access vector in human-readable form. 
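+ *
+ * Bits below the class's common permission base are printed from the
+ * common permission table selected via av_inherit; class-specific bits
+ * are resolved through av_perm_to_string; anything left over after both
+ * passes is printed as a raw hex value.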
+ * @tclass: target security class + * @av: access vector + */ +static void avc_dump_av(u16 tclass, u32 av) +{ + const char **common_pts = NULL; + u32 common_base = 0; + int i, i2, perm; + + if (av == 0) { + printk(" null"); + return; + } + + for (i = 0; i < ARRAY_SIZE(av_inherit); i++) { + if (av_inherit[i].tclass == tclass) { + common_pts = av_inherit[i].common_pts; + common_base = av_inherit[i].common_base; + break; + } + } + + printk(" {"); + i = 0; + perm = 1; + while (perm < common_base) { + if (perm & av) { + printk(" %s", common_pts[i]); + av &= ~perm; + } + i++; + perm <<= 1; + } + + while (i < sizeof(av) * 8) { + if (perm & av) { + for (i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++) { + if ((av_perm_to_string[i2].tclass == tclass) && + (av_perm_to_string[i2].value == perm)) + break; + } + if (i2 < ARRAY_SIZE(av_perm_to_string)) { + printk(" %s", av_perm_to_string[i2].name); + av &= ~perm; + } + } + i++; + perm <<= 1; + } + + if (av) + printk(" 0x%x", av); + + printk(" }"); +} + +/** + * avc_dump_query - Display a SID pair and a class in human-readable form. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + */ +static void avc_dump_query(u32 ssid, u32 tsid, u16 tclass) +{ + int rc; + char *scontext; + u32 scontext_len; + + rc = security_sid_to_context(ssid, &scontext, &scontext_len); + if (rc) + printk("ssid=%d", ssid); + else { + printk("scontext=%s", scontext); + xfree(scontext); + } + + rc = security_sid_to_context(tsid, &scontext, &scontext_len); + if (rc) + printk(" tsid=%d", tsid); + else { + printk(" tcontext=%s", scontext); + xfree(scontext); + } + printk("\n"); + printk("tclass=%s", class_to_string[tclass]); +} + +/** + * avc_init - Initialize the AVC. + * + * Initialize the access vector cache. 
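+ * Each of the AVC_CACHE_SLOTS hash chains starts out empty with its own
+ * per-slot spinlock for writers; the active-node count and LRU hint are
+ * reset to zero.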
+ */ +void __init avc_init(void) +{ + int i; + + for (i = 0; i < AVC_CACHE_SLOTS; i++) { + INIT_LIST_HEAD(&avc_cache.slots[i]); + spin_lock_init(&avc_cache.slots_lock[i]); + } + atomic_set(&avc_cache.active_nodes, 0); + atomic_set(&avc_cache.lru_hint, 0); + + printk("AVC INITIALIZED\n"); +} + +int avc_get_hash_stats(char *page) +{ + int i, chain_len, max_chain_len, slots_used; + struct avc_node *node; + + AVC_LOCK; + AVC_RDLOCK; + + slots_used = 0; + max_chain_len = 0; + for (i = 0; i < AVC_CACHE_SLOTS; i++) { + if (!list_empty(&avc_cache.slots[i])) { + slots_used++; + chain_len = 0; + list_for_each_entry_rcu(node, &avc_cache.slots[i], list) + chain_len++; + if (chain_len > max_chain_len) + max_chain_len = chain_len; + } + } + + AVC_RDUNLOCK; + AVC_UNLOCK; + + return snprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n" + "longest chain: %d\n", + atomic_read(&avc_cache.active_nodes), + slots_used, AVC_CACHE_SLOTS, max_chain_len); +} + +static void avc_node_free(struct rcu_head *rhead) +{ + struct avc_node *node = container_of(rhead, struct avc_node, rhead); + xfree(node); + avc_cache_stats_incr(frees); +} + +static void avc_node_delete(struct avc_node *node) +{ + list_del_rcu(&node->list); + avc_node_free(&node->rhead); + atomic_dec(&avc_cache.active_nodes); +} + +static void avc_node_kill(struct avc_node *node) +{ + xfree(node); + avc_cache_stats_incr(frees); + atomic_dec(&avc_cache.active_nodes); +} + +static void avc_node_replace(struct avc_node *new, struct avc_node *old) +{ + list_replace_rcu(&old->list, &new->list); + avc_node_free(&old->rhead); + atomic_dec(&avc_cache.active_nodes); +} + +static inline int avc_reclaim_node(void) +{ + struct avc_node *node; + int hvalue, try, ecx; + + AVC_LOCK; + AVC_WRLOCK; + + for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++ ) { + atomic_inc(&avc_cache.lru_hint); + hvalue = atomic_read(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); + + list_for_each_entry(node, &avc_cache.slots[hvalue], list) { + if (atomic_dec_and_test(&node->ae.used)) { + /* Recently Unused */ + avc_node_delete(node); + avc_cache_stats_incr(reclaims); + ecx++; + if (ecx >= AVC_CACHE_RECLAIM) { + goto out; + } + } + } + } + + +out: + AVC_WRUNLOCK; + AVC_UNLOCK; + return ecx; +} + +static struct avc_node *avc_alloc_node(void) +{ + struct avc_node *node; + + node = xmalloc(struct avc_node); + if (!node) + goto out; + + memset(node, 0, sizeof(*node)); + INIT_RCU_HEAD(&node->rhead); + INIT_LIST_HEAD(&node->list); + atomic_set(&node->ae.used, 1); + avc_cache_stats_incr(allocations); + + atomic_inc(&avc_cache.active_nodes); + if (atomic_read(&avc_cache.active_nodes) > avc_cache_threshold) + avc_reclaim_node(); + +out: + return node; +} + +static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae) +{ + node->ae.ssid = ssid; + node->ae.tsid = tsid; + node->ae.tclass = tclass; + memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd)); +} + +static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) +{ + struct avc_node *node, *ret = NULL; + int hvalue; + + hvalue = avc_hash(ssid, tsid, tclass); + list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list) { + if (ssid == node->ae.ssid && + tclass == node->ae.tclass && + tsid == node->ae.tsid) { + ret = node; + break; + } + } + + if (ret == NULL) { + /* cache miss */ + goto out; + } + + /* cache hit */ + if (atomic_read(&ret->ae.used) != 1) + atomic_set(&ret->ae.used, 1); +out: + return ret; +} + +/** + * avc_lookup - Look up an AVC entry. 
+ * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @requested: requested permissions, interpreted based on @tclass + * + * Look up an AVC entry that is valid for the + * @requested permissions between the SID pair + * (@ssid, @tsid), interpreting the permissions + * based on @tclass. If a valid AVC entry exists, + * then this function return the avc_node. + * Otherwise, this function returns NULL. + */ +static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested) +{ + struct avc_node *node; + + AVC_RDLOCK; + + avc_cache_stats_incr(lookups); + node = avc_search_node(ssid, tsid, tclass); + + if (node && ((node->ae.avd.decided & requested) == requested)) { + avc_cache_stats_incr(hits); + goto out; + } + + node = NULL; + avc_cache_stats_incr(misses); +out: + AVC_RDUNLOCK; + return node; +} + +static int avc_latest_notif_update(int seqno, int is_insert) +{ + int ret = 0; + static DEFINE_SPINLOCK(notif_lock); + unsigned long flag; + + spin_lock_irqsave(¬if_lock, flag); + if (is_insert) { + if (seqno < avc_cache.latest_notif) { + printk(KERN_WARNING "avc: seqno %d < latest_notif %d\n", + seqno, avc_cache.latest_notif); + ret = -EAGAIN; + } + } else { + if (seqno > avc_cache.latest_notif) + avc_cache.latest_notif = seqno; + } + spin_unlock_irqrestore(¬if_lock, flag); + + return ret; +} + +/** + * avc_insert - Insert an AVC entry. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @ae: AVC entry + * + * Insert an AVC entry for the SID pair + * (@ssid, @tsid) and class @tclass. + * The access vectors and the sequence number are + * normally provided by the security server in + * response to a security_compute_av() call. If the + * sequence number @ae->avd.seqno is not less than the latest + * revocation notification, then the function copies + * the access vectors into a cache entry, returns + * avc_node inserted. Otherwise, this function returns NULL. + */ +static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae) +{ + struct avc_node *pos, *node = NULL; + int hvalue; + + AVC_WRLOCK; + + if (avc_latest_notif_update(ae->avd.seqno, 1)) + goto out; + + node = avc_alloc_node(); + if (node) { + hvalue = avc_hash(ssid, tsid, tclass); + avc_node_populate(node, ssid, tsid, tclass, ae); + + list_for_each_entry(pos, &avc_cache.slots[hvalue], list) { + if (pos->ae.ssid == ssid && + pos->ae.tsid == tsid && + pos->ae.tclass == tclass) { + avc_node_replace(node, pos); + goto out; + } + } + list_add_rcu(&node->list, &avc_cache.slots[hvalue]); + } + + +out: + AVC_WRUNLOCK; + return node; +} + +/** + * avc_audit - Audit the granting or denial of permissions. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @requested: requested permissions + * @avd: access vector decisions + * @result: result from avc_has_perm_noaudit + * @a: auxiliary audit data + * + * Audit the granting or denial of permissions in accordance + * with the policy. This function is typically called by + * avc_has_perm() after a permission check, but can also be + * called directly by callers who use avc_has_perm_noaudit() + * in order to separate the permission check from the auditing. + * For example, this separation is useful when the permission check must + * be performed under a lock, to allow the lock to be released + * before calling the auditing code. 
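+ *
+ * A minimal sketch of that split, assuming a caller-held lock some_lock:
+ *
+ *     struct av_decision avd;
+ *     int rc;
+ *
+ *     spin_lock(&some_lock);
+ *     rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, &avd);
+ *     spin_unlock(&some_lock);
+ *     avc_audit(ssid, tsid, tclass, requested, &avd, rc, NULL);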
+ */ +void avc_audit(u32 ssid, u32 tsid, + u16 tclass, u32 requested, + struct av_decision *avd, int result, struct avc_audit_data *a) +{ + struct domain *d = current->domain; + u32 denied, audited; + + denied = requested & ~avd->allowed; + if (denied) { + audited = denied; + if (!(audited & avd->auditdeny)) + return; + } else if (result) { + audited = denied = requested; + } else { + audited = requested; + if (!(audited & avd->auditallow)) + return; + } + + printk("avc: %s ", denied ? "denied" : "granted"); + avc_dump_av(tclass, audited); + printk(" for "); + + if (a && a->d) + d = a->d; + if (d) + printk("domid=%d", d->domain_id); + + printk("\n"); + avc_dump_query(ssid, tsid, tclass); + printk("\n"); + +} + +/** + * avc_add_callback - Register a callback for security events. + * @callback: callback function + * @events: security events + * @ssid: source security identifier or %SECSID_WILD + * @tsid: target security identifier or %SECSID_WILD + * @tclass: target security class + * @perms: permissions + * + * Register a callback function for events in the set @events + * related to the SID pair (@ssid, @tsid) and + * and the permissions @perms, interpreting + * @perms based on @tclass. Returns %0 on success or + * -%ENOMEM if insufficient memory exists to add the callback. + */ +int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid, + u16 tclass, u32 perms, + u32 *out_retained), + u32 events, u32 ssid, u32 tsid, + u16 tclass, u32 perms) +{ + struct avc_callback_node *c; + int rc = 0; + + c = xmalloc(struct avc_callback_node); + if (!c) { + rc = -ENOMEM; + goto out; + } + + c->callback = callback; + c->events = events; + c->ssid = ssid; + c->tsid = tsid; + c->perms = perms; + c->next = avc_callbacks; + avc_callbacks = c; +out: + return rc; +} + +static inline int avc_sidcmp(u32 x, u32 y) +{ + return (x == y || x == SECSID_WILD || y == SECSID_WILD); +} + +/** + * avc_update_node Update an AVC entry + * @event : Updating event + * @perms : Permission mask bits + * @ssid,@tsid,@tclass : identifier of an AVC entry + * + * if a valid AVC entry doesn't exist,this function returns -ENOENT. + * if kmalloc() called internal returns NULL, this function returns -ENOMEM. + * otherwise, this function update the AVC entry. The original AVC-entry object + * will release later by RCU. + */ +static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass) +{ + int hvalue, rc = 0; + struct avc_node *pos, *node, *orig = NULL; + + AVC_WRLOCK; + + node = avc_alloc_node(); + if (!node) { + rc = -ENOMEM; + goto out; + } + + hvalue = avc_hash(ssid, tsid, tclass); + list_for_each_entry(pos, &avc_cache.slots[hvalue], list){ + if ( ssid==pos->ae.ssid && + tsid==pos->ae.tsid && + tclass==pos->ae.tclass ){ + orig = pos; + break; + } + } + + if (!orig) { + rc = -ENOENT; + avc_node_kill(node); + goto out; + } + + /* + * Copy and replace original node. 
+ */ + + avc_node_populate(node, ssid, tsid, tclass, &orig->ae); + + switch (event) { + case AVC_CALLBACK_GRANT: + node->ae.avd.allowed |= perms; + break; + case AVC_CALLBACK_TRY_REVOKE: + case AVC_CALLBACK_REVOKE: + node->ae.avd.allowed &= ~perms; + break; + case AVC_CALLBACK_AUDITALLOW_ENABLE: + node->ae.avd.auditallow |= perms; + break; + case AVC_CALLBACK_AUDITALLOW_DISABLE: + node->ae.avd.auditallow &= ~perms; + break; + case AVC_CALLBACK_AUDITDENY_ENABLE: + node->ae.avd.auditdeny |= perms; + break; + case AVC_CALLBACK_AUDITDENY_DISABLE: + node->ae.avd.auditdeny &= ~perms; + break; + } + avc_node_replace(node, orig); + +out: + AVC_WRUNLOCK; + return rc; +} + +/** + * avc_ss_reset - Flush the cache and revalidate migrated permissions. + * @seqno: policy sequence number + */ +int avc_ss_reset(u32 seqno) +{ + struct avc_callback_node *c; + int i, rc = 0; + struct avc_node *node; + + AVC_LOCK; + AVC_WRLOCK; + + for (i = 0; i < AVC_CACHE_SLOTS; i++) { + list_for_each_entry(node, &avc_cache.slots[i], list) + avc_node_delete(node); + } + + AVC_WRUNLOCK; + + for (c = avc_callbacks; c; c = c->next) { + if (c->events & AVC_CALLBACK_RESET) { + rc = c->callback(AVC_CALLBACK_RESET, + 0, 0, 0, 0, NULL); + if (rc) + goto out; + } + } + + avc_latest_notif_update(seqno, 0); +out: + AVC_UNLOCK; + return rc; +} + +/** + * avc_has_perm_noaudit - Check permissions but perform no auditing. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @requested: requested permissions, interpreted based on @tclass + * @avd: access vector decisions + * + * Check the AVC to determine whether the @requested permissions are granted + * for the SID pair (@ssid, @tsid), interpreting the permissions + * based on @tclass, and call the security server on a cache miss to obtain + * a new decision and add it to the cache. Return a copy of the decisions + * in @avd. Return %0 if all @requested permissions are granted, + * -%EACCES if any permissions are denied, or another -errno upon + * other errors. This function is typically called by avc_has_perm(), + * but may also be called directly to separate permission checking from + * auditing, e.g. in cases where a lock must be held for the check but + * should be released for the auditing. + */ +int avc_has_perm_noaudit(u32 ssid, u32 tsid, + u16 tclass, u32 requested, + struct av_decision *avd) +{ + struct avc_node *node; + struct avc_entry entry, *p_ae; + int rc = 0; + u32 denied; + + AVC_LOCK; + + node = avc_lookup(ssid, tsid, tclass, requested); + + if (!node) { + rc = security_compute_av(ssid,tsid,tclass,requested,&entry.avd); + if (rc) + goto out; + node = avc_insert(ssid,tsid,tclass,&entry); + } + + p_ae = node ? &node->ae : &entry; + + if (avd) + memcpy(avd, &p_ae->avd, sizeof(*avd)); + + denied = requested & ~(p_ae->avd.allowed); + + if (!requested || denied) { + if (flask_enforcing) + rc = -EACCES; + else + if (node) + avc_update_node(AVC_CALLBACK_GRANT,requested, + ssid,tsid,tclass); + } + +out: + AVC_UNLOCK; + return rc; +} + +/** + * avc_has_perm - Check permissions and perform any appropriate auditing. 
+ * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @requested: requested permissions, interpreted based on @tclass + * @auditdata: auxiliary audit data + * + * Check the AVC to determine whether the @requested permissions are granted + * for the SID pair (@ssid, @tsid), interpreting the permissions + * based on @tclass, and call the security server on a cache miss to obtain + * a new decision and add it to the cache. Audit the granting or denial of + * permissions in accordance with the policy. Return %0 if all @requested + * permissions are granted, -%EACCES if any permissions are denied, or + * another -errno upon other errors. + */ +int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, + u32 requested, struct avc_audit_data *auditdata) +{ + struct av_decision avd; + int rc; + + rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, &avd); + avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata); + return rc; +} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/hooks.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/hooks.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,2343 @@ + /* + * This file contains the Flask hook function implementations for Xen. + * + * Author: George Coker, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "strutil.h" + +#ifdef FLASK_DEVELOP +int flask_enforcing = 0; +integer_param("flask_enforcing", flask_enforcing); +#endif + +#ifdef FLASK_BOOTPARAM +int flask_enabled = 1; +integer_param("flask_enabled", flask_enabled); +#endif + +static struct xsm_operations *original_ops = NULL; + +static DEFINE_SPINLOCK(sel_sem); + +/* global data for booleans */ +static int bool_num = 0; +static int *bool_pending_values = NULL; + +extern int ss_initialized; + +static int domain_has_perm(struct domain *dom1, struct domain *dom2, + u16 class, u32 perms) +{ + struct domain_security_struct *dsec1, *dsec2; + + dsec1 = dom1->ssid; + dsec2 = dom2->ssid; + + return avc_has_perm(dsec1->sid, dsec2->sid, class, perms, NULL); +} + +static int domain_has_evtchn(struct domain *d, struct evtchn *chn, u32 perms) +{ + struct domain_security_struct *dsec; + struct evtchn_security_struct *esec; + + dsec = d->ssid; + esec = chn->ssid; + + return avc_has_perm(dsec->sid, esec->sid, SECCLASS_EVENT, perms, NULL); +} + +static int domain_has_xen(struct domain *d, u32 perms) +{ + struct domain_security_struct *dsec; + dsec = d->ssid; + + return avc_has_perm(dsec->sid, SECINITSID_XEN, SECCLASS_XEN, perms, NULL); +} + +static int flask_domain_alloc_security(struct domain *d) +{ + struct domain_security_struct *dsec; + + dsec = xmalloc(struct domain_security_struct); + + if (!dsec) + return -ENOMEM; + + memset(dsec, 0, sizeof(struct domain_security_struct)); + + dsec->d = d; + + if (d->domain_id == IDLE_DOMAIN_ID) { + dsec->sid = SECINITSID_XEN; + dsec->create_sid = SECINITSID_DOM0; + } else { + dsec->sid = SECINITSID_UNLABELED; + dsec->create_sid = SECINITSID_UNLABELED; + } + + d->ssid = dsec; + + return 0; +} + +static void flask_domain_free_security(struct domain *d) +{ + struct domain_security_struct *dsec = d->ssid; + + if (!dsec) + return; + + d->ssid = NULL; + xfree(dsec); +} + +static int 
flask_evtchn_unbound(struct domain *d1, struct evtchn *chn, + domid_t id2) +{ + u32 newsid; + int rc; + domid_t id; + struct domain *d2; + struct domain_security_struct *dsec, *dsec1, *dsec2; + struct evtchn_security_struct *esec; + + dsec = current->domain->ssid; + dsec1 = d1->ssid; + esec = chn->ssid; + + if (id2 == DOMID_SELF) + id = current->domain->domain_id; + else + id = id2; + + d2 = find_domain_by_id(id); + if (d2 == NULL) + return -EPERM; + + dsec2 = d2->ssid; + rc = security_transition_sid(dsec1->sid, dsec2->sid, SECCLASS_EVENT, + &newsid); + if (rc) + goto out; + + rc = avc_has_perm(dsec->sid, newsid, SECCLASS_EVENT, + EVENT__CREATE|EVENT__ALLOC, NULL); + if (rc) + goto out; + + rc = avc_has_perm(newsid, dsec2->sid, SECCLASS_EVENT, EVENT__BIND, NULL); + if (rc) + goto out; + else + esec->sid = newsid; + +out: + put_domain(d2); + return rc; +} + +static int flask_evtchn_interdomain(struct domain *d1, struct evtchn *chn1, + struct domain *d2, struct evtchn *chn2) +{ + u32 newsid1; + u32 newsid2; + int rc; + struct domain_security_struct *dsec1, *dsec2; + struct evtchn_security_struct *esec1, *esec2; + + dsec1 = d1->ssid; + dsec2 = d2->ssid; + + esec1 = chn1->ssid; + esec2 = chn2->ssid; + + rc = security_transition_sid(dsec1->sid, dsec2->sid, + SECCLASS_EVENT, &newsid1); + if (rc) { + printk("%s: security_transition_sid failed, rc=%d (domain=%d)\n", + __FUNCTION__, -rc, d2->domain_id); + return rc; + } + + rc = avc_has_perm(dsec1->sid, newsid1, SECCLASS_EVENT, EVENT__CREATE, NULL); + if (rc) + return rc; + + rc = security_transition_sid(dsec2->sid, dsec1->sid, + SECCLASS_EVENT, &newsid2); + if (rc) { + printk("%s: security_transition_sid failed, rc=%d (domain=%d)\n", + __FUNCTION__, -rc, d1->domain_id); + return rc; + } + + rc = avc_has_perm(dsec2->sid, newsid2, SECCLASS_EVENT, EVENT__CREATE, NULL); + if (rc) + return rc; + + rc = avc_has_perm(newsid1, dsec2->sid, SECCLASS_EVENT, EVENT__BIND, NULL); + if (rc) + return rc; + + rc = avc_has_perm(newsid2, dsec1->sid, SECCLASS_EVENT, EVENT__BIND, NULL); + if (rc) + return rc; + + esec1->sid = newsid1; + esec2->sid = newsid2; + + return rc; +} + +static int flask_evtchn_virq(struct domain *d, struct evtchn *chn, + int virq, int vcpu) +{ + u32 newsid; + u32 visid; + u32 vcsid; + int rc; + struct domain_security_struct *dsec; + struct evtchn_security_struct *esec; + + dsec = d->ssid; + esec = chn->ssid; + + rc = security_virq_sid(virq, &visid); + if (rc) + return rc; + + rc = security_transition_sid(dsec->sid, visid, SECCLASS_EVENT, &newsid); + if (rc) { + printk("%s: security_transition_sid failed, rc=%d (virq=%d)\n", + __FUNCTION__, -rc, virq); + return rc; + } + + rc = avc_has_perm(dsec->sid, newsid, SECCLASS_EVENT, EVENT__CREATE, NULL); + if (rc) + return rc; + + rc = avc_has_perm(newsid, visid, SECCLASS_EVENT, EVENT__BIND, NULL); + if (rc) + return rc; + + rc = security_vcpu_sid(vcpu, &vcsid); + if (rc) + return rc; + + rc = avc_has_perm(newsid, vcsid, SECCLASS_EVENT, EVENT__NOTIFY, NULL); + if (rc) + return rc; + + esec->sid = newsid; + + return rc; +} + +static int flask_evtchn_ipi(struct domain *d, struct evtchn *chn, int vcpu) +{ + u32 newsid; + u32 vsid; + int rc; + struct domain_security_struct *dsec; + struct evtchn_security_struct *esec; + + dsec = d->ssid; + esec = chn->ssid; + + rc = security_vcpu_sid(vcpu, &vsid); + if (rc) + return rc; + + rc = security_transition_sid(dsec->sid, vsid, SECCLASS_EVENT, &newsid); + if (rc) { + printk("%s: security_transition_sid failed, rc=%d (vcpu=%d)\n", + __FUNCTION__, -rc, vcpu); + 
return rc; + } + + rc = avc_has_perm(dsec->sid, newsid, SECCLASS_EVENT, EVENT__CREATE, NULL); + if (rc) + return rc; + + rc = avc_has_perm(newsid, vsid, SECCLASS_EVENT, EVENT__BIND, NULL); + if (rc) + return rc; + + esec->sid = newsid; + + return rc; +} + +static int flask_evtchn_pirq(struct domain *d, struct evtchn *chn, int pirq) +{ + u32 newsid; + u32 psid; + int rc; + struct domain_security_struct *dsec; + struct evtchn_security_struct *esec; + + dsec = d->ssid; + esec = chn->ssid; + + rc = security_pirq_sid(pirq, &psid); + if (rc) + return rc; + + rc = security_transition_sid(dsec->sid, psid, SECCLASS_EVENT, &newsid); + if (rc) { + printk("%s: security_transition_sid failed, rc=%d (pirq=%d)\n", + __FUNCTION__, -rc, pirq); + return rc; + } + + rc = avc_has_perm(dsec->sid, newsid, SECCLASS_EVENT, EVENT__CREATE, NULL); + + if (rc) + return rc; + + rc = avc_has_perm(newsid, psid, SECCLASS_EVENT, EVENT__BIND, NULL); + if (rc) + return rc; + + esec->sid = newsid; + + return rc; +} + +static int flask_evtchn_close(struct domain *d, struct evtchn *chn) +{ + return domain_has_evtchn(d, chn, EVENT__CLOSE); +} + +static void flask_evtchn_close_post(struct evtchn *chn) +{ + struct evtchn_security_struct *esec; + esec = chn->ssid; + + esec->sid = SECINITSID_UNLABELED; +} + +static int flask_evtchn_send(struct domain *d, struct evtchn *chn) +{ + return domain_has_evtchn(d, chn, EVENT__SEND); +} + +static int flask_evtchn_status(struct domain *d, struct evtchn *chn) +{ + return domain_has_evtchn(d, chn, EVENT__STATUS); +} + +static int flask_evtchn_vcpu(struct domain *d, struct evtchn *chn, + unsigned int vcpu) +{ + u32 vsid; + int rc; + struct domain_security_struct *dsec; + struct evtchn_security_struct *esec; + + dsec = d->ssid; + esec = chn->ssid; + + rc = security_vcpu_sid(vcpu, &vsid); + if (rc) + return rc; + + rc = avc_has_perm(dsec->sid, vsid, SECCLASS_EVENT, EVENT__NOTIFY, NULL); + if (rc) + return rc; + + return avc_has_perm(esec->sid, vsid, SECCLASS_EVENT, EVENT__BIND, NULL); +} + +static int flask_evtchn_unmask(struct domain *d, struct evtchn *chn) +{ + return domain_has_evtchn(d, chn, EVENT__UNMASK); +} + +static int flask_evtchn_init(struct domain *d2, struct evtchn *chn) +{ + u32 newsid; + int rc; + struct domain_security_struct *dsec; + struct evtchn_security_struct *esec; + + dsec = current->domain->ssid; + esec = chn->ssid; + + rc = security_transition_sid(dsec->sid, dsec->create_sid, + SECCLASS_EVENT, &newsid); + if (rc) + return rc; + + rc = avc_has_perm(dsec->sid, newsid, SECCLASS_EVENT, + EVENT__CREATE|EVENT__ALLOC, NULL); + if (rc) + return rc; + + rc = avc_has_perm(newsid, dsec->sid, SECCLASS_EVENT, EVENT__BIND, NULL); + if (rc) + return rc; + + esec->sid = newsid; + + return rc; +} + +static int flask_alloc_security_evtchn(struct evtchn *chn) +{ + int i; + struct evtchn_security_struct *esec; + + for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ ) { + esec = xmalloc(struct evtchn_security_struct); + + if (!esec) + return -ENOMEM; + + memset(esec, 0, sizeof(struct evtchn_security_struct)); + + esec->chn = &chn[i]; + esec->sid = SECINITSID_UNLABELED; + + (&chn[i])->ssid = esec; + } + + return 0; +} + +static void flask_free_security_evtchn(struct evtchn *chn) +{ + int i; + struct evtchn_security_struct *esec; + + if (!chn) + return; + + for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ ) { + esec = (&chn[i])->ssid; + + if (!esec) + continue; + + (&chn[i])->ssid = NULL; + xfree(esec); + } + +} + +static int flask_grant_mapref(struct domain *d1, struct domain *d2, + uint32_t flags) +{ + u32 
perms = GRANT__MAP_READ; + + if (flags & GTF_writing) + perms |= GRANT__MAP_WRITE; + + return domain_has_perm(d1, d2, SECCLASS_GRANT, perms); +} + +static int flask_grant_unmapref(struct domain *d1, struct domain *d2) +{ + return domain_has_perm(d1, d2, SECCLASS_GRANT, GRANT__UNMAP); +} + +static int flask_grant_setup(struct domain *d1, struct domain *d2) +{ + return domain_has_perm(d1, d2, SECCLASS_GRANT, GRANT__SETUP); +} + +static int flask_grant_transfer(struct domain *d1, struct domain *d2) +{ + return domain_has_perm(d1, d2, SECCLASS_GRANT, GRANT__TRANSFER); +} + +static int flask_grant_copy(struct domain *d1, struct domain *d2) +{ + return domain_has_perm(d1, d2, SECCLASS_GRANT, GRANT__COPY); +} + +static int get_page_sid(unsigned long mfn, u32 *sid) +{ + int rc = 0; + struct domain *d; + struct page_info *page; + struct domain_security_struct *dsec; + + if (mfn_valid(mfn)) { + /*mfn is valid if this is a page that Xen is tracking!*/ + page = mfn_to_page(mfn); + d = page_get_owner(page); + + if (d == NULL) { + rc = security_iomem_sid(mfn, sid); + goto out; + } + + switch ( d->domain_id ) + { + case DOMID_IO: + /*A tracked IO page?*/ + *sid = SECINITSID_DOMIO; + break; + + case DOMID_XEN: + /*A page from Xen's private heap?*/ + *sid = SECINITSID_DOMXEN; + break; + + default: + /*Pages are implicitly labeled by domain ownership!*/ + dsec = d->ssid; + *sid = dsec->sid; + break; + } + + } else { + /*Possibly an untracked IO page?*/ + rc = security_iomem_sid(mfn, sid); + } + +out: + return rc; +} + +static int flask_mmu_normal_update(struct domain *d, intpte_t fpte) +{ + int rc = 0; + u32 map_perms = MMU__MAP_READ; + unsigned long fmfn; + struct domain_security_struct *dsec; + u32 fsid; + + dsec = d->ssid; + + if ( get_pte_flags(fpte) & _PAGE_RW ) + map_perms |= MMU__MAP_WRITE; + + fmfn = ((unsigned long)(((fpte) & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)); + + rc = get_page_sid(fmfn, &fsid); + if (rc) + return rc; + + return avc_has_perm(dsec->sid, fsid, SECCLASS_MMU, map_perms, NULL); +} + +static int flask_mmu_machphys_update(struct domain *d, unsigned long mfn) +{ + int rc = 0; + u32 psid; + struct domain_security_struct *dsec; + dsec = d->ssid; + + rc = get_page_sid(mfn, &psid); + if (rc) + return rc; + + return avc_has_perm(dsec->sid, psid, SECCLASS_MMU, MMU__UPDATEMP, NULL); +} + +static int flask_translate_gpfn_list(struct domain *d, unsigned long mfn) +{ + int rc = 0; + u32 sid; + struct domain_security_struct *dsec; + dsec = d->ssid; + + rc = get_page_sid(mfn, &sid); + if (rc) + return rc; + + return avc_has_perm(dsec->sid, sid, SECCLASS_MMU, MMU__TRANSLATEGP, NULL); +} + +static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2) +{ + return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST); +} + +static int flask_memory_stat_reservation(struct domain *d1, struct domain *d2) +{ + return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__STAT); +} + +static int flask_memory_pin_page(struct domain *d, unsigned long mfn) +{ + int rc = 0; + u32 sid; + struct domain_security_struct *dsec; + dsec = d->ssid; + + rc = get_page_sid(mfn, &sid); + if (rc) + return rc; + + return avc_has_perm(dsec->sid, sid, SECCLASS_MMU, MMU__PINPAGE, NULL); +} + +static int flask_update_va_mapping(struct domain *d, intpte_t val) +{ + int rc = 0; + u32 psid; + u32 map_perms = MMU__MAP_READ; + unsigned long mfn; + struct domain_security_struct *dsec; + + dsec = d->ssid; + + mfn = ((unsigned long)(((val) & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)); + rc = get_page_sid(mfn, &psid); + if (rc) + 
return rc; + + if ( get_pte_flags(val) & _PAGE_RW ) + map_perms |= MMU__MAP_WRITE; + + return avc_has_perm(dsec->sid, psid, SECCLASS_MMU, map_perms, NULL); +} + +static int flask_add_to_physmap(struct domain *d1, struct domain *d2) +{ + return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PHYSMAP); +} + +static int flask_machine_memory_map(void) +{ + struct domain_security_struct *dsec; + dsec = current->domain->ssid; + + return avc_has_perm(dsec->sid, SECINITSID_XEN, SECCLASS_MMU, + MMU__MEMORYMAP, NULL); +} + +static int flask_domain_memory_map(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_MMU, MMU__MEMORYMAP); +} + +static int flask_machphys_mfn_list(struct domain *d) +{ + struct domain_security_struct *dsec; + dsec = d->ssid; + + return avc_has_perm(dsec->sid, SECINITSID_XEN, SECCLASS_MMU, + MMU__MFNLIST, NULL); +} + +static int flask_console_io(struct domain *d, int cmd) +{ + u32 perm; + + switch (cmd) { + case CONSOLEIO_read: + perm = XEN__READCONSOLE; + break; + case CONSOLEIO_write: + perm = XEN__WRITECONSOLE; + break; + default: + return -EPERM; + } + + return domain_has_xen(d, perm); +} + +#define bucket_from_port(d,p) \ + ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET]) +#define evtchn_from_port(d,p) \ + (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)]) + +static int flask_pirq_unmask(struct domain *d, int pirq) +{ + int port; + struct evtchn *chn; + + port = d->pirq_to_evtchn[pirq]; + chn = evtchn_from_port(d, port); + + return domain_has_evtchn(d, chn, EVENT__UNMASK); +} + +static int flask_pirq_status(struct domain *d, int pirq) +{ + int port; + struct evtchn *chn; + + port = d->pirq_to_evtchn[pirq]; + chn = evtchn_from_port(d, port); + + return domain_has_evtchn(d, chn, EVENT__STATUS); +} + +static int flask_apic(struct domain *d, int cmd) +{ + u32 perm; + + switch (cmd) { + case PHYSDEVOP_APIC_READ: + perm = XEN__READAPIC; + break; + case PHYSDEVOP_APIC_WRITE: + perm = XEN__WRITEAPIC; + break; + default: + return -EPERM; + } + + return domain_has_xen(d, perm); +} + +static int flask_assign_vector(struct domain *d, uint32_t pirq) +{ + int port; + struct evtchn *chn; + + port = d->pirq_to_evtchn[pirq]; + chn = evtchn_from_port(d, port); + + return domain_has_evtchn(d, chn, EVENT__VECTOR); +} + +static int flask_profile(struct domain *d, int op) +{ + u32 perm; + + switch (op) { + case XENOPROF_init: + case XENOPROF_enable_virq: + case XENOPROF_disable_virq: + case XENOPROF_get_buffer: + perm = XEN__NONPRIVPROFILE; + break; + case XENOPROF_reset_active_list: + case XENOPROF_reset_passive_list: + case XENOPROF_set_active: + case XENOPROF_set_passive: + case XENOPROF_reserve_counters: + case XENOPROF_counter: + case XENOPROF_setup_events: + case XENOPROF_start: + case XENOPROF_stop: + case XENOPROF_release_counters: + case XENOPROF_shutdown: + perm = XEN__PRIVPROFILE; + break; + default: + return -EPERM; + } + + return domain_has_xen(d, perm); +} + +static int flask_hvm_param(unsigned long op, struct domain *d) +{ + u32 perm; + + switch (op) { + case HVMOP_set_param: + perm = HVM__SETPARAM; + break; + case HVMOP_get_param: + perm = HVM__GETPARAM; + break; + default: + return -EPERM; + } + + return domain_has_perm(current->domain, d, SECCLASS_HVM, perm); +} + +static int flask_hvm_set_pci_intx_level(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__PCILEVEL); +} + +static int flask_hvm_set_isa_irq_level(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__IRQLEVEL); +} + +static int 
flask_hvm_set_pci_link_route(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_HVM, HVM__PCIROUTE); +} + +static int flask_kexec(void) +{ + return domain_has_xen(current->domain, XEN__KEXEC); +} + +static void flask_security_domaininfo(struct domain *d, + struct xen_domctl_getdomaininfo *info) +{ + struct domain_security_struct *dsec; + + dsec = d->ssid; + info->ssidref = dsec->sid; +} + +static int flask_setvcpucontext(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__SETVCPUCONTEXT); +} + +static int flask_pausedomain(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__PAUSE); +} + +static int flask_unpausedomain(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__UNPAUSE); +} + +static int flask_createdomain(struct xen_domctl *op) +{ + int rc; + struct domain_security_struct *dsec; + + dsec = current->domain->ssid; + dsec->create_sid = op->u.createdomain.ssidref; + + rc = avc_has_perm(dsec->sid, dsec->create_sid, SECCLASS_DOMAIN, + DOMAIN__CREATE, NULL); + if (rc) + dsec->create_sid = SECINITSID_UNLABELED; + + return rc; +} + +static void flask_createdomain_post(struct domain *d, struct xen_domctl *op) +{ + struct domain_security_struct *dsec1; + struct domain_security_struct *dsec2; + + dsec1 = current->domain->ssid; + dsec2 = d->ssid; + + dsec2->sid = dsec1->create_sid; + + dsec2->create_sid = SECINITSID_UNLABELED; + dsec1->create_sid = SECINITSID_UNLABELED; +} + +static int flask_max_vcpus(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__MAX_VCPUS); +} + +static int flask_destroydomain(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__DESTROY); +} + +static int flask_vcpuaffinity(int cmd, struct domain *d) +{ + + u32 perm; + + switch (cmd) + { + case XEN_DOMCTL_setvcpuaffinity: + perm = DOMAIN__SETVCPUAFFINITY; + break; + case XEN_DOMCTL_getvcpuaffinity: + perm = DOMAIN__GETVCPUAFFINITY; + break; + default: + return -EPERM; + } + + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, perm ); +} + +static int flask_scheduler(struct domain *d) +{ + int rc = 0; + + rc = domain_has_xen(current->domain, XEN__SCHEDULER); + if (rc) + return rc; + + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__SCHEDULER); +} + +static int flask_getdomaininfo(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__GETDOMAININFO); +} + +static int flask_getvcpucontext(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__GETVCPUCONTEXT); +} + +static int flask_getvcpuinfo(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__GETVCPUINFO); +} + +static int flask_domain_settime(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, DOMAIN__SETTIME); +} + +static int flask_tbufcontrol(void) +{ + return domain_has_xen(current->domain, SECCLASS_XEN); +} + +static int flask_readconsole(uint32_t clear) +{ + u32 perms = XEN__READCONSOLE; + + if (clear) + perms |= XEN__CLEARCONSOLE; + + return domain_has_xen(current->domain, perms); +} + +static int flask_sched_id(void) +{ + return domain_has_xen(current->domain, XEN__SCHEDULER); +} + +static int flask_setdomainmaxmem(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__SETDOMAINMAXMEM); +} + +static int 
flask_setdomainhandle(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__SETDOMAINHANDLE); +} + +static int flask_setdebugging(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__SETDEBUGGING); +} + +static inline u32 resource_to_perm(uint8_t access) +{ + if (access) + return RESOURCE__ADD; + else + return RESOURCE__REMOVE; +} + +static int flask_irq_permission(struct domain *d, uint8_t pirq, uint8_t access) +{ + u32 perm; + u32 rsid; + int rc = -EPERM; + + struct domain_security_struct *ssec, *tsec; + + rc = domain_has_perm(current->domain, d, SECCLASS_RESOURCE, + resource_to_perm(access)); + + if (rc) + return rc; + + if (access) + perm = RESOURCE__ADD_IRQ; + else + perm = RESOURCE__REMOVE_IRQ; + + ssec = current->domain->ssid; + tsec = d->ssid; + + rc = security_pirq_sid(pirq, &rsid); + if (rc) + return rc; + + rc = avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, perm, NULL); + + if (rc) + return rc; + + return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE, + RESOURCE__USE, NULL); +} + +static int flask_iomem_permission(struct domain *d, unsigned long mfn, + uint8_t access) +{ + u32 perm; + u32 rsid; + int rc = -EPERM; + + struct domain_security_struct *ssec, *tsec; + + rc = domain_has_perm(current->domain, d, SECCLASS_RESOURCE, + resource_to_perm(access)); + + if (rc) + return rc; + + if (access) + perm = RESOURCE__ADD_IOMEM; + else + perm = RESOURCE__REMOVE_IOMEM; + + ssec = current->domain->ssid; + tsec = d->ssid; + + rc = security_iomem_sid(mfn, &rsid); + if (rc) + return rc; + + rc = avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, perm, NULL); + + if (rc) + return rc; + + return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE, + RESOURCE__USE, NULL); +} + +static int flask_perfcontrol(void) +{ + return domain_has_xen(current->domain, XEN__PERFCONTROL); +} + +static int flask_shadow_control(struct domain *d, uint32_t op) +{ + u32 perm; + + switch (op) { + case XEN_DOMCTL_SHADOW_OP_OFF: + perm = SHADOW__DISABLE; + break; + case XEN_DOMCTL_SHADOW_OP_ENABLE: + case XEN_DOMCTL_SHADOW_OP_ENABLE_TEST: + case XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE: + case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION: + case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION: + perm = SHADOW__ENABLE; + break; + case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY: + case XEN_DOMCTL_SHADOW_OP_PEEK: + case XEN_DOMCTL_SHADOW_OP_CLEAN: + perm = SHADOW__LOGDIRTY; + default: + return -EPERM; + } + + return domain_has_perm(current->domain, d, SECCLASS_SHADOW, perm); +} + +static int flask_xen_settime(void) +{ + return domain_has_xen(current->domain, XEN__SETTIME); +} + +static int flask_memtype(uint32_t access) +{ + u32 perm; + + switch (access) { + case XENPF_add_memtype: + perm = XEN__MTRR_ADD; + break; + case XENPF_del_memtype: + perm = XEN__MTRR_DEL; + break; + case XENPF_read_memtype: + perm = XEN__MTRR_READ; + break; + default: + return -EPERM; + } + + return domain_has_xen(current->domain, perm); +} + +static int flask_microcode(void) +{ + return domain_has_xen(current->domain, XEN__MICROCODE); +} + +static int flask_ioport_permission(struct domain *d, uint32_t ioport, + uint8_t access) +{ + u32 perm; + u32 rsid; + int rc = -EPERM; + + struct domain_security_struct *ssec, *tsec; + + rc = domain_has_perm(current->domain, d, SECCLASS_RESOURCE, + resource_to_perm(access)); + + if (rc) + return rc; + + if (access) + perm = RESOURCE__ADD_IOPORT; + else + perm = RESOURCE__REMOVE_IOPORT; + + ssec = current->domain->ssid; + tsec = d->ssid; + + rc = 
security_ioport_sid(ioport, &rsid); + if (rc) + return rc; + + rc = avc_has_perm(ssec->sid, rsid, SECCLASS_RESOURCE, perm, NULL); + if (rc) + return rc; + + return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE, + RESOURCE__USE, NULL); +} + +static int flask_physinfo(void) +{ + return domain_has_xen(current->domain, XEN__PHYSINFO); +} + +static int flask_getpageframeinfo(unsigned long mfn) +{ + struct page_info *page; + struct domain *d; + struct domain_security_struct *ssec, *tsec; + + ssec = current->domain->ssid; + + if (mfn_valid(mfn)) + page = mfn_to_page(mfn); + else + return -EPERM; + + d = page_get_owner(page); + + tsec = d->ssid; + + return avc_has_perm(ssec->sid, tsec->sid, SECCLASS_MMU, + MMU__PAGEINFO, NULL); +} + +static int flask_getmemlist(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_MMU, MMU__PAGELIST); +} + +static int flask_platform_quirk(uint32_t quirk) +{ + struct domain_security_struct *dsec; + dsec = current->domain->ssid; + + return avc_has_perm(dsec->sid, SECINITSID_XEN, SECCLASS_XEN, + XEN__QUIRK, NULL); +} + +static int flask_hypercall_init(struct domain *d) +{ + return domain_has_perm(current->domain, d, SECCLASS_DOMAIN, + DOMAIN__HYPERCALL); +} + +static int domain_has_security(struct domain *d, u32 perms) +{ + struct domain_security_struct *dsec; + + dsec = d->ssid; + if (!dsec) + return -EACCES; + + return avc_has_perm(dsec->sid, SECINITSID_SECURITY, SECCLASS_SECURITY, + perms, NULL); +} + +static int flask_security_user(char *buf, int size) +{ + char *page = NULL; + char *con, *user, *ptr; + u32 sid, *sids; + int length; + char *newcon; + int i, rc; + u32 len, nsids; + + length = domain_has_security(current->domain, SECURITY__COMPUTE_USER); + if (length) + return length; + + length = -ENOMEM; + con = xmalloc_array(char, size+1); + if (!con) + return length; + memset(con, 0, size+1); + + user = xmalloc_array(char, size+1); + if (!user) + goto out; + memset(user, 0, size+1); + + length = -ENOMEM; + page = xmalloc_bytes(PAGE_SIZE); + if (!page) + goto out2; + memset(page, 0, PAGE_SIZE); + + length = -EFAULT; + if ( copy_from_user(page, buf, size) ) + goto out2; + + length = -EINVAL; + if (sscanf(page, "%s %s", con, user) != 2) + goto out2; + + length = security_context_to_sid(con, strlen(con)+1, &sid); + if (length < 0) + goto out2; + + length = security_get_user_sids(sid, user, &sids, &nsids); + if (length < 0) + goto out2; + + memset(page, 0, PAGE_SIZE); + length = sprintf(page, "%u", nsids) + 1; + ptr = page + length; + for (i = 0; i < nsids; i++) { + rc = security_sid_to_context(sids[i], &newcon, &len); + if (rc) { + length = rc; + goto out3; + } + if ((length + len) >= PAGE_SIZE) { + xfree(newcon); + length = -ERANGE; + goto out3; + } + memcpy(ptr, newcon, len); + xfree(newcon); + ptr += len; + length += len; + } + + if ( copy_to_user(buf, page, length) ) + length = -EFAULT; + +out3: + xfree(sids); +out2: + if (page) + xfree(page); + xfree(user); +out: + xfree(con); + return length; +} + +static int flask_security_relabel(char *buf, int size) +{ + char *scon, *tcon; + u32 ssid, tsid, newsid; + u16 tclass; + int length; + char *newcon; + u32 len; + + length = domain_has_security(current->domain, SECURITY__COMPUTE_RELABEL); + if (length) + return length; + + length = -ENOMEM; + scon = xmalloc_array(char, size+1); + if (!scon) + return length; + memset(scon, 0, size+1); + + tcon = xmalloc_array(char, size+1); + if (!tcon) + goto out; + memset(tcon, 0, size+1); + + length = -EINVAL; + if (sscanf(buf, "%s %s %hu", scon, tcon, 
&tclass) != 3) + goto out2; + + length = security_context_to_sid(scon, strlen(scon)+1, &ssid); + if (length < 0) + goto out2; + length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid); + if (length < 0) + goto out2; + + length = security_change_sid(ssid, tsid, tclass, &newsid); + if (length < 0) + goto out2; + + length = security_sid_to_context(newsid, &newcon, &len); + if (length < 0) + goto out2; + + if (len > PAGE_SIZE) { + length = -ERANGE; + goto out3; + } + + if ( copy_to_user(buf, newcon, len) ) + len = -EFAULT; + + length = len; + +out3: + xfree(newcon); +out2: + xfree(tcon); +out: + xfree(scon); + return length; +} + +static int flask_security_create(char *buf, int size) +{ + char *scon, *tcon; + u32 ssid, tsid, newsid; + u16 tclass; + int length; + char *newcon; + u32 len; + + length = domain_has_security(current->domain, SECURITY__COMPUTE_CREATE); + if (length) + return length; + + length = -ENOMEM; + scon = xmalloc_array(char, size+1); + if (!scon) + return length; + memset(scon, 0, size+1); + + tcon = xmalloc_array(char, size+1); + if (!tcon) + goto out; + memset(tcon, 0, size+1); + + length = -EINVAL; + if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) + goto out2; + + length = security_context_to_sid(scon, strlen(scon)+1, &ssid); + if (length < 0) + goto out2; + length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid); + if (length < 0) + goto out2; + + length = security_transition_sid(ssid, tsid, tclass, &newsid); + if (length < 0) + goto out2; + + length = security_sid_to_context(newsid, &newcon, &len); + if (length < 0) + goto out2; + + if (len > PAGE_SIZE) { + printk( "%s: context size (%u) exceeds payload " + "max\n", __FUNCTION__, len); + length = -ERANGE; + goto out3; + } + + if ( copy_to_user(buf, newcon, len) ) + len = -EFAULT; + + length = len; + +out3: + xfree(newcon); +out2: + xfree(tcon); +out: + xfree(scon); + return length; +} + +static int flask_security_access(char *buf, int size) +{ + char *page = NULL; + char *scon, *tcon; + u32 ssid, tsid; + u16 tclass; + u32 req; + struct av_decision avd; + int length; + + length = domain_has_security(current->domain, SECURITY__COMPUTE_AV); + if (length) + return length; + + length = -ENOMEM; + scon = xmalloc_array(char, size+1); + if (!scon) + return length; + memset(scon, 0, size+1); + + tcon = xmalloc_array(char, size+1); + if (!tcon) + goto out; + memset(tcon, 0, size+1); + + length = -EINVAL; + if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4) + goto out2; + + length = security_context_to_sid(scon, strlen(scon)+1, &ssid); + if (length < 0) + goto out2; + length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid); + if (length < 0) + goto out2; + + length = security_compute_av(ssid, tsid, tclass, req, &avd); + if (length < 0) + goto out2; + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) { + length = -ENOMEM; + goto out2; + } + + memset(page, 0, PAGE_SIZE); + + length = snprintf(page, PAGE_SIZE, "%x %x %x %x %u", + avd.allowed, avd.decided, + avd.auditallow, avd.auditdeny, + avd.seqno); + + if ( copy_to_user(buf, page, length) ) + length = -EFAULT; + +out2: + xfree(tcon); +out: + xfree(scon); + return length; +} + +static int flask_security_member(char *buf, int size) +{ + char *scon, *tcon; + u32 ssid, tsid, newsid; + u16 tclass; + int length; + char *newcon; + u32 len; + + length = domain_has_security(current->domain, SECURITY__COMPUTE_MEMBER); + if (length) + return length; + + length = -ENOMEM; + scon = xmalloc_array(char, size+1); + if (!scon) + return length; + 
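+    /*
+     * Like the other flask_security_* handlers, this parses a
+     * "scontext tcontext tclass" triple from the caller's buffer, maps
+     * both contexts to SIDs, asks the security server for a result SID
+     * (security_member_sid() here), and copies the resulting context
+     * string back out.
+     */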
memset(scon, 0, size+1); + + tcon = xmalloc_array(char, size+1); + if (!tcon) + goto out; + memset(tcon, 0, size+1); + + length = -EINVAL; + if (sscanf(buf, "%s, %s, %hu", scon, tcon, &tclass) != 3) + goto out2; + + length = security_context_to_sid(scon, strlen(scon)+1, &ssid); + if (length < 0) + goto out2; + length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid); + if (length < 0) + goto out2; + + length = security_member_sid(ssid, tsid, tclass, &newsid); + if (length < 0) + goto out2; + + length = security_sid_to_context(newsid, &newcon, &len); + if (length < 0) + goto out2; + + if (len > PAGE_SIZE) { + printk("%s: context size (%u) exceeds payload " + "max\n", __FUNCTION__, len); + length = -ERANGE; + goto out3; + } + + if ( copy_to_user(buf, newcon, len) ) + len = -EFAULT; + + length = len; + +out3: + xfree(newcon); +out2: + xfree(tcon); +out: + xfree(scon); + return length; +} + +static int flask_security_setenforce(char *buf, int count) +{ + char *page = NULL; + int length; + int new_value; + + if (count < 0 || count >= PAGE_SIZE) + return -ENOMEM; + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + length = -EFAULT; + if (copy_from_user(page, buf, count)) + goto out; + + length = -EINVAL; + if (sscanf(page, "%d", &new_value) != 1) + goto out; + + if (new_value != flask_enforcing) { + length = domain_has_security(current->domain, SECURITY__SETENFORCE); + if (length) + goto out; + flask_enforcing = new_value; + if (flask_enforcing) + avc_ss_reset(0); + } + length = count; + +out: + xfree(page); + return length; +} + +static int flask_security_context(char *buf, int count) +{ + char *page = NULL; + u32 sid; + int length; + + length = domain_has_security(current->domain, SECURITY__CHECK_CONTEXT); + if (length) + goto out; + + if (count < 0 || count >= PAGE_SIZE) + return -ENOMEM; + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + length = -EFAULT; + if (copy_from_user(page, buf, count)) + goto out; + + length = security_context_to_sid(page, count, &sid); + if (length < 0) + goto out; + + memset(page, 0, PAGE_SIZE); + length = sprintf(page, "%u", sid); + + if ( copy_to_user(buf, page, count) ) + length = -EFAULT; + +out: + xfree(page); + return length; +} + +static int flask_security_sid(char *buf, int count) +{ + char *page = NULL; + char *context; + u32 sid; + u32 len; + int length; + + length = domain_has_security(current->domain, SECURITY__CHECK_CONTEXT); + if (length) + goto out; + + if (count < 0 || count >= PAGE_SIZE) + return -ENOMEM; + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + length = -EFAULT; + if (copy_from_user(page, buf, count)) + goto out; + + if (sscanf(page, "%u", &sid) != 1) + goto out; + + length = security_sid_to_context(sid, &context, &len); + if (length < 0) + goto out; + + if ( copy_to_user(buf, context, len) ) + length = -EFAULT; + + xfree(context); + +out: + xfree(page); + return length; +} + +int flask_disable(void) +{ + static int flask_disabled = 0; + + if (ss_initialized) { + /* Not permitted after initial policy load. */ + return -EINVAL; + } + + if (flask_disabled) { + /* Only do this once. */ + return -EINVAL; + } + + printk("Flask: Disabled at runtime.\n"); + + flask_disabled = 1; + + /* Reset xsm_ops to the original module. 
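+     * Subsequent XSM hook calls then go through original_ops, i.e.
+     * whatever operations were in place before Flask's.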
*/ + xsm_ops = original_ops; + + return 0; +} + +static int flask_security_disable(char *buf, int count) +{ + char *page = NULL; + int length; + int new_value; + + if (count < 0 || count >= PAGE_SIZE) + return -ENOMEM; + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + length = -EFAULT; + if (copy_from_user(page, buf, count)) + goto out; + + length = -EINVAL; + if (sscanf(page, "%d", &new_value) != 1) + goto out; + + if (new_value) { + length = flask_disable(); + if (length < 0) + goto out; + } + + length = count; + +out: + xfree(page); + return length; +} + +static int flask_security_setavc_threshold(char *buf, int count) +{ + char *page = NULL; + int ret; + int new_value; + + if (count < 0 || count >= PAGE_SIZE) { + ret = -ENOMEM; + goto out; + } + + page = (char*)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + + if (copy_from_user(page, buf, count)) { + ret = -EFAULT; + goto out_free; + } + + if (sscanf(page, "%u", &new_value) != 1) { + ret = -EINVAL; + goto out_free; + } + + if (new_value != avc_cache_threshold) { + ret = domain_has_security(current->domain, SECURITY__SETSECPARAM); + if (ret) + goto out_free; + avc_cache_threshold = new_value; + } + ret = count; + +out_free: + xfree(page); +out: + return ret; +} + +static int flask_security_set_bool(char *buf, int count) +{ + char *page = NULL; + int length = -EFAULT; + int i, new_value; + + spin_lock(&sel_sem); + + length = domain_has_security(current->domain, SECURITY__SETBOOL); + if (length) + goto out; + + if (count < 0 || count >= PAGE_SIZE) { + length = -ENOMEM; + goto out; + } + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) { + length = -ENOMEM; + goto out; + } + memset(page, 0, PAGE_SIZE); + + if (copy_from_user(page, buf, count)) + goto out; + + length = -EINVAL; + if (sscanf(page, "%d %d", &i, &new_value) != 2) + goto out; + + if (new_value) { + new_value = 1; + } + + bool_pending_values[i] = new_value; + length = count; + +out: + spin_unlock(&sel_sem); + if (page) + xfree(page); + return length; +} + +static int flask_security_commit_bools(char *buf, int count) +{ + char *page = NULL; + int length = -EFAULT; + int new_value; + + spin_lock(&sel_sem); + + length = domain_has_security(current->domain, SECURITY__SETBOOL); + if (length) + goto out; + + if (count < 0 || count >= PAGE_SIZE) { + length = -ENOMEM; + goto out; + } + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) { + length = -ENOMEM; + goto out; + } + memset(page, 0, PAGE_SIZE); + + if (copy_from_user(page, buf, count)) + goto out; + + length = -EINVAL; + if (sscanf(page, "%d", &new_value) != 1) + goto out; + + if (new_value) { + security_set_bools(bool_num, bool_pending_values); + } + + length = count; + +out: + spin_unlock(&sel_sem); + if (page) + xfree(page); + return length; +} + +static int flask_security_get_bool(char *buf, int count) +{ + char *page = NULL; + int length; + int i, cur_enforcing; + + spin_lock(&sel_sem); + + length = -EFAULT; + + if (count < 0 || count > PAGE_SIZE) { + length = -EINVAL; + goto out; + } + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) { + length = -ENOMEM; + goto out; + } + memset(page, 0, PAGE_SIZE); + + if (copy_from_user(page, buf, count)) + goto out; + + length = -EINVAL; + if (sscanf(page, "%d", &i) != 1) + goto out; + + cur_enforcing = security_get_bool_value(i); + if (cur_enforcing < 0) { + length = cur_enforcing; + goto out; + } + + length = snprintf(page, PAGE_SIZE, "%d %d", cur_enforcing, + 
bool_pending_values[i]); + if (length < 0) + goto out; + + if ( copy_to_user(buf, page, length) ) + length = -EFAULT; + +out: + spin_unlock(&sel_sem); + if (page) + xfree(page); + return length; +} + +static int flask_security_make_bools(void) +{ + int i, ret = 0; + char **names = NULL; + int num; + int *values = NULL; + + xfree(bool_pending_values); + + ret = security_get_bools(&num, &names, &values); + if (ret != 0) + goto out; + + bool_num = num; + bool_pending_values = values; + +out: + if (names) { + for (i = 0; i < num; i++) + xfree(names[i]); + xfree(names); + } + return ret; +} + +#ifdef FLASK_AVC_STATS + +static int flask_security_avc_cachestats(char *buf, int count) { + + char *page = NULL; + int len = 0; + int length = 0; + long long idx = 0; + int cpu; + struct avc_cache_stats *st; + + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + + len = sprintf(page, "lookups hits misses allocations reclaims " + "frees\n"); + memcpy(buf, page, len); + buf += len; + length += len; + + for (cpu = idx; cpu < NR_CPUS; ++cpu) { + if (!cpu_possible(cpu)) + continue; + idx = cpu + 1; + st = &per_cpu(avc_cache_stats, cpu); + + len = sprintf(page, "%u %u %u %u %u %u\n", st->lookups, + st->hits, st->misses, st->allocations, + st->reclaims, st->frees); + memcpy(buf, page, len); + buf += len; + length += len; + } + + xfree(page); + return length; +} + +#endif + +static int flask_security_load(char *buf, int count) +{ + int ret; + int length; + void *data = NULL; + + spin_lock(&sel_sem); + + length = domain_has_security(current->domain, SECURITY__LOAD_POLICY); + if (length) + goto out; + + if ((count < 0) || (count > 64 * 1024 * 1024) + || (data = xmalloc_array(char, count)) == NULL) { + length = -ENOMEM; + goto out; + } + + length = -EFAULT; + if (copy_from_user(data, buf, count) != 0) + goto out; + + length = security_load_policy(data, count); + if (length) + goto out; + + ret = flask_security_make_bools(); + if (ret) + length = ret; + else + length = count; + +out: + spin_unlock(&sel_sem); + xfree(data); + return length; +} + + +void flask_complete_init(struct domain *d) +{ + struct domain_security_struct *dsec; + + /* Set the security state for the Dom0 domain. 
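The boolean handlers above exchange small decimal strings rather than binary structures: FLASK_SETBOOL parses "<index> <value>", FLASK_GETBOOL parses "<index>" and replies with "<current> <pending>", and FLASK_COMMITBOOLS parses a single flag. A stand-alone sketch of just those payload formats, mirroring the sscanf()/snprintf() calls in the handlers (the hypercall transport and the sample index 3 are illustrative):

    #include <stdio.h>

    int main(void)
    {
        char req[64], reply[64];
        int cur, pending;

        /* FLASK_SETBOOL request payload: "<bool index> <new value>" */
        snprintf(req, sizeof(req), "%d %d", 3, 1);

        /* FLASK_GETBOOL reply payload: "<current value> <pending value>" */
        snprintf(reply, sizeof(reply), "%d %d", 0, 1);
        if (sscanf(reply, "%d %d", &cur, &pending) == 2)
            printf("bool 3: current=%d pending=%d (set request was \"%s\")\n",
                   cur, pending, req);

        return 0;
    }

Pending values only take effect once FLASK_COMMITBOOLS is issued, which is why the get path reports both the current and the pending state.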
*/ + dsec = d->ssid; + dsec->sid = SECINITSID_DOM0; + dsec->create_sid = SECINITSID_UNLABELED; + + printk("Flask: Completed initialization.\n"); +} + +#define FLASK_LOAD 1 +#define FLASK_GETENFORCE 2 +#define FLASK_SETENFORCE 3 +#define FLASK_CONTEXT_TO_SID 4 +#define FLASK_SID_TO_CONTEXT 5 +#define FLASK_ACCESS 6 +#define FLASK_CREATE 7 +#define FLASK_RELABEL 8 +#define FLASK_USER 9 +#define FLASK_POLICYVERS 10 +#define FLASK_GETBOOL 11 +#define FLASK_SETBOOL 12 +#define FLASK_COMMITBOOLS 13 +#define FLASK_MLS 14 +#define FLASK_DISABLE 15 +#define FLASK_GETAVC_THRESHOLD 16 +#define FLASK_SETAVC_THRESHOLD 17 +#define FLASK_AVC_HASHSTATS 18 +#define FLASK_AVC_CACHESTATS 19 +#define FLASK_MEMBER 20 + +typedef struct flask_op { + int size; + char *buf; +} flask_op_t; + +DEFINE_XEN_GUEST_HANDLE(flask_op_t); + +static long do_flask_op(int cmd, XEN_GUEST_HANDLE(xsm_op_t) u_flask_op) +{ + + flask_op_t curop, *op = &curop; + int rc = 0; + int length = 0; + char *page = NULL; + + if ( copy_from_guest(op, u_flask_op, 1) ) + return -EFAULT; + + switch ( cmd ) + { + + case FLASK_LOAD: + { + length = flask_security_load(op->buf, op->size); + } + break; + + case FLASK_GETENFORCE: + { + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + + length = sprintf(page, "%d", flask_enforcing); + + if( copy_to_user(op->buf, page, length) ) { + rc = -EFAULT; + goto out; + } + } + break; + + case FLASK_SETENFORCE: + { + length = flask_security_setenforce(op->buf, op->size); + } + break; + + case FLASK_CONTEXT_TO_SID: + { + length = flask_security_context(op->buf, op->size); + } + break; + + case FLASK_SID_TO_CONTEXT: + { + length = flask_security_sid(op->buf, op->size); + } + break; + + case FLASK_ACCESS: + { + length = flask_security_access(op->buf, op->size); + } + break; + + case FLASK_CREATE: + { + length = flask_security_create(op->buf, op->size); + } + break; + + case FLASK_RELABEL: + { + length = flask_security_relabel(op->buf, op->size); + } + break; + + case FLASK_USER: + { + length = flask_security_user(op->buf, op->size); + } + break; + + case FLASK_POLICYVERS: + { + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + + length = sprintf(page, "%d", POLICYDB_VERSION_MAX); + + if ( copy_to_user(op->buf, page, length) ) { + rc = -EFAULT; + goto out; + } + } + break; + + case FLASK_GETBOOL: + { + length = flask_security_get_bool(op->buf, op->size); + } + break; + + case FLASK_SETBOOL: + { + length = flask_security_set_bool(op->buf, op->size); + } + break; + + case FLASK_COMMITBOOLS: + { + length = flask_security_commit_bools(op->buf, op->size); + } + break; + + case FLASK_MLS: + { + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + + length = sprintf(page, "%d", flask_mls_enabled); + + if ( copy_to_user(op->buf, page, length) ) { + rc = -EFAULT; + goto out; + } + } + break; + + case FLASK_DISABLE: + { + length = flask_security_disable(op->buf, op->size); + } + break; + + case FLASK_GETAVC_THRESHOLD: + { + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + return -ENOMEM; + memset(page, 0, PAGE_SIZE); + + length = sprintf(page, "%d", avc_cache_threshold); + + if ( copy_to_user(op->buf, page, length) ) { + rc = -EFAULT; + goto out; + } + } + break; + + case FLASK_SETAVC_THRESHOLD: + { + length = flask_security_setavc_threshold(op->buf, op->size); + } + break; + + case FLASK_AVC_HASHSTATS: + { + page = (char *)xmalloc_bytes(PAGE_SIZE); + if (!page) + 
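Every FLASK_* command above travels through the same envelope: a flask_op_t holding a text buffer and its size, with do_flask_op() writing the reply length back into op->size. A hypothetical guest-side sketch of that envelope for FLASK_GETENFORCE; the xsm_op() function below is a made-up stand-in for the real XSM op hypercall so the example is self-contained:

    #include <stdio.h>
    #include <string.h>

    #define FLASK_GETENFORCE 2

    typedef struct flask_op {
        int   size;   /* in: buffer size, out: length of the reply */
        char *buf;    /* in/out: text payload */
    } flask_op_t;

    /* Stand-in for the hypercall: fake the "%d" reply of FLASK_GETENFORCE. */
    static int xsm_op(int cmd, flask_op_t *op)
    {
        if (cmd != FLASK_GETENFORCE)
            return -1;
        op->size = snprintf(op->buf, op->size, "%d", 1);
        return 0;
    }

    int main(void)
    {
        char buf[32];
        flask_op_t op = { .size = sizeof(buf), .buf = buf };

        memset(buf, 0, sizeof(buf));
        if (xsm_op(FLASK_GETENFORCE, &op) == 0)
            printf("enforcing=%s (reply length %d)\n", buf, op.size);
        return 0;
    }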
return -ENOMEM; + memset(page, 0, PAGE_SIZE); + + length = avc_get_hash_stats(page); + + if ( copy_to_user(op->buf, page, length) ) { + rc = -EFAULT; + goto out; + } + } + break; +#ifdef FLASK_AVC_STATS + case FLASK_AVC_CACHESTATS: + { + length = flask_security_avc_cachestats(op->buf, op->size); + } + break; +#endif + case FLASK_MEMBER: + { + length = flask_security_member(op->buf, op->size); + } + break; + + default: + { + length = -ENOSYS; + break; + } + + } + + if (length < 0) { + rc = length; + goto out; + } + op->size = length; + if ( copy_to_guest(u_flask_op, op, 1) ) + rc = -EFAULT; + +out: + if (page) + xfree(page); + return rc; +} + +static struct xsm_operations flask_ops = { + .security_domaininfo = flask_security_domaininfo, + .setvcpucontext = flask_setvcpucontext, + .pausedomain = flask_pausedomain, + .unpausedomain = flask_unpausedomain, + .createdomain = flask_createdomain, + .createdomain_post = flask_createdomain_post, + .createdomain_fail = NULL, + .max_vcpus = flask_max_vcpus, + .destroydomain = flask_destroydomain, + .vcpuaffinity = flask_vcpuaffinity, + .scheduler = flask_scheduler, + .getdomaininfo = flask_getdomaininfo, + .getvcpucontext = flask_getvcpucontext, + .getvcpuinfo = flask_getvcpuinfo, + .domain_settime = flask_domain_settime, + .tbufcontrol = flask_tbufcontrol, + .readconsole = flask_readconsole, + .sched_id = flask_sched_id, + .setdomainmaxmem = flask_setdomainmaxmem, + .setdomainhandle = flask_setdomainhandle, + .setdebugging = flask_setdebugging, + .irq_permission = flask_irq_permission, + .iomem_permission = flask_iomem_permission, + .perfcontrol = flask_perfcontrol, + + .shadow_control = flask_shadow_control, + .xen_settime = flask_xen_settime, + .memtype = flask_memtype, + .microcode = flask_microcode, + .ioport_permission = flask_ioport_permission, + .physinfo = flask_physinfo, + .getpageframeinfo = flask_getpageframeinfo, + .getmemlist = flask_getmemlist, + .platform_quirk = flask_platform_quirk, + .hypercall_init = flask_hypercall_init, + + .evtchn_unbound = flask_evtchn_unbound, + .evtchn_interdomain = flask_evtchn_interdomain, + .evtchn_virq = flask_evtchn_virq, + .evtchn_ipi = flask_evtchn_ipi, + .evtchn_pirq = flask_evtchn_pirq, + .evtchn_close = flask_evtchn_close, + .evtchn_close_post = flask_evtchn_close_post, + .evtchn_send = flask_evtchn_send, + .evtchn_status = flask_evtchn_status, + .evtchn_vcpu = flask_evtchn_vcpu, + .evtchn_unmask = flask_evtchn_unmask, + .evtchn_init = flask_evtchn_init, + + .grant_mapref = flask_grant_mapref, + .grant_unmapref = flask_grant_unmapref, + .grant_setup = flask_grant_setup, + .grant_transfer = flask_grant_transfer, + .grant_copy = flask_grant_copy, + + .alloc_security_domain = flask_domain_alloc_security, + .free_security_domain = flask_domain_free_security, + .alloc_security_evtchn = flask_alloc_security_evtchn, + .free_security_evtchn = flask_free_security_evtchn, + + .mmu_normal_update = flask_mmu_normal_update, + .mmu_machphys_update = flask_mmu_machphys_update, + .translate_gpfn_list = flask_translate_gpfn_list, + .memory_adjust_reservation = flask_memory_adjust_reservation, + .memory_stat_reservation = flask_memory_stat_reservation, + .memory_pin_page = flask_memory_pin_page, + .update_va_mapping = flask_update_va_mapping, + .add_to_physmap = flask_add_to_physmap, + .machine_memory_map = flask_machine_memory_map, + .domain_memory_map = flask_domain_memory_map, + .machphys_mfn_list = flask_machphys_mfn_list, + + .console_io = flask_console_io, + + .pirq_unmask = flask_pirq_unmask, + .pirq_status = 
flask_pirq_status, + .apic = flask_apic, + .assign_vector = flask_assign_vector, + + .profile = flask_profile, + + .hvm_param = flask_hvm_param, + .hvm_set_pci_intx_level = flask_hvm_set_pci_intx_level, + .hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level, + .hvm_set_pci_link_route = flask_hvm_set_pci_link_route, + + .kexec = flask_kexec, + + .__do_xsm_op = do_flask_op, + .complete_init = flask_complete_init, +}; + +static __init int flask_init(void) +{ + int ret = 0; + + if (!flask_enabled) { + printk("Flask: Disabled at boot.\n"); + return 0; + } + + printk("Flask: Initializing.\n"); + + avc_init(); + + original_ops = xsm_ops; + if (register_xsm(&flask_ops)) + panic("Flask: Unable to register with XSM.\n"); + + ret = security_load_policy(policy_buffer, policy_size); + + if (flask_enforcing) { + printk("Flask: Starting in enforcing mode.\n"); + } else { + printk("Flask: Starting in permissive mode.\n"); + } + + return ret; +} + +xsm_initcall(flask_init); diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/av_inherit.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/av_inherit.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,1 @@ +/* This file is automatically generated. Do not edit. */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/av_perm_to_string.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/av_perm_to_string.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,91 @@ +/* This file is automatically generated. Do not edit. */ + S_(SECCLASS_XEN, XEN__SCHEDULER, "scheduler") + S_(SECCLASS_XEN, XEN__SETTIME, "settime") + S_(SECCLASS_XEN, XEN__TBUFCONTROL, "tbufcontrol") + S_(SECCLASS_XEN, XEN__READCONSOLE, "readconsole") + S_(SECCLASS_XEN, XEN__CLEARCONSOLE, "clearconsole") + S_(SECCLASS_XEN, XEN__PERFCONTROL, "perfcontrol") + S_(SECCLASS_XEN, XEN__MTRR_ADD, "mtrr_add") + S_(SECCLASS_XEN, XEN__MTRR_DEL, "mtrr_del") + S_(SECCLASS_XEN, XEN__MTRR_READ, "mtrr_read") + S_(SECCLASS_XEN, XEN__MICROCODE, "microcode") + S_(SECCLASS_XEN, XEN__PHYSINFO, "physinfo") + S_(SECCLASS_XEN, XEN__QUIRK, "quirk") + S_(SECCLASS_XEN, XEN__WRITECONSOLE, "writeconsole") + S_(SECCLASS_XEN, XEN__READAPIC, "readapic") + S_(SECCLASS_XEN, XEN__WRITEAPIC, "writeapic") + S_(SECCLASS_XEN, XEN__PRIVPROFILE, "privprofile") + S_(SECCLASS_XEN, XEN__NONPRIVPROFILE, "nonprivprofile") + S_(SECCLASS_XEN, XEN__KEXEC, "kexec") + S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUCONTEXT, "setvcpucontext") + S_(SECCLASS_DOMAIN, DOMAIN__PAUSE, "pause") + S_(SECCLASS_DOMAIN, DOMAIN__UNPAUSE, "unpause") + S_(SECCLASS_DOMAIN, DOMAIN__CREATE, "create") + S_(SECCLASS_DOMAIN, DOMAIN__MAX_VCPUS, "max_vcpus") + S_(SECCLASS_DOMAIN, DOMAIN__DESTROY, "destroy") + S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUAFFINITY, "setvcpuaffinity") + S_(SECCLASS_DOMAIN, DOMAIN__GETVCPUAFFINITY, "getvcpuaffinity") + S_(SECCLASS_DOMAIN, DOMAIN__SCHEDULER, "scheduler") + S_(SECCLASS_DOMAIN, DOMAIN__GETDOMAININFO, "getdomaininfo") + S_(SECCLASS_DOMAIN, DOMAIN__GETVCPUINFO, "getvcpuinfo") + S_(SECCLASS_DOMAIN, DOMAIN__GETVCPUCONTEXT, "getvcpucontext") + S_(SECCLASS_DOMAIN, DOMAIN__SETDOMAINMAXMEM, "setdomainmaxmem") + S_(SECCLASS_DOMAIN, DOMAIN__SETDOMAINHANDLE, "setdomainhandle") + S_(SECCLASS_DOMAIN, DOMAIN__SETDEBUGGING, "setdebugging") + S_(SECCLASS_DOMAIN, DOMAIN__HYPERCALL, "hypercall") + S_(SECCLASS_DOMAIN, DOMAIN__TRANSITION, "transition") + S_(SECCLASS_DOMAIN, DOMAIN__SETTIME, "settime") + S_(SECCLASS_HVM, HVM__SETPARAM, "setparam") + S_(SECCLASS_HVM, HVM__GETPARAM, "getparam") + S_(SECCLASS_HVM, HVM__PCILEVEL, "pcilevel") + 
S_(SECCLASS_HVM, HVM__IRQLEVEL, "irqlevel") + S_(SECCLASS_HVM, HVM__PCIROUTE, "pciroute") + S_(SECCLASS_EVENT, EVENT__BIND, "bind") + S_(SECCLASS_EVENT, EVENT__CLOSE, "close") + S_(SECCLASS_EVENT, EVENT__SEND, "send") + S_(SECCLASS_EVENT, EVENT__STATUS, "status") + S_(SECCLASS_EVENT, EVENT__UNMASK, "unmask") + S_(SECCLASS_EVENT, EVENT__NOTIFY, "notify") + S_(SECCLASS_EVENT, EVENT__CREATE, "create") + S_(SECCLASS_EVENT, EVENT__ALLOC, "alloc") + S_(SECCLASS_EVENT, EVENT__VECTOR, "vector") + S_(SECCLASS_GRANT, GRANT__MAP_READ, "map_read") + S_(SECCLASS_GRANT, GRANT__MAP_WRITE, "map_write") + S_(SECCLASS_GRANT, GRANT__UNMAP, "unmap") + S_(SECCLASS_GRANT, GRANT__TRANSFER, "transfer") + S_(SECCLASS_GRANT, GRANT__SETUP, "setup") + S_(SECCLASS_GRANT, GRANT__COPY, "copy") + S_(SECCLASS_MMU, MMU__MAP_READ, "map_read") + S_(SECCLASS_MMU, MMU__MAP_WRITE, "map_write") + S_(SECCLASS_MMU, MMU__PAGEINFO, "pageinfo") + S_(SECCLASS_MMU, MMU__PAGELIST, "pagelist") + S_(SECCLASS_MMU, MMU__ADJUST, "adjust") + S_(SECCLASS_MMU, MMU__STAT, "stat") + S_(SECCLASS_MMU, MMU__TRANSLATEGP, "translategp") + S_(SECCLASS_MMU, MMU__UPDATEMP, "updatemp") + S_(SECCLASS_MMU, MMU__PHYSMAP, "physmap") + S_(SECCLASS_MMU, MMU__PINPAGE, "pinpage") + S_(SECCLASS_MMU, MMU__MFNLIST, "mfnlist") + S_(SECCLASS_MMU, MMU__MEMORYMAP, "memorymap") + S_(SECCLASS_SHADOW, SHADOW__DISABLE, "disable") + S_(SECCLASS_SHADOW, SHADOW__ENABLE, "enable") + S_(SECCLASS_SHADOW, SHADOW__LOGDIRTY, "logdirty") + S_(SECCLASS_RESOURCE, RESOURCE__ADD, "add") + S_(SECCLASS_RESOURCE, RESOURCE__REMOVE, "remove") + S_(SECCLASS_RESOURCE, RESOURCE__USE, "use") + S_(SECCLASS_RESOURCE, RESOURCE__ADD_IRQ, "add_irq") + S_(SECCLASS_RESOURCE, RESOURCE__REMOVE_IRQ, "remove_irq") + S_(SECCLASS_RESOURCE, RESOURCE__ADD_IOPORT, "add_ioport") + S_(SECCLASS_RESOURCE, RESOURCE__REMOVE_IOPORT, "remove_ioport") + S_(SECCLASS_RESOURCE, RESOURCE__ADD_IOMEM, "add_iomem") + S_(SECCLASS_RESOURCE, RESOURCE__REMOVE_IOMEM, "remove_iomem") + S_(SECCLASS_SECURITY, SECURITY__COMPUTE_AV, "compute_av") + S_(SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE, "compute_create") + S_(SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER, "compute_member") + S_(SECCLASS_SECURITY, SECURITY__CHECK_CONTEXT, "check_context") + S_(SECCLASS_SECURITY, SECURITY__LOAD_POLICY, "load_policy") + S_(SECCLASS_SECURITY, SECURITY__COMPUTE_RELABEL, "compute_relabel") + S_(SECCLASS_SECURITY, SECURITY__COMPUTE_USER, "compute_user") + S_(SECCLASS_SECURITY, SECURITY__SETENFORCE, "setenforce") + S_(SECCLASS_SECURITY, SECURITY__SETBOOL, "setbool") + S_(SECCLASS_SECURITY, SECURITY__SETSECPARAM, "setsecparam") diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/av_permissions.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/av_permissions.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,100 @@ +/* This file is automatically generated. Do not edit. 
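The string table above and the bit definitions that follow are two views of the same generated data: every access-vector bit in a class has exactly one S_() entry naming it, which is how denials get printed in human-readable form. A small stand-alone sketch of that bit-to-name scan; the three sample entries are taken from the domain class above, the loop itself is illustrative:

    #include <stdio.h>

    #define SECCLASS_DOMAIN  2
    #define DOMAIN__PAUSE    0x00000002UL
    #define DOMAIN__UNPAUSE  0x00000004UL
    #define DOMAIN__CREATE   0x00000008UL

    static const struct {
        unsigned short tclass;
        unsigned long  value;
        const char    *name;
    } av_perm_to_string[] = {
        { SECCLASS_DOMAIN, DOMAIN__PAUSE,   "pause"   },
        { SECCLASS_DOMAIN, DOMAIN__UNPAUSE, "unpause" },
        { SECCLASS_DOMAIN, DOMAIN__CREATE,  "create"  },
    };

    /* Print the name of every permission bit set in 'av' for class 'tclass'. */
    static void dump_av(unsigned short tclass, unsigned long av)
    {
        size_t i;

        for (i = 0; i < sizeof(av_perm_to_string)/sizeof(av_perm_to_string[0]); i++)
            if (av_perm_to_string[i].tclass == tclass &&
                (av & av_perm_to_string[i].value))
                printf(" %s", av_perm_to_string[i].name);
        printf("\n");
    }

    int main(void)
    {
        dump_av(SECCLASS_DOMAIN, DOMAIN__PAUSE | DOMAIN__CREATE); /* pause create */
        return 0;
    }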
*/ +#define XEN__SCHEDULER 0x00000001UL +#define XEN__SETTIME 0x00000002UL +#define XEN__TBUFCONTROL 0x00000004UL +#define XEN__READCONSOLE 0x00000008UL +#define XEN__CLEARCONSOLE 0x00000010UL +#define XEN__PERFCONTROL 0x00000020UL +#define XEN__MTRR_ADD 0x00000040UL +#define XEN__MTRR_DEL 0x00000080UL +#define XEN__MTRR_READ 0x00000100UL +#define XEN__MICROCODE 0x00000200UL +#define XEN__PHYSINFO 0x00000400UL +#define XEN__QUIRK 0x00000800UL +#define XEN__WRITECONSOLE 0x00001000UL +#define XEN__READAPIC 0x00002000UL +#define XEN__WRITEAPIC 0x00004000UL +#define XEN__PRIVPROFILE 0x00008000UL +#define XEN__NONPRIVPROFILE 0x00010000UL +#define XEN__KEXEC 0x00020000UL + +#define DOMAIN__SETVCPUCONTEXT 0x00000001UL +#define DOMAIN__PAUSE 0x00000002UL +#define DOMAIN__UNPAUSE 0x00000004UL +#define DOMAIN__CREATE 0x00000008UL +#define DOMAIN__MAX_VCPUS 0x00000010UL +#define DOMAIN__DESTROY 0x00000020UL +#define DOMAIN__SETVCPUAFFINITY 0x00000040UL +#define DOMAIN__GETVCPUAFFINITY 0x00000080UL +#define DOMAIN__SCHEDULER 0x00000100UL +#define DOMAIN__GETDOMAININFO 0x00000200UL +#define DOMAIN__GETVCPUINFO 0x00000400UL +#define DOMAIN__GETVCPUCONTEXT 0x00000800UL +#define DOMAIN__SETDOMAINMAXMEM 0x00001000UL +#define DOMAIN__SETDOMAINHANDLE 0x00002000UL +#define DOMAIN__SETDEBUGGING 0x00004000UL +#define DOMAIN__HYPERCALL 0x00008000UL +#define DOMAIN__TRANSITION 0x00010000UL +#define DOMAIN__SETTIME 0x00020000UL + +#define HVM__SETPARAM 0x00000001UL +#define HVM__GETPARAM 0x00000002UL +#define HVM__PCILEVEL 0x00000004UL +#define HVM__IRQLEVEL 0x00000008UL +#define HVM__PCIROUTE 0x00000010UL + +#define EVENT__BIND 0x00000001UL +#define EVENT__CLOSE 0x00000002UL +#define EVENT__SEND 0x00000004UL +#define EVENT__STATUS 0x00000008UL +#define EVENT__UNMASK 0x00000010UL +#define EVENT__NOTIFY 0x00000020UL +#define EVENT__CREATE 0x00000040UL +#define EVENT__ALLOC 0x00000080UL +#define EVENT__VECTOR 0x00000100UL + +#define GRANT__MAP_READ 0x00000001UL +#define GRANT__MAP_WRITE 0x00000002UL +#define GRANT__UNMAP 0x00000004UL +#define GRANT__TRANSFER 0x00000008UL +#define GRANT__SETUP 0x00000010UL +#define GRANT__COPY 0x00000020UL + +#define MMU__MAP_READ 0x00000001UL +#define MMU__MAP_WRITE 0x00000002UL +#define MMU__PAGEINFO 0x00000004UL +#define MMU__PAGELIST 0x00000008UL +#define MMU__ADJUST 0x00000010UL +#define MMU__STAT 0x00000020UL +#define MMU__TRANSLATEGP 0x00000040UL +#define MMU__UPDATEMP 0x00000080UL +#define MMU__PHYSMAP 0x00000100UL +#define MMU__PINPAGE 0x00000200UL +#define MMU__MFNLIST 0x00000400UL +#define MMU__MEMORYMAP 0x00000800UL + +#define SHADOW__DISABLE 0x00000001UL +#define SHADOW__ENABLE 0x00000002UL +#define SHADOW__LOGDIRTY 0x00000004UL + +#define RESOURCE__ADD 0x00000001UL +#define RESOURCE__REMOVE 0x00000002UL +#define RESOURCE__USE 0x00000004UL +#define RESOURCE__ADD_IRQ 0x00000008UL +#define RESOURCE__REMOVE_IRQ 0x00000010UL +#define RESOURCE__ADD_IOPORT 0x00000020UL +#define RESOURCE__REMOVE_IOPORT 0x00000040UL +#define RESOURCE__ADD_IOMEM 0x00000080UL +#define RESOURCE__REMOVE_IOMEM 0x00000100UL + +#define SECURITY__COMPUTE_AV 0x00000001UL +#define SECURITY__COMPUTE_CREATE 0x00000002UL +#define SECURITY__COMPUTE_MEMBER 0x00000004UL +#define SECURITY__CHECK_CONTEXT 0x00000008UL +#define SECURITY__LOAD_POLICY 0x00000010UL +#define SECURITY__COMPUTE_RELABEL 0x00000020UL +#define SECURITY__COMPUTE_USER 0x00000040UL +#define SECURITY__SETENFORCE 0x00000080UL +#define SECURITY__SETBOOL 0x00000100UL +#define SECURITY__SETSECPARAM 0x00000200UL + diff -r 65bfdf932c7e -r 
12923ba929c8 xen/flask/include/avc.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/avc.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,110 @@ +/* + * Access vector cache interface for object managers. + * + * Author : Stephen Smalley, + */ + +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SELINUX_AVC_H_ +#define _SELINUX_AVC_H_ + +#include +#include +#include +#include +#include "flask.h" +#include "av_permissions.h" +#include "security.h" + +#ifdef FLASK_DEVELOP +extern int flask_enforcing; +#else +#define flask_enforcing 1 +#endif + +/* + * An entry in the AVC. + */ +struct avc_entry; + +struct task_struct; +struct vfsmount; +struct dentry; +struct inode; +struct sock; +struct sk_buff; + +/* Auxiliary data to use in generating the audit record. */ +struct avc_audit_data { + char type; +#define AVC_AUDIT_DATA_FS 1 +#define AVC_AUDIT_DATA_NET 2 +#define AVC_AUDIT_DATA_CAP 3 +#define AVC_AUDIT_DATA_IPC 4 + struct domain *d; +}; + +#define v4info fam.v4 +#define v6info fam.v6 + +/* Initialize an AVC audit data structure. */ +#define AVC_AUDIT_DATA_INIT(_d,_t) \ + { memset((_d), 0, sizeof(struct avc_audit_data)); (_d)->type = AVC_AUDIT_DATA_##_t; } + +/* + * AVC statistics + */ +struct avc_cache_stats +{ + unsigned int lookups; + unsigned int hits; + unsigned int misses; + unsigned int allocations; + unsigned int reclaims; + unsigned int frees; +}; + +/* + * AVC operations + */ + +void avc_init(void); + +void avc_audit(u32 ssid, u32 tsid, + u16 tclass, u32 requested, + struct av_decision *avd, int result, struct avc_audit_data *auditdata); + +int avc_has_perm_noaudit(u32 ssid, u32 tsid, + u16 tclass, u32 requested, + struct av_decision *avd); + +int avc_has_perm(u32 ssid, u32 tsid, + u16 tclass, u32 requested, + struct avc_audit_data *auditdata); + +#define AVC_CALLBACK_GRANT 1 +#define AVC_CALLBACK_TRY_REVOKE 2 +#define AVC_CALLBACK_REVOKE 4 +#define AVC_CALLBACK_RESET 8 +#define AVC_CALLBACK_AUDITALLOW_ENABLE 16 +#define AVC_CALLBACK_AUDITALLOW_DISABLE 32 +#define AVC_CALLBACK_AUDITDENY_ENABLE 64 +#define AVC_CALLBACK_AUDITDENY_DISABLE 128 + +int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid, + u16 tclass, u32 perms, + u32 *out_retained), + u32 events, u32 ssid, u32 tsid, + u16 tclass, u32 perms); + +/* Exported to selinuxfs */ +int avc_get_hash_stats(char *page); +extern unsigned int avc_cache_threshold; + +#ifdef FLASK_AVC_STATS +DECLARE_PER_CPU(struct avc_cache_stats, avc_cache_stats); +#endif + +#endif /* _SELINUX_AVC_H_ */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/avc_ss.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/avc_ss.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,14 @@ +/* + * Access vector cache interface for the security server. + * + * Author : Stephen Smalley, + */ +#ifndef _SELINUX_AVC_SS_H_ +#define _SELINUX_AVC_SS_H_ + +#include "flask.h" + +int avc_ss_reset(u32 seqno); + +#endif /* _SELINUX_AVC_SS_H_ */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/class_to_string.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/class_to_string.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,14 @@ +/* This file is automatically generated. Do not edit. 
*/ +/* + * Security object class definitions + */ + S_("null") + S_("xen") + S_("domain") + S_("hvm") + S_("mmu") + S_("resource") + S_("shadow") + S_("event") + S_("grant") + S_("security") diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/common_perm_to_string.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/common_perm_to_string.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,1 @@ +/* This file is automatically generated. Do not edit. */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/conditional.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/conditional.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,22 @@ +/* + * Interface to booleans in the security server. This is exported + * for the selinuxfs. + * + * Author: Karl MacMillan + * + * Copyright (C) 2003 - 2004 Tresys Technology, LLC + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + */ + +#ifndef _SELINUX_CONDITIONAL_H_ +#define _SELINUX_CONDITIONAL_H_ + +int security_get_bools(int *len, char ***names, int **values); + +int security_set_bools(int len, int *values); + +int security_get_bool_value(int bool); + +#endif diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/flask.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/flask.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,36 @@ +/* This file is automatically generated. Do not edit. */ +#ifndef _SELINUX_FLASK_H_ +#define _SELINUX_FLASK_H_ + +/* + * Security object class definitions + */ +#define SECCLASS_XEN 1 +#define SECCLASS_DOMAIN 2 +#define SECCLASS_HVM 3 +#define SECCLASS_MMU 4 +#define SECCLASS_RESOURCE 5 +#define SECCLASS_SHADOW 6 +#define SECCLASS_EVENT 7 +#define SECCLASS_GRANT 8 +#define SECCLASS_SECURITY 9 + +/* + * Security identifier indices for initial entities + */ +#define SECINITSID_XEN 1 +#define SECINITSID_DOM0 2 +#define SECINITSID_DOMU 3 +#define SECINITSID_DOMIO 4 +#define SECINITSID_DOMXEN 5 +#define SECINITSID_UNLABELED 6 +#define SECINITSID_SECURITY 7 +#define SECINITSID_IOPORT 8 +#define SECINITSID_IOMEM 9 +#define SECINITSID_VCPU 10 +#define SECINITSID_VIRQ 11 +#define SECINITSID_PIRQ 12 + +#define SECINITSID_NUM 12 + +#endif diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/initial_sid_to_string.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/initial_sid_to_string.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,18 @@ +/* This file is automatically generated. Do not edit. */ +static char *initial_sid_to_string[] = +{ + "null", + "xen", + "dom0", + "domU", + "domio", + "domxen", + "unlabeled", + "security", + "ioport", + "iomem", + "vcpu", + "virq", + "pirq", +}; + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/objsec.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/objsec.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,33 @@ +/* + * NSA Security-Enhanced Linux (SELinux) security module + * + * This file contains the Flask security data structures for xen objects. + * + * Author(s): George Coker, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, + * as published by the Free Software Foundation. 
+ */ + +#ifndef _SELINUX_OBJSEC_H_ +#define _SELINUX_OBJSEC_H_ + +#include +#include "flask.h" +#include "avc.h" + +struct domain_security_struct { + struct domain *d; /* back pointer to domain object */ + u32 sid; /* current SID */ + u32 create_sid; +}; + +struct evtchn_security_struct { + struct evtchn *chn; /* back pointer to evtchn object */ + u32 sid; /* current SID */ +}; + +extern unsigned int selinux_checkreqprot; + +#endif /* _SELINUX_OBJSEC_H_ */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/include/security.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/include/security.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,84 @@ +/* + * Security server interface. + * + * Author : Stephen Smalley, + * + */ + +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SELINUX_SECURITY_H_ +#define _SELINUX_SECURITY_H_ + +#include "flask.h" + +#define SECSID_NULL 0x00000000 /* unspecified SID */ +#define SECSID_WILD 0xffffffff /* wildcard SID */ +#define SECCLASS_NULL 0x0000 /* no class */ + +#define SELINUX_MAGIC 0xf97cff8c + +/* Identify specific policy version changes */ +#define POLICYDB_VERSION_BASE 15 +#define POLICYDB_VERSION_BOOL 16 +#define POLICYDB_VERSION_IPV6 17 +#define POLICYDB_VERSION_NLCLASS 18 +#define POLICYDB_VERSION_VALIDATETRANS 19 +#define POLICYDB_VERSION_MLS 19 +#define POLICYDB_VERSION_AVTAB 20 + +/* Range of policy versions we understand*/ +#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE +#define POLICYDB_VERSION_MAX POLICYDB_VERSION_AVTAB + +#ifdef FLASK_BOOTPARAM +extern int flask_enabled; +#else +#define flask_enabled 1 +#endif + +extern int flask_mls_enabled; + +int security_load_policy(void * data, size_t len); + +struct av_decision { + u32 allowed; + u32 decided; + u32 auditallow; + u32 auditdeny; + u32 seqno; +}; + +int security_compute_av(u32 ssid, u32 tsid, u16 tclass, u32 requested, + struct av_decision *avd); + +int security_transition_sid(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid); + +int security_member_sid(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid); + +int security_change_sid(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid); + +int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len); + +int security_context_to_sid(char *scontext, u32 scontext_len, u32 *out_sid); + +int security_context_to_sid_default(char *scontext, u32 scontext_len, + u32 *out_sid, u32 def_sid); + +int security_get_user_sids(u32 callsid, char *username, u32 **sids, u32 *nel); + +int security_pirq_sid(int pirq, u32 *out_sid); + +int security_virq_sid(int virq, u32 *out_sid); + +int security_vcpu_sid(int vcpu, u32 *out_sid); + +int security_iomem_sid(unsigned long, u32 *out_sid); + +int security_ioport_sid(u32 ioport, u32 *out_sid); + +int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, + u16 tclass); + +#endif /* _SELINUX_SECURITY_H_ */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/kutil.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/kutil.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,84 @@ + +/* These inline functions were taken from rcupdate.h in Linux 2.6.13.4.*/ + +/* + * Read-Copy Update mechanism for mutual exclusion + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
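Taken together, objsec.h and security.h above give a hook everything it needs: the SID stored in each domain_security_struct and the SID-based query interface. A schematic, self-contained sketch of how a hook typically combines them with avc_has_perm(); the types and the avc_has_perm() stub below are simplified stand-ins, and domain_has_perm() is an illustrative composite rather than a verbatim function from this patch:

    #include <stdio.h>

    #define SECCLASS_DOMAIN  2
    #define DOMAIN__PAUSE    0x00000002UL

    struct domain_security_struct { unsigned int sid; };
    struct domain { struct domain_security_struct *ssid; };

    /* Stand-in for the real AVC entry point, which also takes audit data
     * and consults the loaded policy. 0 means the access is allowed. */
    static int avc_has_perm(unsigned int ssid, unsigned int tsid,
                            unsigned short tclass, unsigned long requested,
                            void *auditdata)
    {
        (void)auditdata;
        printf("avc: %u -> %u class %u perms %#lx\n",
               ssid, tsid, (unsigned)tclass, requested);
        return 0;
    }

    /* Typical hook shape: read the SIDs off both objects, then ask the AVC. */
    static int domain_has_perm(struct domain *src, struct domain *tgt,
                               unsigned short tclass, unsigned long perms)
    {
        return avc_has_perm(src->ssid->sid, tgt->ssid->sid, tclass, perms, NULL);
    }

    int main(void)
    {
        struct domain_security_struct s0 = { 2 }, s1 = { 3 }; /* dom0, domU initial SIDs */
        struct domain d0 = { &s0 }, d1 = { &s1 };
        return domain_has_perm(&d0, &d1, SECCLASS_DOMAIN, DOMAIN__PAUSE);
    }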
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2001 + * + * Author: Dipankar Sarma + * + * Based on the original work by Paul McKenney + * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. + * Papers: + * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf + * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) + * + * For detailed explanation of Read-Copy Update mechanism see - + * http://lse.sourceforge.net/locking/rcupdate.html + * + */ + +struct rcu_head { + struct rcu_head *next; + void (*func)(struct rcu_head *head); +}; + +#define rcu_dereference(p) ({ \ + typeof(p) _________p1 = p; \ + (_________p1); \ + }) + + + +#define list_for_each_entry_rcu(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = rcu_dereference(list_entry(pos->member.next, \ + typeof(*pos), member))) + +static inline void list_replace_rcu(struct list_head *old, struct list_head *new){ + new->next = old->next; + new->prev = old->prev; + smp_wmb(); + new->next->prev = new; + new->prev->next = new; +} + +static inline void list_del_rcu(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + +static inline void __list_add_rcu(struct list_head * new, + struct list_head * prev, struct list_head * next) +{ + new->next = next; + new->prev = prev; + smp_wmb(); + next->prev = new; + prev->next = new; +} + +static inline void list_add_rcu(struct list_head *new, struct list_head *head) +{ + __list_add_rcu(new, head, head->next); +} + +#define INIT_RCU_HEAD(ptr) do { \ + (ptr)->next = NULL; (ptr)->func = NULL; \ +} while (0) diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/Makefile --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/Makefile Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,11 @@ +obj-y += ebitmap.o +obj-y += hashtab.o +obj-y += symtab.o +obj-y += sidtab.o +obj-y += avtab.o +obj-y += policydb.o +obj-y += services.o +obj-y += conditional.o +obj-y += mls.o + +CFLAGS += -I../include diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/avtab.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/avtab.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,444 @@ +/* + * Implementation of the access vector table type. + * + * Author : Stephen Smalley, + */ + +/* Updated: Frank Mayer and Karl MacMillan + * + * Added conditional policy language extensions + * + * Copyright (C) 2003 Tresys Technology, LLC + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. 
+ */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include + +#include "avtab.h" +#include "policydb.h" +#include "util_endian.h" + +#define AVTAB_HASH(keyp) \ +((keyp->target_class + \ + (keyp->target_type << 2) + \ + (keyp->source_type << 9)) & \ + AVTAB_HASH_MASK) + +static struct avtab_node* +avtab_insert_node(struct avtab *h, int hvalue, + struct avtab_node * prev, struct avtab_node * cur, + struct avtab_key *key, struct avtab_datum *datum) +{ + struct avtab_node * newnode; + newnode = xmalloc(struct avtab_node); + if (newnode == NULL) + return NULL; + memset(newnode, 0, sizeof(struct avtab_node)); + newnode->key = *key; + newnode->datum = *datum; + if (prev) { + newnode->next = prev->next; + prev->next = newnode; + } else { + newnode->next = h->htable[hvalue]; + h->htable[hvalue] = newnode; + } + + h->nel++; + return newnode; +} + +static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_datum *datum) +{ + int hvalue; + struct avtab_node *prev, *cur, *newnode; + u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + + if (!h) + return -EINVAL; + + hvalue = AVTAB_HASH(key); + for (prev = NULL, cur = h->htable[hvalue]; + cur; + prev = cur, cur = cur->next) { + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified)) + return -EEXIST; + if (key->source_type < cur->key.source_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type < cur->key.target_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class) + break; + } + + newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum); + if(!newnode) + return -ENOMEM; + + return 0; +} + +/* Unlike avtab_insert(), this function allow multiple insertions of the same + * key/specified mask into the table, as needed by the conditional avtab. + * It also returns a pointer to the node inserted. 
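AVTAB_HASH above folds the class and both type values into a bucket index, and the insert paths keep every chain ordered by (source type, target type, class) so searches can stop as soon as they pass the slot a key would occupy. A stand-alone illustration of the bucket calculation; AVTAB_HASH_MASK comes from avtab.h later in this patch, and the sample key values are arbitrary:

    #include <stdio.h>

    #define AVTAB_HASH_BITS 15
    #define AVTAB_HASH_MASK ((1 << AVTAB_HASH_BITS) - 1)

    struct avtab_key {
        unsigned short source_type;
        unsigned short target_type;
        unsigned short target_class;
    };

    /* Same mixing as the AVTAB_HASH() macro above. */
    static int avtab_hash(const struct avtab_key *k)
    {
        return (k->target_class + (k->target_type << 2) +
                (k->source_type << 9)) & AVTAB_HASH_MASK;
    }

    int main(void)
    {
        struct avtab_key k = { 12, 34, 2 };  /* arbitrary source, target, class */
        printf("key hashes to bucket %d of %d\n",
               avtab_hash(&k), AVTAB_HASH_MASK + 1);
        return 0;
    }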
+ */ +struct avtab_node * +avtab_insert_nonunique(struct avtab * h, struct avtab_key * key, struct avtab_datum * datum) +{ + int hvalue; + struct avtab_node *prev, *cur, *newnode; + u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + + if (!h) + return NULL; + hvalue = AVTAB_HASH(key); + for (prev = NULL, cur = h->htable[hvalue]; + cur; + prev = cur, cur = cur->next) { + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified)) + break; + if (key->source_type < cur->key.source_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type < cur->key.target_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class) + break; + } + newnode = avtab_insert_node(h, hvalue, prev, cur, key, datum); + + return newnode; +} + +struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key) +{ + int hvalue; + struct avtab_node *cur; + u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + + if (!h) + return NULL; + + hvalue = AVTAB_HASH(key); + for (cur = h->htable[hvalue]; cur; cur = cur->next) { + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified)) + return &cur->datum; + + if (key->source_type < cur->key.source_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type < cur->key.target_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class) + break; + } + + return NULL; +} + +/* This search function returns a node pointer, and can be used in + * conjunction with avtab_search_next_node() + */ +struct avtab_node* +avtab_search_node(struct avtab *h, struct avtab_key *key) +{ + int hvalue; + struct avtab_node *cur; + u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + + if (!h) + return NULL; + + hvalue = AVTAB_HASH(key); + for (cur = h->htable[hvalue]; cur; cur = cur->next) { + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class == cur->key.target_class && + (specified & cur->key.specified)) + return cur; + + if (key->source_type < cur->key.source_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type < cur->key.target_type) + break; + if (key->source_type == cur->key.source_type && + key->target_type == cur->key.target_type && + key->target_class < cur->key.target_class) + break; + } + return NULL; +} + +struct avtab_node* +avtab_search_node_next(struct avtab_node *node, int specified) +{ + struct avtab_node *cur; + + if (!node) + return NULL; + + specified &= ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD); + for (cur = node->next; cur; cur = cur->next) { + if (node->key.source_type == cur->key.source_type && + node->key.target_type == cur->key.target_type && + node->key.target_class == cur->key.target_class && + (specified & cur->key.specified)) + return cur; + + if (node->key.source_type < cur->key.source_type) + break; + if (node->key.source_type == cur->key.source_type && + node->key.target_type < cur->key.target_type) + break; + if (node->key.source_type == cur->key.source_type && + node->key.target_type == cur->key.target_type && + node->key.target_class < 
cur->key.target_class) + break; + } + return NULL; +} + +void avtab_destroy(struct avtab *h) +{ + int i; + struct avtab_node *cur, *temp; + + if (!h || !h->htable) + return; + + for (i = 0; i < AVTAB_SIZE; i++) { + cur = h->htable[i]; + while (cur != NULL) { + temp = cur; + cur = cur->next; + xfree(temp); + } + h->htable[i] = NULL; + } + xfree(h->htable); + h->htable = NULL; +} + + +int avtab_init(struct avtab *h) +{ + int i; + + h->htable = (void *)xmalloc_array(struct avtab_node, AVTAB_SIZE); + if (!h->htable) + return -ENOMEM; + for (i = 0; i < AVTAB_SIZE; i++) + h->htable[i] = NULL; + h->nel = 0; + return 0; +} + +void avtab_hash_eval(struct avtab *h, char *tag) +{ + int i, chain_len, slots_used, max_chain_len; + struct avtab_node *cur; + + slots_used = 0; + max_chain_len = 0; + for (i = 0; i < AVTAB_SIZE; i++) { + cur = h->htable[i]; + if (cur) { + slots_used++; + chain_len = 0; + while (cur) { + chain_len++; + cur = cur->next; + } + + if (chain_len > max_chain_len) + max_chain_len = chain_len; + } + } + + printk(KERN_INFO "%s: %d entries and %d/%d buckets used, longest " + "chain length %d\n", tag, h->nel, slots_used, AVTAB_SIZE, + max_chain_len); +} + +static uint16_t spec_order[] = { + AVTAB_ALLOWED, + AVTAB_AUDITDENY, + AVTAB_AUDITALLOW, + AVTAB_TRANSITION, + AVTAB_CHANGE, + AVTAB_MEMBER +}; + +int avtab_read_item(void *fp, u32 vers, struct avtab *a, + int (*insertf)(struct avtab *a, struct avtab_key *k, + struct avtab_datum *d, void *p), + void *p) +{ + __le16 buf16[4]; + u16 enabled; + __le32 buf32[7]; + u32 items, items2, val; + struct avtab_key key; + struct avtab_datum datum; + int i, rc; + + memset(&key, 0, sizeof(struct avtab_key)); + memset(&datum, 0, sizeof(struct avtab_datum)); + + if (vers < POLICYDB_VERSION_AVTAB) { + rc = next_entry(buf32, fp, sizeof(u32)); + if (rc < 0) { + printk(KERN_ERR "security: avtab: truncated entry\n"); + return -1; + } + items2 = le32_to_cpu(buf32[0]); + if (items2 > ARRAY_SIZE(buf32)) { + printk(KERN_ERR "security: avtab: entry overflow\n"); + return -1; + + } + rc = next_entry(buf32, fp, sizeof(u32)*items2); + if (rc < 0) { + printk(KERN_ERR "security: avtab: truncated entry\n"); + return -1; + } + items = 0; + + val = le32_to_cpu(buf32[items++]); + key.source_type = (u16)val; + if (key.source_type != val) { + printk("security: avtab: truncated source type\n"); + return -1; + } + val = le32_to_cpu(buf32[items++]); + key.target_type = (u16)val; + if (key.target_type != val) { + printk("security: avtab: truncated target type\n"); + return -1; + } + val = le32_to_cpu(buf32[items++]); + key.target_class = (u16)val; + if (key.target_class != val) { + printk("security: avtab: truncated target class\n"); + return -1; + } + + val = le32_to_cpu(buf32[items++]); + enabled = (val & AVTAB_ENABLED_OLD) ? 
AVTAB_ENABLED : 0; + + if (!(val & (AVTAB_AV | AVTAB_TYPE))) { + printk("security: avtab: null entry\n"); + return -1; + } + if ((val & AVTAB_AV) && + (val & AVTAB_TYPE)) { + printk("security: avtab: entry has both access vectors and types\n"); + return -1; + } + + for (i = 0; i < sizeof(spec_order)/sizeof(u16); i++) { + if (val & spec_order[i]) { + key.specified = spec_order[i] | enabled; + datum.data = le32_to_cpu(buf32[items++]); + rc = insertf(a, &key, &datum, p); + if (rc) return rc; + } + } + + if (items != items2) { + printk("security: avtab: entry only had %d items, expected %d\n", items2, items); + return -1; + } + return 0; + } + + rc = next_entry(buf16, fp, sizeof(u16)*4); + if (rc < 0) { + printk("security: avtab: truncated entry\n"); + return -1; + } + + items = 0; + key.source_type = le16_to_cpu(buf16[items++]); + key.target_type = le16_to_cpu(buf16[items++]); + key.target_class = le16_to_cpu(buf16[items++]); + key.specified = le16_to_cpu(buf16[items++]); + + rc = next_entry(buf32, fp, sizeof(u32)); + if (rc < 0) { + printk("security: avtab: truncated entry\n"); + return -1; + } + datum.data = le32_to_cpu(*buf32); + return insertf(a, &key, &datum, p); +} + +static int avtab_insertf(struct avtab *a, struct avtab_key *k, + struct avtab_datum *d, void *p) +{ + return avtab_insert(a, k, d); +} + +int avtab_read(struct avtab *a, void *fp, u32 vers) +{ + int rc; + __le32 buf[1]; + u32 nel, i; + + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) { + printk(KERN_ERR "security: avtab: truncated table\n"); + goto bad; + } + nel = le32_to_cpu(buf[0]); + if (!nel) { + printk(KERN_ERR "security: avtab: table is empty\n"); + rc = -EINVAL; + goto bad; + } + for (i = 0; i < nel; i++) { + rc = avtab_read_item(fp,vers, a, avtab_insertf, NULL); + if (rc) { + if (rc == -ENOMEM) + printk(KERN_ERR "security: avtab: out of memory\n"); + else if (rc == -EEXIST) + printk(KERN_ERR "security: avtab: duplicate entry\n"); + else + rc = -EINVAL; + goto bad; + } + } + + rc = 0; +out: + return rc; + +bad: + avtab_destroy(a); + goto out; +} + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/avtab.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/avtab.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,84 @@ +/* + * An access vector table (avtab) is a hash table + * of access vectors and transition types indexed + * by a type pair and a class. An access vector + * table is used to represent the type enforcement + * tables. + * + * Author : Stephen Smalley, + */ + +/* Updated: Frank Mayer and Karl MacMillan + * + * Added conditional policy language extensions + * + * Copyright (C) 2003 Tresys Technology, LLC + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. 
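For policies at POLICYDB_VERSION_AVTAB or newer, avtab_read_item() above consumes each rule as four little-endian 16-bit words (source type, target type, class, specified) followed by one 32-bit access vector; older policies use the variable-length 32-bit layout handled in the first branch. A stand-alone sketch of decoding one new-format record from a byte buffer; the sample bytes are made up, only the layout mirrors the code above:

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t get_le16(const unsigned char *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    static uint32_t get_le32(const unsigned char *p)
    {
        return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
        /* u16 source, target, class, specified, then u32 datum (all LE). */
        const unsigned char rec[12] = {
            0x05, 0x00,             /* source_type  = 5             */
            0x07, 0x00,             /* target_type  = 7             */
            0x02, 0x00,             /* target_class = 2 (domain)    */
            0x01, 0x00,             /* specified    = AVTAB_ALLOWED */
            0x0e, 0x00, 0x00, 0x00  /* allowed access vector = 0xe  */
        };

        printf("allow %u -> %u class %u perms %#x (specified %#x)\n",
               get_le16(rec), get_le16(rec + 2), get_le16(rec + 4),
               (unsigned)get_le32(rec + 8), (unsigned)get_le16(rec + 6));
        return 0;
    }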
+ */ + +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SS_AVTAB_H_ +#define _SS_AVTAB_H_ + +struct avtab_key { + u16 source_type; /* source type */ + u16 target_type; /* target type */ + u16 target_class; /* target object class */ +#define AVTAB_ALLOWED 1 +#define AVTAB_AUDITALLOW 2 +#define AVTAB_AUDITDENY 4 +#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY) +#define AVTAB_TRANSITION 16 +#define AVTAB_MEMBER 32 +#define AVTAB_CHANGE 64 +#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE) +#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */ +#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */ + u16 specified; /* what field is specified */ +}; + +struct avtab_datum { + u32 data; /* access vector or type value */ +}; + +struct avtab_node { + struct avtab_key key; + struct avtab_datum datum; + struct avtab_node *next; +}; + +struct avtab { + struct avtab_node **htable; + u32 nel; /* number of elements */ +}; + +int avtab_init(struct avtab *); +struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k); +void avtab_destroy(struct avtab *h); +void avtab_hash_eval(struct avtab *h, char *tag); + +int avtab_read_item(void *fp, uint32_t vers, struct avtab *a, + int (*insert)(struct avtab *a, struct avtab_key *k, + struct avtab_datum *d, void *p), + void *p); + +int avtab_read(struct avtab *a, void *fp, u32 vers); + +struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, + struct avtab_datum *datum); + +struct avtab_node *avtab_search_node(struct avtab *h, struct avtab_key *key); + +struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified); + +#define AVTAB_HASH_BITS 15 +#define AVTAB_HASH_BUCKETS (1 << AVTAB_HASH_BITS) +#define AVTAB_HASH_MASK (AVTAB_HASH_BUCKETS-1) + +#define AVTAB_SIZE AVTAB_HASH_BUCKETS + +#endif /* _SS_AVTAB_H_ */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/conditional.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/conditional.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,515 @@ +/* Authors: Karl MacMillan + * Frank Mayer + * + * Copyright (C) 2003 - 2004 Tresys Technology, LLC + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include + +#include "security.h" +#include "conditional.h" +#include "util_endian.h" + +/* + * cond_evaluate_expr evaluates a conditional expr + * in reverse polish notation. It returns true (1), false (0), + * or undefined (-1). Undefined occurs when the expression + * exceeds the stack depth of COND_EXPR_MAXDEPTH. 
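cond_evaluate_expr(), described here and defined just below, is a small stack machine: a COND_BOOL token pushes the boolean's current value, COND_NOT rewrites the top of the stack, and each binary operator pops two values and pushes one. A stand-alone miniature of the same walk over a plain int array of boolean states; the COND_* numbering matches conditional.h later in this patch, and the remaining operators (XOR, EQ, NEQ) follow the same pop-two-push-one pattern:

    #include <stdio.h>

    #define COND_BOOL          1
    #define COND_NOT           2
    #define COND_OR            3
    #define COND_AND           4
    #define COND_EXPR_MAXDEPTH 10

    struct expr { int type; int bool_idx; };   /* one token, in RPN order */

    /* Evaluate the expression over 'bools'; returns 1, 0, or -1 on error. */
    static int eval(const struct expr *e, int n, const int *bools)
    {
        int s[COND_EXPR_MAXDEPTH], sp = -1, i;

        for (i = 0; i < n; i++) {
            switch (e[i].type) {
            case COND_BOOL:
                if (sp == COND_EXPR_MAXDEPTH - 1) return -1;
                s[++sp] = bools[e[i].bool_idx];
                break;
            case COND_NOT:
                if (sp < 0) return -1;
                s[sp] = !s[sp];
                break;
            case COND_OR:
                if (sp < 1) return -1;
                sp--; s[sp] |= s[sp + 1];
                break;
            case COND_AND:
                if (sp < 1) return -1;
                sp--; s[sp] &= s[sp + 1];
                break;
            default:
                return -1;
            }
        }
        return sp == 0 ? s[0] : -1;
    }

    int main(void)
    {
        int bools[2] = { 1, 0 };
        /* "bool0 && !bool1" in reverse polish notation: bool0 bool1 ! && */
        struct expr e[] = { { COND_BOOL, 0 }, { COND_BOOL, 1 },
                            { COND_NOT, 0 }, { COND_AND, 0 } };
        printf("result: %d\n",
               eval(e, (int)(sizeof(e)/sizeof(e[0])), bools));   /* prints 1 */
        return 0;
    }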
+ */ +static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr) +{ + + struct cond_expr *cur; + int s[COND_EXPR_MAXDEPTH]; + int sp = -1; + + for (cur = expr; cur != NULL; cur = cur->next) { + switch (cur->expr_type) { + case COND_BOOL: + if (sp == (COND_EXPR_MAXDEPTH - 1)) + return -1; + sp++; + s[sp] = p->bool_val_to_struct[cur->bool - 1]->state; + break; + case COND_NOT: + if (sp < 0) + return -1; + s[sp] = !s[sp]; + break; + case COND_OR: + if (sp < 1) + return -1; + sp--; + s[sp] |= s[sp + 1]; + break; + case COND_AND: + if (sp < 1) + return -1; + sp--; + s[sp] &= s[sp + 1]; + break; + case COND_XOR: + if (sp < 1) + return -1; + sp--; + s[sp] ^= s[sp + 1]; + break; + case COND_EQ: + if (sp < 1) + return -1; + sp--; + s[sp] = (s[sp] == s[sp + 1]); + break; + case COND_NEQ: + if (sp < 1) + return -1; + sp--; + s[sp] = (s[sp] != s[sp + 1]); + break; + default: + return -1; + } + } + return s[0]; +} + +/* + * evaluate_cond_node evaluates the conditional stored in + * a struct cond_node and if the result is different than the + * current state of the node it sets the rules in the true/false + * list appropriately. If the result of the expression is undefined + * all of the rules are disabled for safety. + */ +int evaluate_cond_node(struct policydb *p, struct cond_node *node) +{ + int new_state; + struct cond_av_list* cur; + + new_state = cond_evaluate_expr(p, node->expr); + if (new_state != node->cur_state) { + node->cur_state = new_state; + if (new_state == -1) + printk(KERN_ERR "security: expression result was undefined - disabling all rules.\n"); + /* turn the rules on or off */ + for (cur = node->true_list; cur != NULL; cur = cur->next) { + if (new_state <= 0) { + cur->node->key.specified &= ~AVTAB_ENABLED; + } else { + cur->node->key.specified |= AVTAB_ENABLED; + } + } + + for (cur = node->false_list; cur != NULL; cur = cur->next) { + /* -1 or 1 */ + if (new_state) { + cur->node->key.specified &= ~AVTAB_ENABLED; + } else { + cur->node->key.specified |= AVTAB_ENABLED; + } + } + } + return 0; +} + +int cond_policydb_init(struct policydb *p) +{ + p->bool_val_to_struct = NULL; + p->cond_list = NULL; + if (avtab_init(&p->te_cond_avtab)) + return -1; + + return 0; +} + +static void cond_av_list_destroy(struct cond_av_list *list) +{ + struct cond_av_list *cur, *next; + for (cur = list; cur != NULL; cur = next) { + next = cur->next; + /* the avtab_ptr_t node is destroy by the avtab */ + xfree(cur); + } +} + +static void cond_node_destroy(struct cond_node *node) +{ + struct cond_expr *cur_expr, *next_expr; + + for (cur_expr = node->expr; cur_expr != NULL; cur_expr = next_expr) { + next_expr = cur_expr->next; + xfree(cur_expr); + } + cond_av_list_destroy(node->true_list); + cond_av_list_destroy(node->false_list); + xfree(node); +} + +static void cond_list_destroy(struct cond_node *list) +{ + struct cond_node *next, *cur; + + if (list == NULL) + return; + + for (cur = list; cur != NULL; cur = next) { + next = cur->next; + cond_node_destroy(cur); + } +} + +void cond_policydb_destroy(struct policydb *p) +{ + xfree(p->bool_val_to_struct); + avtab_destroy(&p->te_cond_avtab); + cond_list_destroy(p->cond_list); +} + +int cond_init_bool_indexes(struct policydb *p) +{ + xfree(p->bool_val_to_struct); + p->bool_val_to_struct = (struct cond_bool_datum**) + xmalloc_array(struct cond_bool_datum*, p->p_bools.nprim); + if (!p->bool_val_to_struct) + return -1; + return 0; +} + +int cond_destroy_bool(void *key, void *datum, void *p) +{ + xfree(key); + xfree(datum); + return 0; +} + +int 
cond_index_bool(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct cond_bool_datum *booldatum; + + booldatum = datum; + p = datap; + + if (!booldatum->value || booldatum->value > p->p_bools.nprim) + return -EINVAL; + + p->p_bool_val_to_name[booldatum->value - 1] = key; + p->bool_val_to_struct[booldatum->value -1] = booldatum; + + return 0; +} + +static int bool_isvalid(struct cond_bool_datum *b) +{ + if (!(b->state == 0 || b->state == 1)) + return 0; + return 1; +} + +int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct cond_bool_datum *booldatum; + __le32 buf[3]; + u32 len; + int rc; + + booldatum = xmalloc(struct cond_bool_datum); + if (!booldatum) + return -1; + memset(booldatum, 0, sizeof(struct cond_bool_datum)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto err; + + booldatum->value = le32_to_cpu(buf[0]); + booldatum->state = le32_to_cpu(buf[1]); + + if (!bool_isvalid(booldatum)) + goto err; + + len = le32_to_cpu(buf[2]); + + key = xmalloc_array(char, len + 1); + if (!key) + goto err; + rc = next_entry(key, fp, len); + if (rc < 0) + goto err; + key[len] = 0; + if (hashtab_insert(h, key, booldatum)) + goto err; + + return 0; +err: + cond_destroy_bool(key, booldatum, NULL); + return -1; +} + +struct cond_insertf_data +{ + struct policydb *p; + struct cond_av_list *other; + struct cond_av_list *head; + struct cond_av_list *tail; +}; + +static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum *d, void *ptr) +{ + struct cond_insertf_data *data = ptr; + struct policydb *p = data->p; + struct cond_av_list *other = data->other, *list, *cur; + struct avtab_node *node_ptr; + u8 found; + + + /* + * For type rules we have to make certain there aren't any + * conflicting rules by searching the te_avtab and the + * cond_te_avtab. + */ + if (k->specified & AVTAB_TYPE) { + if (avtab_search(&p->te_avtab, k)) { + printk("security: type rule already exists outside of a conditional."); + goto err; + } + /* + * If we are reading the false list other will be a pointer to + * the true list. We can have duplicate entries if there is only + * 1 other entry and it is in our true list. + * + * If we are reading the true list (other == NULL) there shouldn't + * be any other entries. 
+ */ + if (other) { + node_ptr = avtab_search_node(&p->te_cond_avtab, k); + if (node_ptr) { + if (avtab_search_node_next(node_ptr, k->specified)) { + printk("security: too many conflicting type rules."); + goto err; + } + found = 0; + for (cur = other; cur != NULL; cur = cur->next) { + if (cur->node == node_ptr) { + found = 1; + break; + } + } + if (!found) { + printk("security: conflicting type rules.\n"); + goto err; + } + } + } else { + if (avtab_search(&p->te_cond_avtab, k)) { + printk("security: conflicting type rules when adding type rule for true.\n"); + goto err; + } + } + } + + node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d); + if (!node_ptr) { + printk("security: could not insert rule."); + goto err; + } + + list = xmalloc(struct cond_av_list); + if (!list) + goto err; + memset(list, 0, sizeof(*list)); + + list->node = node_ptr; + if (!data->head) + data->head = list; + else + data->tail->next = list; + data->tail = list; + return 0; + +err: + cond_av_list_destroy(data->head); + data->head = NULL; + return -1; +} + +static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other) +{ + int i, rc; + __le32 buf[1]; + u32 len; + struct cond_insertf_data data; + + *ret_list = NULL; + + len = 0; + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + return -1; + + len = le32_to_cpu(buf[0]); + if (len == 0) { + return 0; + } + + data.p = p; + data.other = other; + data.head = NULL; + data.tail = NULL; + for (i = 0; i < len; i++) { + rc = avtab_read_item(fp, p->policyvers, &p->te_cond_avtab, cond_insertf, &data); + if (rc) + return rc; + + } + + *ret_list = data.head; + return 0; +} + +static int expr_isvalid(struct policydb *p, struct cond_expr *expr) +{ + if (expr->expr_type <= 0 || expr->expr_type > COND_LAST) { + printk("security: conditional expressions uses unknown operator.\n"); + return 0; + } + + if (expr->bool > p->p_bools.nprim) { + printk("security: conditional expressions uses unknown bool.\n"); + return 0; + } + return 1; +} + +static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp) +{ + __le32 buf[2]; + u32 len, i; + int rc; + struct cond_expr *expr = NULL, *last = NULL; + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + return -1; + + node->cur_state = le32_to_cpu(buf[0]); + + len = 0; + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + return -1; + + /* expr */ + len = le32_to_cpu(buf[0]); + + for (i = 0; i < len; i++ ) { + rc = next_entry(buf, fp, sizeof(u32) * 2); + if (rc < 0) + goto err; + + expr = xmalloc(struct cond_expr); + if (!expr) { + goto err; + } + memset(expr, 0, sizeof(struct cond_expr)); + + expr->expr_type = le32_to_cpu(buf[0]); + expr->bool = le32_to_cpu(buf[1]); + + if (!expr_isvalid(p, expr)) { + xfree(expr); + goto err; + } + + if (i == 0) { + node->expr = expr; + } else { + last->next = expr; + } + last = expr; + } + + if (cond_read_av_list(p, fp, &node->true_list, NULL) != 0) + goto err; + if (cond_read_av_list(p, fp, &node->false_list, node->true_list) != 0) + goto err; + return 0; +err: + cond_node_destroy(node); + return -1; +} + +int cond_read_list(struct policydb *p, void *fp) +{ + struct cond_node *node, *last = NULL; + __le32 buf[1]; + u32 i, len; + int rc; + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + return -1; + + len = le32_to_cpu(buf[0]); + + for (i = 0; i < len; i++) { + node = xmalloc(struct cond_node); + if (!node) + goto err; + memset(node, 0, sizeof(struct cond_node)); + + if (cond_read_node(p, node, fp) != 0) + goto 
err; + + if (i == 0) { + p->cond_list = node; + } else { + last->next = node; + } + last = node; + } + return 0; +err: + cond_list_destroy(p->cond_list); + p->cond_list = NULL; + return -1; +} + +/* Determine whether additional permissions are granted by the conditional + * av table, and if so, add them to the result + */ +void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd) +{ + struct avtab_node *node; + + if(!ctab || !key || !avd) + return; + + for(node = avtab_search_node(ctab, key); node != NULL; + node = avtab_search_node_next(node, key->specified)) { + if ( (u16) (AVTAB_ALLOWED|AVTAB_ENABLED) == + (node->key.specified & (AVTAB_ALLOWED|AVTAB_ENABLED))) + avd->allowed |= node->datum.data; + if ( (u16) (AVTAB_AUDITDENY|AVTAB_ENABLED) == + (node->key.specified & (AVTAB_AUDITDENY|AVTAB_ENABLED))) + /* Since a '0' in an auditdeny mask represents a + * permission we do NOT want to audit (dontaudit), we use + * the '&' operand to ensure that all '0's in the mask + * are retained (much unlike the allow and auditallow cases). + */ + avd->auditdeny &= node->datum.data; + if ( (u16) (AVTAB_AUDITALLOW|AVTAB_ENABLED) == + (node->key.specified & (AVTAB_AUDITALLOW|AVTAB_ENABLED))) + avd->auditallow |= node->datum.data; + } + return; +} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/conditional.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/conditional.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,77 @@ +/* Authors: Karl MacMillan + * Frank Mayer + * + * Copyright (C) 2003 - 2004 Tresys Technology, LLC + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + */ + +#ifndef _CONDITIONAL_H_ +#define _CONDITIONAL_H_ + +#include "avtab.h" +#include "symtab.h" +#include "policydb.h" + +#define COND_EXPR_MAXDEPTH 10 + +/* + * A conditional expression is a list of operators and operands + * in reverse polish notation. + */ +struct cond_expr { +#define COND_BOOL 1 /* plain bool */ +#define COND_NOT 2 /* !bool */ +#define COND_OR 3 /* bool || bool */ +#define COND_AND 4 /* bool && bool */ +#define COND_XOR 5 /* bool ^ bool */ +#define COND_EQ 6 /* bool == bool */ +#define COND_NEQ 7 /* bool != bool */ +#define COND_LAST 8 + __u32 expr_type; + __u32 bool; + struct cond_expr *next; +}; + +/* + * Each cond_node contains a list of rules to be enabled/disabled + * depending on the current value of the conditional expression. This + * struct is for that list. + */ +struct cond_av_list { + struct avtab_node *node; + struct cond_av_list *next; +}; + +/* + * A cond node represents a conditional block in a policy. It + * contains a conditional expression, the current state of the expression, + * two lists of rules to enable/disable depending on the value of the + * expression (the true list corresponds to if and the false list corresponds + * to else).. 
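The merge rules in cond_compute_av() above are easiest to see with concrete bits: the allowed and auditallow vectors accumulate with |=, but auditdeny accumulates with &=, because a clear bit there means "do not audit this denial" (dontaudit) and must survive the merge. A short worked illustration with arbitrary masks:

    #include <stdio.h>

    int main(void)
    {
        /* Decision so far, e.g. from the unconditional avtab. */
        unsigned int allowed   = 0x00000003;
        unsigned int auditdeny = 0xffffffff;         /* audit every denial */

        /* Contributions from enabled conditional rules. */
        unsigned int cond_allowed   = 0x00000008;
        unsigned int cond_auditdeny = ~0x00000004u;  /* dontaudit bit 0x4 */

        allowed   |= cond_allowed;     /* extra permissions are added      */
        auditdeny &= cond_auditdeny;   /* cleared (dontaudit) bits persist */

        printf("allowed=%#010x auditdeny=%#010x\n", allowed, auditdeny);
        /* -> allowed=0x0000000b auditdeny=0xfffffffb */
        return 0;
    }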
+ */ +struct cond_node { + int cur_state; + struct cond_expr *expr; + struct cond_av_list *true_list; + struct cond_av_list *false_list; + struct cond_node *next; +}; + +int cond_policydb_init(struct policydb* p); +void cond_policydb_destroy(struct policydb* p); + +int cond_init_bool_indexes(struct policydb* p); +int cond_destroy_bool(void *key, void *datum, void *p); + +int cond_index_bool(void *key, void *datum, void *datap); + +int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp); +int cond_read_list(struct policydb *p, void *fp); + +void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd); + +int evaluate_cond_node(struct policydb *p, struct cond_node *node); + +#endif /* _CONDITIONAL_H_ */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/constraint.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/constraint.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,61 @@ +/* + * A constraint is a condition that must be satisfied in + * order for one or more permissions to be granted. + * Constraints are used to impose additional restrictions + * beyond the type-based rules in `te' or the role-based + * transition rules in `rbac'. Constraints are typically + * used to prevent a process from transitioning to a new user + * identity or role unless it is in a privileged type. + * Constraints are likewise typically used to prevent a + * process from labeling an object with a different user + * identity. + * + * Author : Stephen Smalley, + */ +#ifndef _SS_CONSTRAINT_H_ +#define _SS_CONSTRAINT_H_ + +#include "ebitmap.h" + +#define CEXPR_MAXDEPTH 5 + +struct constraint_expr { +#define CEXPR_NOT 1 /* not expr */ +#define CEXPR_AND 2 /* expr and expr */ +#define CEXPR_OR 3 /* expr or expr */ +#define CEXPR_ATTR 4 /* attr op attr */ +#define CEXPR_NAMES 5 /* attr op names */ + u32 expr_type; /* expression type */ + +#define CEXPR_USER 1 /* user */ +#define CEXPR_ROLE 2 /* role */ +#define CEXPR_TYPE 4 /* type */ +#define CEXPR_TARGET 8 /* target if set, source otherwise */ +#define CEXPR_XTARGET 16 /* special 3rd target for validatetrans rule */ +#define CEXPR_L1L2 32 /* low level 1 vs. low level 2 */ +#define CEXPR_L1H2 64 /* low level 1 vs. high level 2 */ +#define CEXPR_H1L2 128 /* high level 1 vs. low level 2 */ +#define CEXPR_H1H2 256 /* high level 1 vs. high level 2 */ +#define CEXPR_L1H1 512 /* low level 1 vs. high level 1 */ +#define CEXPR_L2H2 1024 /* low level 2 vs. high level 2 */ + u32 attr; /* attribute */ + +#define CEXPR_EQ 1 /* == or eq */ +#define CEXPR_NEQ 2 /* != */ +#define CEXPR_DOM 3 /* dom */ +#define CEXPR_DOMBY 4 /* domby */ +#define CEXPR_INCOMP 5 /* incomp */ + u32 op; /* operator */ + + struct ebitmap names; /* names */ + + struct constraint_expr *next; /* next expression */ +}; + +struct constraint_node { + u32 permissions; /* constrained permissions */ + struct constraint_expr *expr; /* constraint on permissions */ + struct constraint_node *next; /* next constraint */ +}; + +#endif /* _SS_CONSTRAINT_H_ */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/context.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/context.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,110 @@ +/* + * A security context is a set of security attributes + * associated with each subject and object controlled + * by the security policy. Security contexts are + * externally represented as variable-length strings + * that can be interpreted by a user or application + * with an understanding of the security policy. 
+ * Internally, the security server uses a simple + * structure. This structure is private to the + * security server and can be changed without affecting + * clients of the security server. + * + * Author : Stephen Smalley, + */ + +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SS_CONTEXT_H_ +#define _SS_CONTEXT_H_ + +#include "ebitmap.h" +#include "mls_types.h" +#include "security.h" + +/* + * A security context consists of an authenticated user + * identity, a role, a type and a MLS range. + */ +struct context { + u32 user; + u32 role; + u32 type; + struct mls_range range; +}; + +static inline void mls_context_init(struct context *c) +{ + memset(&c->range, 0, sizeof(c->range)); +} + +static inline int mls_context_cpy(struct context *dst, struct context *src) +{ + int rc; + + if (!flask_mls_enabled) + return 0; + + dst->range.level[0].sens = src->range.level[0].sens; + rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat); + if (rc) + goto out; + + dst->range.level[1].sens = src->range.level[1].sens; + rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat); + if (rc) + ebitmap_destroy(&dst->range.level[0].cat); +out: + return rc; +} + +static inline int mls_context_cmp(struct context *c1, struct context *c2) +{ + if (!flask_mls_enabled) + return 1; + + return ((c1->range.level[0].sens == c2->range.level[0].sens) && + ebitmap_cmp(&c1->range.level[0].cat,&c2->range.level[0].cat) && + (c1->range.level[1].sens == c2->range.level[1].sens) && + ebitmap_cmp(&c1->range.level[1].cat,&c2->range.level[1].cat)); +} + +static inline void mls_context_destroy(struct context *c) +{ + if (!flask_mls_enabled) + return; + + ebitmap_destroy(&c->range.level[0].cat); + ebitmap_destroy(&c->range.level[1].cat); + mls_context_init(c); +} + +static inline void context_init(struct context *c) +{ + memset(c, 0, sizeof(*c)); +} + +static inline int context_cpy(struct context *dst, struct context *src) +{ + dst->user = src->user; + dst->role = src->role; + dst->type = src->type; + return mls_context_cpy(dst, src); +} + +static inline void context_destroy(struct context *c) +{ + c->user = c->role = c->type = 0; + mls_context_destroy(c); +} + +static inline int context_cmp(struct context *c1, struct context *c2) +{ + return ((c1->user == c2->user) && + (c1->role == c2->role) && + (c1->type == c2->type) && + mls_context_cmp(c1, c2)); +} + +#endif /* _SS_CONTEXT_H_ */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/ebitmap.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/ebitmap.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,300 @@ +/* + * Implementation of the extensible bitmap type. 
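+ *
+ * A quick sketch of the representation (using the 64-bit MAPTYPE from
+ * ebitmap.h): after
+ *
+ *     struct ebitmap e;
+ *     ebitmap_init(&e);
+ *     ebitmap_set_bit(&e, 3, 1);
+ *     ebitmap_set_bit(&e, 100, 1);
+ *
+ * the bitmap holds two nodes, one with startbit 0 and bit 3 set in its
+ * map and one with startbit 64 and bit 36 set, e.highbit is 128 (the
+ * end of the highest node), and any bit outside every node reads as 0.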
+ * + * Author : Stephen Smalley, + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include +#include "ebitmap.h" +#include "policydb.h" +#include "util_endian.h" + +int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) +{ + struct ebitmap_node *n1, *n2; + + if (e1->highbit != e2->highbit) + return 0; + + n1 = e1->node; + n2 = e2->node; + while (n1 && n2 && + (n1->startbit == n2->startbit) && + (n1->map == n2->map)) { + n1 = n1->next; + n2 = n2->next; + } + + if (n1 || n2) + return 0; + + return 1; +} + +int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src) +{ + struct ebitmap_node *n, *new, *prev; + + ebitmap_init(dst); + n = src->node; + prev = NULL; + while (n) { + new = xmalloc(struct ebitmap_node); + if (!new) { + ebitmap_destroy(dst); + return -ENOMEM; + } + memset(new, 0, sizeof(*new)); + new->startbit = n->startbit; + new->map = n->map; + new->next = NULL; + if (prev) + prev->next = new; + else + dst->node = new; + prev = new; + n = n->next; + } + + dst->highbit = src->highbit; + return 0; +} + +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) +{ + struct ebitmap_node *n1, *n2; + + if (e1->highbit < e2->highbit) + return 0; + + n1 = e1->node; + n2 = e2->node; + while (n1 && n2 && (n1->startbit <= n2->startbit)) { + if (n1->startbit < n2->startbit) { + n1 = n1->next; + continue; + } + if ((n1->map & n2->map) != n2->map) + return 0; + + n1 = n1->next; + n2 = n2->next; + } + + if (n2) + return 0; + + return 1; +} + +int ebitmap_get_bit(struct ebitmap *e, unsigned long bit) +{ + struct ebitmap_node *n; + + if (e->highbit < bit) + return 0; + + n = e->node; + while (n && (n->startbit <= bit)) { + if ((n->startbit + MAPSIZE) > bit) { + if (n->map & (MAPBIT << (bit - n->startbit))) + return 1; + else + return 0; + } + n = n->next; + } + + return 0; +} + +int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value) +{ + struct ebitmap_node *n, *prev, *new; + + prev = NULL; + n = e->node; + while (n && n->startbit <= bit) { + if ((n->startbit + MAPSIZE) > bit) { + if (value) { + n->map |= (MAPBIT << (bit - n->startbit)); + } else { + n->map &= ~(MAPBIT << (bit - n->startbit)); + if (!n->map) { + /* drop this node from the bitmap */ + + if (!n->next) { + /* + * this was the highest map + * within the bitmap + */ + if (prev) + e->highbit = prev->startbit + MAPSIZE; + else + e->highbit = 0; + } + if (prev) + prev->next = n->next; + else + e->node = n->next; + + xfree(n); + } + } + return 0; + } + prev = n; + n = n->next; + } + + if (!value) + return 0; + + new = xmalloc(struct ebitmap_node); + if (!new) + return -ENOMEM; + memset(new, 0, sizeof(*new)); + + new->startbit = bit & ~(MAPSIZE - 1); + new->map = (MAPBIT << (bit - new->startbit)); + + if (!n) + /* this node will be the highest map within the bitmap */ + e->highbit = new->startbit + MAPSIZE; + + if (prev) { + new->next = prev->next; + prev->next = new; + } else { + new->next = e->node; + e->node = new; + } + + return 0; +} + +void ebitmap_destroy(struct ebitmap *e) +{ + struct ebitmap_node *n, *temp; + + if (!e) + return; + + n = e->node; + while (n) { + temp = n; + n = n->next; + xfree(temp); + } + + e->highbit = 0; + e->node = NULL; + return; +} + +int ebitmap_read(struct ebitmap *e, void *fp) +{ + int rc; + struct ebitmap_node *n, *l; + __le32 buf[3]; + u32 mapsize, count, i; + __le64 map; + + ebitmap_init(e); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto out; + + mapsize = le32_to_cpu(buf[0]); + e->highbit = le32_to_cpu(buf[1]); + count = 
le32_to_cpu(buf[2]); + + if (mapsize != MAPSIZE) { + printk(KERN_ERR "security: ebitmap: map size %u does not " + "match my size %Zd (high bit was %d)\n", mapsize, + MAPSIZE, e->highbit); + goto bad; + } + if (!e->highbit) { + e->node = NULL; + goto ok; + } + if (e->highbit & (MAPSIZE - 1)) { + printk(KERN_ERR "security: ebitmap: high bit (%d) is not a " + "multiple of the map size (%Zd)\n", e->highbit, MAPSIZE); + goto bad; + } + l = NULL; + for (i = 0; i < count; i++) { + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) { + printk(KERN_ERR "security: ebitmap: truncated map\n"); + goto bad; + } + n = xmalloc(struct ebitmap_node); + if (!n) { + printk(KERN_ERR "security: ebitmap: out of memory\n"); + rc = -ENOMEM; + goto bad; + } + memset(n, 0, sizeof(*n)); + + n->startbit = le32_to_cpu(buf[0]); + + if (n->startbit & (MAPSIZE - 1)) { + printk(KERN_ERR "security: ebitmap start bit (%d) is " + "not a multiple of the map size (%Zd)\n", + n->startbit, MAPSIZE); + goto bad_free; + } + if (n->startbit > (e->highbit - MAPSIZE)) { + printk(KERN_ERR "security: ebitmap start bit (%d) is " + "beyond the end of the bitmap (%Zd)\n", + n->startbit, (e->highbit - MAPSIZE)); + goto bad_free; + } + rc = next_entry(&map, fp, sizeof(u64)); + if (rc < 0) { + printk(KERN_ERR "security: ebitmap: truncated map\n"); + goto bad_free; + } + n->map = le64_to_cpu(map); + + if (!n->map) { + printk(KERN_ERR "security: ebitmap: null map in " + "ebitmap (startbit %d)\n", n->startbit); + goto bad_free; + } + if (l) { + if (n->startbit <= l->startbit) { + printk(KERN_ERR "security: ebitmap: start " + "bit %d comes after start bit %d\n", + n->startbit, l->startbit); + goto bad_free; + } + l->next = n; + } else + e->node = n; + + l = n; + } + +ok: + rc = 0; +out: + return rc; +bad_free: + xfree(n); +bad: + if (!rc) + rc = -EINVAL; + ebitmap_destroy(e); + goto out; +} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/ebitmap.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/ebitmap.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,78 @@ +/* + * An extensible bitmap is a bitmap that supports an + * arbitrary number of bits. Extensible bitmaps are + * used to represent sets of values, such as types, + * roles, categories, and classes. + * + * Each extensible bitmap is implemented as a linked + * list of bitmap nodes, where each bitmap node has + * an explicitly specified starting bit position within + * the total bitmap. + * + * Author : Stephen Smalley, + */ +#ifndef _SS_EBITMAP_H_ +#define _SS_EBITMAP_H_ + +#define MAPTYPE u64 /* portion of bitmap in each node */ +#define MAPSIZE (sizeof(MAPTYPE) * 8) /* number of bits in node bitmap */ +#define MAPBIT 1ULL /* a bit in the node bitmap */ + +struct ebitmap_node { + u32 startbit; /* starting position in the total bitmap */ + MAPTYPE map; /* this node's portion of the bitmap */ + struct ebitmap_node *next; +}; + +struct ebitmap { + struct ebitmap_node *node; /* first node in the bitmap */ + u32 highbit; /* highest position in the total bitmap */ +}; + +#define ebitmap_length(e) ((e)->highbit) +#define ebitmap_startbit(e) ((e)->node ? 
(e)->node->startbit : 0) + +static inline unsigned int ebitmap_start(struct ebitmap *e, + struct ebitmap_node **n) +{ + *n = e->node; + return ebitmap_startbit(e); +} + +static inline void ebitmap_init(struct ebitmap *e) +{ + memset(e, 0, sizeof(*e)); +} + +static inline unsigned int ebitmap_next(struct ebitmap_node **n, + unsigned int bit) +{ + if ((bit == ((*n)->startbit + MAPSIZE - 1)) && + (*n)->next) { + *n = (*n)->next; + return (*n)->startbit; + } + + return (bit+1); +} + +static inline int ebitmap_node_get_bit(struct ebitmap_node * n, + unsigned int bit) +{ + if (n->map & (MAPBIT << (bit - n->startbit))) + return 1; + return 0; +} + +#define ebitmap_for_each_bit(e, n, bit) \ + for (bit = ebitmap_start(e, &n); bit < ebitmap_length(e); bit = ebitmap_next(&n, bit)) \ + +int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2); +int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src); +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2); +int ebitmap_get_bit(struct ebitmap *e, unsigned long bit); +int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value); +void ebitmap_destroy(struct ebitmap *e); +int ebitmap_read(struct ebitmap *e, void *fp); + +#endif /* _SS_EBITMAP_H_ */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/hashtab.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/hashtab.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,170 @@ +/* + * Implementation of the hash table type. + * + * Author : Stephen Smalley, + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include "hashtab.h" + +struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, void *key), + int (*keycmp)(struct hashtab *h, void *key1, void *key2), + u32 size) +{ + struct hashtab *p; + u32 i; + + p = xmalloc(struct hashtab); + if (p == NULL) + return p; + + memset(p, 0, sizeof(*p)); + p->size = size; + p->nel = 0; + p->hash_value = hash_value; + p->keycmp = keycmp; + p->htable = (void *)xmalloc_array(struct hashtab_node, size); + if (p->htable == NULL) { + xfree(p); + return NULL; + } + + for (i = 0; i < size; i++) + p->htable[i] = NULL; + + return p; +} + +int hashtab_insert(struct hashtab *h, void *key, void *datum) +{ + u32 hvalue; + struct hashtab_node *prev, *cur, *newnode; + + if (!h || h->nel == HASHTAB_MAX_NODES) + return -EINVAL; + + hvalue = h->hash_value(h, key); + prev = NULL; + cur = h->htable[hvalue]; + while (cur && h->keycmp(h, key, cur->key) > 0) { + prev = cur; + cur = cur->next; + } + + if (cur && (h->keycmp(h, key, cur->key) == 0)) + return -EEXIST; + + newnode = xmalloc(struct hashtab_node); + if (newnode == NULL) + return -ENOMEM; + memset(newnode, 0, sizeof(*newnode)); + newnode->key = key; + newnode->datum = datum; + if (prev) { + newnode->next = prev->next; + prev->next = newnode; + } else { + newnode->next = h->htable[hvalue]; + h->htable[hvalue] = newnode; + } + + h->nel++; + return 0; +} + +void *hashtab_search(struct hashtab *h, void *key) +{ + u32 hvalue; + struct hashtab_node *cur; + + if (!h) + return NULL; + + hvalue = h->hash_value(h, key); + cur = h->htable[hvalue]; + while (cur != NULL && h->keycmp(h, key, cur->key) > 0) + cur = cur->next; + + if (cur == NULL || (h->keycmp(h, key, cur->key) != 0)) + return NULL; + + return cur->datum; +} + +void hashtab_destroy(struct hashtab *h) +{ + u32 i; + struct hashtab_node *cur, *temp; + + if (!h) + return; + + for (i = 0; i < h->size; i++) { + cur = h->htable[i]; + while (cur != NULL) { + temp = cur; + cur = cur->next; + xfree(temp); + } + h->htable[i] = NULL; + } + 
+ xfree(h->htable); + h->htable = NULL; + + xfree(h); +} + +int hashtab_map(struct hashtab *h, + int (*apply)(void *k, void *d, void *args), + void *args) +{ + u32 i; + int ret; + struct hashtab_node *cur; + + if (!h) + return 0; + + for (i = 0; i < h->size; i++) { + cur = h->htable[i]; + while (cur != NULL) { + ret = apply(cur->key, cur->datum, args); + if (ret) + return ret; + cur = cur->next; + } + } + return 0; +} + + +void hashtab_stat(struct hashtab *h, struct hashtab_info *info) +{ + u32 i, chain_len, slots_used, max_chain_len; + struct hashtab_node *cur; + + slots_used = 0; + max_chain_len = 0; + for (slots_used = max_chain_len = i = 0; i < h->size; i++) { + cur = h->htable[i]; + if (cur) { + slots_used++; + chain_len = 0; + while (cur) { + chain_len++; + cur = cur->next; + } + + if (chain_len > max_chain_len) + max_chain_len = chain_len; + } + } + + info->slots_used = slots_used; + info->max_chain_len = max_chain_len; +} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/hashtab.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/hashtab.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,87 @@ +/* + * A hash table (hashtab) maintains associations between + * key values and datum values. The type of the key values + * and the type of the datum values is arbitrary. The + * functions for hash computation and key comparison are + * provided by the creator of the table. + * + * Author : Stephen Smalley, + */ +#ifndef _SS_HASHTAB_H_ +#define _SS_HASHTAB_H_ + +#define HASHTAB_MAX_NODES 0xffffffff + +struct hashtab_node { + void *key; + void *datum; + struct hashtab_node *next; +}; + +struct hashtab { + struct hashtab_node **htable; /* hash table */ + u32 size; /* number of slots in hash table */ + u32 nel; /* number of elements in hash table */ + u32 (*hash_value)(struct hashtab *h, void *key); + /* hash function */ + int (*keycmp)(struct hashtab *h, void *key1, void *key2); + /* key comparison function */ +}; + +struct hashtab_info { + u32 slots_used; + u32 max_chain_len; +}; + +/* + * Creates a new hash table with the specified characteristics. + * + * Returns NULL if insufficent space is available or + * the new hash table otherwise. + */ +struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, void *key), + int (*keycmp)(struct hashtab *h, void *key1, void *key2), + u32 size); + +/* + * Inserts the specified (key, datum) pair into the specified hash table. + * + * Returns -ENOMEM on memory allocation error, + * -EEXIST if there is already an entry with the same key, + * -EINVAL for general errors or + * 0 otherwise. + */ +int hashtab_insert(struct hashtab *h, void *k, void *d); + +/* + * Searches for the entry with the specified key in the hash table. + * + * Returns NULL if no entry has the specified key or + * the datum of the entry otherwise. + */ +void *hashtab_search(struct hashtab *h, void *k); + +/* + * Destroys the specified hash table. + */ +void hashtab_destroy(struct hashtab *h); + +/* + * Applies the specified apply function to (key,datum,args) + * for each entry in the specified hash table. + * + * The order in which the function is applied to the entries + * is dependent upon the internal structure of the hash table. + * + * If apply returns a non-zero status, then hashtab_map will cease + * iterating through the hash table and will propagate the error + * return to its caller. 
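+ *
+ * A minimal usage sketch (the callback name is only illustrative):
+ *
+ *     static int count_entry(void *key, void *datum, void *args)
+ *     {
+ *         (*(u32 *)args)++;
+ *         return 0;
+ *     }
+ *
+ *     u32 n = 0;
+ *     hashtab_map(h, count_entry, &n);
+ *
+ * leaves n equal to h->nel, since the callback returns zero for every
+ * entry and the iteration therefore never stops early.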
+ */ +int hashtab_map(struct hashtab *h, + int (*apply)(void *k, void *d, void *args), + void *args); + +/* Fill info with some hash table statistics */ +void hashtab_stat(struct hashtab *h, struct hashtab_info *info); + +#endif /* _SS_HASHTAB_H */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/mls.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/mls.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,558 @@ +/* + * Implementation of the multi-level security (MLS) policy. + * + * Author : Stephen Smalley, + */ +/* + * Updated: Trusted Computer Solutions, Inc. + * + * Support for enhanced MLS infrastructure. + * + * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include "sidtab.h" +#include "mls.h" +#include "policydb.h" +#include "services.h" + +/* + * Return the length in bytes for the MLS fields of the + * security context string representation of `context'. + */ +int mls_compute_context_len(struct context * context) +{ + int i, l, len, range; + struct ebitmap_node *node; + + if (!flask_mls_enabled) + return 0; + + len = 1; /* for the beginning ":" */ + for (l = 0; l < 2; l++) { + range = 0; + len += strlen(policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); + + ebitmap_for_each_bit(&context->range.level[l].cat, node, i) { + if (ebitmap_node_get_bit(node, i)) { + if (range) { + range++; + continue; + } + + len += strlen(policydb.p_cat_val_to_name[i]) + 1; + range++; + } else { + if (range > 1) + len += strlen(policydb.p_cat_val_to_name[i - 1]) + 1; + range = 0; + } + } + /* Handle case where last category is the end of range */ + if (range > 1) + len += strlen(policydb.p_cat_val_to_name[i - 1]) + 1; + + if (l == 0) { + if (mls_level_eq(&context->range.level[0], + &context->range.level[1])) + break; + else + len++; + } + } + + return len; +} + +/* + * Write the security context string representation of + * the MLS fields of `context' into the string `*scontext'. + * Update `*scontext' to point to the end of the MLS fields. 
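+ *
+ * For example (assuming the conventional sN/cN names held in the
+ * policy's val_to_name tables): a low level of s2 with categories
+ * c0,c1,c2,c5 is rendered as ":s2:c0.c2,c5", and if the high level is
+ * a distinct s4 with no categories, "-s4" is appended, giving
+ * ":s2:c0.c2,c5-s4".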
+ */ +void mls_sid_to_context(struct context *context, + char **scontext) +{ + char *scontextp; + int i, l, range, wrote_sep; + struct ebitmap_node *node; + + if (!flask_mls_enabled) + return; + + scontextp = *scontext; + + *scontextp = ':'; + scontextp++; + + for (l = 0; l < 2; l++) { + range = 0; + wrote_sep = 0; + strcpy(scontextp, + policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); + scontextp += strlen(policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); + + /* categories */ + ebitmap_for_each_bit(&context->range.level[l].cat, node, i) { + if (ebitmap_node_get_bit(node, i)) { + if (range) { + range++; + continue; + } + + if (!wrote_sep) { + *scontextp++ = ':'; + wrote_sep = 1; + } else + *scontextp++ = ','; + strcpy(scontextp, policydb.p_cat_val_to_name[i]); + scontextp += strlen(policydb.p_cat_val_to_name[i]); + range++; + } else { + if (range > 1) { + if (range > 2) + *scontextp++ = '.'; + else + *scontextp++ = ','; + + strcpy(scontextp, policydb.p_cat_val_to_name[i - 1]); + scontextp += strlen(policydb.p_cat_val_to_name[i - 1]); + } + range = 0; + } + } + + /* Handle case where last category is the end of range */ + if (range > 1) { + if (range > 2) + *scontextp++ = '.'; + else + *scontextp++ = ','; + + strcpy(scontextp, policydb.p_cat_val_to_name[i - 1]); + scontextp += strlen(policydb.p_cat_val_to_name[i - 1]); + } + + if (l == 0) { + if (mls_level_eq(&context->range.level[0], + &context->range.level[1])) + break; + else { + *scontextp = '-'; + scontextp++; + } + } + } + + *scontext = scontextp; + return; +} + +/* + * Return 1 if the MLS fields in the security context + * structure `c' are valid. Return 0 otherwise. + */ +int mls_context_isvalid(struct policydb *p, struct context *c) +{ + struct level_datum *levdatum; + struct user_datum *usrdatum; + struct ebitmap_node *node; + int i, l; + + if (!flask_mls_enabled) + return 1; + + /* + * MLS range validity checks: high must dominate low, low level must + * be valid (category set <-> sensitivity check), and high level must + * be valid (category set <-> sensitivity check) + */ + if (!mls_level_dom(&c->range.level[1], &c->range.level[0])) + /* High does not dominate low. */ + return 0; + + for (l = 0; l < 2; l++) { + if (!c->range.level[l].sens || c->range.level[l].sens > p->p_levels.nprim) + return 0; + levdatum = hashtab_search(p->p_levels.table, + p->p_sens_val_to_name[c->range.level[l].sens - 1]); + if (!levdatum) + return 0; + + ebitmap_for_each_bit(&c->range.level[l].cat, node, i) { + if (ebitmap_node_get_bit(node, i)) { + if (i > p->p_cats.nprim) + return 0; + if (!ebitmap_get_bit(&levdatum->level->cat, i)) + /* + * Category may not be associated with + * sensitivity in low level. + */ + return 0; + } + } + } + + if (c->role == OBJECT_R_VAL) + return 1; + + /* + * User must be authorized for the MLS range. + */ + if (!c->user || c->user > p->p_users.nprim) + return 0; + usrdatum = p->user_val_to_struct[c->user - 1]; + if (!mls_range_contains(usrdatum->range, c->range)) + return 0; /* user may not be associated with range */ + + return 1; +} + +/* + * Copies the MLS range from `src' into `dst'. 
+ */ +static inline int mls_copy_context(struct context *dst, + struct context *src) +{ + int l, rc = 0; + + /* Copy the MLS range from the source context */ + for (l = 0; l < 2; l++) { + dst->range.level[l].sens = src->range.level[l].sens; + rc = ebitmap_cpy(&dst->range.level[l].cat, + &src->range.level[l].cat); + if (rc) + break; + } + + return rc; +} + +/* + * Set the MLS fields in the security context structure + * `context' based on the string representation in + * the string `*scontext'. Update `*scontext' to + * point to the end of the string representation of + * the MLS fields. + * + * This function modifies the string in place, inserting + * NULL characters to terminate the MLS fields. + * + * If a def_sid is provided and no MLS field is present, + * copy the MLS field of the associated default context. + * Used for upgraded to MLS systems where objects may lack + * MLS fields. + * + * Policy read-lock must be held for sidtab lookup. + * + */ +int mls_context_to_sid(char oldc, + char **scontext, + struct context *context, + struct sidtab *s, + u32 def_sid) +{ + + char delim; + char *scontextp, *p, *rngptr; + struct level_datum *levdatum; + struct cat_datum *catdatum, *rngdatum; + int l, rc = -EINVAL; + + if (!flask_mls_enabled) + return 0; + + /* + * No MLS component to the security context, try and map to + * default if provided. + */ + if (!oldc) { + struct context *defcon; + + if (def_sid == SECSID_NULL) + goto out; + + defcon = sidtab_search(s, def_sid); + if (!defcon) + goto out; + + rc = mls_copy_context(context, defcon); + goto out; + } + + /* Extract low sensitivity. */ + scontextp = p = *scontext; + while (*p && *p != ':' && *p != '-') + p++; + + delim = *p; + if (delim != 0) + *p++ = 0; + + for (l = 0; l < 2; l++) { + levdatum = hashtab_search(policydb.p_levels.table, scontextp); + if (!levdatum) { + rc = -EINVAL; + goto out; + } + + context->range.level[l].sens = levdatum->level->sens; + + if (delim == ':') { + /* Extract category set. */ + while (1) { + scontextp = p; + while (*p && *p != ',' && *p != '-') + p++; + delim = *p; + if (delim != 0) + *p++ = 0; + + /* Separate into range if exists */ + if ((rngptr = strchr(scontextp, '.')) != NULL) { + /* Remove '.' */ + *rngptr++ = 0; + } + + catdatum = hashtab_search(policydb.p_cats.table, + scontextp); + if (!catdatum) { + rc = -EINVAL; + goto out; + } + + rc = ebitmap_set_bit(&context->range.level[l].cat, + catdatum->value - 1, 1); + if (rc) + goto out; + + /* If range, set all categories in range */ + if (rngptr) { + int i; + + rngdatum = hashtab_search(policydb.p_cats.table, rngptr); + if (!rngdatum) { + rc = -EINVAL; + goto out; + } + + if (catdatum->value >= rngdatum->value) { + rc = -EINVAL; + goto out; + } + + for (i = catdatum->value; i < rngdatum->value; i++) { + rc = ebitmap_set_bit(&context->range.level[l].cat, i, 1); + if (rc) + goto out; + } + } + + if (delim != ',') + break; + } + } + if (delim == '-') { + /* Extract high sensitivity. */ + scontextp = p; + while (*p && *p != ':') + p++; + + delim = *p; + if (delim != 0) + *p++ = 0; + } else + break; + } + + if (l == 0) { + context->range.level[1].sens = context->range.level[0].sens; + rc = ebitmap_cpy(&context->range.level[1].cat, + &context->range.level[0].cat); + if (rc) + goto out; + } + *scontext = ++p; + rc = 0; +out: + return rc; +} + +/* + * Copies the effective MLS range from `src' into `dst'. 
+ */ +static inline int mls_scopy_context(struct context *dst, + struct context *src) +{ + int l, rc = 0; + + /* Copy the MLS range from the source context */ + for (l = 0; l < 2; l++) { + dst->range.level[l].sens = src->range.level[0].sens; + rc = ebitmap_cpy(&dst->range.level[l].cat, + &src->range.level[0].cat); + if (rc) + break; + } + + return rc; +} + +/* + * Copies the MLS range `range' into `context'. + */ +static inline int mls_range_set(struct context *context, + struct mls_range *range) +{ + int l, rc = 0; + + /* Copy the MLS range into the context */ + for (l = 0; l < 2; l++) { + context->range.level[l].sens = range->level[l].sens; + rc = ebitmap_cpy(&context->range.level[l].cat, + &range->level[l].cat); + if (rc) + break; + } + + return rc; +} + +int mls_setup_user_range(struct context *fromcon, struct user_datum *user, + struct context *usercon) +{ + if (flask_mls_enabled) { + struct mls_level *fromcon_sen = &(fromcon->range.level[0]); + struct mls_level *fromcon_clr = &(fromcon->range.level[1]); + struct mls_level *user_low = &(user->range.level[0]); + struct mls_level *user_clr = &(user->range.level[1]); + struct mls_level *user_def = &(user->dfltlevel); + struct mls_level *usercon_sen = &(usercon->range.level[0]); + struct mls_level *usercon_clr = &(usercon->range.level[1]); + + /* Honor the user's default level if we can */ + if (mls_level_between(user_def, fromcon_sen, fromcon_clr)) { + *usercon_sen = *user_def; + } else if (mls_level_between(fromcon_sen, user_def, user_clr)) { + *usercon_sen = *fromcon_sen; + } else if (mls_level_between(fromcon_clr, user_low, user_def)) { + *usercon_sen = *user_low; + } else + return -EINVAL; + + /* Lower the clearance of available contexts + if the clearance of "fromcon" is lower than + that of the user's default clearance (but + only if the "fromcon" clearance dominates + the user's computed sensitivity level) */ + if (mls_level_dom(user_clr, fromcon_clr)) { + *usercon_clr = *fromcon_clr; + } else if (mls_level_dom(fromcon_clr, user_clr)) { + *usercon_clr = *user_clr; + } else + return -EINVAL; + } + + return 0; +} + +/* + * Convert the MLS fields in the security context + * structure `c' from the values specified in the + * policy `oldp' to the values specified in the policy `newp'. + */ +int mls_convert_context(struct policydb *oldp, + struct policydb *newp, + struct context *c) +{ + struct level_datum *levdatum; + struct cat_datum *catdatum; + struct ebitmap bitmap; + struct ebitmap_node *node; + int l, i; + + if (!flask_mls_enabled) + return 0; + + for (l = 0; l < 2; l++) { + levdatum = hashtab_search(newp->p_levels.table, + oldp->p_sens_val_to_name[c->range.level[l].sens - 1]); + + if (!levdatum) + return -EINVAL; + c->range.level[l].sens = levdatum->level->sens; + + ebitmap_init(&bitmap); + ebitmap_for_each_bit(&c->range.level[l].cat, node, i) { + if (ebitmap_node_get_bit(node, i)) { + int rc; + + catdatum = hashtab_search(newp->p_cats.table, + oldp->p_cat_val_to_name[i]); + if (!catdatum) + return -EINVAL; + rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1); + if (rc) + return rc; + } + } + ebitmap_destroy(&c->range.level[l].cat); + c->range.level[l].cat = bitmap; + } + + return 0; +} + +int mls_compute_sid(struct context *scontext, + struct context *tcontext, + u16 tclass, + u32 specified, + struct context *newcontext) +{ + if (!flask_mls_enabled) + return 0; + + switch (specified) { + case AVTAB_TRANSITION: + if (tclass == SECCLASS_DOMAIN) { + struct range_trans *rangetr; + /* Look for a range transition rule. 
*/ + for (rangetr = policydb.range_tr; rangetr; + rangetr = rangetr->next) { + if (rangetr->dom == scontext->type && + rangetr->type == tcontext->type) { + /* Set the range from the rule */ + return mls_range_set(newcontext, + &rangetr->range); + } + } + } + /* Fallthrough */ + case AVTAB_CHANGE: + if (tclass == SECCLASS_DOMAIN) + /* Use the process MLS attributes. */ + return mls_copy_context(newcontext, scontext); + else + /* Use the process effective MLS attributes. */ + return mls_scopy_context(newcontext, scontext); + case AVTAB_MEMBER: + /* Only polyinstantiate the MLS attributes if + the type is being polyinstantiated */ + if (newcontext->type != tcontext->type) { + /* Use the process effective MLS attributes. */ + return mls_scopy_context(newcontext, scontext); + } else { + /* Use the related object MLS attributes. */ + return mls_copy_context(newcontext, tcontext); + } + default: + return -EINVAL; + } + return -EINVAL; +} + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/mls.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/mls.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,44 @@ +/* + * Multi-level security (MLS) policy operations. + * + * Author : Stephen Smalley, + */ +/* + * Updated: Trusted Computer Solutions, Inc. + * + * Support for enhanced MLS infrastructure. + * + * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. + */ + +#ifndef _SS_MLS_H_ +#define _SS_MLS_H_ + +#include "context.h" +#include "policydb.h" + +int mls_compute_context_len(struct context *context); +void mls_sid_to_context(struct context *context, char **scontext); +int mls_context_isvalid(struct policydb *p, struct context *c); + +int mls_context_to_sid(char oldc, + char **scontext, + struct context *context, + struct sidtab *s, + u32 def_sid); + +int mls_convert_context(struct policydb *oldp, + struct policydb *newp, + struct context *context); + +int mls_compute_sid(struct context *scontext, + struct context *tcontext, + u16 tclass, + u32 specified, + struct context *newcontext); + +int mls_setup_user_range(struct context *fromcon, struct user_datum *user, + struct context *usercon); + +#endif /* _SS_MLS_H */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/mls_types.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/mls_types.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,58 @@ +/* + * Type definitions for the multi-level security (MLS) policy. + * + * Author : Stephen Smalley, + */ +/* + * Updated: Trusted Computer Solutions, Inc. + * + * Support for enhanced MLS infrastructure. + * + * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. 
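+ *
+ * As a shorthand for the relations defined below: level A dominates
+ * level B when A's sensitivity is >= B's and A's category set contains
+ * B's, so e.g. s2:c0,c1 dominates s1:c0; two levels are equal only
+ * when both sensitivity and categories match exactly; and a range
+ * contains another range when the contained range's low dominates the
+ * container's low and the container's high dominates the contained
+ * range's high.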
+ */ + +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SS_MLS_TYPES_H_ +#define _SS_MLS_TYPES_H_ + +#include "security.h" + +struct mls_level { + u32 sens; /* sensitivity */ + struct ebitmap cat; /* category set */ +}; + +struct mls_range { + struct mls_level level[2]; /* low == level[0], high == level[1] */ +}; + +static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2) +{ + if (!flask_mls_enabled) + return 1; + + return ((l1->sens == l2->sens) && + ebitmap_cmp(&l1->cat, &l2->cat)); +} + +static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2) +{ + if (!flask_mls_enabled) + return 1; + + return ((l1->sens >= l2->sens) && + ebitmap_contains(&l1->cat, &l2->cat)); +} + +#define mls_level_incomp(l1, l2) \ +(!mls_level_dom((l1), (l2)) && !mls_level_dom((l2), (l1))) + +#define mls_level_between(l1, l2, l3) \ +(mls_level_dom((l1), (l2)) && mls_level_dom((l3), (l1))) + +#define mls_range_contains(r1, r2) \ +(mls_level_dom(&(r2).level[0], &(r1).level[0]) && \ + mls_level_dom(&(r1).level[1], &(r2).level[1])) + +#endif /* _SS_MLS_TYPES_H_ */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/policydb.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/policydb.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,1796 @@ +/* + * Implementation of the policy database. + * + * Author : Stephen Smalley, + */ + +/* + * Updated: Trusted Computer Solutions, Inc. + * + * Support for enhanced MLS infrastructure. + * + * Updated: Frank Mayer and Karl MacMillan + * + * Added conditional policy language extensions + * + * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. + * Copyright (C) 2003 - 2004 Tresys Technology, LLC + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include "security.h" + +#include "policydb.h" +#include "conditional.h" +#include "mls.h" +#include "util_endian.h" + +#define _DEBUG_HASHES + +#ifdef DEBUG_HASHES +static char *symtab_name[SYM_NUM] = { + "common prefixes", + "classes", + "roles", + "types", + "users", + "bools", + "levels", + "categories", +}; +#endif + +int flask_mls_enabled = 0; + +static unsigned int symtab_sizes[SYM_NUM] = { + 2, + 32, + 16, + 512, + 128, + 16, + 16, + 16, +}; + +struct policydb_compat_info { + int version; + int sym_num; + int ocon_num; +}; + +/* These need to be updated if SYM_NUM or OCON_NUM changes */ +static struct policydb_compat_info policydb_compat[] = { + { + .version = POLICYDB_VERSION_BASE, + .sym_num = SYM_NUM - 3, + .ocon_num = OCON_NUM - 1, + }, + { + .version = POLICYDB_VERSION_BOOL, + .sym_num = SYM_NUM - 2, + .ocon_num = OCON_NUM - 1, + }, + { + .version = POLICYDB_VERSION_IPV6, + .sym_num = SYM_NUM - 2, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_NLCLASS, + .sym_num = SYM_NUM - 2, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_MLS, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_AVTAB, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, +}; + +static struct policydb_compat_info *policydb_lookup_compat(int version) +{ + int i; + struct policydb_compat_info *info = NULL; + + for (i = 0; i < sizeof(policydb_compat)/sizeof(*info); i++) { + if (policydb_compat[i].version == version) { + info = &policydb_compat[i]; + break; + } + } + return info; +} + +/* + * Initialize the role table. 
+ */ +static int roles_init(struct policydb *p) +{ + char *key = NULL; + int rc; + struct role_datum *role; + + role = xmalloc(struct role_datum); + if (!role) { + rc = -ENOMEM; + goto out; + } + memset(role, 0, sizeof(*role)); + role->value = ++p->p_roles.nprim; + if (role->value != OBJECT_R_VAL) { + rc = -EINVAL; + goto out_free_role; + } + key = xmalloc_array(char, strlen(OBJECT_R)+1); + if (!key) { + rc = -ENOMEM; + goto out_free_role; + } + strcpy(key, OBJECT_R); + rc = hashtab_insert(p->p_roles.table, key, role); + if (rc) + goto out_free_key; +out: + return rc; + +out_free_key: + xfree(key); +out_free_role: + xfree(role); + goto out; +} + +/* + * Initialize a policy database structure. + */ +static int policydb_init(struct policydb *p) +{ + int i, rc; + + memset(p, 0, sizeof(*p)); + + for (i = 0; i < SYM_NUM; i++) { + rc = symtab_init(&p->symtab[i], symtab_sizes[i]); + if (rc) + goto out_free_symtab; + } + + rc = avtab_init(&p->te_avtab); + if (rc) + goto out_free_symtab; + + rc = roles_init(p); + if (rc) + goto out_free_avtab; + + rc = cond_policydb_init(p); + if (rc) + goto out_free_avtab; + +out: + return rc; + +out_free_avtab: + avtab_destroy(&p->te_avtab); + +out_free_symtab: + for (i = 0; i < SYM_NUM; i++) + hashtab_destroy(p->symtab[i].table); + goto out; +} + +/* + * The following *_index functions are used to + * define the val_to_name and val_to_struct arrays + * in a policy database structure. The val_to_name + * arrays are used when converting security context + * structures into string representations. The + * val_to_struct arrays are used when the attributes + * of a class, role, or user are needed. + */ + +static int common_index(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct common_datum *comdatum; + + comdatum = datum; + p = datap; + if (!comdatum->value || comdatum->value > p->p_commons.nprim) + return -EINVAL; + p->p_common_val_to_name[comdatum->value - 1] = key; + return 0; +} + +static int class_index(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct class_datum *cladatum; + + cladatum = datum; + p = datap; + if (!cladatum->value || cladatum->value > p->p_classes.nprim) + return -EINVAL; + p->p_class_val_to_name[cladatum->value - 1] = key; + p->class_val_to_struct[cladatum->value - 1] = cladatum; + return 0; +} + +static int role_index(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct role_datum *role; + + role = datum; + p = datap; + if (!role->value || role->value > p->p_roles.nprim) + return -EINVAL; + p->p_role_val_to_name[role->value - 1] = key; + p->role_val_to_struct[role->value - 1] = role; + return 0; +} + +static int type_index(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct type_datum *typdatum; + + typdatum = datum; + p = datap; + + if (typdatum->primary) { + if (!typdatum->value || typdatum->value > p->p_types.nprim) + return -EINVAL; + p->p_type_val_to_name[typdatum->value - 1] = key; + } + + return 0; +} + +static int user_index(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct user_datum *usrdatum; + + usrdatum = datum; + p = datap; + if (!usrdatum->value || usrdatum->value > p->p_users.nprim) + return -EINVAL; + p->p_user_val_to_name[usrdatum->value - 1] = key; + p->user_val_to_struct[usrdatum->value - 1] = usrdatum; + return 0; +} + +static int sens_index(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct level_datum *levdatum; + + levdatum = datum; + p = datap; + + if (!levdatum->isalias) { + if 
(!levdatum->level->sens || + levdatum->level->sens > p->p_levels.nprim) + return -EINVAL; + p->p_sens_val_to_name[levdatum->level->sens - 1] = key; + } + + return 0; +} + +static int cat_index(void *key, void *datum, void *datap) +{ + struct policydb *p; + struct cat_datum *catdatum; + + catdatum = datum; + p = datap; + + if (!catdatum->isalias) { + if (!catdatum->value || catdatum->value > p->p_cats.nprim) + return -EINVAL; + p->p_cat_val_to_name[catdatum->value - 1] = key; + } + + return 0; +} + +static int (*index_f[SYM_NUM]) (void *key, void *datum, void *datap) = +{ + common_index, + class_index, + role_index, + type_index, + user_index, + cond_index_bool, + sens_index, + cat_index, +}; + +/* + * Define the common val_to_name array and the class + * val_to_name and val_to_struct arrays in a policy + * database structure. + * + * Caller must clean up upon failure. + */ +static int policydb_index_classes(struct policydb *p) +{ + int rc; + + p->p_common_val_to_name = + xmalloc_array(char *, p->p_commons.nprim); + if (!p->p_common_val_to_name) { + rc = -ENOMEM; + goto out; + } + + rc = hashtab_map(p->p_commons.table, common_index, p); + if (rc) + goto out; + + p->class_val_to_struct = + (void *)xmalloc_array(struct class_datum, p->p_classes.nprim); + if (!p->class_val_to_struct) { + rc = -ENOMEM; + goto out; + } + + p->p_class_val_to_name = + xmalloc_array(char *, p->p_classes.nprim); + if (!p->p_class_val_to_name) { + rc = -ENOMEM; + goto out; + } + + rc = hashtab_map(p->p_classes.table, class_index, p); +out: + return rc; +} + +#ifdef DEBUG_HASHES +static void symtab_hash_eval(struct symtab *s) +{ + int i; + + for (i = 0; i < SYM_NUM; i++) { + struct hashtab *h = s[i].table; + struct hashtab_info info; + + hashtab_stat(h, &info); + printk(KERN_INFO "%s: %d entries and %d/%d buckets used, " + "longest chain length %d\n", symtab_name[i], h->nel, + info.slots_used, h->size, info.max_chain_len); + } +} +#endif + +/* + * Define the other val_to_name and val_to_struct arrays + * in a policy database structure. + * + * Caller must clean up on failure. + */ +static int policydb_index_others(struct policydb *p) +{ + int i, rc = 0; + + printk(KERN_INFO "security: %d users, %d roles, %d types, %d bools", + p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim); + if (flask_mls_enabled) + printk(", %d sens, %d cats", p->p_levels.nprim, + p->p_cats.nprim); + printk("\n"); + + printk(KERN_INFO "security: %d classes, %d rules\n", + p->p_classes.nprim, p->te_avtab.nel); + +#ifdef DEBUG_HASHES + avtab_hash_eval(&p->te_avtab, "rules"); + symtab_hash_eval(p->symtab); +#endif + + p->role_val_to_struct = + (void *)xmalloc_array(struct role_datum, p->p_roles.nprim); + if (!p->role_val_to_struct) { + rc = -ENOMEM; + goto out; + } + + p->user_val_to_struct = + (void *)xmalloc_array(struct user_datum, p->p_users.nprim); + if (!p->user_val_to_struct) { + rc = -ENOMEM; + goto out; + } + + if (cond_init_bool_indexes(p)) { + rc = -ENOMEM; + goto out; + } + + for (i = SYM_ROLES; i < SYM_NUM; i++) { + p->sym_val_to_name[i] = + xmalloc_array(char *, p->symtab[i].nprim); + if (!p->sym_val_to_name[i]) { + rc = -ENOMEM; + goto out; + } + rc = hashtab_map(p->symtab[i].table, index_f[i], p); + if (rc) + goto out; + } + +out: + return rc; +} + +/* + * The following *_destroy functions are used to + * free any memory allocated for each kind of + * symbol data in the policy database. 
+ */ + +static int perm_destroy(void *key, void *datum, void *p) +{ + xfree(key); + xfree(datum); + return 0; +} + +static int common_destroy(void *key, void *datum, void *p) +{ + struct common_datum *comdatum; + + xfree(key); + comdatum = datum; + hashtab_map(comdatum->permissions.table, perm_destroy, NULL); + hashtab_destroy(comdatum->permissions.table); + xfree(datum); + return 0; +} + +static int class_destroy(void *key, void *datum, void *p) +{ + struct class_datum *cladatum; + struct constraint_node *constraint, *ctemp; + struct constraint_expr *e, *etmp; + + xfree(key); + cladatum = datum; + hashtab_map(cladatum->permissions.table, perm_destroy, NULL); + hashtab_destroy(cladatum->permissions.table); + constraint = cladatum->constraints; + while (constraint) { + e = constraint->expr; + while (e) { + ebitmap_destroy(&e->names); + etmp = e; + e = e->next; + xfree(etmp); + } + ctemp = constraint; + constraint = constraint->next; + xfree(ctemp); + } + + constraint = cladatum->validatetrans; + while (constraint) { + e = constraint->expr; + while (e) { + ebitmap_destroy(&e->names); + etmp = e; + e = e->next; + xfree(etmp); + } + ctemp = constraint; + constraint = constraint->next; + xfree(ctemp); + } + + xfree(cladatum->comkey); + xfree(datum); + return 0; +} + +static int role_destroy(void *key, void *datum, void *p) +{ + struct role_datum *role; + + xfree(key); + role = datum; + ebitmap_destroy(&role->dominates); + ebitmap_destroy(&role->types); + xfree(datum); + return 0; +} + +static int type_destroy(void *key, void *datum, void *p) +{ + xfree(key); + xfree(datum); + return 0; +} + +static int user_destroy(void *key, void *datum, void *p) +{ + struct user_datum *usrdatum; + + xfree(key); + usrdatum = datum; + ebitmap_destroy(&usrdatum->roles); + ebitmap_destroy(&usrdatum->range.level[0].cat); + ebitmap_destroy(&usrdatum->range.level[1].cat); + ebitmap_destroy(&usrdatum->dfltlevel.cat); + xfree(datum); + return 0; +} + +static int sens_destroy(void *key, void *datum, void *p) +{ + struct level_datum *levdatum; + + xfree(key); + levdatum = datum; + ebitmap_destroy(&levdatum->level->cat); + xfree(levdatum->level); + xfree(datum); + return 0; +} + +static int cat_destroy(void *key, void *datum, void *p) +{ + xfree(key); + xfree(datum); + return 0; +} + +static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) = +{ + common_destroy, + class_destroy, + role_destroy, + type_destroy, + user_destroy, + cond_destroy_bool, + sens_destroy, + cat_destroy, +}; + +static void ocontext_destroy(struct ocontext *c, int i) +{ + context_destroy(&c->context[0]); + context_destroy(&c->context[1]); + if (i == OCON_ISID) + xfree(c->u.name); + xfree(c); +} + +/* + * Free any memory allocated by a policy database structure. 
+ */ +void policydb_destroy(struct policydb *p) +{ + struct ocontext *c, *ctmp; + int i; + struct role_allow *ra, *lra = NULL; + struct role_trans *tr, *ltr = NULL; + struct range_trans *rt, *lrt = NULL; + + for (i = 0; i < SYM_NUM; i++) { + hashtab_map(p->symtab[i].table, destroy_f[i], NULL); + hashtab_destroy(p->symtab[i].table); + } + + for (i = 0; i < SYM_NUM; i++) + xfree(p->sym_val_to_name[i]); + + xfree(p->class_val_to_struct); + xfree(p->role_val_to_struct); + xfree(p->user_val_to_struct); + + avtab_destroy(&p->te_avtab); + + for (i = 0; i < OCON_NUM; i++) { + c = p->ocontexts[i]; + while (c) { + ctmp = c; + c = c->next; + ocontext_destroy(ctmp,i); + } + } + + cond_policydb_destroy(p); + + for (tr = p->role_tr; tr; tr = tr->next) { + if (ltr) xfree(ltr); + ltr = tr; + } + if (ltr) xfree(ltr); + + for (ra = p->role_allow; ra; ra = ra -> next) { + if (lra) xfree(lra); + lra = ra; + } + if (lra) xfree(lra); + + for (rt = p->range_tr; rt; rt = rt -> next) { + if (lrt) xfree(lrt); + lrt = rt; + } + if (lrt) xfree(lrt); + + for (i = 0; i < p->p_types.nprim; i++) + ebitmap_destroy(&p->type_attr_map[i]); + xfree(p->type_attr_map); + + return; +} + +/* + * Load the initial SIDs specified in a policy database + * structure into a SID table. + */ +int policydb_load_isids(struct policydb *p, struct sidtab *s) +{ + struct ocontext *head, *c; + int rc; + + rc = sidtab_init(s); + if (rc) { + printk(KERN_ERR "security: out of memory on SID table init\n"); + goto out; + } + + head = p->ocontexts[OCON_ISID]; + for (c = head; c; c = c->next) { + if (!c->context[0].user) { + printk(KERN_ERR "security: SID %s was never " + "defined.\n", c->u.name); + rc = -EINVAL; + goto out; + } + if (sidtab_insert(s, c->sid[0], &c->context[0])) { + printk(KERN_ERR "security: unable to load initial " + "SID %s.\n", c->u.name); + rc = -EINVAL; + goto out; + } + } +out: + return rc; +} + +/* + * Return 1 if the fields in the security context + * structure `c' are valid. Return 0 otherwise. + */ +int policydb_context_isvalid(struct policydb *p, struct context *c) +{ + struct role_datum *role; + struct user_datum *usrdatum; + + if (!c->role || c->role > p->p_roles.nprim) + return 0; + + if (!c->user || c->user > p->p_users.nprim) + return 0; + + if (!c->type || c->type > p->p_types.nprim) + return 0; + + if (c->role != OBJECT_R_VAL) { + /* + * Role must be authorized for the type. + */ + role = p->role_val_to_struct[c->role - 1]; + if (!ebitmap_get_bit(&role->types, + c->type - 1)) + /* role may not be associated with type */ + return 0; + + /* + * User must be authorized for the role. + */ + usrdatum = p->user_val_to_struct[c->user - 1]; + if (!usrdatum) + return 0; + + if (!ebitmap_get_bit(&usrdatum->roles, + c->role - 1)) + /* user may not be associated with role */ + return 0; + } + + if (!mls_context_isvalid(p, c)) + return 0; + + return 1; +} + +/* + * Read a MLS range structure from a policydb binary + * representation file. 
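+ *
+ * Sketch of the on-disk layout this helper expects: a le32 item count
+ * (1 or 2), then that many le32 sensitivity values (low, then high),
+ * then the low level's category ebitmap and, only when two items were
+ * given, the high level's ebitmap; with a single item the high level
+ * is simply a copy of the low level.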
+ */ +static int mls_read_range_helper(struct mls_range *r, void *fp) +{ + __le32 buf[2]; + u32 items; + int rc; + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto out; + + items = le32_to_cpu(buf[0]); + if (items > ARRAY_SIZE(buf)) { + printk(KERN_ERR "security: mls: range overflow\n"); + rc = -EINVAL; + goto out; + } + rc = next_entry(buf, fp, sizeof(u32) * items); + if (rc < 0) { + printk(KERN_ERR "security: mls: truncated range\n"); + goto out; + } + r->level[0].sens = le32_to_cpu(buf[0]); + if (items > 1) + r->level[1].sens = le32_to_cpu(buf[1]); + else + r->level[1].sens = r->level[0].sens; + + rc = ebitmap_read(&r->level[0].cat, fp); + if (rc) { + printk(KERN_ERR "security: mls: error reading low " + "categories\n"); + goto out; + } + if (items > 1) { + rc = ebitmap_read(&r->level[1].cat, fp); + if (rc) { + printk(KERN_ERR "security: mls: error reading high " + "categories\n"); + goto bad_high; + } + } else { + rc = ebitmap_cpy(&r->level[1].cat, &r->level[0].cat); + if (rc) { + printk(KERN_ERR "security: mls: out of memory\n"); + goto bad_high; + } + } + + rc = 0; +out: + return rc; +bad_high: + ebitmap_destroy(&r->level[0].cat); + goto out; +} + +/* + * Read and validate a security context structure + * from a policydb binary representation file. + */ +static int context_read_and_validate(struct context *c, + struct policydb *p, + void *fp) +{ + __le32 buf[3]; + int rc; + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) { + printk(KERN_ERR "security: context truncated\n"); + goto out; + } + c->user = le32_to_cpu(buf[0]); + c->role = le32_to_cpu(buf[1]); + c->type = le32_to_cpu(buf[2]); + if (p->policyvers >= POLICYDB_VERSION_MLS) { + if (mls_read_range_helper(&c->range, fp)) { + printk(KERN_ERR "security: error reading MLS range of " + "context\n"); + rc = -EINVAL; + goto out; + } + } + + if (!policydb_context_isvalid(p, c)) { + printk(KERN_ERR "security: invalid security context\n"); + context_destroy(c); + rc = -EINVAL; + } +out: + return rc; +} + +/* + * The following *_read functions are used to + * read the symbol data from a policy database + * binary representation file. 
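+ *
+ * They all follow the same basic pattern; a permission record, for
+ * instance, is read as two le32 words (the key length and the
+ * permission value) followed by exactly that many key bytes, which are
+ * NUL-terminated in memory and inserted into the enclosing symbol
+ * table.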
+ */ + +static int perm_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct perm_datum *perdatum; + int rc; + __le32 buf[2]; + u32 len; + + perdatum = xmalloc(struct perm_datum); + if (!perdatum) { + rc = -ENOMEM; + goto out; + } + memset(perdatum, 0, sizeof(*perdatum)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + perdatum->value = le32_to_cpu(buf[1]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + rc = hashtab_insert(h, key, perdatum); + if (rc) + goto bad; +out: + return rc; +bad: + perm_destroy(key, perdatum, NULL); + goto out; +} + +static int common_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct common_datum *comdatum; + __le32 buf[4]; + u32 len, nel; + int i, rc; + + comdatum = xmalloc(struct common_datum); + if (!comdatum) { + rc = -ENOMEM; + goto out; + } + memset(comdatum, 0, sizeof(*comdatum)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + comdatum->value = le32_to_cpu(buf[1]); + + rc = symtab_init(&comdatum->permissions, PERM_SYMTAB_SIZE); + if (rc) + goto bad; + comdatum->permissions.nprim = le32_to_cpu(buf[2]); + nel = le32_to_cpu(buf[3]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + for (i = 0; i < nel; i++) { + rc = perm_read(p, comdatum->permissions.table, fp); + if (rc) + goto bad; + } + + rc = hashtab_insert(h, key, comdatum); + if (rc) + goto bad; +out: + return rc; +bad: + common_destroy(key, comdatum, NULL); + goto out; +} + +static int read_cons_helper(struct constraint_node **nodep, int ncons, + int allowxtarget, void *fp) +{ + struct constraint_node *c, *lc; + struct constraint_expr *e, *le; + __le32 buf[3]; + u32 nexpr; + int rc, i, j, depth; + + lc = NULL; + for (i = 0; i < ncons; i++) { + c = xmalloc(struct constraint_node); + if (!c) + return -ENOMEM; + memset(c, 0, sizeof(*c)); + + if (lc) { + lc->next = c; + } else { + *nodep = c; + } + + rc = next_entry(buf, fp, (sizeof(u32) * 2)); + if (rc < 0) + return rc; + c->permissions = le32_to_cpu(buf[0]); + nexpr = le32_to_cpu(buf[1]); + le = NULL; + depth = -1; + for (j = 0; j < nexpr; j++) { + e = xmalloc(struct constraint_expr); + if (!e) + return -ENOMEM; + memset(e, 0, sizeof(*e)); + + if (le) { + le->next = e; + } else { + c->expr = e; + } + + rc = next_entry(buf, fp, (sizeof(u32) * 3)); + if (rc < 0) + return rc; + e->expr_type = le32_to_cpu(buf[0]); + e->attr = le32_to_cpu(buf[1]); + e->op = le32_to_cpu(buf[2]); + + switch (e->expr_type) { + case CEXPR_NOT: + if (depth < 0) + return -EINVAL; + break; + case CEXPR_AND: + case CEXPR_OR: + if (depth < 1) + return -EINVAL; + depth--; + break; + case CEXPR_ATTR: + if (depth == (CEXPR_MAXDEPTH - 1)) + return -EINVAL; + depth++; + break; + case CEXPR_NAMES: + if (!allowxtarget && (e->attr & CEXPR_XTARGET)) + return -EINVAL; + if (depth == (CEXPR_MAXDEPTH - 1)) + return -EINVAL; + depth++; + if (ebitmap_read(&e->names, fp)) + return -EINVAL; + break; + default: + return -EINVAL; + } + le = e; + } + if (depth != 0) + return -EINVAL; + lc = c; + } + + return 0; +} + +static int class_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct class_datum *cladatum; + __le32 buf[6]; + u32 len, len2, ncons, nel; + int i, rc; + + 
cladatum = xmalloc(struct class_datum); + if (!cladatum) { + rc = -ENOMEM; + goto out; + } + memset(cladatum, 0, sizeof(*cladatum)); + + rc = next_entry(buf, fp, sizeof(u32)*6); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + len2 = le32_to_cpu(buf[1]); + cladatum->value = le32_to_cpu(buf[2]); + + rc = symtab_init(&cladatum->permissions, PERM_SYMTAB_SIZE); + if (rc) + goto bad; + cladatum->permissions.nprim = le32_to_cpu(buf[3]); + nel = le32_to_cpu(buf[4]); + + ncons = le32_to_cpu(buf[5]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + if (len2) { + cladatum->comkey = xmalloc_array(char, len2 + 1); + if (!cladatum->comkey) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(cladatum->comkey, fp, len2); + if (rc < 0) + goto bad; + cladatum->comkey[len2] = 0; + + cladatum->comdatum = hashtab_search(p->p_commons.table, + cladatum->comkey); + if (!cladatum->comdatum) { + printk(KERN_ERR "security: unknown common %s\n", + cladatum->comkey); + rc = -EINVAL; + goto bad; + } + } + for (i = 0; i < nel; i++) { + rc = perm_read(p, cladatum->permissions.table, fp); + if (rc) + goto bad; + } + + rc = read_cons_helper(&cladatum->constraints, ncons, 0, fp); + if (rc) + goto bad; + + if (p->policyvers >= POLICYDB_VERSION_VALIDATETRANS) { + /* grab the validatetrans rules */ + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + ncons = le32_to_cpu(buf[0]); + rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp); + if (rc) + goto bad; + } + + rc = hashtab_insert(h, key, cladatum); + if (rc) + goto bad; + + rc = 0; +out: + return rc; +bad: + class_destroy(key, cladatum, NULL); + goto out; +} + +static int role_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct role_datum *role; + int rc; + __le32 buf[2]; + u32 len; + + role = xmalloc(struct role_datum); + if (!role) { + rc = -ENOMEM; + goto out; + } + memset(role, 0, sizeof(*role)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + role->value = le32_to_cpu(buf[1]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + rc = ebitmap_read(&role->dominates, fp); + if (rc) + goto bad; + + rc = ebitmap_read(&role->types, fp); + if (rc) + goto bad; + + if (strcmp(key, OBJECT_R) == 0) { + if (role->value != OBJECT_R_VAL) { + printk(KERN_ERR "Role %s has wrong value %d\n", + OBJECT_R, role->value); + rc = -EINVAL; + goto bad; + } + rc = 0; + goto bad; + } + + rc = hashtab_insert(h, key, role); + if (rc) + goto bad; +out: + return rc; +bad: + role_destroy(key, role, NULL); + goto out; +} + +static int type_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct type_datum *typdatum; + int rc; + __le32 buf[3]; + u32 len; + + typdatum = xmalloc(struct type_datum); + if (!typdatum) { + rc = -ENOMEM; + return rc; + } + memset(typdatum, 0, sizeof(*typdatum)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + typdatum->value = le32_to_cpu(buf[1]); + typdatum->primary = le32_to_cpu(buf[2]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + rc = hashtab_insert(h, key, typdatum); + if (rc) + goto bad; +out: + return rc; +bad: + 
type_destroy(key, typdatum, NULL); + goto out; +} + + +/* + * Read a MLS level structure from a policydb binary + * representation file. + */ +static int mls_read_level(struct mls_level *lp, void *fp) +{ + __le32 buf[1]; + int rc; + + memset(lp, 0, sizeof(*lp)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) { + printk(KERN_ERR "security: mls: truncated level\n"); + goto bad; + } + lp->sens = le32_to_cpu(buf[0]); + + if (ebitmap_read(&lp->cat, fp)) { + printk(KERN_ERR "security: mls: error reading level " + "categories\n"); + goto bad; + } + return 0; + +bad: + return -EINVAL; +} + +static int user_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct user_datum *usrdatum; + int rc; + __le32 buf[2]; + u32 len; + + usrdatum = xmalloc(struct user_datum); + if (!usrdatum) { + rc = -ENOMEM; + goto out; + } + memset(usrdatum, 0, sizeof(*usrdatum)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + usrdatum->value = le32_to_cpu(buf[1]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + rc = ebitmap_read(&usrdatum->roles, fp); + if (rc) + goto bad; + + if (p->policyvers >= POLICYDB_VERSION_MLS) { + rc = mls_read_range_helper(&usrdatum->range, fp); + if (rc) + goto bad; + rc = mls_read_level(&usrdatum->dfltlevel, fp); + if (rc) + goto bad; + } + + rc = hashtab_insert(h, key, usrdatum); + if (rc) + goto bad; +out: + return rc; +bad: + user_destroy(key, usrdatum, NULL); + goto out; +} + +static int sens_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct level_datum *levdatum; + int rc; + __le32 buf[2]; + u32 len; + + levdatum = xmalloc(struct level_datum); + if (!levdatum) { + rc = -ENOMEM; + goto out; + } + memset(levdatum, 0, sizeof(*levdatum)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + levdatum->isalias = le32_to_cpu(buf[1]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + levdatum->level = xmalloc(struct mls_level); + if (!levdatum->level) { + rc = -ENOMEM; + goto bad; + } + if (mls_read_level(levdatum->level, fp)) { + rc = -EINVAL; + goto bad; + } + + rc = hashtab_insert(h, key, levdatum); + if (rc) + goto bad; +out: + return rc; +bad: + sens_destroy(key, levdatum, NULL); + goto out; +} + +static int cat_read(struct policydb *p, struct hashtab *h, void *fp) +{ + char *key = NULL; + struct cat_datum *catdatum; + int rc; + __le32 buf[3]; + u32 len; + + catdatum = xmalloc(struct cat_datum); + if (!catdatum) { + rc = -ENOMEM; + goto out; + } + memset(catdatum, 0, sizeof(*catdatum)); + + rc = next_entry(buf, fp, sizeof buf); + if (rc < 0) + goto bad; + + len = le32_to_cpu(buf[0]); + catdatum->value = le32_to_cpu(buf[1]); + catdatum->isalias = le32_to_cpu(buf[2]); + + key = xmalloc_array(char, len + 1); + if (!key) { + rc = -ENOMEM; + goto bad; + } + rc = next_entry(key, fp, len); + if (rc < 0) + goto bad; + key[len] = 0; + + rc = hashtab_insert(h, key, catdatum); + if (rc) + goto bad; +out: + return rc; + +bad: + cat_destroy(key, catdatum, NULL); + goto out; +} + +static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) = +{ + common_read, + class_read, + role_read, + type_read, + user_read, + cond_read_bool, + sens_read, + cat_read, +}; + +extern int ss_initialized; 
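+
+/*
+ * Layout of the binary policy image, as implied by the parsing code in
+ * policydb_read() below:
+ *
+ *   magic number and identifier string length, then the "SE Linux" string;
+ *   policy version, config flags, symbol table count and ocontext count;
+ *   for each symbol table: nprim and nel, followed by nel records read
+ *     through the read_f[] dispatch table above;
+ *   the TE access vector table (avtab_read);
+ *   conditional booleans and rules, for POLICYDB_VERSION_BOOL and later;
+ *   role transitions, then role allows (each a count plus records);
+ *   ocontext records for each OCON_* index (only OCON_ISID is parsed in
+ *     this port), followed by a genfs record count (parsing of the genfs
+ *     records themselves is commented out);
+ *   MLS range transitions, for POLICYDB_VERSION_MLS and later;
+ *   one type_attr_map ebitmap per type, for POLICYDB_VERSION_AVTAB and
+ *     later.
+ */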
+ +/* + * Read the configuration data from a policy database binary + * representation file into a policy database structure. + */ +int policydb_read(struct policydb *p, void *fp) +{ + struct role_allow *ra, *lra; + struct role_trans *tr, *ltr; + struct ocontext *l, *c /*, *newc*/; + int i, j, rc; + __le32 buf[8]; + u32 len, /*len2,*/ config, nprim, nel /*, nel2*/; + char *policydb_str; + struct policydb_compat_info *info; + struct range_trans *rt, *lrt; + + config = 0; + rc = policydb_init(p); + if (rc) + goto out; + + /* Read the magic number and string length. */ + rc = next_entry(buf, fp, sizeof(u32)* 2); + if (rc < 0) + goto bad; + + if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) { + printk(KERN_ERR "security: policydb magic number 0x%x does " + "not match expected magic number 0x%x\n", + le32_to_cpu(buf[0]), POLICYDB_MAGIC); + goto bad; + } + + len = le32_to_cpu(buf[1]); + if (len != strlen(POLICYDB_STRING)) { + printk(KERN_ERR "security: policydb string length %d does not " + "match expected length %Zu\n", + len, (u32) strlen(POLICYDB_STRING)); + goto bad; + } + policydb_str = xmalloc_array(char, len + 1); + if (!policydb_str) { + printk(KERN_ERR "security: unable to allocate memory for policydb " + "string of length %d\n", len); + rc = -ENOMEM; + goto bad; + } + rc = next_entry(policydb_str, fp, len); + if (rc < 0) { + printk(KERN_ERR "security: truncated policydb string identifier\n"); + xfree(policydb_str); + goto bad; + } + policydb_str[len] = 0; + if (strcmp(policydb_str, POLICYDB_STRING)) { + printk(KERN_ERR "security: policydb string %s does not match " + "my string %s\n", policydb_str, POLICYDB_STRING); + xfree(policydb_str); + goto bad; + } + /* Done with policydb_str. */ + xfree(policydb_str); + policydb_str = NULL; + + /* Read the version, config, and table sizes. 
*/ + rc = next_entry(buf, fp, sizeof(u32)*4); + if (rc < 0) + goto bad; + + p->policyvers = le32_to_cpu(buf[0]); + if (p->policyvers < POLICYDB_VERSION_MIN || + p->policyvers > POLICYDB_VERSION_MAX) { + printk(KERN_ERR "security: policydb version %d does not match " + "my version range %d-%d\n", + le32_to_cpu(buf[0]), POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX); + goto bad; + } + + if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) { + if (ss_initialized && !flask_mls_enabled) { + printk(KERN_ERR "Cannot switch between non-MLS and MLS " + "policies\n"); + goto bad; + } + flask_mls_enabled = 1; + config |= POLICYDB_CONFIG_MLS; + + if (p->policyvers < POLICYDB_VERSION_MLS) { + printk(KERN_ERR "security policydb version %d (MLS) " + "not backwards compatible\n", p->policyvers); + goto bad; + } + } else { + if (ss_initialized && flask_mls_enabled) { + printk(KERN_ERR "Cannot switch between MLS and non-MLS " + "policies\n"); + goto bad; + } + } + + info = policydb_lookup_compat(p->policyvers); + if (!info) { + printk(KERN_ERR "security: unable to find policy compat info " + "for version %d\n", p->policyvers); + goto bad; + } + + if (le32_to_cpu(buf[2]) != info->sym_num || + le32_to_cpu(buf[3]) != info->ocon_num) { + printk(KERN_ERR "security: policydb table sizes (%d,%d) do " + "not match mine (%d,%d)\n", le32_to_cpu(buf[2]), + le32_to_cpu(buf[3]), + info->sym_num, info->ocon_num); + goto bad; + } + + for (i = 0; i < info->sym_num; i++) { + rc = next_entry(buf, fp, sizeof(u32)*2); + if (rc < 0) + goto bad; + nprim = le32_to_cpu(buf[0]); + nel = le32_to_cpu(buf[1]); + for (j = 0; j < nel; j++) { + rc = read_f[i](p, p->symtab[i].table, fp); + if (rc) + goto bad; + } + + p->symtab[i].nprim = nprim; + } + + rc = avtab_read(&p->te_avtab, fp, p->policyvers); + if (rc) + goto bad; + + if (p->policyvers >= POLICYDB_VERSION_BOOL) { + rc = cond_read_list(p, fp); + if (rc) + goto bad; + } + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + nel = le32_to_cpu(buf[0]); + ltr = NULL; + for (i = 0; i < nel; i++) { + tr = xmalloc(struct role_trans); + if (!tr) { + rc = -ENOMEM; + goto bad; + } + memset(tr, 0, sizeof(*tr)); + if (ltr) { + ltr->next = tr; + } else { + p->role_tr = tr; + } + rc = next_entry(buf, fp, sizeof(u32)*3); + if (rc < 0) + goto bad; + tr->role = le32_to_cpu(buf[0]); + tr->type = le32_to_cpu(buf[1]); + tr->new_role = le32_to_cpu(buf[2]); + ltr = tr; + } + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + nel = le32_to_cpu(buf[0]); + lra = NULL; + for (i = 0; i < nel; i++) { + ra = xmalloc(struct role_allow); + if (!ra) { + rc = -ENOMEM; + goto bad; + } + memset(ra, 0, sizeof(*ra)); + if (lra) { + lra->next = ra; + } else { + p->role_allow = ra; + } + rc = next_entry(buf, fp, sizeof(u32)*2); + if (rc < 0) + goto bad; + ra->role = le32_to_cpu(buf[0]); + ra->new_role = le32_to_cpu(buf[1]); + lra = ra; + } + + rc = policydb_index_classes(p); + if (rc) + goto bad; + + rc = policydb_index_others(p); + if (rc) + goto bad; + + for (i = 0; i < info->ocon_num; i++) { + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + nel = le32_to_cpu(buf[0]); + l = NULL; + for (j = 0; j < nel; j++) { + c = xmalloc(struct ocontext); + if (!c) { + rc = -ENOMEM; + goto bad; + } + memset(c, 0, sizeof(*c)); + if (l) { + l->next = c; + } else { + p->ocontexts[i] = c; + } + l = c; + rc = -EINVAL; + switch (i) { + case OCON_ISID: + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + c->sid[0] = le32_to_cpu(buf[0]); + rc = 
context_read_and_validate(&c->context[0], p, fp); + if (rc) + goto bad; + break; + } + } + } + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + nel = le32_to_cpu(buf[0]); +/* genfs_p = NULL; + rc = -EINVAL; + for (i = 0; i < nel; i++) { + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + len = le32_to_cpu(buf[0]); + newgenfs = xmalloc(struct genfs); + if (!newgenfs) { + rc = -ENOMEM; + goto bad; + } + memset(newgenfs, 0, sizeof(*newgenfs)); + + newgenfs->fstype = xmalloc_array(char, len + 1); + if (!newgenfs->fstype) { + rc = -ENOMEM; + xfree(newgenfs); + goto bad; + } + rc = next_entry(newgenfs->fstype, fp, len); + if (rc < 0) { + xfree(newgenfs->fstype); + xfree(newgenfs); + goto bad; + } + newgenfs->fstype[len] = 0; + for (genfs_p = NULL, genfs = p->genfs; genfs; + genfs_p = genfs, genfs = genfs->next) { + if (strcmp(newgenfs->fstype, genfs->fstype) == 0) { + printk(KERN_ERR "security: dup genfs " + "fstype %s\n", newgenfs->fstype); + xfree(newgenfs->fstype); + xfree(newgenfs); + goto bad; + } + if (strcmp(newgenfs->fstype, genfs->fstype) < 0) + break; + } + newgenfs->next = genfs; + if (genfs_p) + genfs_p->next = newgenfs; + else + p->genfs = newgenfs; + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + nel2 = le32_to_cpu(buf[0]); + for (j = 0; j < nel2; j++) { + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + len = le32_to_cpu(buf[0]); + + newc = xmalloc(struct ocontext); + if (!newc) { + rc = -ENOMEM; + goto bad; + } + memset(newc, 0, sizeof(*newc)); + + newc->u.name = xmalloc_array(char, len + 1); + if (!newc->u.name) { + rc = -ENOMEM; + goto bad_newc; + } + rc = next_entry(newc->u.name, fp, len); + if (rc < 0) + goto bad_newc; + newc->u.name[len] = 0; + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad_newc; + newc->v.sclass = le32_to_cpu(buf[0]); + if (context_read_and_validate(&newc->context[0], p, fp)) + goto bad_newc; + for (l = NULL, c = newgenfs->head; c; + l = c, c = c->next) { + if (!strcmp(newc->u.name, c->u.name) && + (!c->v.sclass || !newc->v.sclass || + newc->v.sclass == c->v.sclass)) { + printk(KERN_ERR "security: dup genfs " + "entry (%s,%s)\n", + newgenfs->fstype, c->u.name); + goto bad_newc; + } + len = strlen(newc->u.name); + len2 = strlen(c->u.name); + if (len > len2) + break; + } + + newc->next = c; + if (l) + l->next = newc; + else + newgenfs->head = newc; + } + } +*/ + if (p->policyvers >= POLICYDB_VERSION_MLS) { + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + goto bad; + nel = le32_to_cpu(buf[0]); + lrt = NULL; + for (i = 0; i < nel; i++) { + rt = xmalloc(struct range_trans); + if (!rt) { + rc = -ENOMEM; + goto bad; + } + memset(rt, 0, sizeof(*rt)); + if (lrt) + lrt->next = rt; + else + p->range_tr = rt; + rc = next_entry(buf, fp, (sizeof(u32) * 2)); + if (rc < 0) + goto bad; + rt->dom = le32_to_cpu(buf[0]); + rt->type = le32_to_cpu(buf[1]); + rc = mls_read_range_helper(&rt->range, fp); + if (rc) + goto bad; + lrt = rt; + } + } + + p->type_attr_map = xmalloc_array(struct ebitmap, p->p_types.nprim); + if (!p->type_attr_map) + goto bad; + + for (i = 0; i < p->p_types.nprim; i++) { + ebitmap_init(&p->type_attr_map[i]); + if (p->policyvers >= POLICYDB_VERSION_AVTAB) { + if (ebitmap_read(&p->type_attr_map[i], fp)) + goto bad; + } + /* add the type itself as the degenerate case */ + if (ebitmap_set_bit(&p->type_attr_map[i], i, 1)) + goto bad; + } + + rc = 0; +out: + return rc; +/*bad_newc: + ocontext_destroy(newc,OCON_FSUSE);*/ +bad: + if (!rc) + rc = -EINVAL; + 
policydb_destroy(p); + goto out; +} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/policydb.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/policydb.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,257 @@ +/* + * A policy database (policydb) specifies the + * configuration data for the security policy. + * + * Author : Stephen Smalley, + */ + +/* + * Updated: Trusted Computer Solutions, Inc. + * + * Support for enhanced MLS infrastructure. + * + * Updated: Frank Mayer and Karl MacMillan + * + * Added conditional policy language extensions + * + * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. + * Copyright (C) 2003 - 2004 Tresys Technology, LLC + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + */ + +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SS_POLICYDB_H_ +#define _SS_POLICYDB_H_ + +#include "symtab.h" +#include "avtab.h" +#include "sidtab.h" +#include "context.h" +#include "constraint.h" + +/* + * A datum type is defined for each kind of symbol + * in the configuration data: individual permissions, + * common prefixes for access vectors, classes, + * users, roles, types, sensitivities, categories, etc. + */ + +/* Permission attributes */ +struct perm_datum { + u32 value; /* permission bit + 1 */ +}; + +/* Attributes of a common prefix for access vectors */ +struct common_datum { + u32 value; /* internal common value */ + struct symtab permissions; /* common permissions */ +}; + +/* Class attributes */ +struct class_datum { + u32 value; /* class value */ + char *comkey; /* common name */ + struct common_datum *comdatum; /* common datum */ + struct symtab permissions; /* class-specific permission symbol table */ + struct constraint_node *constraints; /* constraints on class permissions */ + struct constraint_node *validatetrans; /* special transition rules */ +}; + +/* Role attributes */ +struct role_datum { + u32 value; /* internal role value */ + struct ebitmap dominates; /* set of roles dominated by this role */ + struct ebitmap types; /* set of authorized types for role */ +}; + +struct role_trans { + u32 role; /* current role */ + u32 type; /* program executable type */ + u32 new_role; /* new role */ + struct role_trans *next; +}; + +struct role_allow { + u32 role; /* current role */ + u32 new_role; /* new role */ + struct role_allow *next; +}; + +/* Type attributes */ +struct type_datum { + u32 value; /* internal type value */ + unsigned char primary; /* primary name? */ +}; + +/* User attributes */ +struct user_datum { + u32 value; /* internal user value */ + struct ebitmap roles; /* set of authorized roles for user */ + struct mls_range range; /* MLS range (min - max) for user */ + struct mls_level dfltlevel; /* default login MLS level for user */ +}; + + +/* Sensitivity attributes */ +struct level_datum { + struct mls_level *level; /* sensitivity and associated categories */ + unsigned char isalias; /* is this sensitivity an alias for another? */ +}; + +/* Category attributes */ +struct cat_datum { + u32 value; /* internal category bit + 1 */ + unsigned char isalias; /* is this category an alias for another? 
*/ +}; + +struct range_trans { + u32 dom; /* current process domain */ + u32 type; /* program executable type */ + struct mls_range range; /* new range */ + struct range_trans *next; +}; + +/* Boolean data type */ +struct cond_bool_datum { + __u32 value; /* internal type value */ + int state; +}; + +struct cond_node; + +/* + * The configuration data includes security contexts for + * initial SIDs, unlabeled file systems, TCP and UDP port numbers, + * network interfaces, and nodes. This structure stores the + * relevant data for one such entry. Entries of the same kind + * (e.g. all initial SIDs) are linked together into a list. + */ +struct ocontext { + union { + char *name; /* name of initial SID, fs, netif, fstype, path */ + int pirq; + int virq; + int vcpu; + u32 ioport; + unsigned long iomem; + } u; + struct context context[2]; /* security context(s) */ + u32 sid[2]; /* SID(s) */ + struct ocontext *next; +}; + +/* symbol table array indices */ +#define SYM_COMMONS 0 +#define SYM_CLASSES 1 +#define SYM_ROLES 2 +#define SYM_TYPES 3 +#define SYM_USERS 4 +#define SYM_BOOLS 5 +#define SYM_LEVELS 6 +#define SYM_CATS 7 +#define SYM_NUM 8 + +/* object context array indices */ +#define OCON_ISID 0 /* initial SIDs */ +#define OCON_PIRQ 1 /* physical irqs */ +#define OCON_VIRQ 2 /* virtual irqs */ +#define OCON_VCPU 3 /* virtual cpus */ +#define OCON_IOPORT 4 /* io ports */ +#define OCON_IOMEM 5 /* io memory */ +#define OCON_DUMMY 6 +#define OCON_NUM 7 + +/* The policy database */ +struct policydb { + /* symbol tables */ + struct symtab symtab[SYM_NUM]; +#define p_commons symtab[SYM_COMMONS] +#define p_classes symtab[SYM_CLASSES] +#define p_roles symtab[SYM_ROLES] +#define p_types symtab[SYM_TYPES] +#define p_users symtab[SYM_USERS] +#define p_bools symtab[SYM_BOOLS] +#define p_levels symtab[SYM_LEVELS] +#define p_cats symtab[SYM_CATS] + + /* symbol names indexed by (value - 1) */ + char **sym_val_to_name[SYM_NUM]; +#define p_common_val_to_name sym_val_to_name[SYM_COMMONS] +#define p_class_val_to_name sym_val_to_name[SYM_CLASSES] +#define p_role_val_to_name sym_val_to_name[SYM_ROLES] +#define p_type_val_to_name sym_val_to_name[SYM_TYPES] +#define p_user_val_to_name sym_val_to_name[SYM_USERS] +#define p_bool_val_to_name sym_val_to_name[SYM_BOOLS] +#define p_sens_val_to_name sym_val_to_name[SYM_LEVELS] +#define p_cat_val_to_name sym_val_to_name[SYM_CATS] + + /* class, role, and user attributes indexed by (value - 1) */ + struct class_datum **class_val_to_struct; + struct role_datum **role_val_to_struct; + struct user_datum **user_val_to_struct; + + /* type enforcement access vectors and transitions */ + struct avtab te_avtab; + + /* role transitions */ + struct role_trans *role_tr; + + /* bools indexed by (value - 1) */ + struct cond_bool_datum **bool_val_to_struct; + /* type enforcement conditional access vectors and transitions */ + struct avtab te_cond_avtab; + /* linked list indexing te_cond_avtab by conditional */ + struct cond_node* cond_list; + + /* role allows */ + struct role_allow *role_allow; + + /* security contexts of initial SIDs, unlabeled file systems, + TCP or UDP port numbers, network interfaces and nodes */ + struct ocontext *ocontexts[OCON_NUM]; + + /* range transitions */ + struct range_trans *range_tr; + + /* type -> attribute reverse mapping */ + struct ebitmap *type_attr_map; + + unsigned int policyvers; +}; + +extern void policydb_destroy(struct policydb *p); +extern int policydb_load_isids(struct policydb *p, struct sidtab *s); +extern int policydb_context_isvalid(struct 
policydb *p, struct context *c); +extern int policydb_read(struct policydb *p, void *fp); + +#define PERM_SYMTAB_SIZE 32 + +#define POLICYDB_CONFIG_MLS 1 + +#define OBJECT_R "object_r" +#define OBJECT_R_VAL 1 + +#define POLICYDB_MAGIC SELINUX_MAGIC +#define POLICYDB_STRING "SE Linux" + +struct policy_file { + char *data; + size_t len; +}; + +static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) +{ + if (bytes > fp->len) + return -EINVAL; + + memcpy(buf, fp->data, bytes); + fp->data += bytes; + fp->len -= bytes; + return 0; +} + +#endif /* _SS_POLICYDB_H_ */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/services.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/services.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,1669 @@ +/* + * Implementation of the security services. + * + * Authors : Stephen Smalley, + * James Morris + * + * Updated: Trusted Computer Solutions, Inc. + * + * Support for enhanced MLS infrastructure. + * + * Updated: Frank Mayer and Karl MacMillan + * + * Added conditional policy language extensions + * + * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. + * Copyright (C) 2003 - 2004 Tresys Technology, LLC + * Copyright (C) 2003 Red Hat, Inc., James Morris + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2. + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include +#include "flask.h" +#include "avc.h" +#include "avc_ss.h" +#include "security.h" +#include "context.h" +#include "policydb.h" +#include "sidtab.h" +#include "services.h" +#include "conditional.h" +#include "mls.h" + +unsigned int policydb_loaded_version; + +static DEFINE_RWLOCK(policy_rwlock); +#define POLICY_RDLOCK read_lock(&policy_rwlock) +#define POLICY_WRLOCK write_lock_irq(&policy_rwlock) +#define POLICY_RDUNLOCK read_unlock(&policy_rwlock) +#define POLICY_WRUNLOCK write_unlock_irq(&policy_rwlock) + +static DEFINE_SPINLOCK(load_sem); +#define LOAD_LOCK spin_lock(&load_sem) +#define LOAD_UNLOCK spin_unlock(&load_sem) + +static struct sidtab sidtab; +struct policydb policydb; +int ss_initialized = 0; + +/* + * The largest sequence number that has been used when + * providing an access decision to the access vector cache. + * The sequence number only changes when a policy change + * occurs. + */ +static u32 latest_granting = 0; + +/* Forward declaration. */ +static int context_struct_to_string(struct context *context, char **scontext, + u32 *scontext_len); + +/* + * Return the boolean value of a constraint expression + * when it is applied to the specified source and target + * security contexts. + * + * xcontext is a special beast... It is used by the validatetrans rules + * only. For these rules, scontext is the context before the transition, + * tcontext is the context after the transition, and xcontext is the context + * of the process performing the transition. All other callers of + * constraint_expr_eval should pass in NULL for xcontext. 
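+ *
+ * The expression is stored in postfix order; the loop below evaluates it
+ * with an explicit boolean stack of at most CEXPR_MAXDEPTH entries.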
+ */ +static int constraint_expr_eval(struct context *scontext, + struct context *tcontext, + struct context *xcontext, + struct constraint_expr *cexpr) +{ + u32 val1, val2; + struct context *c; + struct role_datum *r1, *r2; + struct mls_level *l1, *l2; + struct constraint_expr *e; + int s[CEXPR_MAXDEPTH]; + int sp = -1; + + for (e = cexpr; e; e = e->next) { + switch (e->expr_type) { + case CEXPR_NOT: + BUG_ON(sp < 0); + s[sp] = !s[sp]; + break; + case CEXPR_AND: + BUG_ON(sp < 1); + sp--; + s[sp] &= s[sp+1]; + break; + case CEXPR_OR: + BUG_ON(sp < 1); + sp--; + s[sp] |= s[sp+1]; + break; + case CEXPR_ATTR: + if (sp == (CEXPR_MAXDEPTH-1)) + return 0; + switch (e->attr) { + case CEXPR_USER: + val1 = scontext->user; + val2 = tcontext->user; + break; + case CEXPR_TYPE: + val1 = scontext->type; + val2 = tcontext->type; + break; + case CEXPR_ROLE: + val1 = scontext->role; + val2 = tcontext->role; + r1 = policydb.role_val_to_struct[val1 - 1]; + r2 = policydb.role_val_to_struct[val2 - 1]; + switch (e->op) { + case CEXPR_DOM: + s[++sp] = ebitmap_get_bit(&r1->dominates, + val2 - 1); + continue; + case CEXPR_DOMBY: + s[++sp] = ebitmap_get_bit(&r2->dominates, + val1 - 1); + continue; + case CEXPR_INCOMP: + s[++sp] = ( !ebitmap_get_bit(&r1->dominates, + val2 - 1) && + !ebitmap_get_bit(&r2->dominates, + val1 - 1) ); + continue; + default: + break; + } + break; + case CEXPR_L1L2: + l1 = &(scontext->range.level[0]); + l2 = &(tcontext->range.level[0]); + goto mls_ops; + case CEXPR_L1H2: + l1 = &(scontext->range.level[0]); + l2 = &(tcontext->range.level[1]); + goto mls_ops; + case CEXPR_H1L2: + l1 = &(scontext->range.level[1]); + l2 = &(tcontext->range.level[0]); + goto mls_ops; + case CEXPR_H1H2: + l1 = &(scontext->range.level[1]); + l2 = &(tcontext->range.level[1]); + goto mls_ops; + case CEXPR_L1H1: + l1 = &(scontext->range.level[0]); + l2 = &(scontext->range.level[1]); + goto mls_ops; + case CEXPR_L2H2: + l1 = &(tcontext->range.level[0]); + l2 = &(tcontext->range.level[1]); + goto mls_ops; +mls_ops: + switch (e->op) { + case CEXPR_EQ: + s[++sp] = mls_level_eq(l1, l2); + continue; + case CEXPR_NEQ: + s[++sp] = !mls_level_eq(l1, l2); + continue; + case CEXPR_DOM: + s[++sp] = mls_level_dom(l1, l2); + continue; + case CEXPR_DOMBY: + s[++sp] = mls_level_dom(l2, l1); + continue; + case CEXPR_INCOMP: + s[++sp] = mls_level_incomp(l2, l1); + continue; + default: + BUG(); + return 0; + } + break; + default: + BUG(); + return 0; + } + + switch (e->op) { + case CEXPR_EQ: + s[++sp] = (val1 == val2); + break; + case CEXPR_NEQ: + s[++sp] = (val1 != val2); + break; + default: + BUG(); + return 0; + } + break; + case CEXPR_NAMES: + if (sp == (CEXPR_MAXDEPTH-1)) + return 0; + c = scontext; + if (e->attr & CEXPR_TARGET) + c = tcontext; + else if (e->attr & CEXPR_XTARGET) { + c = xcontext; + if (!c) { + BUG(); + return 0; + } + } + if (e->attr & CEXPR_USER) + val1 = c->user; + else if (e->attr & CEXPR_ROLE) + val1 = c->role; + else if (e->attr & CEXPR_TYPE) + val1 = c->type; + else { + BUG(); + return 0; + } + + switch (e->op) { + case CEXPR_EQ: + s[++sp] = ebitmap_get_bit(&e->names, val1 - 1); + break; + case CEXPR_NEQ: + s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1); + break; + default: + BUG(); + return 0; + } + break; + default: + BUG(); + return 0; + } + } + + BUG_ON(sp != 0); + return s[0]; +} + +/* + * Compute access vectors based on a context structure pair for + * the permissions in a particular class. 
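+ *
+ * The decision is built from the unconditional and conditional TE rules
+ * (with source and target types expanded through type_attr_map), then
+ * restricted by the class constraints, which include the MLS policy.
+ * For SECCLASS_DOMAIN the role-allow list is also consulted when the
+ * role changes.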
+ */ +static int context_struct_compute_av(struct context *scontext, + struct context *tcontext, + u16 tclass, + u32 requested, + struct av_decision *avd) +{ + struct constraint_node *constraint; + struct role_allow *ra; + struct avtab_key avkey; + struct avtab_node *node; + struct class_datum *tclass_datum; + struct ebitmap *sattr, *tattr; + struct ebitmap_node *snode, *tnode; + unsigned int i, j; + + if (!tclass || tclass > policydb.p_classes.nprim) { + printk(KERN_ERR "security_compute_av: unrecognized class %d\n", + tclass); + return -EINVAL; + } + tclass_datum = policydb.class_val_to_struct[tclass - 1]; + + /* + * Initialize the access vectors to the default values. + */ + avd->allowed = 0; + avd->decided = 0xffffffff; + avd->auditallow = 0; + avd->auditdeny = 0xffffffff; + avd->seqno = latest_granting; + + /* + * If a specific type enforcement rule was defined for + * this permission check, then use it. + */ + avkey.target_class = tclass; + avkey.specified = AVTAB_AV; + sattr = &policydb.type_attr_map[scontext->type - 1]; + tattr = &policydb.type_attr_map[tcontext->type - 1]; + ebitmap_for_each_bit(sattr, snode, i) { + if (!ebitmap_node_get_bit(snode, i)) + continue; + ebitmap_for_each_bit(tattr, tnode, j) { + if (!ebitmap_node_get_bit(tnode, j)) + continue; + avkey.source_type = i + 1; + avkey.target_type = j + 1; + for (node = avtab_search_node(&policydb.te_avtab, &avkey); + node != NULL; + node = avtab_search_node_next(node, avkey.specified)) { + if (node->key.specified == AVTAB_ALLOWED) + avd->allowed |= node->datum.data; + else if (node->key.specified == AVTAB_AUDITALLOW) + avd->auditallow |= node->datum.data; + else if (node->key.specified == AVTAB_AUDITDENY) + avd->auditdeny &= node->datum.data; + } + + /* Check conditional av table for additional permissions */ + cond_compute_av(&policydb.te_cond_avtab, &avkey, avd); + + } + } + + /* + * Remove any permissions prohibited by a constraint (this includes + * the MLS policy). + */ + constraint = tclass_datum->constraints; + while (constraint) { + if ((constraint->permissions & (avd->allowed)) && + !constraint_expr_eval(scontext, tcontext, NULL, + constraint->expr)) { + avd->allowed = (avd->allowed) & ~(constraint->permissions); + } + constraint = constraint->next; + } + + /* + * If checking process transition permission and the + * role is changing, then check the (current_role, new_role) + * pair. 
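+     * Note that until the dynamic domain capability is added, the lookup
+     * below does not actually restrict avd->allowed; that restriction is
+     * commented out.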
+ */ + if (tclass == SECCLASS_DOMAIN && +/* removed until future dynamic domain capability + (avd->allowed & (DOMAIN__TRANSITION | DOMAIN__DYNTRANSITION)) && +*/ + scontext->role != tcontext->role) { + for (ra = policydb.role_allow; ra; ra = ra->next) { + if (scontext->role == ra->role && + tcontext->role == ra->new_role) + break; + } +/* removed until future dynamic domain capability + if (!ra) + avd->allowed = (avd->allowed) & ~(DOMAIN__TRANSITION | + DOMAIN__DYNTRANSITION); +*/ + } + + return 0; +} + +static int security_validtrans_handle_fail(struct context *ocontext, + struct context *ncontext, + struct context *tcontext, + u16 tclass) +{ + char *o = NULL, *n = NULL, *t = NULL; + u32 olen, nlen, tlen; + + if (context_struct_to_string(ocontext, &o, &olen) < 0) + goto out; + if (context_struct_to_string(ncontext, &n, &nlen) < 0) + goto out; + if (context_struct_to_string(tcontext, &t, &tlen) < 0) + goto out; + printk("security_validate_transition: denied for" + " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s", + o, n, t, policydb.p_class_val_to_name[tclass-1]); +out: + xfree(o); + xfree(n); + xfree(t); + + if (!flask_enforcing) + return 0; + return -EPERM; +} + +int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, + u16 tclass) +{ + struct context *ocontext; + struct context *ncontext; + struct context *tcontext; + struct class_datum *tclass_datum; + struct constraint_node *constraint; + int rc = 0; + + if (!ss_initialized) + return 0; + + POLICY_RDLOCK; + + if (!tclass || tclass > policydb.p_classes.nprim) { + printk(KERN_ERR "security_validate_transition: " + "unrecognized class %d\n", tclass); + rc = -EINVAL; + goto out; + } + tclass_datum = policydb.class_val_to_struct[tclass - 1]; + + ocontext = sidtab_search(&sidtab, oldsid); + if (!ocontext) { + printk(KERN_ERR "security_validate_transition: " + " unrecognized SID %d\n", oldsid); + rc = -EINVAL; + goto out; + } + + ncontext = sidtab_search(&sidtab, newsid); + if (!ncontext) { + printk(KERN_ERR "security_validate_transition: " + " unrecognized SID %d\n", newsid); + rc = -EINVAL; + goto out; + } + + tcontext = sidtab_search(&sidtab, tasksid); + if (!tcontext) { + printk(KERN_ERR "security_validate_transition: " + " unrecognized SID %d\n", tasksid); + rc = -EINVAL; + goto out; + } + + constraint = tclass_datum->validatetrans; + while (constraint) { + if (!constraint_expr_eval(ocontext, ncontext, tcontext, + constraint->expr)) { + rc = security_validtrans_handle_fail(ocontext, ncontext, + tcontext, tclass); + goto out; + } + constraint = constraint->next; + } + +out: + POLICY_RDUNLOCK; + return rc; +} + +/** + * security_compute_av - Compute access vector decisions. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @requested: requested permissions + * @avd: access vector decisions + * + * Compute a set of access vector decisions based on the + * SID pair (@ssid, @tsid) for the permissions in @tclass. + * Return -%EINVAL if any of the parameters are invalid or %0 + * if the access vector decisions were computed successfully. 
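+ *
+ * Before the initial policy load (!ss_initialized) every permission is
+ * granted, as implemented at the top of the function below.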
+ */ +int security_compute_av(u32 ssid, + u32 tsid, + u16 tclass, + u32 requested, + struct av_decision *avd) +{ + struct context *scontext = NULL, *tcontext = NULL; + int rc = 0; + + if (!ss_initialized) { + avd->allowed = 0xffffffff; + avd->decided = 0xffffffff; + avd->auditallow = 0; + avd->auditdeny = 0xffffffff; + avd->seqno = latest_granting; + return 0; + } + + POLICY_RDLOCK; + + scontext = sidtab_search(&sidtab, ssid); + if (!scontext) { + printk("security_compute_av: unrecognized SID %d\n", + ssid); + rc = -EINVAL; + goto out; + } + tcontext = sidtab_search(&sidtab, tsid); + if (!tcontext) { + printk("security_compute_av: unrecognized SID %d\n", + tsid); + rc = -EINVAL; + goto out; + } + + rc = context_struct_compute_av(scontext, tcontext, tclass, + requested, avd); +out: + POLICY_RDUNLOCK; + return rc; +} + +/* + * Write the security context string representation of + * the context structure `context' into a dynamically + * allocated string of the correct size. Set `*scontext' + * to point to this string and set `*scontext_len' to + * the length of the string. + */ +static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len) +{ + char *scontextp; + + *scontext = NULL; + *scontext_len = 0; + + /* Compute the size of the context. */ + *scontext_len += strlen(policydb.p_user_val_to_name[context->user - 1]) + 1; + *scontext_len += strlen(policydb.p_role_val_to_name[context->role - 1]) + 1; + *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1; + *scontext_len += mls_compute_context_len(context); + + /* Allocate space for the context; caller must free this space. */ + scontextp = xmalloc_array(char, *scontext_len); + if (!scontextp) { + return -ENOMEM; + } + *scontext = scontextp; + + /* + * Copy the user name, role name and type name into the context. + */ + sprintf(scontextp, "%s:%s:%s", + policydb.p_user_val_to_name[context->user - 1], + policydb.p_role_val_to_name[context->role - 1], + policydb.p_type_val_to_name[context->type - 1]); + scontextp += strlen(policydb.p_user_val_to_name[context->user - 1]) + + 1 + strlen(policydb.p_role_val_to_name[context->role - 1]) + + 1 + strlen(policydb.p_type_val_to_name[context->type - 1]); + + mls_sid_to_context(context, &scontextp); + + *scontextp = 0; + + return 0; +} + +#include "initial_sid_to_string.h" + +/** + * security_sid_to_context - Obtain a context for a given SID. + * @sid: security identifier, SID + * @scontext: security context + * @scontext_len: length in bytes + * + * Write the string representation of the context associated with @sid + * into a dynamically allocated string of the correct size. Set @scontext + * to point to this string and set @scontext_len to the length of the string. 
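+ *
+ * The caller is responsible for freeing @scontext with xfree().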
+ */ +int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len) +{ + struct context *context; + int rc = 0; + + if (!ss_initialized) { + if (sid <= SECINITSID_NUM) { + char *scontextp; + + *scontext_len = strlen(initial_sid_to_string[sid]) + 1; + scontextp = xmalloc_array(char, *scontext_len); + strcpy(scontextp, initial_sid_to_string[sid]); + *scontext = scontextp; + goto out; + } + printk(KERN_ERR "security_sid_to_context: called before initial " + "load_policy on unknown SID %d\n", sid); + rc = -EINVAL; + goto out; + } + POLICY_RDLOCK; + context = sidtab_search(&sidtab, sid); + if (!context) { + printk(KERN_ERR "security_sid_to_context: unrecognized SID " + "%d\n", sid); + rc = -EINVAL; + goto out_unlock; + } + rc = context_struct_to_string(context, scontext, scontext_len); +out_unlock: + POLICY_RDUNLOCK; +out: + return rc; + +} + +static int security_context_to_sid_core(char *scontext, u32 scontext_len, u32 *sid, u32 def_sid) +{ + char *scontext2; + struct context context; + struct role_datum *role; + struct type_datum *typdatum; + struct user_datum *usrdatum; + char *scontextp, *p, oldc; + int rc = 0; + + if (!ss_initialized) { + int i; + + for (i = 1; i < SECINITSID_NUM; i++) { + if (!strcmp(initial_sid_to_string[i], scontext)) { + *sid = i; + goto out; + } + } + *sid = SECINITSID_XEN; + goto out; + } + *sid = SECSID_NULL; + + /* Copy the string so that we can modify the copy as we parse it. + The string should already by null terminated, but we append a + null suffix to the copy to avoid problems with the existing + attr package, which doesn't view the null terminator as part + of the attribute value. */ + scontext2 = xmalloc_array(char, scontext_len+1); + if (!scontext2) { + rc = -ENOMEM; + goto out; + } + memcpy(scontext2, scontext, scontext_len); + scontext2[scontext_len] = 0; + + context_init(&context); + *sid = SECSID_NULL; + + POLICY_RDLOCK; + + /* Parse the security context. */ + + rc = -EINVAL; + scontextp = (char *) scontext2; + + /* Extract the user. */ + p = scontextp; + while (*p && *p != ':') + p++; + + if (*p == 0) + goto out_unlock; + + *p++ = 0; + + usrdatum = hashtab_search(policydb.p_users.table, scontextp); + if (!usrdatum) + goto out_unlock; + + context.user = usrdatum->value; + + /* Extract role. */ + scontextp = p; + while (*p && *p != ':') + p++; + + if (*p == 0) + goto out_unlock; + + *p++ = 0; + + role = hashtab_search(policydb.p_roles.table, scontextp); + if (!role) + goto out_unlock; + context.role = role->value; + + /* Extract type. */ + scontextp = p; + while (*p && *p != ':') + p++; + oldc = *p; + *p++ = 0; + + typdatum = hashtab_search(policydb.p_types.table, scontextp); + if (!typdatum) + goto out_unlock; + + context.type = typdatum->value; + + rc = mls_context_to_sid(oldc, &p, &context, &sidtab, def_sid); + if (rc) + goto out_unlock; + + if ((p - scontext2) < scontext_len) { + rc = -EINVAL; + goto out_unlock; + } + + /* Check the validity of the new context. */ + if (!policydb_context_isvalid(&policydb, &context)) { + rc = -EINVAL; + goto out_unlock; + } + /* Obtain the new sid. */ + rc = sidtab_context_to_sid(&sidtab, &context, sid); +out_unlock: + POLICY_RDUNLOCK; + context_destroy(&context); + xfree(scontext2); +out: + return rc; +} + +/** + * security_context_to_sid - Obtain a SID for a given security context. + * @scontext: security context + * @scontext_len: length in bytes + * @sid: security identifier, SID + * + * Obtains a SID associated with the security context that + * has the string representation specified by @scontext. 
+ * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient + * memory is available, or 0 on success. + */ +int security_context_to_sid(char *scontext, u32 scontext_len, u32 *sid) +{ + return security_context_to_sid_core(scontext, scontext_len, + sid, SECSID_NULL); +} + +/** + * security_context_to_sid_default - Obtain a SID for a given security context, + * falling back to specified default if needed. + * + * @scontext: security context + * @scontext_len: length in bytes + * @sid: security identifier, SID + * @def_sid: default SID to assign on errror + * + * Obtains a SID associated with the security context that + * has the string representation specified by @scontext. + * The default SID is passed to the MLS layer to be used to allow + * kernel labeling of the MLS field if the MLS field is not present + * (for upgrading to MLS without full relabel). + * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient + * memory is available, or 0 on success. + */ +int security_context_to_sid_default(char *scontext, u32 scontext_len, u32 *sid, u32 def_sid) +{ + return security_context_to_sid_core(scontext, scontext_len, + sid, def_sid); +} + +static int compute_sid_handle_invalid_context( + struct context *scontext, + struct context *tcontext, + u16 tclass, + struct context *newcontext) +{ + char *s = NULL, *t = NULL, *n = NULL; + u32 slen, tlen, nlen; + + if (context_struct_to_string(scontext, &s, &slen) < 0) + goto out; + if (context_struct_to_string(tcontext, &t, &tlen) < 0) + goto out; + if (context_struct_to_string(newcontext, &n, &nlen) < 0) + goto out; + printk("security_compute_sid: invalid context %s" + " for scontext=%s" + " tcontext=%s" + " tclass=%s", + n, s, t, policydb.p_class_val_to_name[tclass-1]); +out: + xfree(s); + xfree(t); + xfree(n); + if (!flask_enforcing) + return 0; + return -EACCES; +} + +static int security_compute_sid(u32 ssid, + u32 tsid, + u16 tclass, + u32 specified, + u32 *out_sid) +{ + struct context *scontext = NULL, *tcontext = NULL, newcontext; + struct role_trans *roletr = NULL; + struct avtab_key avkey; + struct avtab_datum *avdatum; + struct avtab_node *node; + int rc = 0; + + if (!ss_initialized) { + switch (tclass) { + case SECCLASS_DOMAIN: + *out_sid = ssid; + break; + default: + *out_sid = tsid; + break; + } + goto out; + } + + POLICY_RDLOCK; + + scontext = sidtab_search(&sidtab, ssid); + if (!scontext) { + printk(KERN_ERR "security_compute_sid: unrecognized SID %d\n", + ssid); + rc = -EINVAL; + goto out_unlock; + } + tcontext = sidtab_search(&sidtab, tsid); + if (!tcontext) { + printk(KERN_ERR "security_compute_sid: unrecognized SID %d\n", + tsid); + rc = -EINVAL; + goto out_unlock; + } + + context_init(&newcontext); + + /* Set the user identity. */ + switch (specified) { + case AVTAB_TRANSITION: + case AVTAB_CHANGE: + /* Use the process user identity. */ + newcontext.user = scontext->user; + break; + case AVTAB_MEMBER: + /* Use the related object owner. */ + newcontext.user = tcontext->user; + break; + } + + /* Set the role and type to default values. */ + switch (tclass) { + case SECCLASS_DOMAIN: + /* Use the current role and type of process. */ + newcontext.role = scontext->role; + newcontext.type = scontext->type; + break; + default: + /* Use the well-defined object role. */ + newcontext.role = OBJECT_R_VAL; + /* Use the type of the related object. */ + newcontext.type = tcontext->type; + } + + /* Look for a type transition/member/change rule. 
*/ + avkey.source_type = scontext->type; + avkey.target_type = tcontext->type; + avkey.target_class = tclass; + avkey.specified = specified; + avdatum = avtab_search(&policydb.te_avtab, &avkey); + + /* If no permanent rule, also check for enabled conditional rules */ + if(!avdatum) { + node = avtab_search_node(&policydb.te_cond_avtab, &avkey); + for (; node != NULL; node = avtab_search_node_next(node, specified)) { + if (node->key.specified & AVTAB_ENABLED) { + avdatum = &node->datum; + break; + } + } + } + + if (avdatum) { + /* Use the type from the type transition/member/change rule. */ + newcontext.type = avdatum->data; + } + + /* Check for class-specific changes. */ + switch (tclass) { + case SECCLASS_DOMAIN: + if (specified & AVTAB_TRANSITION) { + /* Look for a role transition rule. */ + for (roletr = policydb.role_tr; roletr; + roletr = roletr->next) { + if (roletr->role == scontext->role && + roletr->type == tcontext->type) { + /* Use the role transition rule. */ + newcontext.role = roletr->new_role; + break; + } + } + } + break; + default: + break; + } + + /* Set the MLS attributes. + This is done last because it may allocate memory. */ + rc = mls_compute_sid(scontext, tcontext, tclass, specified, &newcontext); + if (rc) + goto out_unlock; + + /* Check the validity of the context. */ + if (!policydb_context_isvalid(&policydb, &newcontext)) { + rc = compute_sid_handle_invalid_context(scontext, + tcontext, + tclass, + &newcontext); + if (rc) + goto out_unlock; + } + /* Obtain the sid for the context. */ + rc = sidtab_context_to_sid(&sidtab, &newcontext, out_sid); +out_unlock: + POLICY_RDUNLOCK; + context_destroy(&newcontext); +out: + return rc; +} + +/** + * security_transition_sid - Compute the SID for a new subject/object. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @out_sid: security identifier for new subject/object + * + * Compute a SID to use for labeling a new subject or object in the + * class @tclass based on a SID pair (@ssid, @tsid). + * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM + * if insufficient memory is available, or %0 if the new SID was + * computed successfully. + */ +int security_transition_sid(u32 ssid, + u32 tsid, + u16 tclass, + u32 *out_sid) +{ + return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, out_sid); +} + +/** + * security_member_sid - Compute the SID for member selection. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @out_sid: security identifier for selected member + * + * Compute a SID to use when selecting a member of a polyinstantiated + * object of class @tclass based on a SID pair (@ssid, @tsid). + * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM + * if insufficient memory is available, or %0 if the SID was + * computed successfully. + */ +int security_member_sid(u32 ssid, + u32 tsid, + u16 tclass, + u32 *out_sid) +{ + return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid); +} + +/** + * security_change_sid - Compute the SID for object relabeling. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @out_sid: security identifier for selected member + * + * Compute a SID to use for relabeling an object of class @tclass + * based on a SID pair (@ssid, @tsid). 
+ * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM + * if insufficient memory is available, or %0 if the SID was + * computed successfully. + */ +int security_change_sid(u32 ssid, + u32 tsid, + u16 tclass, + u32 *out_sid) +{ + return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid); +} + +/* + * Verify that each permission that is defined under the + * existing policy is still defined with the same value + * in the new policy. + */ +static int validate_perm(void *key, void *datum, void *p) +{ + struct hashtab *h; + struct perm_datum *perdatum, *perdatum2; + int rc = 0; + + + h = p; + perdatum = datum; + + perdatum2 = hashtab_search(h, key); + if (!perdatum2) { + printk(KERN_ERR "security: permission %s disappeared", + (char *)key); + rc = -ENOENT; + goto out; + } + if (perdatum->value != perdatum2->value) { + printk(KERN_ERR "security: the value of permission %s changed", + (char *)key); + rc = -EINVAL; + } +out: + return rc; +} + +/* + * Verify that each class that is defined under the + * existing policy is still defined with the same + * attributes in the new policy. + */ +static int validate_class(void *key, void *datum, void *p) +{ + struct policydb *newp; + struct class_datum *cladatum, *cladatum2; + int rc; + + newp = p; + cladatum = datum; + + cladatum2 = hashtab_search(newp->p_classes.table, key); + if (!cladatum2) { + printk(KERN_ERR "security: class %s disappeared\n", + (char *)key); + rc = -ENOENT; + goto out; + } + if (cladatum->value != cladatum2->value) { + printk(KERN_ERR "security: the value of class %s changed\n", + (char *)key); + rc = -EINVAL; + goto out; + } + if ((cladatum->comdatum && !cladatum2->comdatum) || + (!cladatum->comdatum && cladatum2->comdatum)) { + printk(KERN_ERR "security: the inherits clause for the access " + "vector definition for class %s changed\n", (char *)key); + rc = -EINVAL; + goto out; + } + if (cladatum->comdatum) { + rc = hashtab_map(cladatum->comdatum->permissions.table, validate_perm, + cladatum2->comdatum->permissions.table); + if (rc) { + printk(" in the access vector definition for class " + "%s\n", (char *)key); + goto out; + } + } + rc = hashtab_map(cladatum->permissions.table, validate_perm, + cladatum2->permissions.table); + if (rc) + printk(" in access vector definition for class %s\n", + (char *)key); +out: + return rc; +} + +/* Clone the SID into the new SID table. */ +static int clone_sid(u32 sid, + struct context *context, + void *arg) +{ + struct sidtab *s = arg; + + return sidtab_insert(s, sid, context); +} + +static inline int convert_context_handle_invalid_context(struct context *context) +{ + int rc = 0; + + if (flask_enforcing) { + rc = -EINVAL; + } else { + char *s; + u32 len; + + context_struct_to_string(context, &s, &len); + printk(KERN_ERR "security: context %s is invalid\n", s); + xfree(s); + } + return rc; +} + +struct convert_context_args { + struct policydb *oldp; + struct policydb *newp; +}; + +/* + * Convert the values in the security context + * structure `c' from the values specified + * in the policy `p->oldp' to the values specified + * in the policy `p->newp'. Verify that the + * context is valid under the new policy. 
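+ *
+ * On any conversion failure the old context is logged and the caller,
+ * sidtab_map_remove_on_error(), drops the corresponding SID.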
+ */ +static int convert_context(u32 key, + struct context *c, + void *p) +{ + struct convert_context_args *args; + struct context oldc; + struct role_datum *role; + struct type_datum *typdatum; + struct user_datum *usrdatum; + char *s; + u32 len; + int rc; + + args = p; + + rc = context_cpy(&oldc, c); + if (rc) + goto out; + + rc = -EINVAL; + + /* Convert the user. */ + usrdatum = hashtab_search(args->newp->p_users.table, + args->oldp->p_user_val_to_name[c->user - 1]); + if (!usrdatum) { + goto bad; + } + c->user = usrdatum->value; + + /* Convert the role. */ + role = hashtab_search(args->newp->p_roles.table, + args->oldp->p_role_val_to_name[c->role - 1]); + if (!role) { + goto bad; + } + c->role = role->value; + + /* Convert the type. */ + typdatum = hashtab_search(args->newp->p_types.table, + args->oldp->p_type_val_to_name[c->type - 1]); + if (!typdatum) { + goto bad; + } + c->type = typdatum->value; + + rc = mls_convert_context(args->oldp, args->newp, c); + if (rc) + goto bad; + + /* Check the validity of the new context. */ + if (!policydb_context_isvalid(args->newp, c)) { + rc = convert_context_handle_invalid_context(&oldc); + if (rc) + goto bad; + } + + context_destroy(&oldc); +out: + return rc; +bad: + context_struct_to_string(&oldc, &s, &len); + context_destroy(&oldc); + printk(KERN_ERR "security: invalidating context %s\n", s); + xfree(s); + goto out; +} + +extern void flask_complete_init(void); + +/** + * security_load_policy - Load a security policy configuration. + * @data: binary policy data + * @len: length of data in bytes + * + * Load a new set of security policy configuration data, + * validate it and convert the SID table as necessary. + * This function will flush the access vector cache after + * loading the new policy. + */ +int security_load_policy(void *data, size_t len) +{ + struct policydb oldpolicydb, newpolicydb; + struct sidtab oldsidtab, newsidtab; + struct convert_context_args args; + u32 seqno; + int rc = 0; + struct policy_file file = { data, len }, *fp = &file; + + LOAD_LOCK; + + if (!ss_initialized) { + if (policydb_read(&policydb, fp)) { + LOAD_UNLOCK; + return -EINVAL; + } + if (policydb_load_isids(&policydb, &sidtab)) { + LOAD_UNLOCK; + policydb_destroy(&policydb); + return -EINVAL; + } + policydb_loaded_version = policydb.policyvers; + ss_initialized = 1; + seqno = ++latest_granting; + LOAD_UNLOCK; + avc_ss_reset(seqno); + return 0; + } + +#if 0 + sidtab_hash_eval(&sidtab, "sids"); +#endif + + if (policydb_read(&newpolicydb, fp)) { + LOAD_UNLOCK; + return -EINVAL; + } + + sidtab_init(&newsidtab); + + /* Verify that the existing classes did not change. */ + if (hashtab_map(policydb.p_classes.table, validate_class, &newpolicydb)) { + printk(KERN_ERR "security: the definition of an existing " + "class changed\n"); + rc = -EINVAL; + goto err; + } + + /* Clone the SID table. */ + sidtab_shutdown(&sidtab); + if (sidtab_map(&sidtab, clone_sid, &newsidtab)) { + rc = -ENOMEM; + goto err; + } + + /* Convert the internal representations of contexts + in the new SID table and remove invalid SIDs. */ + args.oldp = &policydb; + args.newp = &newpolicydb; + sidtab_map_remove_on_error(&newsidtab, convert_context, &args); + + /* Save the old policydb and SID table to free later. */ + memcpy(&oldpolicydb, &policydb, sizeof policydb); + sidtab_set(&oldsidtab, &sidtab); + + /* Install the new policydb and SID table. 
*/ + POLICY_WRLOCK; + memcpy(&policydb, &newpolicydb, sizeof policydb); + sidtab_set(&sidtab, &newsidtab); + seqno = ++latest_granting; + policydb_loaded_version = policydb.policyvers; + POLICY_WRUNLOCK; + LOAD_UNLOCK; + + /* Free the old policydb and SID table. */ + policydb_destroy(&oldpolicydb); + sidtab_destroy(&oldsidtab); + + avc_ss_reset(seqno); + + return 0; + +err: + LOAD_UNLOCK; + sidtab_destroy(&newsidtab); + policydb_destroy(&newpolicydb); + return rc; + +} + +/** + * security_pirq_sid - Obtain the SID for a physical irq. + * @pirq: physical irq + * @out_sid: security identifier + */ +int security_pirq_sid(int pirq, u32 *out_sid) +{ + int rc = 0; + struct ocontext *c; + + POLICY_RDLOCK; + + c = policydb.ocontexts[OCON_PIRQ]; + + while (c) { + if (c->u.pirq == pirq) + break; + c = c->next; + } + + if (c) { + if (!c->sid[0]) { + rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); + if (rc) + goto out; + } + *out_sid = c->sid[0]; + } else { + *out_sid = SECINITSID_PIRQ; + } + +out: + POLICY_RDUNLOCK; + return rc; +} + +/** + * security_port_sid - Obtain the SID for a vcpu. + * @vcpu: vcpu id + * @out_sid: security identifier + */ +int security_vcpu_sid(int vcpu, u32 *out_sid) +{ + struct ocontext *c; + int rc = 0; + + POLICY_RDLOCK; + + c = policydb.ocontexts[OCON_VCPU]; + while (c) { + if (c->u.vcpu == vcpu) + break; + c = c->next; + } + + if (c) { + if (!c->sid[0]) { + rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); + if (rc) + goto out; + } + *out_sid = c->sid[0]; + } else { + *out_sid = SECINITSID_VCPU; + } + +out: + POLICY_RDUNLOCK; + return rc; +} + +/** + * security_virq_sid - Obtain the SID for a vcpu. + * @virq: virq + * @out_sid: security identifier + */ +int security_virq_sid(int virq, u32 *out_sid) +{ + struct ocontext *c; + int rc = 0; + + POLICY_RDLOCK; + + c = policydb.ocontexts[OCON_VIRQ]; + while (c) { + if (c->u.virq == virq) + break; + c = c->next; + } + + if (c) { + if (!c->sid[0]) { + rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); + if (rc) + goto out; + } + *out_sid = c->sid[0]; + } else { + *out_sid = SECINITSID_VIRQ; + } + +out: + POLICY_RDUNLOCK; + return rc; +} + +/** + * security_iomem_sid - Obtain the SID for a vcpu. + * @mfn: iomem mfn + * @out_sid: security identifier + */ +int security_iomem_sid(unsigned long mfn, u32 *out_sid) +{ + struct ocontext *c; + int rc = 0; + + POLICY_RDLOCK; + + c = policydb.ocontexts[OCON_IOMEM]; + while (c) { + if (c->u.iomem == mfn) + break; + c = c->next; + } + + if (c) { + if (!c->sid[0]) { + rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); + if (rc) + goto out; + } + *out_sid = c->sid[0]; + } else { + *out_sid = SECINITSID_IOMEM; + } + +out: + POLICY_RDUNLOCK; + return rc; +} + +/** + * security_ioport_sid - Obtain the SID for a vcpu. + * @ioport: ioport + * @out_sid: security identifier + */ +int security_ioport_sid(u32 ioport, u32 *out_sid) +{ + struct ocontext *c; + int rc = 0; + + POLICY_RDLOCK; + + c = policydb.ocontexts[OCON_IOPORT]; + while (c) { + if (c->u.ioport == ioport) + break; + c = c->next; + } + + if (c) { + if (!c->sid[0]) { + rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); + if (rc) + goto out; + } + *out_sid = c->sid[0]; + } else { + *out_sid = SECINITSID_IOPORT; + } + +out: + POLICY_RDUNLOCK; + return rc; +} + +#define SIDS_NEL 25 + +/** + * security_get_user_sids - Obtain reachable SIDs for a user. 
+ * @fromsid: starting SID + * @username: username + * @sids: array of reachable SIDs for user + * @nel: number of elements in @sids + * + * Generate the set of SIDs for legal security contexts + * for a given user that can be reached by @fromsid. + * Set *@sids to point to a dynamically allocated + * array containing the set of SIDs. Set *@nel to the + * number of elements in the array. + */ + +int security_get_user_sids(u32 fromsid, + char *username, + u32 **sids, + u32 *nel) +{ + struct context *fromcon, usercon; + u32 *mysids, *mysids2, sid; + u32 mynel = 0, maxnel = SIDS_NEL; + struct user_datum *user; + struct role_datum *role; + struct av_decision avd; + struct ebitmap_node *rnode, *tnode; + int rc = 0, i, j; + + if (!ss_initialized) { + *sids = NULL; + *nel = 0; + goto out; + } + + POLICY_RDLOCK; + + fromcon = sidtab_search(&sidtab, fromsid); + if (!fromcon) { + rc = -EINVAL; + goto out_unlock; + } + + user = hashtab_search(policydb.p_users.table, username); + if (!user) { + rc = -EINVAL; + goto out_unlock; + } + usercon.user = user->value; + + mysids = xmalloc_array(u32, maxnel); + if (!mysids) { + rc = -ENOMEM; + goto out_unlock; + } + memset(mysids, 0, maxnel*sizeof(*mysids)); + + ebitmap_for_each_bit(&user->roles, rnode, i) { + if (!ebitmap_node_get_bit(rnode, i)) + continue; + role = policydb.role_val_to_struct[i]; + usercon.role = i+1; + ebitmap_for_each_bit(&role->types, tnode, j) { + if (!ebitmap_node_get_bit(tnode, j)) + continue; + usercon.type = j+1; + + if (mls_setup_user_range(fromcon, user, &usercon)) + continue; + + rc = context_struct_compute_av(fromcon, &usercon, + SECCLASS_DOMAIN, + DOMAIN__TRANSITION, + &avd); + if (rc || !(avd.allowed & DOMAIN__TRANSITION)) + continue; + rc = sidtab_context_to_sid(&sidtab, &usercon, &sid); + if (rc) { + xfree(mysids); + goto out_unlock; + } + if (mynel < maxnel) { + mysids[mynel++] = sid; + } else { + maxnel += SIDS_NEL; + mysids2 = xmalloc_array(u32, maxnel); + if (!mysids2) { + rc = -ENOMEM; + xfree(mysids); + goto out_unlock; + } + memset(mysids2, 0, maxnel*sizeof(*mysids2)); + memcpy(mysids2, mysids, mynel * sizeof(*mysids2)); + xfree(mysids); + mysids = mysids2; + mysids[mynel++] = sid; + } + } + } + + *sids = mysids; + *nel = mynel; + +out_unlock: + POLICY_RDUNLOCK; +out: + return rc; +} + +int security_get_bools(int *len, char ***names, int **values) +{ + int i, rc = -ENOMEM; + + POLICY_RDLOCK; + *names = NULL; + *values = NULL; + + *len = policydb.p_bools.nprim; + if (!*len) { + rc = 0; + goto out; + } + + *names = (char**)xmalloc_array(char*, *len); + if (!*names) + goto err; + memset(*names, 0, sizeof(char*) * *len); + + *values = (int*)xmalloc_array(int, *len); + if (!*values) + goto err; + + for (i = 0; i < *len; i++) { + size_t name_len; + (*values)[i] = policydb.bool_val_to_struct[i]->state; + name_len = strlen(policydb.p_bool_val_to_name[i]) + 1; + (*names)[i] = (char*)xmalloc_array(char, name_len); + if (!(*names)[i]) + goto err; + strncpy((*names)[i], policydb.p_bool_val_to_name[i], name_len); + (*names)[i][name_len - 1] = 0; + } + rc = 0; +out: + POLICY_RDUNLOCK; + return rc; +err: + if (*names) { + for (i = 0; i < *len; i++) + xfree((*names)[i]); + } + xfree(*values); + goto out; +} + + +int security_set_bools(int len, int *values) +{ + int i, rc = 0; + int lenp, seqno = 0; + struct cond_node *cur; + + POLICY_WRLOCK; + + lenp = policydb.p_bools.nprim; + if (len != lenp) { + rc = -EFAULT; + goto out; + } + + printk(KERN_INFO "security: committed booleans { "); + for (i = 0; i < len; i++) { + if (values[i]) { + 
policydb.bool_val_to_struct[i]->state = 1; + } else { + policydb.bool_val_to_struct[i]->state = 0; + } + if (i != 0) + printk(", "); + printk("%s:%d", policydb.p_bool_val_to_name[i], + policydb.bool_val_to_struct[i]->state); + } + printk(" }\n"); + + for (cur = policydb.cond_list; cur != NULL; cur = cur->next) { + rc = evaluate_cond_node(&policydb, cur); + if (rc) + goto out; + } + + seqno = ++latest_granting; + +out: + POLICY_WRUNLOCK; + if (!rc) { + avc_ss_reset(seqno); + } + return rc; +} + +int security_get_bool_value(int bool) +{ + int rc = 0; + int len; + + POLICY_RDLOCK; + + len = policydb.p_bools.nprim; + if (bool >= len) { + rc = -EFAULT; + goto out; + } + + rc = policydb.bool_val_to_struct[bool]->state; +out: + POLICY_RDUNLOCK; + return rc; +} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/services.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/services.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,15 @@ +/* + * Implementation of the security services. + * + * Author : Stephen Smalley, + */ +#ifndef _SS_SERVICES_H_ +#define _SS_SERVICES_H_ + +#include "policydb.h" +#include "sidtab.h" + +extern struct policydb policydb; + +#endif /* _SS_SERVICES_H_ */ + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/sidtab.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/sidtab.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,307 @@ +/* + * Implementation of the SID table type. + * + * Author : Stephen Smalley, + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include "flask.h" +#include "security.h" +#include "sidtab.h" + +#define SIDTAB_HASH(sid) \ +(sid & SIDTAB_HASH_MASK) + +#define INIT_SIDTAB_LOCK(s) spin_lock_init(&s->lock) +#define SIDTAB_LOCK(s, x) spin_lock_irqsave(&s->lock, x) +#define SIDTAB_UNLOCK(s, x) spin_unlock_irqrestore(&s->lock, x) + +int sidtab_init(struct sidtab *s) +{ + int i; + + s->htable = (void *)xmalloc_array(struct sidtab_node, SIDTAB_SIZE); + if (!s->htable) + return -ENOMEM; + for (i = 0; i < SIDTAB_SIZE; i++) + s->htable[i] = NULL; + s->nel = 0; + s->next_sid = 1; + s->shutdown = 0; + INIT_SIDTAB_LOCK(s); + return 0; +} + +int sidtab_insert(struct sidtab *s, u32 sid, struct context *context) +{ + int hvalue, rc = 0; + struct sidtab_node *prev, *cur, *newnode; + + if (!s) { + rc = -ENOMEM; + goto out; + } + + hvalue = SIDTAB_HASH(sid); + prev = NULL; + cur = s->htable[hvalue]; + while (cur != NULL && sid > cur->sid) { + prev = cur; + cur = cur->next; + } + + if (cur && sid == cur->sid) { + rc = -EEXIST; + goto out; + } + + newnode = xmalloc(struct sidtab_node); + if (newnode == NULL) { + rc = -ENOMEM; + goto out; + } + newnode->sid = sid; + if (context_cpy(&newnode->context, context)) { + xfree(newnode); + rc = -ENOMEM; + goto out; + } + + if (prev) { + newnode->next = prev->next; + wmb(); + prev->next = newnode; + } else { + newnode->next = s->htable[hvalue]; + wmb(); + s->htable[hvalue] = newnode; + } + + s->nel++; + if (sid >= s->next_sid) + s->next_sid = sid + 1; +out: + return rc; +} + +struct context *sidtab_search(struct sidtab *s, u32 sid) +{ + int hvalue; + struct sidtab_node *cur; + + if (!s) + return NULL; + + hvalue = SIDTAB_HASH(sid); + cur = s->htable[hvalue]; + while (cur != NULL && sid > cur->sid) + cur = cur->next; + + if (cur == NULL || sid != cur->sid) { + /* Remap invalid SIDs to the unlabeled SID. 
*/ + sid = SECINITSID_UNLABELED; + hvalue = SIDTAB_HASH(sid); + cur = s->htable[hvalue]; + while (cur != NULL && sid > cur->sid) + cur = cur->next; + if (!cur || sid != cur->sid) + return NULL; + } + + return &cur->context; +} + +int sidtab_map(struct sidtab *s, + int (*apply) (u32 sid, + struct context *context, + void *args), + void *args) +{ + int i, rc = 0; + struct sidtab_node *cur; + + if (!s) + goto out; + + for (i = 0; i < SIDTAB_SIZE; i++) { + cur = s->htable[i]; + while (cur != NULL) { + rc = apply(cur->sid, &cur->context, args); + if (rc) + goto out; + cur = cur->next; + } + } +out: + return rc; +} + +void sidtab_map_remove_on_error(struct sidtab *s, + int (*apply) (u32 sid, + struct context *context, + void *args), + void *args) +{ + int i, ret; + struct sidtab_node *last, *cur, *temp; + + if (!s) + return; + + for (i = 0; i < SIDTAB_SIZE; i++) { + last = NULL; + cur = s->htable[i]; + while (cur != NULL) { + ret = apply(cur->sid, &cur->context, args); + if (ret) { + if (last) { + last->next = cur->next; + } else { + s->htable[i] = cur->next; + } + + temp = cur; + cur = cur->next; + context_destroy(&temp->context); + xfree(temp); + s->nel--; + } else { + last = cur; + cur = cur->next; + } + } + } + + return; +} + +static inline u32 sidtab_search_context(struct sidtab *s, + struct context *context) +{ + int i; + struct sidtab_node *cur; + + for (i = 0; i < SIDTAB_SIZE; i++) { + cur = s->htable[i]; + while (cur != NULL) { + if (context_cmp(&cur->context, context)) + return cur->sid; + cur = cur->next; + } + } + return 0; +} + +int sidtab_context_to_sid(struct sidtab *s, + struct context *context, + u32 *out_sid) +{ + u32 sid; + int ret = 0; + unsigned long flags; + + *out_sid = SECSID_NULL; + + sid = sidtab_search_context(s, context); + if (!sid) { + SIDTAB_LOCK(s, flags); + /* Rescan now that we hold the lock. */ + sid = sidtab_search_context(s, context); + if (sid) + goto unlock_out; + /* No SID exists for the context. Allocate a new one. 
*/ + if (s->next_sid == UINT_MAX || s->shutdown) { + ret = -ENOMEM; + goto unlock_out; + } + sid = s->next_sid++; + ret = sidtab_insert(s, sid, context); + if (ret) + s->next_sid--; +unlock_out: + SIDTAB_UNLOCK(s, flags); + } + + if (ret) + return ret; + + *out_sid = sid; + return 0; +} + +void sidtab_hash_eval(struct sidtab *h, char *tag) +{ + int i, chain_len, slots_used, max_chain_len; + struct sidtab_node *cur; + + slots_used = 0; + max_chain_len = 0; + for (i = 0; i < SIDTAB_SIZE; i++) { + cur = h->htable[i]; + if (cur) { + slots_used++; + chain_len = 0; + while (cur) { + chain_len++; + cur = cur->next; + } + + if (chain_len > max_chain_len) + max_chain_len = chain_len; + } + } + + printk(KERN_INFO "%s: %d entries and %d/%d buckets used, longest " + "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE, + max_chain_len); +} + +void sidtab_destroy(struct sidtab *s) +{ + int i; + struct sidtab_node *cur, *temp; + + if (!s) + return; + + for (i = 0; i < SIDTAB_SIZE; i++) { + cur = s->htable[i]; + while (cur != NULL) { + temp = cur; + cur = cur->next; + context_destroy(&temp->context); + xfree(temp); + } + s->htable[i] = NULL; + } + xfree(s->htable); + s->htable = NULL; + s->nel = 0; + s->next_sid = 1; +} + +void sidtab_set(struct sidtab *dst, struct sidtab *src) +{ + unsigned long flags; + + SIDTAB_LOCK(src, flags); + dst->htable = src->htable; + dst->nel = src->nel; + dst->next_sid = src->next_sid; + dst->shutdown = 0; + SIDTAB_UNLOCK(src, flags); +} + +void sidtab_shutdown(struct sidtab *s) +{ + unsigned long flags; + + SIDTAB_LOCK(s, flags); + s->shutdown = 1; + SIDTAB_UNLOCK(s, flags); +} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/sidtab.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/sidtab.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,63 @@ +/* + * A security identifier table (sidtab) is a hash table + * of security context structures indexed by SID value. 
+ * + * Author : Stephen Smalley, + */ + +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SS_SIDTAB_H_ +#define _SS_SIDTAB_H_ + +#include "context.h" +#include + +struct sidtab_node { + u32 sid; /* security identifier */ + struct context context; /* security context structure */ + struct sidtab_node *next; +}; + +#define SIDTAB_HASH_BITS 7 +#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS) +#define SIDTAB_HASH_MASK (SIDTAB_HASH_BUCKETS-1) + +#define SIDTAB_SIZE SIDTAB_HASH_BUCKETS + +struct sidtab { + struct sidtab_node **htable; + unsigned int nel; /* number of elements */ + unsigned int next_sid; /* next SID to allocate */ + unsigned char shutdown; + spinlock_t lock; +}; + +int sidtab_init(struct sidtab *s); +int sidtab_insert(struct sidtab *s, u32 sid, struct context *context); +struct context *sidtab_search(struct sidtab *s, u32 sid); + +int sidtab_map(struct sidtab *s, + int (*apply) (u32 sid, + struct context *context, + void *args), + void *args); + +void sidtab_map_remove_on_error(struct sidtab *s, + int (*apply) (u32 sid, + struct context *context, + void *args), + void *args); + +int sidtab_context_to_sid(struct sidtab *s, + struct context *context, + u32 *sid); + +void sidtab_hash_eval(struct sidtab *h, char *tag); +void sidtab_destroy(struct sidtab *s); +void sidtab_set(struct sidtab *dst, struct sidtab *src); +void sidtab_shutdown(struct sidtab *s); + +#endif /* _SS_SIDTAB_H_ */ + + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/symtab.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/symtab.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,47 @@ +/* + * Implementation of the symbol table type. + * + * Author : Stephen Smalley, + */ + +/* Ported to Xen 3.0, George Coker, */ + +#include +#include +#include +#include +#include "symtab.h" + +static unsigned int symhash(struct hashtab *h, void *key) +{ + char *p, *keyp; + unsigned int size; + unsigned int val; + + val = 0; + keyp = key; + size = strlen(keyp); + for (p = keyp; (p - keyp) < size; p++) + val = (val << 4 | (val >> (8*sizeof(unsigned int)-4))) ^ (*p); + return val & (h->size - 1); +} + +static int symcmp(struct hashtab *h, void *key1, void *key2) +{ + char *keyp1, *keyp2; + + keyp1 = key1; + keyp2 = key2; + return strcmp(keyp1, keyp2); +} + + +int symtab_init(struct symtab *s, unsigned int size) +{ + s->table = hashtab_create(symhash, symcmp, size); + if (!s->table) + return -1; + s->nprim = 0; + return 0; +} + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/symtab.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/symtab.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,23 @@ +/* + * A symbol table (symtab) maintains associations between symbol + * strings and datum values. The type of the datum values + * is arbitrary. The symbol table type is implemented + * using the hash table type (hashtab). 
+ * + * Author : Stephen Smalley, + */ +#ifndef _SS_SYMTAB_H_ +#define _SS_SYMTAB_H_ + +#include "hashtab.h" + +struct symtab { + struct hashtab *table; /* hash table (keyed on a string) */ + u32 nprim; /* number of primary names in table */ +}; + +int symtab_init(struct symtab *s, unsigned int size); + +#endif /* _SS_SYMTAB_H_ */ + + diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/ss/util_endian.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/ss/util_endian.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,19 @@ +/* Ported to Xen 3.0, George Coker, */ + +#ifndef _SS_EXTRAS_H_ +#define _SS_EXTRAS_H_ + +typedef __u16 __le16; +typedef __u32 __le32; +typedef __u64 __le64; + + +#define le16_to_cpu __le16_to_cpu +#define le32_to_cpu __le32_to_cpu +#define le64_to_cpu __le64_to_cpu + +#define __le16_to_cpu(x) ((__le16)(x)) +#define __le32_to_cpu(x) ((__le32)(x)) +#define __le64_to_cpu(x) ((__le64)(x)) + +#endif /* _SS_EXTRAS_H_ */ diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/strutil.c --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/strutil.c Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,267 @@ +/* derived from.... */ + +/* + * linux/lib/vsprintf.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + */ + +/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */ +/* + * Wirzenius wrote this portably, Torvalds fucked it up :-) + */ + +/* + * Fri Jul 13 2001 Crutcher Dunnavant + * - changed to provide snprintf and vsnprintf functions + * So Feb 1 16:51:32 CET 2004 Juergen Quade + * - scnprintf and vscnprintf + */ + + +#include +#include +#include +#include + +static int skip_atoi(const char **s) +{ + int i=0; + + while (isdigit(**s)) + i = i*10 + *((*s)++) - '0'; + return i; +} + +/** + * vsscanf - Unformat a buffer into a list of arguments + * @buf: input buffer + * @fmt: format of buffer + * @args: arguments + */ +int vsscanf(const char * buf, const char * fmt, va_list args) +{ + const char *str = buf; + char *next; + char digit; + int num = 0; + int qualifier; + int base; + int field_width; + int is_sign = 0; + + while(*fmt && *str) { + /* skip any white space in format */ + /* white space in format matchs any amount of + * white space, including none, in the input. + */ + if (isspace(*fmt)) { + while (isspace(*fmt)) + ++fmt; + while (isspace(*str)) + ++str; + } + + /* anything that is not a conversion must match exactly */ + if (*fmt != '%' && *fmt) { + if (*fmt++ != *str++) + break; + continue; + } + + if (!*fmt) + break; + ++fmt; + + /* skip this conversion. 
+ * advance both strings to next white space + */ + if (*fmt == '*') { + while (!isspace(*fmt) && *fmt) + fmt++; + while (!isspace(*str) && *str) + str++; + continue; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + + /* get conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || + *fmt == 'Z' || *fmt == 'z') { + qualifier = *fmt++; + if (unlikely(qualifier == *fmt)) { + if (qualifier == 'h') { + qualifier = 'H'; + fmt++; + } else if (qualifier == 'l') { + qualifier = 'L'; + fmt++; + } + } + } + base = 10; + is_sign = 0; + + if (!*fmt || !*str) + break; + + switch(*fmt++) { + case 'c': + { + char *s = (char *) va_arg(args,char*); + if (field_width == -1) + field_width = 1; + do { + *s++ = *str++; + } while (--field_width > 0 && *str); + num++; + } + continue; + case 's': + { + char *s = (char *) va_arg(args, char *); + if(field_width == -1) + field_width = INT_MAX; + /* first, skip leading white space in buffer */ + while (isspace(*str)) + str++; + + /* now copy until next white space */ + while (*str && !isspace(*str) && field_width--) { + *s++ = *str++; + } + *s = '\0'; + num++; + } + continue; + case 'n': + /* return number of characters read so far */ + { + int *i = (int *)va_arg(args,int*); + *i = str - buf; + } + continue; + case 'o': + base = 8; + break; + case 'x': + case 'X': + base = 16; + break; + case 'i': + base = 0; + case 'd': + is_sign = 1; + case 'u': + break; + case '%': + /* looking for '%' in str */ + if (*str++ != '%') + return num; + continue; + default: + /* invalid format; stop here */ + return num; + } + + /* have some sort of integer conversion. + * first, skip white space in buffer. + */ + while (isspace(*str)) + str++; + + digit = *str; + if (is_sign && digit == '-') + digit = *(str + 1); + + if (!digit + || (base == 16 && !isxdigit(digit)) + || (base == 10 && !isdigit(digit)) + || (base == 8 && (!isdigit(digit) || digit > '7')) + || (base == 0 && !isdigit(digit))) + break; + + switch(qualifier) { + case 'H': /* that's 'hh' in format */ + if (is_sign) { + signed char *s = (signed char *) va_arg(args,signed char *); + *s = (signed char) simple_strtol(str,&next,base); + } else { + unsigned char *s = (unsigned char *) va_arg(args, unsigned char *); + *s = (unsigned char) simple_strtoul(str, &next, base); + } + break; + case 'h': + if (is_sign) { + short *s = (short *) va_arg(args,short *); + *s = (short) simple_strtol(str,&next,base); + } else { + unsigned short *s = (unsigned short *) va_arg(args, unsigned short *); + *s = (unsigned short) simple_strtoul(str, &next, base); + } + break; + case 'l': + if (is_sign) { + long *l = (long *) va_arg(args,long *); + *l = simple_strtol(str,&next,base); + } else { + unsigned long *l = (unsigned long*) va_arg(args,unsigned long*); + *l = simple_strtoul(str,&next,base); + } + break; + case 'L': + if (is_sign) { + long long *l = (long long*) va_arg(args,long long *); + *l = simple_strtoll(str,&next,base); + } else { + unsigned long long *l = (unsigned long long*) va_arg(args,unsigned long long*); + *l = simple_strtoull(str,&next,base); + } + break; + case 'Z': + case 'z': + { + size_t *s = (size_t*) va_arg(args,size_t*); + *s = (size_t) simple_strtoul(str,&next,base); + } + break; + default: + if (is_sign) { + int *i = (int *) va_arg(args, int*); + *i = (int) simple_strtol(str,&next,base); + } else { + unsigned int *i = (unsigned int*) va_arg(args, unsigned int*); + *i = (unsigned int) simple_strtoul(str,&next,base); + } + break; + } + 
num++;
+
+        if (!next)
+            break;
+        str = next;
+    }
+    return num;
+}
+
+/**
+ * sscanf - Unformat a buffer into a list of arguments
+ * @buf: input buffer
+ * @fmt: formatting of buffer
+ * @...: resulting arguments
+ */
+int sscanf(const char * buf, const char * fmt, ...)
+{
+    va_list args;
+    int i;
+
+    va_start(args, fmt);
+    i = vsscanf(buf, fmt, args);
+    va_end(args);
+    return i;
+} diff -r 65bfdf932c7e -r 12923ba929c8 xen/flask/strutil.h --- /dev/null Thu Jan 1 00:00:00 1970 +0000 +++ b/xen/flask/strutil.h Sat Dec 16 11:54:25 2006 -0500 @@ -0,0 +1,3 @@ +
+int vsscanf(const char *, const char *, va_list);
+int sscanf(const char *, const char *, ...);
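
For reviewers unfamiliar with the SID table interface introduced above, the sketch below shows how the pieces are intended to fit together: a table is initialised, a security context is mapped to a SID (allocating one on first use), and the SID is resolved back to a context before the table is torn down. This is illustrative only and is not part of the changeset; the function name is hypothetical, and it assumes the usual flask/ss headers are on the include path (sidtab.h pulls in context.h, which is taken to provide context_cmp()).

/* Illustrative sketch, not part of this patch: exercises the sidtab API
 * added in xen/flask/ss/sidtab.c.  The function name is made up. */
#include "sidtab.h"   /* also pulls in context.h */

static int sidtab_roundtrip_example(struct context *ctx)
{
    struct sidtab s;
    struct context *found;
    u32 sid;
    int rc;

    rc = sidtab_init(&s);                      /* allocate the hash buckets */
    if (rc)
        return rc;

    /* Map the context to a SID; sidtab_context_to_sid() reuses an existing
     * SID if the context is already in the table, else allocates next_sid. */
    rc = sidtab_context_to_sid(&s, ctx, &sid);
    if (rc)
        goto out;

    /* Resolve the SID back to a context; unknown SIDs are remapped to
     * SECINITSID_UNLABELED inside sidtab_search(). */
    found = sidtab_search(&s, sid);
    rc = (found && context_cmp(found, ctx)) ? 0 : -1;

out:
    sidtab_destroy(&s);                        /* frees nodes and their contexts */
    return rc;
}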