Date: 20 Dec 2007
Subject: [AppArmor 43/47] AppArmor: Profile loading and manipulation, pathname matching
Pathname matching, transition table loading, profile loading and
manipulation.
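
A rough sketch of the matching step, for reviewers (the function below is
illustrative only and not part of the patch): each input byte indexes a
comb-vector DFA through the base/next/check tables, falling back to the
per-state default transition when the check entry does not confirm that the
slot belongs to the current state. This is what aa_dfa_next_state() does,
minus the optional equivalence-class indirection.

	/* Illustrative userspace sketch, not part of the patch. */
	static unsigned int dfa_walk(const unsigned int *base,
				     const unsigned short *def,
				     const unsigned short *next,
				     const unsigned short *check,
				     unsigned int state, const char *str)
	{
		if (state == 0)
			return 0;
		while (*str) {
			unsigned int pos = base[state] + (unsigned char)*str++;

			/* take the transition only if this state owns the slot */
			state = (check[pos] == state) ? next[pos] : def[state];
		}
		return state;	/* index into the accept-permission table */
	}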

Signed-off-by: John Johansen <jjohansen@suse.de>
Signed-off-by: Andreas Gruenbacher <agruen@suse.de>

---
security/apparmor/match.c | 299 +++++++++++++
security/apparmor/match.h | 85 +++
security/apparmor/module_interface.c | 805 +++++++++++++++++++++++++++++++++++
3 files changed, 1189 insertions(+)

--- /dev/null
+++ b/security/apparmor/match.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2007 Novell/SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Regular expression transition table matching
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include "apparmor.h"
+#include "match.h"
+
+static struct table_header *unpack_table(void *blob, size_t bsize)
+{
+ struct table_header *table = NULL;
+ struct table_header th;
+ size_t tsize;
+
+ if (bsize < sizeof(struct table_header))
+ goto out;
+
+ th.td_id = be16_to_cpu(*(u16 *) (blob));
+ th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
+ th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
+ blob += sizeof(struct table_header);
+
+ if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
+ th.td_flags == YYTD_DATA8))
+ goto out;
+
+ tsize = table_size(th.td_lolen, th.td_flags);
+ if (bsize < tsize)
+ goto out;
+
+ table = kmalloc(tsize, GFP_KERNEL);
+ if (table) {
+ *table = th;
+ if (th.td_flags == YYTD_DATA8)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u8, byte_to_byte);
+ else if (th.td_flags == YYTD_DATA16)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u16, be16_to_cpu);
+ else
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u32, be32_to_cpu);
+ }
+
+out:
+ return table;
+}
+
+int unpack_dfa(struct aa_dfa *dfa, void *blob, size_t size)
+{
+ int hsize, i;
+ int error = -ENOMEM;
+
+ /* get dfa table set header */
+ if (size < sizeof(struct table_set_header))
+ goto fail;
+
+ if (ntohl(*(u32 *)blob) != YYTH_MAGIC)
+ goto fail;
+
+ hsize = ntohl(*(u32 *)(blob + 4));
+ if (size < hsize)
+ goto fail;
+
+ blob += hsize;
+ size -= hsize;
+
+ error = -EPROTO;
+ while (size > 0) {
+ struct table_header *table;
+ table = unpack_table(blob, size);
+ if (!table)
+ goto fail;
+
+ switch (table->td_id) {
+ case YYTD_ID_ACCEPT:
+ case YYTD_ID_BASE:
+ dfa->tables[table->td_id - 1] = table;
+ if (table->td_flags != YYTD_DATA32)
+ goto fail;
+ break;
+ case YYTD_ID_DEF:
+ case YYTD_ID_NXT:
+ case YYTD_ID_CHK:
+ dfa->tables[table->td_id - 1] = table;
+ if (table->td_flags != YYTD_DATA16)
+ goto fail;
+ break;
+ case YYTD_ID_EC:
+ dfa->tables[table->td_id - 1] = table;
+ if (table->td_flags != YYTD_DATA8)
+ goto fail;
+ break;
+ default:
+ kfree(table);
+ goto fail;
+ }
+
+ blob += table_size(table->td_lolen, table->td_flags);
+ size -= table_size(table->td_lolen, table->td_flags);
+ }
+
+ return 0;
+
+fail:
+ for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) {
+ if (dfa->tables[i]) {
+ kfree(dfa->tables[i]);
+ dfa->tables[i] = NULL;
+ }
+ }
+ return error;
+}
+
+/**
+ * verify_dfa - verify that all the transitions and states in the dfa tables
+ * are in bounds.
+ * @dfa: dfa to test
+ *
+ * Assumes @dfa has gone through the verification done during unpacking.
+ */
+int verify_dfa(struct aa_dfa *dfa)
+{
+ size_t i, state_count, trans_count;
+ int error = -EPROTO;
+
+ /* check that required tables exist */
+ if (!(dfa->tables[YYTD_ID_ACCEPT - 1] &&
+ dfa->tables[YYTD_ID_DEF - 1] &&
+ dfa->tables[YYTD_ID_BASE - 1] &&
+ dfa->tables[YYTD_ID_NXT - 1] &&
+ dfa->tables[YYTD_ID_CHK - 1]))
+ goto out;
+
+ /* accept.size == default.size == base.size */
+ state_count = dfa->tables[YYTD_ID_BASE - 1]->td_lolen;
+ if (!(state_count == dfa->tables[YYTD_ID_DEF - 1]->td_lolen &&
+ state_count == dfa->tables[YYTD_ID_ACCEPT - 1]->td_lolen))
+ goto out;
+
+ /* next.size == chk.size */
+ trans_count = dfa->tables[YYTD_ID_NXT - 1]->td_lolen;
+ if (trans_count != dfa->tables[YYTD_ID_CHK - 1]->td_lolen)
+ goto out;
+
+ /* if there is an equivalence-class table, its size must be 256 */
+ if (dfa->tables[YYTD_ID_EC - 1] &&
+ dfa->tables[YYTD_ID_EC - 1]->td_lolen != 256)
+ goto out;
+
+ for (i = 0; i < state_count; i++) {
+ if (DEFAULT_TABLE(dfa)[i] >= state_count)
+ goto out;
+ if (BASE_TABLE(dfa)[i] >= trans_count + 256)
+ goto out;
+ }
+
+ for (i = 0; i < trans_count ; i++) {
+ if (NEXT_TABLE(dfa)[i] >= state_count)
+ goto out;
+ if (CHECK_TABLE(dfa)[i] >= state_count)
+ goto out;
+ }
+
+ /* verify accept permissions */
+ for (i = 0; i < state_count; i++) {
+ int mode = ACCEPT_TABLE(dfa)[i];
+
+ if (mode & ~AA_VALID_PERM_MASK) {
+ goto out;
+ }
+ }
+
+ error = 0;
+out:
+ return error;
+}
+
+struct aa_dfa *aa_match_alloc(void)
+{
+ return kzalloc(sizeof(struct aa_dfa), GFP_KERNEL);
+}
+
+void aa_match_free(struct aa_dfa *dfa)
+{
+ if (dfa) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfa->tables); i++)
+ kfree(dfa->tables[i]);
+ }
+ kfree(dfa);
+}
+
+/**
+ * aa_dfa_next_state - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against
+ * @start: the state of the dfa to start matching in
+ * @str: the string to match against the dfa
+ *
+ * aa_dfa_next_state will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ */
+unsigned int aa_dfa_next_state(struct aa_dfa *dfa, unsigned int start,
+ const char *str)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start, pos;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC - 1]) {
+ u8 *equiv = EQUIV_TABLE(dfa);
+ while (*str) {
+ pos = base[state] + equiv[(u8)*str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ } else {
+ while (*str) {
+ pos = base[state] + (u8)*str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ }
+ return state;
+}
+
+/**
+ * aa_dfa_null_transition - step to next state after null character
+ * @dfa: the dfa to match against
+ * @start: the state of the dfa to start matching in
+ *
+ * aa_dfa_null_transition transitions to the next state after a null
+ * character, which is not used in standard matching and is only
+ * used to separate pairs.
+ */
+unsigned int aa_dfa_null_transition(struct aa_dfa *dfa, unsigned int start)
+{
+ return aa_dfa_next_state(dfa, start, "//");
+}
+
+/**
+ * aa_dfa_match - find accept perm for @str in @dfa
+ * @dfa: the dfa to match @str against
+ * @str: the string to match against the dfa
+ *
+ * aa_dfa_match will match @str and return the accept perms for the
+ * final state.
+ */
+unsigned int aa_dfa_match(struct aa_dfa *dfa, const char *str)
+{
+ return ACCEPT_TABLE(dfa)[aa_dfa_next_state(dfa, DFA_START, str)];
+}
+
+/**
+ * aa_match_state - find accept perm and state for @str in @dfa
+ * @dfa: the dfa to match @str against
+ * @start: the state to start the match from
+ * @str: the string to match against the dfa
+ * @final: the state that the match finished in
+ *
+ * aa_match_state will match @str and return the accept perms; @final is
+ * set to the state the match finished in.
+ */
+unsigned int aa_match_state(struct aa_dfa *dfa, unsigned int start,
+ const char *str, unsigned int *final)
+{
+ unsigned int state;
+ if (dfa) {
+ state = aa_dfa_next_state(dfa, start, str);
+ if (final)
+ *final = state;
+ return ACCEPT_TABLE(dfa)[state];
+ }
+ if (final)
+ *final = 0;
+ return 0;
+}
+
--- /dev/null
+++ b/security/apparmor/match.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2007 Novell/SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * AppArmor submodule (match) prototypes
+ */
+
+#ifndef __MATCH_H
+#define __MATCH_H
+
+#define DFA_START 1
+
+/**
+ * The format used for transition tables is based on the GNU flex table
+ * file format (--tables-file option; see Table File Format in the flex
+ * info pages and the flex sources for documentation). The magic number
+ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
+ * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
+ * slightly differently (see the apparmor-parser package).
+ */
+
+#define YYTH_MAGIC 0x1B5E783D
+
+struct table_set_header {
+ u32 th_magic; /* YYTH_MAGIC */
+ u32 th_hsize;
+ u32 th_ssize;
+ u16 th_flags;
+ char th_version[];
+};
+
+#define YYTD_ID_ACCEPT 1
+#define YYTD_ID_BASE 2
+#define YYTD_ID_CHK 3
+#define YYTD_ID_DEF 4
+#define YYTD_ID_EC 5
+#define YYTD_ID_META 6
+#define YYTD_ID_NXT 8
+
+
+#define YYTD_DATA8 1
+#define YYTD_DATA16 2
+#define YYTD_DATA32 4
+
+struct table_header {
+ u16 td_id;
+ u16 td_flags;
+ u32 td_hilen;
+ u32 td_lolen;
+ char td_data[];
+};
+
+#define DEFAULT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_DEF - 1]->td_data))
+#define BASE_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_BASE - 1]->td_data))
+#define NEXT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_NXT - 1]->td_data))
+#define CHECK_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_CHK - 1]->td_data))
+#define EQUIV_TABLE(DFA) ((u8 *)((DFA)->tables[YYTD_ID_EC - 1]->td_data))
+#define ACCEPT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT - 1]->td_data))
+
+struct aa_dfa {
+ struct table_header *tables[YYTD_ID_NXT];
+};
+
+#define byte_to_byte(X) (X)
+
+#define UNPACK_ARRAY(TABLE, BLOB, LEN, TYPE, NTOHX) \
+ do { \
+ typeof(LEN) __i; \
+ TYPE *__t = (TYPE *) TABLE; \
+ TYPE *__b = (TYPE *) BLOB; \
+ for (__i = 0; __i < LEN; __i++) { \
+ __t[__i] = NTOHX(__b[__i]); \
+ } \
+ } while (0)
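+
+/*
+ * Used by unpack_table(), e.g.
+ * UNPACK_ARRAY(table->td_data, blob, th.td_lolen, u16, be16_to_cpu)
+ * copies th.td_lolen big-endian u16 entries from the blob into the
+ * table, converting each to host byte order.
+ */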
+
+static inline size_t table_size(size_t len, size_t el_size)
+{
+ return ALIGN(sizeof(struct table_header) + len * el_size, 8);
+}
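+
+/*
+ * Worked example (illustrative only): a YYTD_DATA16 table with 5 entries
+ * occupies table_size(5, YYTD_DATA16) = ALIGN(12 + 5 * 2, 8) = 24 bytes
+ * in the serialized stream (sizeof(struct table_header) is 12), so
+ * unpack_dfa() always advances through the blob in 8-byte aligned steps.
+ */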
+
+#endif /* __MATCH_H */
--- /dev/null
+++ b/security/apparmor/module_interface.c
@@ -0,0 +1,805 @@
+/*
+ * Copyright (C) 1998-2007 Novell/SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * AppArmor userspace policy interface
+ */
+
+#include <asm/unaligned.h>
+
+#include "apparmor.h"
+#include "inline.h"
+
+/*
+ * This mutex is used to synchronize profile adds, replacements, and
+ * removals: we only allow one of these operations at a time.
+ * We do not use the profile list lock here in order to avoid blocking
+ * exec during those operations. (Exec involves a profile list lookup
+ * for named-profile transitions.)
+ */
+DEFINE_MUTEX(aa_interface_lock);
+
+/*
+ * The AppArmor interface treats data as a type byte followed by the
+ * actual data. The interface has the notion of a named entry, which
+ * has a name (AA_NAME typecode followed by name string) followed by
+ * the entry's typecode and data. Named types allow optional elements
+ * and extensions to be added and tested for without breaking
+ * backwards compatibility.
+ */
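+
+/*
+ * Illustrative example (the authoritative encoder lives in the
+ * apparmor-parser package): given the readers below, a named 32-bit
+ * element "version" with value 3 is laid out roughly as
+ *
+ *   AA_NAME, u16 len (LE), "version\0", AA_U32, u32 3 (LE)
+ *
+ * where len counts the NUL-terminated tag string.
+ */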
+
+enum aa_code {
+ AA_U8,
+ AA_U16,
+ AA_U32,
+ AA_U64,
+ AA_NAME, /* same as string except it is the item's name */
+ AA_STRING,
+ AA_BLOB,
+ AA_STRUCT,
+ AA_STRUCTEND,
+ AA_LIST,
+ AA_LISTEND,
+ AA_ARRAY,
+ AA_ARRAYEND,
+};
+
+/*
+ * aa_ext is the read head for the buffer containing the serialized profile. The
+ * data is copied into a kernel buffer in apparmorfs and then handed off to
+ * the unpack routines.
+ */
+struct aa_ext {
+ void *start;
+ void *end;
+ void *pos; /* pointer to current position in the buffer */
+ u32 version;
+ char *ns_name;
+};
+
+static inline int aa_inbounds(struct aa_ext *e, size_t size)
+{
+ return (size <= e->end - e->pos);
+}
+
+/**
+ * aa_is_u16_chunk - test and do bounds checking for a u16-size-based chunk
+ * @e: serialized data read head
+ * @chunk: start address for chunk of data
+ *
+ * Returns the size of the chunk found, with the read head left at the
+ * end of the chunk.
+ */
+static size_t aa_is_u16_chunk(struct aa_ext *e, char **chunk)
+{
+ void *pos = e->pos;
+ size_t size = 0;
+
+ if (!aa_inbounds(e, sizeof(u16)))
+ goto fail;
+ size = le16_to_cpu(get_unaligned((u16 *)e->pos));
+ e->pos += sizeof(u16);
+ if (!aa_inbounds(e, size))
+ goto fail;
+ *chunk = e->pos;
+ e->pos += size;
+ return size;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static inline int aa_is_X(struct aa_ext *e, enum aa_code code)
+{
+ if (!aa_inbounds(e, 1))
+ return 0;
+ if (*(u8 *) e->pos != code)
+ return 0;
+ e->pos++;
+ return 1;
+}
+
+/**
+ * aa_is_nameX - check if the next element is of type X with a name of @name
+ * @e: serialized data extent information
+ * @code: type code
+ * @name: name to match to the serialized element.
+ *
+ * Check that the next serialized data element is of type X and has a tag
+ * name @name. If @name is specified then there must be a matching
+ * name element in the stream. If @name is NULL any name element will be
+ * skipped and only the typecode will be tested.
+ * Returns %1 on success (both the type code and the name match) and the
+ * read head is advanced past the headers.
+ * Returns %0 if either match fails; the read head does not move.
+ */
+static int aa_is_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+{
+ void *pos = e->pos;
+ /*
+ * Check for presence of a tagname, and if present name size
+ * AA_NAME tag value is a u16.
+ */
+ if (aa_is_X(e, AA_NAME)) {
+ char *tag;
+ size_t size = aa_is_u16_chunk(e, &tag);
+ /* if a name is specified it must match. otherwise skip tag */
+ if (name && (!size || strcmp(name, tag)))
+ goto fail;
+ } else if (name) {
+ /* if a name is specified and there is no name tag fail */
+ goto fail;
+ }
+
+ /* now check if type code matches */
+ if (aa_is_X(e, code))
+ return 1;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static int aa_is_u16(struct aa_ext *e, u16 *data, const char *name)
+{
+ void *pos = e->pos;
+ if (aa_is_nameX(e, AA_U16, name)) {
+ if (!aa_inbounds(e, sizeof(u16)))
+ goto fail;
+ if (data)
+ *data = le16_to_cpu(get_unaligned((u16 *)e->pos));
+ e->pos += sizeof(u16);
+ return 1;
+ }
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static int aa_is_u32(struct aa_ext *e, u32 *data, const char *name)
+{
+ void *pos = e->pos;
+ if (aa_is_nameX(e, AA_U32, name)) {
+ if (!aa_inbounds(e, sizeof(u32)))
+ goto fail;
+ if (data)
+ *data = le32_to_cpu(get_unaligned((u32 *)e->pos));
+ e->pos += sizeof(u32);
+ return 1;
+ }
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static size_t aa_is_array(struct aa_ext *e, const char *name)
+{
+ void *pos = e->pos;
+ if (aa_is_nameX(e, AA_ARRAY, name)) {
+ int size;
+ if (!aa_inbounds(e, sizeof(u16)))
+ goto fail;
+ size = (int) le16_to_cpu(get_unaligned((u16 *)e->pos));
+ e->pos += sizeof(u16);
+ return size;
+ }
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static size_t aa_is_blob(struct aa_ext *e, char **blob, const char *name)
+{
+ void *pos = e->pos;
+ if (aa_is_nameX(e, AA_BLOB, name)) {
+ u32 size;
+ if (!aa_inbounds(e, sizeof(u32)))
+ goto fail;
+ size = le32_to_cpu(get_unaligned((u32 *)e->pos));
+ e->pos += sizeof(u32);
+ if (aa_inbounds(e, (size_t) size)) {
+ *blob = e->pos;
+ e->pos += size;
+ return size;
+ }
+ }
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static int aa_is_dynstring(struct aa_ext *e, char **string, const char *name)
+{
+ char *src_str;
+ size_t size = 0;
+ void *pos = e->pos;
+ *string = NULL;
+ if (aa_is_nameX(e, AA_STRING, name) &&
+ (size = aa_is_u16_chunk(e, &src_str))) {
+ char *str;
+ if (!(str = kmalloc(size, GFP_KERNEL)))
+ goto fail;
+ memcpy(str, src_str, size);
+ *string = str;
+ }
+
+ return size;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+/**
+ * aa_unpack_dfa - unpack a file rule dfa
+ * @e: serialized data extent information
+ *
+ * Returns the unpacked dfa, NULL if no dfa is present, or an ERR_PTR on failure.
+ */
+struct aa_dfa *aa_unpack_dfa(struct aa_ext *e)
+{
+ char *blob = NULL;
+ size_t size, error = 0;
+ struct aa_dfa *dfa = NULL;
+
+ size = aa_is_blob(e, &blob, "aadfa");
+ if (size) {
+ dfa = aa_match_alloc();
+ if (dfa) {
+ /*
+ * The dfa is aligned within the blob to an 8-byte
+ * boundary from the beginning of the stream.
+ */
+ size_t sz = blob - (char *) e->start;
+ size_t pad = ALIGN(sz, 8) - sz;
+ error = unpack_dfa(dfa, blob + pad, size - pad);
+ if (!error)
+ error = verify_dfa(dfa);
+ } else {
+ error = -ENOMEM;
+ }
+
+ if (error) {
+ aa_match_free(dfa);
+ dfa = ERR_PTR(error);
+ }
+ }
+
+ return dfa;
+}
+
+/**
+ * aa_unpack_profile - unpack a serialized profile
+ * @e: serialized data extent information
+ * @operation: operation profile is being unpacked for
+ */
+static struct aa_profile *aa_unpack_profile(struct aa_ext *e,
+ const char *operation)
+{
+ struct aa_profile *profile = NULL;
+ struct aa_audit sa;
+
+ int error = -EPROTO;
+
+ profile = alloc_aa_profile();
+ if (!profile)
+ return ERR_PTR(-ENOMEM);
+
+ /* check that we have the right struct being passed */
+ if (!aa_is_nameX(e, AA_STRUCT, "profile"))
+ goto fail;
+ if (!aa_is_dynstring(e, &profile->name, NULL))
+ goto fail;
+
+ /* per profile debug flags (complain, audit) */
+ if (!aa_is_nameX(e, AA_STRUCT, "flags"))
+ goto fail;
+ if (!aa_is_u32(e, NULL, NULL))
+ goto fail;
+ if (!aa_is_u32(e, &(profile->flags.complain), NULL))
+ goto fail;
+ if (!aa_is_u32(e, &(profile->flags.audit), NULL))
+ goto fail;
+ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+
+ if (!aa_is_u32(e, &(profile->capabilities), NULL))
+ goto fail;
+
+ /* get file rules */
+ profile->file_rules = aa_unpack_dfa(e);
+ if (IS_ERR(profile->file_rules)) {
+ error = PTR_ERR(profile->file_rules);
+ profile->file_rules = NULL;
+ goto fail;
+ }
+
+ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+
+ return profile;
+
+fail:
+ memset(&sa, 0, sizeof(sa));
+ sa.operation = operation;
+ sa.gfp_mask = GFP_KERNEL;
+ sa.name = profile && profile->name ? profile->name : "unknown";
+ if (!sa.info)
+ sa.info = "failed to unpack profile";
+ aa_audit_status(NULL, &sa);
+
+ if (profile)
+ free_aa_profile(profile);
+
+ return ERR_PTR(error);
+}
+
+/**
+ * aa_verify_header - unpack and check the serialized stream header
+ * @e: serialized data read head
+ * @operation: operation header is being verified for
+ *
+ * returns error or 0 if header is good
+ */
+static int aa_verify_header(struct aa_ext *e, const char *operation)
+{
+ /* get the interface version */
+ if (!aa_is_u32(e, &e->version, "version")) {
+ struct aa_audit sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.operation = operation;
+ sa.gfp_mask = GFP_KERNEL;
+ sa.info = "invalid profile format";
+ aa_audit_status(NULL, &sa);
+ return -EPROTONOSUPPORT;
+ }
+
+ /* check that the interface version is currently supported */
+ if (e->version != 3) {
+ struct aa_audit sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.operation = operation;
+ sa.gfp_mask = GFP_KERNEL;
+ sa.info = "unsupported interface version";
+ aa_audit_status(NULL, &sa);
+ return -EPROTONOSUPPORT;
+ }
+
+ /* read the namespace if present */
+ if (!aa_is_dynstring(e, &e->ns_name, "namespace")) {
+ e->ns_name = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * aa_add_profile - Unpack and add a new profile to the profile list
+ * @data: serialized data stream
+ * @size: size of the serialized data stream
+ */
+ssize_t aa_add_profile(void *data, size_t size)
+{
+ struct aa_profile *profile = NULL;
+ struct aa_namespace *ns = NULL;
+ struct aa_ext e = {
+ .start = data,
+ .end = data + size,
+ .pos = data,
+ .ns_name = NULL
+ };
+ ssize_t error = aa_verify_header(&e, "profile_load");
+ if (error)
+ return error;
+
+ profile = aa_unpack_profile(&e, "profile_load");
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
+
+ mutex_lock(&aa_interface_lock);
+ write_lock(&profile_ns_list_lock);
+ if (e.ns_name)
+ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
+ else
+ ns = default_namespace;
+ if (!ns) {
+ struct aa_namespace *new_ns;
+ write_unlock(&profile_ns_list_lock);
+ new_ns = alloc_aa_namespace(e.ns_name);
+ if (!new_ns) {
+ mutex_unlock(&aa_interface_lock);
+ return -ENOMEM;
+ }
+ write_lock(&profile_ns_list_lock);
+ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
+ if (!ns) {
+ list_add(&new_ns->list, &profile_ns_list);
+ ns = new_ns;
+ } else
+ free_aa_namespace(new_ns);
+ }
+
+ write_lock(&ns->lock);
+ if (__aa_find_profile(profile->name, &ns->profiles)) {
+ /* A profile with this name exists already. */
+ write_unlock(&ns->lock);
+ write_unlock(&profile_ns_list_lock);
+ mutex_unlock(&aa_interface_lock);
+ aa_put_profile(profile);
+ return -EEXIST;
+ }
+ profile->ns = aa_get_namespace(ns);
+ ns->profile_count++;
+ list_add(&profile->list, &ns->profiles);
+ write_unlock(&ns->lock);
+ write_unlock(&profile_ns_list_lock);
+ mutex_unlock(&aa_interface_lock);
+
+ return size;
+}
+
+/**
+ * task_replace - replace a task's profile
+ * @task: task to replace profile on
+ * @new_cxt: new aa_task_context to do replacement with
+ * @new_profile: new profile
+ */
+static inline void task_replace(struct task_struct *task,
+ struct aa_task_context *new_cxt,
+ struct aa_profile *new_profile)
+{
+ struct aa_task_context *cxt = aa_task_context(task);
+
+ AA_DEBUG("%s: replacing profile for task %d "
+ "profile=%s (%p)\n",
+ __FUNCTION__,
+ cxt->task->pid,
+ cxt->profile->name, cxt->profile);
+
+ aa_change_task_context(task, new_cxt, new_profile, cxt->cookie,
+ cxt->previous_profile);
+}
+
+/**
+ * aa_replace_profile - replace a profile on the profile list
+ * @udata: serialized data stream
+ * @size: size of the serialized data stream
+ *
+ * Unpack and replace a profile on the profile list, and update uses of that
+ * profile by any aa_task_context. If the profile does not exist on the
+ * profile list it is added. Returns @size on success, or a negative error
+ * code on failure.
+ */
+ssize_t aa_replace_profile(void *udata, size_t size)
+{
+ struct aa_profile *old_profile, *new_profile;
+ struct aa_namespace *ns;
+ struct aa_task_context *new_cxt;
+ struct aa_ext e = {
+ .start = udata,
+ .end = udata + size,
+ .pos = udata,
+ .ns_name = NULL
+ };
+
+ ssize_t error = aa_verify_header(&e, "profile_replace");
+ if (error)
+ return error;
+
+ new_profile = aa_unpack_profile(&e, "profile_replace");
+ if (IS_ERR(new_profile))
+ return PTR_ERR(new_profile);
+
+ mutex_lock(&aa_interface_lock);
+ write_lock(&profile_ns_list_lock);
+ if (e.ns_name)
+ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
+ else
+ ns = default_namespace;
+ if (!ns) {
+ struct aa_namespace *new_ns;
+ write_unlock(&profile_ns_list_lock);
+ new_ns = alloc_aa_namespace(e.ns_name);
+ if (!new_ns) {
+ mutex_unlock(&aa_interface_lock);
+ return -ENOMEM;
+ }
+ write_lock(&profile_ns_list_lock);
+ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
+ if (!ns) {
+ list_add(&new_ns->list, &profile_ns_list);
+ ns = new_ns;
+ } else
+ free_aa_namespace(new_ns);
+ }
+
+ write_lock(&ns->lock);
+ old_profile = __aa_find_profile(new_profile->name, &ns->profiles);
+ if (old_profile) {
+ lock_profile(old_profile);
+ old_profile->isstale = 1;
+ list_del_init(&old_profile->list);
+ unlock_profile(old_profile);
+ ns->profile_count--;
+ }
+ new_profile->ns = aa_get_namespace(ns);
+ ns->profile_count++;
+ list_add(&new_profile->list, &ns->profiles);
+ write_unlock(&ns->lock);
+ write_unlock(&profile_ns_list_lock);
+
+ if (!old_profile)
+ goto out;
+
+ /*
+ * Replacement needs to allocate a new aa_task_context for each
+ * task confined by old_profile. To allow this, the profile locks
+ * are only held while the actual switch is done for each task.
+ * While looping to allocate a new aa_task_context, old_profile's
+ * task list may get shorter if tasks exit or change their profile,
+ * but it will not get longer: new tasks will detect that
+ * old_profile is stale and will not use it.
+ */
+ do {
+ new_cxt = aa_alloc_task_context(GFP_KERNEL | __GFP_NOFAIL);
+
+ lock_both_profiles(old_profile, new_profile);
+ if (!list_empty(&old_profile->task_contexts)) {
+ struct task_struct *task =
+ list_entry(old_profile->task_contexts.next,
+ struct aa_task_context, list)->task;
+ task_lock(task);
+ task_replace(task, new_cxt, new_profile);
+ task_unlock(task);
+ new_cxt = NULL;
+ }
+ unlock_both_profiles(old_profile, new_profile);
+ } while (!new_cxt);
+ aa_free_task_context(new_cxt);
+ aa_put_profile(old_profile);
+
+out:
+ mutex_unlock(&aa_interface_lock);
+ return size;
+}
+
+/**
+ * aa_remove_profile - remove a profile from the system
+ * @name: name of the profile to remove
+ * @size: size of the name
+ *
+ * remove a profile from the profile list and all aa_task_context references
+ * to said profile.
+ */
+ssize_t aa_remove_profile(char *name, size_t size)
+{
+ struct aa_namespace *ns;
+ struct aa_profile *profile;
+
+ mutex_lock(&aa_interface_lock);
+ write_lock(&profile_ns_list_lock);
+
+ if (name[0] == '/') {
+ ns = default_namespace;
+ } else {
+ char *split = strchr(name, ':');
+ if (!split)
+ goto noent;
+ *split = 0;
+ ns = __aa_find_namespace(name, &profile_ns_list);
+ name = split + 1;
+ }
+
+ if (!ns)
+ goto noent;
+ write_lock(&ns->lock);
+ profile = __aa_find_profile(name, &ns->profiles);
+ if (!profile) {
+ write_unlock(&ns->lock);
+ goto noent;
+ }
+
+ /* Remove the profile from each task context it is on. */
+ lock_profile(profile);
+ profile->isstale = 1;
+ aa_unconfine_tasks(profile);
+ list_del_init(&profile->list);
+ ns->profile_count--;
+ unlock_profile(profile);
+ /* Release the profile itself. */
+ write_unlock(&ns->lock);
+ /* check to see if the namespace has become stale */
+ if (ns != default_namespace && ns->profile_count == 0) {
+ list_del_init(&ns->list);
+ aa_put_namespace(ns);
+ }
+ write_unlock(&profile_ns_list_lock);
+ mutex_unlock(&aa_interface_lock);
+ aa_put_profile(profile);
+
+ return size;
+
+noent:
+ write_unlock(&profile_ns_list_lock);
+ mutex_unlock(&aa_interface_lock);
+ return -ENOENT;
+}
+
+/**
+ * free_aa_namespace_kref - free aa_namespace by kref (see aa_put_namespace)
+ * @kref: kref of the aa_namespace to free
+ */
+void free_aa_namespace_kref(struct kref *kref)
+{
+ struct aa_namespace *ns = container_of(kref, struct aa_namespace, count);
+
+ free_aa_namespace(ns);
+}
+
+/**
+ * alloc_aa_namespace - allocate, initialize and return a new namespace
+ * @name: a preallocated name
+ * Returns NULL on failure.
+ */
+struct aa_namespace *alloc_aa_namespace(char *name)
+{
+ struct aa_namespace *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ AA_DEBUG("%s(%p)\n", __FUNCTION__, ns);
+ if (ns) {
+ ns->name = name;
+ INIT_LIST_HEAD(&ns->list);
+ INIT_LIST_HEAD(&ns->profiles);
+ kref_init(&ns->count);
+ rwlock_init(&ns->lock);
+
+ ns->null_complain_profile = alloc_aa_profile();
+ if (!ns->null_complain_profile) {
+ if (!name)
+ kfree(ns->name);
+ kfree(ns);
+ return NULL;
+ }
+ ns->null_complain_profile->name =
+ kstrdup("null-complain-profile", GFP_KERNEL);
+ if (!ns->null_complain_profile->name) {
+ free_aa_profile(ns->null_complain_profile);
+ if (!name)
+ kfree(ns->name);
+ kfree(ns);
+ return NULL;
+ }
+ ns->null_complain_profile->flags.complain = 1;
+ /* null_complain_profile doesn't contribute to ns ref count */
+ ns->null_complain_profile->ns = ns;
+ }
+ return ns;
+}
+
+/**
+ * free_aa_namespace - free a profile namespace
+ * @namespace: the namespace to free
+ *
+ * Free a namespace. All references to the namespace must have been put.
+ * If the namespace was referenced by a profile confining a task,
+ * free_aa_namespace will be called indirectly (through free_aa_profile)
+ * from an rcu callback routine, so we must not sleep here.
+ */
+void free_aa_namespace(struct aa_namespace *ns)
+{
+ AA_DEBUG("%s(%p)\n", __FUNCTION__, ns);
+
+ if (!ns)
+ return;
+
+ /* namespace still contains profiles -- invalid */
+ if (!list_empty(&ns->profiles)) {
+ AA_ERROR("%s: internal error, "
+ "namespace '%s' still contains profiles\n",
+ __FUNCTION__,
+ ns->name);
+ BUG();
+ }
+ if (!list_empty(&ns->list)) {
+ AA_ERROR("%s: internal error, "
+ "namespace '%s' still on list\n",
+ __FUNCTION__,
+ ns->name);
+ BUG();
+ }
+ /* null_complain_profile doesn't contribute to ns ref counting */
+ ns->null_complain_profile->ns = NULL;
+ aa_put_profile(ns->null_complain_profile);
+ kfree(ns->name);
+ kfree(ns);
+}
+
+/**
+ * free_aa_profile_kref - free aa_profile by kref (called by aa_put_profile)
+ * @kref: kref of the aa_profile to free
+ */
+void free_aa_profile_kref(struct kref *kref)
+{
+ struct aa_profile *p = container_of(kref, struct aa_profile, count);
+
+ free_aa_profile(p);
+}
+
+/**
+ * alloc_aa_profile - allocate, initialize and return a new profile
+ * Returns NULL on failure.
+ */
+struct aa_profile *alloc_aa_profile(void)
+{
+ struct aa_profile *profile;
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
+ if (profile) {
+ INIT_LIST_HEAD(&profile->list);
+ kref_init(&profile->count);
+ INIT_LIST_HEAD(&profile->task_contexts);
+ spin_lock_init(&profile->lock);
+ }
+ return profile;
+}
+
+/**
+ * free_aa_profile - free a profile
+ * @profile: the profile to free
+ *
+ * Free a profile, its hats and null_profile. All references to the profile,
+ * its hats and null_profile must have been put.
+ *
+ * If the profile was referenced from a task context, free_aa_profile() will
+ * be called from an rcu callback routine, so we must not sleep here.
+ */
+void free_aa_profile(struct aa_profile *profile)
+{
+ AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
+
+ if (!profile)
+ return;
+
+ /* profile is still on profile namespace list -- invalid */
+ if (!list_empty(&profile->list)) {
+ AA_ERROR("%s: internal error, "
+ "profile '%s' still on global list\n",
+ __FUNCTION__,
+ profile->name);
+ BUG();
+ }
+ aa_put_namespace(profile->ns);
+
+ aa_match_free(profile->file_rules);
+
+ if (profile->name) {
+ AA_DEBUG("%s: %s\n", __FUNCTION__, profile->name);
+ kfree(profile->name);
+ }
+
+ kfree(profile);
+}
+
+/**
+ * aa_unconfine_tasks - remove tasks on a profile's task context list
+ * @profile: profile to remove tasks from
+ *
+ * Assumes that @profile lock is held.
+ */
+void aa_unconfine_tasks(struct aa_profile *profile)
+{
+ while (!list_empty(&profile->task_contexts)) {
+ struct task_struct *task =
+ list_entry(profile->task_contexts.next,
+ struct aa_task_context, list)->task;
+ task_lock(task);
+ aa_change_task_context(task, NULL, NULL, 0, NULL);
+ task_unlock(task);
+ }
+}
--