1 From: John Johansen <jjohansen@suse.de>
2 Subject: AppArmor: Profile loading and manipulation, pathname matching
3
4 Pathname matching, transition table loading, profile loading and
5 manipulation.
6
7 Signed-off-by: John Johansen <jjohansen@suse.de>
8 Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
9
10 ---
11 security/apparmor/match.c | 364 ++++++++++++++
12 security/apparmor/match.h | 87 +++
13 security/apparmor/module_interface.c | 875 +++++++++++++++++++++++++++++++++++
14 3 files changed, 1326 insertions(+)
15
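The three files below form a pipeline: match.c and match.h load and walk the DFA transition tables, while module_interface.c unpacks serialized profiles that carry those tables. As a rough sketch of how the pieces fit together (illustrative only; blob and blob_size are hypothetical stand-ins for a serialized table set, and the real load path goes through aa_unpack_dfa() further below):

static int load_and_check(void *blob, size_t blob_size)
{
	struct aa_dfa *dfa = aa_match_alloc();
	int error = -ENOMEM;

	if (dfa) {
		error = unpack_dfa(dfa, blob, blob_size);	/* load the flex tables */
		if (!error)
			error = verify_dfa(dfa);	/* bounds-check states/transitions */
	}
	if (!error) {
		int audit_mask;
		/* walk the dfa over a pathname, yielding the accept permissions */
		unsigned int perms = aa_dfa_match(dfa, "/etc/passwd", &audit_mask);
		/* ... compare perms/audit_mask against the requested access ... */
	}
	aa_match_free(dfa);
	return error;
}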
16 --- /dev/null
17 +++ b/security/apparmor/match.c
18 @@ -0,0 +1,364 @@
19 +/*
20 + * Copyright (C) 2007 Novell/SUSE
21 + *
22 + * This program is free software; you can redistribute it and/or
23 + * modify it under the terms of the GNU General Public License as
24 + * published by the Free Software Foundation, version 2 of the
25 + * License.
26 + *
27 + * Regular expression transition table matching
28 + */
29 +
30 +#include <linux/kernel.h>
31 +#include <linux/slab.h>
32 +#include <linux/errno.h>
33 +#include "apparmor.h"
34 +#include "match.h"
35 +#include "inline.h"
36 +
37 +static struct table_header *unpack_table(void *blob, size_t bsize)
38 +{
39 + struct table_header *table = NULL;
40 + struct table_header th;
41 + size_t tsize;
42 +
43 + if (bsize < sizeof(struct table_header))
44 + goto out;
45 +
46 + th.td_id = be16_to_cpu(*(u16 *) (blob));
47 + th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
48 + th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
49 + blob += sizeof(struct table_header);
50 +
51 + if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
52 + th.td_flags == YYTD_DATA8))
53 + goto out;
54 +
55 + tsize = table_size(th.td_lolen, th.td_flags);
56 + if (bsize < tsize)
57 + goto out;
58 +
59 + table = kmalloc(tsize, GFP_KERNEL);
60 + if (table) {
61 + *table = th;
62 + if (th.td_flags == YYTD_DATA8)
63 + UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
64 + u8, byte_to_byte);
65 + else if (th.td_flags == YYTD_DATA16)
66 + UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
67 + u16, be16_to_cpu);
68 + else
69 + UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
70 + u32, be32_to_cpu);
71 + }
72 +
73 +out:
74 + return table;
75 +}
76 +
77 +int unpack_dfa(struct aa_dfa *dfa, void *blob, size_t size)
78 +{
79 + int hsize, i;
80 + int error = -ENOMEM;
81 +
82 + /* get dfa table set header */
83 + if (size < sizeof(struct table_set_header))
84 + goto fail;
85 +
86 + if (ntohl(*(u32 *)blob) != YYTH_MAGIC)
87 + goto fail;
88 +
89 + hsize = ntohl(*(u32 *)(blob + 4));
90 + if (size < hsize)
91 + goto fail;
92 +
93 + blob += hsize;
94 + size -= hsize;
95 +
96 + error = -EPROTO;
97 + while (size > 0) {
98 + struct table_header *table;
99 + table = unpack_table(blob, size);
100 + if (!table)
101 + goto fail;
102 +
103 + switch (table->td_id) {
104 + case YYTD_ID_ACCEPT:
105 + case YYTD_ID_ACCEPT2:
106 + case YYTD_ID_BASE:
107 + dfa->tables[table->td_id - 1] = table;
108 + if (table->td_flags != YYTD_DATA32)
109 + goto fail;
110 + break;
111 + case YYTD_ID_DEF:
112 + case YYTD_ID_NXT:
113 + case YYTD_ID_CHK:
114 + dfa->tables[table->td_id - 1] = table;
115 + if (table->td_flags != YYTD_DATA16)
116 + goto fail;
117 + break;
118 + case YYTD_ID_EC:
119 + dfa->tables[table->td_id - 1] = table;
120 + if (table->td_flags != YYTD_DATA8)
121 + goto fail;
122 + break;
123 + default:
124 + kfree(table);
125 + goto fail;
126 + }
127 +
128 + blob += table_size(table->td_lolen, table->td_flags);
129 + size -= table_size(table->td_lolen, table->td_flags);
130 + }
131 +
132 + return 0;
133 +
134 +fail:
135 + for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) {
136 + if (dfa->tables[i]) {
137 + kfree(dfa->tables[i]);
138 + dfa->tables[i] = NULL;
139 + }
140 + }
141 + return error;
142 +}
143 +
144 +/**
145 + * verify_dfa - verify that all the transitions and states in the dfa tables
146 + * are in bounds.
147 + * @dfa: dfa to test
148 + *
149 + * Assumes the dfa has gone through the verification done during unpacking.
150 + */
151 +int verify_dfa(struct aa_dfa *dfa)
152 +{
153 + size_t i, state_count, trans_count;
154 + int error = -EPROTO;
155 +
156 + /* check that required tables exist */
157 + if (!(dfa->tables[YYTD_ID_ACCEPT - 1] &&
158 + dfa->tables[YYTD_ID_ACCEPT2 - 1] &&
159 + dfa->tables[YYTD_ID_DEF - 1] &&
160 + dfa->tables[YYTD_ID_BASE - 1] &&
161 + dfa->tables[YYTD_ID_NXT - 1] &&
162 + dfa->tables[YYTD_ID_CHK - 1]))
163 + goto out;
164 +
165 + /* accept.size == default.size == base.size */
166 + state_count = dfa->tables[YYTD_ID_BASE - 1]->td_lolen;
167 + if (!(state_count == dfa->tables[YYTD_ID_DEF - 1]->td_lolen &&
168 + state_count == dfa->tables[YYTD_ID_ACCEPT - 1]->td_lolen &&
169 + state_count == dfa->tables[YYTD_ID_ACCEPT2 - 1]->td_lolen))
170 + goto out;
171 +
172 + /* next.size == chk.size */
173 + trans_count = dfa->tables[YYTD_ID_NXT - 1]->td_lolen;
174 + if (trans_count != dfa->tables[YYTD_ID_CHK - 1]->td_lolen)
175 + goto out;
176 +
177 + /* if there is an equivalence class table, its size must be 256 */
178 + if (dfa->tables[YYTD_ID_EC - 1] &&
179 + dfa->tables[YYTD_ID_EC - 1]->td_lolen != 256)
180 + goto out;
181 +
182 + for (i = 0; i < state_count; i++) {
183 + if (DEFAULT_TABLE(dfa)[i] >= state_count)
184 + goto out;
185 + if (BASE_TABLE(dfa)[i] >= trans_count + 256)
186 + goto out;
187 + }
188 +
189 + for (i = 0; i < trans_count; i++) {
190 + if (NEXT_TABLE(dfa)[i] >= state_count)
191 + goto out;
192 + if (CHECK_TABLE(dfa)[i] >= state_count)
193 + goto out;
194 + }
195 +
196 + /* verify accept permissions */
197 + for (i = 0; i < state_count; i++) {
198 + int mode = ACCEPT_TABLE(dfa)[i];
199 +
200 + if (mode & ~AA_VALID_PERM_MASK)
201 + goto out;
202 + if (ACCEPT_TABLE2(dfa)[i] & ~AA_VALID_PERM2_MASK)
203 + goto out;
204 +
205 + /* if any exec modifier is set MAY_EXEC must be set */
206 + if ((mode & AA_USER_EXEC_TYPE) && !(mode & AA_USER_EXEC))
207 + goto out;
208 + if ((mode & AA_OTHER_EXEC_TYPE) && !(mode & AA_OTHER_EXEC))
209 + goto out;
210 + }
211 +
212 + error = 0;
213 +out:
214 + return error;
215 +}
216 +
217 +struct aa_dfa *aa_match_alloc(void)
218 +{
219 + return kzalloc(sizeof(struct aa_dfa), GFP_KERNEL);
220 +}
221 +
222 +void aa_match_free(struct aa_dfa *dfa)
223 +{
224 + if (dfa) {
225 + int i;
226 +
227 + for (i = 0; i < ARRAY_SIZE(dfa->tables); i++)
228 + kfree(dfa->tables[i]);
229 + }
230 + kfree(dfa);
231 +}
232 +
233 +/**
234 + * aa_dfa_next_state_len - traverse @dfa to find state @str stops at
235 + * @dfa: the dfa to match @str against
236 + * @start: the state of the dfa to start matching in
237 + * @str: the string of bytes to match against the dfa
238 + * @len: length of the string of bytes to match
239 + *
240 + * aa_dfa_next_state will match @str against the dfa and return the state it
241 + * finished matching in. The final state can be used to look up the accepting
242 + * label, or as the start state of a continuing match.
243 + *
244 + * aa_dfa_next_state could be implemented using this function by doing
245 + * return aa_dfa_next_state_len(dfa, start, str, strlen(str));
246 + * but that would require traversing the string twice and be slightly
247 + * slower.
248 + */
249 +unsigned int aa_dfa_next_state_len(struct aa_dfa *dfa, unsigned int start,
250 + const char *str, int len)
251 +{
252 + u16 *def = DEFAULT_TABLE(dfa);
253 + u32 *base = BASE_TABLE(dfa);
254 + u16 *next = NEXT_TABLE(dfa);
255 + u16 *check = CHECK_TABLE(dfa);
256 + unsigned int state = start, pos;
257 +
258 + if (state == 0)
259 + return 0;
260 +
261 + /* current state is <state>, matching character *str */
262 + if (dfa->tables[YYTD_ID_EC - 1]) {
263 + u8 *equiv = EQUIV_TABLE(dfa);
264 + for (; len; len--) {
265 + pos = base[state] + equiv[(u8)*str++];
266 + if (check[pos] == state)
267 + state = next[pos];
268 + else
269 + state = def[state];
270 + }
271 + } else {
272 + for (; len; len--) {
273 + pos = base[state] + (u8)*str++;
274 + if (check[pos] == state)
275 + state = next[pos];
276 + else
277 + state = def[state];
278 + }
279 + }
280 + return state;
281 +}
282 +
283 +/**
284 + * aa_dfa_next_state - traverse @dfa to find state @str stops at
285 + * @dfa: the dfa to match @str against
286 + * @start: the state of the dfa to start matching in
287 + * @str: the null terminated string of bytes to match against the dfa
288 + *
289 + * aa_dfa_next_state will match @str against the dfa and return the state it
290 + * finished matching in. The final state can be used to look up the accepting
291 + * label, or as the start state of a continuing match.
292 + */
293 +unsigned int aa_dfa_next_state(struct aa_dfa *dfa, unsigned int start,
294 + const char *str)
295 +{
296 + u16 *def = DEFAULT_TABLE(dfa);
297 + u32 *base = BASE_TABLE(dfa);
298 + u16 *next = NEXT_TABLE(dfa);
299 + u16 *check = CHECK_TABLE(dfa);
300 + unsigned int state = start, pos;
301 +
302 + if (state == 0)
303 + return 0;
304 +
305 + /* current state is <state>, matching character *str */
306 + if (dfa->tables[YYTD_ID_EC - 1]) {
307 + u8 *equiv = EQUIV_TABLE(dfa);
308 + while (*str) {
309 + pos = base[state] + equiv[(u8)*str++];
310 + if (check[pos] == state)
311 + state = next[pos];
312 + else
313 + state = def[state];
314 + }
315 + } else {
316 + while (*str) {
317 + pos = base[state] + (u8)*str++;
318 + if (check[pos] == state)
319 + state = next[pos];
320 + else
321 + state = def[state];
322 + }
323 + }
324 + return state;
325 +}
326 +
327 +/**
328 + * aa_dfa_null_transition - step to next state after null character
329 + * @dfa: the dfa to match against
330 + * @start: the state of the dfa to start matching in
331 + *
332 + * aa_dfa_null_transition transitions to the next state after a null
333 + * character which is not used in standard matching and is only
334 + * used to separate pairs.
335 + */
336 +unsigned int aa_dfa_null_transition(struct aa_dfa *dfa, unsigned int start)
337 +{
338 + return aa_dfa_next_state_len(dfa, start, "", 1);
339 +}
340 +
341 +/**
342 + * aa_dfa_match - find accept perm for @str in @dfa
343 + * @dfa: the dfa to match @str against
344 + * @str: the string to match against the dfa
345 + * @audit_mask: the audit_mask for the final state
346 + *
347 + * aa_dfa_match will match @str and return the accept perms for the
348 + * final state.
349 + */
350 +unsigned int aa_dfa_match(struct aa_dfa *dfa, const char *str, int *audit_mask)
351 +{
352 + int state = aa_dfa_next_state(dfa, DFA_START, str);
353 + if (audit_mask)
354 + *audit_mask = dfa_audit_mask(dfa, state);
355 + return ACCEPT_TABLE(dfa)[state];
356 +}
357 +
358 +/**
359 + * aa_match_state - find accept perm and state for @str in @dfa
360 + * @dfa: the dfa to match @str against
361 + * @start: the state to start the match from
362 + * @str: the string to match against the dfa
363 + * @final: the state that the match finished in
364 + *
365 + * aa_match_state will match @str and return the accept perms, setting
366 + * @final to the state the match finished in.
367 + */
368 +unsigned int aa_match_state(struct aa_dfa *dfa, unsigned int start,
369 + const char *str, unsigned int *final)
370 +{
371 + unsigned int state;
372 + if (dfa) {
373 + state = aa_dfa_next_state(dfa, start, str);
374 + if (final)
375 + *final = state;
376 + return ACCEPT_TABLE(dfa)[state];
377 + }
378 + if (final)
379 + *final = 0;
380 + return 0;
381 +}
382 +
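The traversal loops above are the classic comb-compressed table walk used by flex. A single step, restated as a standalone sketch (not part of the patch; it uses the table accessors from match.h below):

/*
 * For state s and input byte c (already mapped through the equivalence
 * table when one exists), the candidate slot is base[s] + c. The check
 * table records which state owns that slot; on a miss the state's
 * shared default transition is taken instead.
 */
static unsigned int one_step(struct aa_dfa *dfa, unsigned int s, unsigned char c)
{
	unsigned int pos = BASE_TABLE(dfa)[s] + c;

	if (CHECK_TABLE(dfa)[pos] == s)
		return NEXT_TABLE(dfa)[pos];	/* explicit transition */
	return DEFAULT_TABLE(dfa)[s];		/* shared fallback */
}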
383 --- /dev/null
384 +++ b/security/apparmor/match.h
385 @@ -0,0 +1,87 @@
386 +/*
387 + * Copyright (C) 2007 Novell/SUSE
388 + *
389 + * This program is free software; you can redistribute it and/or
390 + * modify it under the terms of the GNU General Public License as
391 + * published by the Free Software Foundation, version 2 of the
392 + * License.
393 + *
394 + * AppArmor submodule (match) prototypes
395 + */
396 +
397 +#ifndef __MATCH_H
398 +#define __MATCH_H
399 +
400 +#define DFA_START 1
401 +
402 +/**
403 + * The format used for transition tables is based on the GNU flex table
404 + * file format (--tables-file option; see Table File Format in the flex
405 + * info pages and the flex sources for documentation). The magic number
406 + * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
407 + * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
408 + * slightly differently (see the apparmor-parser package).
409 + */
410 +
411 +#define YYTH_MAGIC 0x1B5E783D
412 +
413 +struct table_set_header {
414 + u32 th_magic; /* YYTH_MAGIC */
415 + u32 th_hsize;
416 + u32 th_ssize;
417 + u16 th_flags;
418 + char th_version[];
419 +};
420 +
421 +#define YYTD_ID_ACCEPT 1
422 +#define YYTD_ID_BASE 2
423 +#define YYTD_ID_CHK 3
424 +#define YYTD_ID_DEF 4
425 +#define YYTD_ID_EC 5
426 +#define YYTD_ID_META 6
427 +#define YYTD_ID_ACCEPT2 7
428 +#define YYTD_ID_NXT 8
429 +
430 +
431 +#define YYTD_DATA8 1
432 +#define YYTD_DATA16 2
433 +#define YYTD_DATA32 4
434 +
435 +struct table_header {
436 + u16 td_id;
437 + u16 td_flags;
438 + u32 td_hilen;
439 + u32 td_lolen;
440 + char td_data[];
441 +};
442 +
443 +#define DEFAULT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_DEF - 1]->td_data))
444 +#define BASE_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_BASE - 1]->td_data))
445 +#define NEXT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_NXT - 1]->td_data))
446 +#define CHECK_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_CHK - 1]->td_data))
447 +#define EQUIV_TABLE(DFA) ((u8 *)((DFA)->tables[YYTD_ID_EC - 1]->td_data))
448 +#define ACCEPT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT - 1]->td_data))
449 +#define ACCEPT_TABLE2(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT2 -1]->td_data))
450 +
451 +struct aa_dfa {
452 + struct table_header *tables[YYTD_ID_NXT];
453 +};
454 +
455 +#define byte_to_byte(X) (X)
456 +
457 +#define UNPACK_ARRAY(TABLE, BLOB, LEN, TYPE, NTOHX) \
458 + do { \
459 + typeof(LEN) __i; \
460 + TYPE *__t = (TYPE *) TABLE; \
461 + TYPE *__b = (TYPE *) BLOB; \
462 + for (__i = 0; __i < LEN; __i++) { \
463 + __t[__i] = NTOHX(__b[__i]); \
464 + } \
465 + } while (0)
466 +
467 +static inline size_t table_size(size_t len, size_t el_size)
468 +{
469 + return ALIGN(sizeof(struct table_header) + len * el_size, 8);
470 +}
471 +
472 +#endif /* __MATCH_H */
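One subtlety worth noting: the YYTD_DATA* flag values (1, 2 and 4) are chosen to equal the element width in bytes, which is why unpack_table() and unpack_dfa() can pass td_flags straight through as table_size()'s el_size argument. A sketch (illustrative only):

/*
 * One serialized table: a fixed header (td_id, td_flags, td_hilen,
 * td_lolen) followed by td_lolen big-endian elements, with the whole
 * thing padded out to an 8-byte boundary by table_size().
 */
static size_t serialized_table_size(const struct table_header *th)
{
	return table_size(th->td_lolen, th->td_flags);	/* flags == element width */
}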
473 --- /dev/null
474 +++ b/security/apparmor/module_interface.c
475 @@ -0,0 +1,875 @@
476 +/*
477 + * Copyright (C) 1998-2007 Novell/SUSE
478 + *
479 + * This program is free software; you can redistribute it and/or
480 + * modify it under the terms of the GNU General Public License as
481 + * published by the Free Software Foundation, version 2 of the
482 + * License.
483 + *
484 + * AppArmor userspace policy interface
485 + */
486 +
487 +#include <asm/unaligned.h>
488 +
489 +#include "apparmor.h"
490 +#include "inline.h"
491 +
492 +/*
493 + * This mutex is used to synchronize profile adds, replacements, and
494 + * removals: we only allow one of these operations at a time.
495 + * We do not use the profile list lock here in order to avoid blocking
496 + * exec during those operations. (Exec involves a profile list lookup
497 + * for named-profile transitions.)
498 + */
499 +DEFINE_MUTEX(aa_interface_lock);
500 +
501 +/*
502 + * The AppArmor interface treats data as a type byte followed by the
503 + * actual data. The interface has the notion of a named entry,
504 + * which has a name (AA_NAME typecode followed by name string) followed by
505 + * the entry's typecode and data. Named types allow for optional
506 + * elements and extensions to be added and tested for without breaking
507 + * backwards compatibility.
508 + */
509 +
510 +enum aa_code {
511 + AA_U8,
512 + AA_U16,
513 + AA_U32,
514 + AA_U64,
515 + AA_NAME, /* same as string except it is the item's name */
516 + AA_STRING,
517 + AA_BLOB,
518 + AA_STRUCT,
519 + AA_STRUCTEND,
520 + AA_LIST,
521 + AA_LISTEND,
522 + AA_ARRAY,
523 + AA_ARRAYEND,
524 +};
525 +
526 +/*
527 + * aa_ext is the read head of the buffer containing the serialized profile. The
528 + * data is copied into a kernel buffer in apparmorfs and then handed off to
529 + * the unpack routines.
530 + */
531 +struct aa_ext {
532 + void *start;
533 + void *end;
534 + void *pos; /* pointer to current position in the buffer */
535 + u32 version;
536 + char *ns_name;
537 +};
538 +
539 +static inline int aa_inbounds(struct aa_ext *e, size_t size)
540 +{
541 + return (size <= e->end - e->pos);
542 +}
543 +
544 +/**
545 + * aa_is_u16_chunk - test and do bounds checking for a u16-sized chunk
546 + * @e: serialized data read head
547 + * @chunk: start address for chunk of data
548 + *
549 + * Returns the size of the chunk found, with the read head left at the end
550 + * of the chunk.
551 + */
552 +static size_t aa_is_u16_chunk(struct aa_ext *e, char **chunk)
553 +{
554 + void *pos = e->pos;
555 + size_t size = 0;
556 +
557 + if (!aa_inbounds(e, sizeof(u16)))
558 + goto fail;
559 + size = le16_to_cpu(get_unaligned((u16 *)e->pos));
560 + e->pos += sizeof(u16);
561 + if (!aa_inbounds(e, size))
562 + goto fail;
563 + *chunk = e->pos;
564 + e->pos += size;
565 + return size;
566 +
567 +fail:
568 + e->pos = pos;
569 + return 0;
570 +}
571 +
572 +static inline int aa_is_X(struct aa_ext *e, enum aa_code code)
573 +{
574 + if (!aa_inbounds(e, 1))
575 + return 0;
576 + if (*(u8 *) e->pos != code)
577 + return 0;
578 + e->pos++;
579 + return 1;
580 +}
581 +
582 +/**
583 + * aa_is_nameX - check if the next element is of type X with a name of @name
584 + * @e: serialized data extent information
585 + * @code: type code
586 + * @name: name to match to the serialized element.
587 + *
588 + * check that the next serialized data element is of type X and has a tag
589 + * name @name. If @name is specified then there must be a matching
590 + * name element in the stream. If @name is NULL any name element will be
591 + * skipped and only the typecode will be tested.
592 + * returns %1 on success (both type code and name tests match) and the read
593 + * head is advanced past the headers;
594 + * returns %0 if either match fails, and the read head does not move
595 + */
596 +static int aa_is_nameX(struct aa_ext *e, enum aa_code code, const char *name)
597 +{
598 + void *pos = e->pos;
599 + /*
600 + * Check for the presence of a tag name; if one is present, the AA_NAME
601 + * tag is followed by a u16-sized name chunk.
602 + */
603 + if (aa_is_X(e, AA_NAME)) {
604 + char *tag;
605 + size_t size = aa_is_u16_chunk(e, &tag);
606 + /* if a name is specified it must match. otherwise skip tag */
607 + if (name && (!size || strcmp(name, tag)))
608 + goto fail;
609 + } else if (name) {
610 + /* if a name is specified and there is no name tag fail */
611 + goto fail;
612 + }
613 +
614 + /* now check if type code matches */
615 + if (aa_is_X(e, code))
616 + return 1;
617 +
618 +fail:
619 + e->pos = pos;
620 + return 0;
621 +}
622 +
623 +static int aa_is_u16(struct aa_ext *e, u16 *data, const char *name)
624 +{
625 + void *pos = e->pos;
626 + if (aa_is_nameX(e, AA_U16, name)) {
627 + if (!aa_inbounds(e, sizeof(u16)))
628 + goto fail;
629 + if (data)
630 + *data = le16_to_cpu(get_unaligned((u16 *)e->pos));
631 + e->pos += sizeof(u16);
632 + return 1;
633 + }
634 +fail:
635 + e->pos = pos;
636 + return 0;
637 +}
638 +
639 +static int aa_is_u32(struct aa_ext *e, u32 *data, const char *name)
640 +{
641 + void *pos = e->pos;
642 + if (aa_is_nameX(e, AA_U32, name)) {
643 + if (!aa_inbounds(e, sizeof(u32)))
644 + goto fail;
645 + if (data)
646 + *data = le32_to_cpu(get_unaligned((u32 *)e->pos));
647 + e->pos += sizeof(u32);
648 + return 1;
649 + }
650 +fail:
651 + e->pos = pos;
652 + return 0;
653 +}
654 +
655 +static size_t aa_is_array(struct aa_ext *e, const char *name)
656 +{
657 + void *pos = e->pos;
658 + if (aa_is_nameX(e, AA_ARRAY, name)) {
659 + int size;
660 + if (!aa_inbounds(e, sizeof(u16)))
661 + goto fail;
662 + size = (int) le16_to_cpu(get_unaligned((u16 *)e->pos));
663 + e->pos += sizeof(u16);
664 + return size;
665 + }
666 +fail:
667 + e->pos = pos;
668 + return 0;
669 +}
670 +
671 +static size_t aa_is_blob(struct aa_ext *e, char **blob, const char *name)
672 +{
673 + void *pos = e->pos;
674 + if (aa_is_nameX(e, AA_BLOB, name)) {
675 + u32 size;
676 + if (!aa_inbounds(e, sizeof(u32)))
677 + goto fail;
678 + size = le32_to_cpu(get_unaligned((u32 *)e->pos));
679 + e->pos += sizeof(u32);
680 + if (aa_inbounds(e, (size_t) size)) {
681 + *blob = e->pos;
682 + e->pos += size;
683 + return size;
684 + }
685 + }
686 +fail:
687 + e->pos = pos;
688 + return 0;
689 +}
690 +
691 +static int aa_is_dynstring(struct aa_ext *e, char **string, const char *name)
692 +{
693 + char *src_str;
694 + size_t size = 0;
695 + void *pos = e->pos;
696 + *string = NULL;
697 + if (aa_is_nameX(e, AA_STRING, name) &&
698 + (size = aa_is_u16_chunk(e, &src_str))) {
699 + char *str;
700 + if (!(str = kmalloc(size, GFP_KERNEL)))
701 + goto fail;
702 + memcpy(str, src_str, size);
703 + *string = str;
704 + }
705 +
706 + return size;
707 +
708 +fail:
709 + e->pos = pos;
710 + return 0;
711 +}
712 +
713 +/**
714 + * aa_unpack_dfa - unpack a file rule dfa
715 + * @e: serialized data extent information
716 + *
717 + * returns dfa or ERR_PTR
718 + */
719 +static struct aa_dfa *aa_unpack_dfa(struct aa_ext *e)
720 +{
721 + char *blob = NULL;
722 + size_t size; int error = 0;
723 + struct aa_dfa *dfa = NULL;
724 +
725 + size = aa_is_blob(e, &blob, "aadfa");
726 + if (size) {
727 + dfa = aa_match_alloc();
728 + if (dfa) {
729 + /*
730 + * The dfa is aligned within the blob to an 8-byte
731 + * boundary from the beginning of the stream.
732 + */
733 + size_t sz = blob - (char *) e->start;
734 + size_t pad = ALIGN(sz, 8) - sz;
735 + error = unpack_dfa(dfa, blob + pad, size - pad);
736 + if (!error)
737 + error = verify_dfa(dfa);
738 + } else {
739 + error = -ENOMEM;
740 + }
741 +
742 + if (error) {
743 + aa_match_free(dfa);
744 + dfa = ERR_PTR(error);
745 + }
746 + }
747 +
748 + return dfa;
749 +}
750 +
751 +static int aa_unpack_exec_table(struct aa_ext *e, struct aa_profile *profile)
752 +{
753 + void *pos = e->pos;
754 +
755 + /* exec table is optional */
756 + if (aa_is_nameX(e, AA_STRUCT, "xtable")) {
757 + int i, size;
758 +
759 + size = aa_is_array(e, NULL);
760 + /* currently 4 exec bits and entries 0-3 are reserved iupcx */
761 + if (size > 16 - 4)
762 + goto fail;
763 + profile->exec_table = kzalloc(sizeof(char *) * size,
764 + GFP_KERNEL);
765 + if (!profile->exec_table)
766 + goto fail;
767 +
768 + for (i = 0; i < size; i++) {
769 + char *tmp;
770 + if (!aa_is_dynstring(e, &tmp, NULL))
771 + goto fail;
772 + /* note: strings beginning with a : have an embedded
773 + \0 separating the profile ns name from the profile
774 + name */
775 + profile->exec_table[i] = tmp;
776 + }
777 + if (!aa_is_nameX(e, AA_ARRAYEND, NULL))
778 + goto fail;
779 + if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
780 + goto fail;
781 + profile->exec_table_size = size;
782 + }
783 + return 1;
784 +
785 +fail:
786 + e->pos = pos;
787 + return 0;
788 +}
789 +
790 +/**
791 + * aa_unpack_profile - unpack a serialized profile
792 + * @e: serialized data extent information
793 + * @sa: audit struct for the operation
794 + */
795 +static struct aa_profile *aa_unpack_profile(struct aa_ext *e,
796 + struct aa_audit *sa)
797 +{
798 + struct aa_profile *profile = NULL;
799 +
800 + int error = -EPROTO;
801 +
802 + profile = alloc_aa_profile();
803 + if (!profile)
804 + return ERR_PTR(-ENOMEM);
805 +
806 + /* check that we have the right struct being passed */
807 + if (!aa_is_nameX(e, AA_STRUCT, "profile"))
808 + goto fail;
809 + if (!aa_is_dynstring(e, &profile->name, NULL))
810 + goto fail;
811 +
812 + /* per profile debug flags (complain, audit) */
813 + if (!aa_is_nameX(e, AA_STRUCT, "flags"))
814 + goto fail;
815 + if (!aa_is_u32(e, &(profile->flags.hat), NULL))
816 + goto fail;
817 + if (!aa_is_u32(e, &(profile->flags.complain), NULL))
818 + goto fail;
819 + if (!aa_is_u32(e, &(profile->flags.audit), NULL))
820 + goto fail;
821 + if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
822 + goto fail;
823 +
824 + if (!aa_is_u32(e, &(profile->capabilities), NULL))
825 + goto fail;
826 + if (!aa_is_u32(e, &(profile->audit_caps), NULL))
827 + goto fail;
828 + if (!aa_is_u32(e, &(profile->quiet_caps), NULL))
829 + goto fail;
830 + if (!aa_is_u32(e, &(profile->set_caps), NULL))
831 + goto fail;
832 +
833 + /* get file rules */
834 + profile->file_rules = aa_unpack_dfa(e);
835 + if (IS_ERR(profile->file_rules)) {
836 + error = PTR_ERR(profile->file_rules);
837 + profile->file_rules = NULL;
838 + goto fail;
839 + }
840 +
841 + if (!aa_unpack_exec_table(e, profile))
842 + goto fail;
843 +
844 + if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
845 + goto fail;
846 +
847 + return profile;
848 +
849 +fail:
850 + sa->name = profile && profile->name ? profile->name : "unknown";
851 + if (!sa->info)
852 + sa->info = "failed to unpack profile";
853 + aa_audit_status(NULL, sa);
854 +
855 + if (profile)
856 + free_aa_profile(profile);
857 +
858 + return ERR_PTR(error);
859 +}
860 +
861 +/**
862 + * aa_verify_header - unpack and verify the serialized stream header
863 + * @e: serialized data read head
864 + * @sa: audit struct for the operation the header is being verified for
865 + *
866 + * returns error or 0 if header is good
867 + */
868 +static int aa_verify_header(struct aa_ext *e, struct aa_audit *sa)
869 +{
870 + /* get the interface version */
871 + if (!aa_is_u32(e, &e->version, "version")) {
872 + sa->info = "invalid profile format";
873 + aa_audit_status(NULL, sa);
874 + return -EPROTONOSUPPORT;
875 + }
876 +
877 + /* check that the interface version is currently supported */
878 + if (e->version != 5) {
879 + sa->info = "unsupported interface version";
880 + aa_audit_status(NULL, sa);
881 + return -EPROTONOSUPPORT;
882 + }
883 +
884 + /* read the namespace if present */
885 + if (!aa_is_dynstring(e, &e->ns_name, "namespace")) {
886 + e->ns_name = NULL;
887 + }
888 +
889 + return 0;
890 +}
891 +
892 +/**
893 + * aa_add_profile - Unpack and add a new profile to the profile list
894 + * @data: serialized data stream
895 + * @size: size of the serialized data stream
896 + */
897 +ssize_t aa_add_profile(void *data, size_t size)
898 +{
899 + struct aa_profile *profile = NULL;
900 + struct aa_namespace *ns = NULL;
901 + struct aa_ext e = {
902 + .start = data,
903 + .end = data + size,
904 + .pos = data,
905 + .ns_name = NULL
906 + };
907 + ssize_t error;
908 + struct aa_audit sa;
909 + memset(&sa, 0, sizeof(sa));
910 + sa.operation = "profile_load";
911 + sa.gfp_mask = GFP_KERNEL;
912 +
913 + error = aa_verify_header(&e, &sa);
914 + if (error)
915 + return error;
916 +
917 + profile = aa_unpack_profile(&e, &sa);
918 + if (IS_ERR(profile))
919 + return PTR_ERR(profile);
920 +
921 + mutex_lock(&aa_interface_lock);
922 + write_lock(&profile_ns_list_lock);
923 + if (e.ns_name)
924 + ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
925 + else
926 + ns = default_namespace;
927 + if (!ns) {
928 + struct aa_namespace *new_ns;
929 + write_unlock(&profile_ns_list_lock);
930 + new_ns = alloc_aa_namespace(e.ns_name);
931 + if (!new_ns) {
932 + mutex_unlock(&aa_interface_lock);
933 + return -ENOMEM;
934 + }
935 + write_lock(&profile_ns_list_lock);
936 + ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
937 + if (!ns) {
938 + list_add(&new_ns->list, &profile_ns_list);
939 + ns = new_ns;
940 + } else
941 + free_aa_namespace(new_ns);
942 + }
943 +
944 + write_lock(&ns->lock);
945 + if (__aa_find_profile(profile->name, &ns->profiles)) {
946 + /* A profile with this name exists already. */
947 + write_unlock(&ns->lock);
948 + write_unlock(&profile_ns_list_lock);
949 + sa.name = profile->name;
950 + sa.name2 = ns->name;
951 + sa.info = "failed: profile already loaded";
952 + aa_audit_status(NULL, &sa);
953 + mutex_unlock(&aa_interface_lock);
954 + aa_put_profile(profile);
955 + return -EEXIST;
956 + }
957 + profile->ns = aa_get_namespace(ns);
958 + ns->profile_count++;
959 + list_add(&profile->list, &ns->profiles);
960 + write_unlock(&ns->lock);
961 + write_unlock(&profile_ns_list_lock);
962 +
963 + sa.name = profile->name;
964 + sa.name2 = ns->name;
965 + aa_audit_status(NULL, &sa);
966 + mutex_unlock(&aa_interface_lock);
967 + return size;
968 +}
969 +
970 +/**
971 + * task_replace - replace a task's profile
972 + * @task: task to replace profile on
973 + * @new_cxt: new aa_task_context to do replacement with
974 + * @new_profile: new profile
975 + */
976 +static inline void task_replace(struct task_struct *task,
977 + struct aa_task_context *new_cxt,
978 + struct aa_profile *new_profile)
979 +{
980 + struct aa_task_context *cxt = aa_task_context(task);
981 +
982 + AA_DEBUG("%s: replacing profile for task %d "
983 + "profile=%s (%p)\n",
984 + __FUNCTION__,
985 + cxt->task->pid,
986 + cxt->profile->name, cxt->profile);
987 +
988 + aa_change_task_context(task, new_cxt, new_profile, cxt->cookie,
989 + cxt->previous_profile);
990 +}
991 +
992 +/**
993 + * aa_replace_profile - replace a profile on the profile list
994 + * @udata: serialized data stream
995 + * @size: size of the serialized data stream
996 + *
997 + * unpack and replace a profile on the profile list, updating uses of that
998 + * profile by any aa_task_context. If the profile does not exist on the
999 + * profile list it is added. Returns @size on success, or error.
1000 + */
1001 +ssize_t aa_replace_profile(void *udata, size_t size)
1002 +{
1003 + struct aa_profile *old_profile, *new_profile;
1004 + struct aa_namespace *ns;
1005 + struct aa_task_context *new_cxt;
1006 + struct aa_ext e = {
1007 + .start = udata,
1008 + .end = udata + size,
1009 + .pos = udata,
1010 + .ns_name = NULL
1011 + };
1012 + ssize_t error;
1013 + struct aa_audit sa;
1014 + memset(&sa, 0, sizeof(sa));
1015 + sa.operation = "profile_replace";
1016 + sa.gfp_mask = GFP_KERNEL;
1017 +
1018 + error = aa_verify_header(&e, &sa);
1019 + if (error)
1020 + return error;
1021 +
1022 + new_profile = aa_unpack_profile(&e, &sa);
1023 + if (IS_ERR(new_profile))
1024 + return PTR_ERR(new_profile);
1025 +
1026 + mutex_lock(&aa_interface_lock);
1027 + write_lock(&profile_ns_list_lock);
1028 + if (e.ns_name)
1029 + ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
1030 + else
1031 + ns = default_namespace;
1032 + if (!ns) {
1033 + struct aa_namespace *new_ns;
1034 + write_unlock(&profile_ns_list_lock);
1035 + new_ns = alloc_aa_namespace(e.ns_name);
1036 + if (!new_ns) {
1037 + mutex_unlock(&aa_interface_lock);
1038 + return -ENOMEM;
1039 + }
1040 + write_lock(&profile_ns_list_lock);
1041 + ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
1042 + if (!ns) {
1043 + list_add(&new_ns->list, &profile_ns_list);
1044 + ns = new_ns;
1045 + } else
1046 + free_aa_namespace(new_ns);
1047 + }
1048 +
1049 + write_lock(&ns->lock);
1050 + old_profile = __aa_find_profile(new_profile->name, &ns->profiles);
1051 + if (old_profile) {
1052 + lock_profile(old_profile);
1053 + old_profile->isstale = 1;
1054 + list_del_init(&old_profile->list);
1055 + unlock_profile(old_profile);
1056 + ns->profile_count--;
1057 + }
1058 + new_profile->ns = aa_get_namespace(ns);
1059 + ns->profile_count++;
1060 + /* we don't need an extra ref count to keep new_profile as
1061 + * it is protected by the interface mutex */
1062 + list_add(&new_profile->list, &ns->profiles);
1063 + write_unlock(&ns->lock);
1064 + write_unlock(&profile_ns_list_lock);
1065 +
1066 + if (!old_profile) {
1067 + sa.operation = "profile_load";
1068 + goto out;
1069 + }
1070 + /*
1071 + * Replacement needs to allocate a new aa_task_context for each
1072 + * task confined by old_profile. To do this the profile locks
1073 + * are only held when the actual switch is done per task. While
1074 + * looping to allocate a new aa_task_context the old_task list
1075 + * may get shorter if tasks exit/change their profile but will
1076 + * not get longer, as new tasks will not use old_profile after
1077 + * detecting that it is stale.
1078 + */
1079 + do {
1080 + new_cxt = aa_alloc_task_context(GFP_KERNEL | __GFP_NOFAIL);
1081 +
1082 + lock_both_profiles(old_profile, new_profile);
1083 + if (!list_empty(&old_profile->task_contexts)) {
1084 + struct task_struct *task =
1085 + list_entry(old_profile->task_contexts.next,
1086 + struct aa_task_context, list)->task;
1087 + task_lock(task);
1088 + task_replace(task, new_cxt, new_profile);
1089 + task_unlock(task);
1090 + new_cxt = NULL;
1091 + }
1092 + unlock_both_profiles(old_profile, new_profile);
1093 + } while (!new_cxt);
1094 + aa_free_task_context(new_cxt);
1095 + aa_put_profile(old_profile);
1096 +
1097 +out:
1098 + sa.name = new_profile->name;
1099 + sa.name2 = ns->name;
1100 + aa_audit_status(NULL, &sa);
1101 + mutex_unlock(&aa_interface_lock);
1102 + return size;
1103 +}
1104 +
1105 +/**
1106 + * aa_remove_profile - remove a profile from the system
1107 + * @name: name of the profile to remove
1108 + * @size: size of the name
1109 + *
1110 + * remove a profile from the profile list and all aa_task_context references
1111 + * to said profile.
1112 + */
1113 +ssize_t aa_remove_profile(char *name, size_t size)
1114 +{
1115 + struct aa_namespace *ns;
1116 + struct aa_profile *profile;
1117 + struct aa_audit sa;
1118 + memset(&sa, 0, sizeof(sa));
1119 + sa.operation = "profile_remove";
1120 + sa.gfp_mask = GFP_KERNEL;
1121 +
1122 + mutex_lock(&aa_interface_lock);
1123 + write_lock(&profile_ns_list_lock);
1124 +
1125 + if (name[0] == ':') {
1126 + char *split = strchr(name + 1, ':');
1127 + if (!split)
1128 + goto noent;
1129 + *split = 0;
1130 + ns = __aa_find_namespace(name + 1, &profile_ns_list);
1131 + name = split + 1;
1132 + } else {
1133 + ns = default_namespace;
1134 + }
1135 +
1136 + if (!ns)
1137 + goto noent;
1138 + sa.name2 = ns->name;
1139 + write_lock(&ns->lock);
1140 + profile = __aa_find_profile(name, &ns->profiles);
1141 + if (!profile) {
1142 + write_unlock(&ns->lock);
1143 + goto noent;
1144 + }
1145 + sa.name = profile->name;
1146 +
1147 + /* Remove the profile from each task context it is on. */
1148 + lock_profile(profile);
1149 + profile->isstale = 1;
1150 + aa_unconfine_tasks(profile);
1151 + list_del_init(&profile->list);
1152 + ns->profile_count--;
1153 + unlock_profile(profile);
1154 + /* Release the profile itself. */
1155 + write_unlock(&ns->lock);
1156 + /* check to see if the namespace has become stale */
1157 + if (ns != default_namespace && ns->profile_count == 0) {
1158 + list_del_init(&ns->list);
1159 + aa_put_namespace(ns);
1160 + }
1161 + write_unlock(&profile_ns_list_lock);
1162 +
1163 + aa_audit_status(NULL, &sa);
1164 + mutex_unlock(&aa_interface_lock);
1165 + aa_put_profile(profile);
1166 +
1167 + return size;
1168 +
1169 +noent:
1170 + write_unlock(&profile_ns_list_lock);
1171 + sa.info = "failed: profile does not exist";
1172 + aa_audit_status(NULL, &sa);
1173 + mutex_unlock(&aa_interface_lock);
1174 + return -ENOENT;
1175 +}
1176 +
1177 +/**
1178 + * free_aa_namespace_kref - free aa_namespace by kref (see aa_put_namespace)
1179 + * @kref: kref callback for freeing of a namespace
1180 + */
1181 +void free_aa_namespace_kref(struct kref *kref)
1182 +{
1183 + struct aa_namespace *ns = container_of(kref, struct aa_namespace, count);
1184 +
1185 + free_aa_namespace(ns);
1186 +}
1187 +
1188 +/**
1189 + * alloc_aa_namespace - allocate, initialize and return a new namespace
1190 + * @name: a preallocated name
1191 + * Returns NULL on failure.
1192 + */
1193 +struct aa_namespace *alloc_aa_namespace(char *name)
1194 +{
1195 + struct aa_namespace *ns;
1196 +
1197 + ns = kzalloc(sizeof(*ns), GFP_KERNEL);
1198 + AA_DEBUG("%s(%p)\n", __FUNCTION__, ns);
1199 + if (ns) {
1200 + ns->name = name;
1201 + INIT_LIST_HEAD(&ns->list);
1202 + INIT_LIST_HEAD(&ns->profiles);
1203 + kref_init(&ns->count);
1204 + rwlock_init(&ns->lock);
1205 +
1206 + ns->null_complain_profile = alloc_aa_profile();
1207 + if (!ns->null_complain_profile) {
1208 + if (!name)
1209 + kfree(ns->name);
1210 + kfree(ns);
1211 + return NULL;
1212 + }
1213 + ns->null_complain_profile->name =
1214 + kstrdup("null-complain-profile", GFP_KERNEL);
1215 + if (!ns->null_complain_profile->name) {
1216 + free_aa_profile(ns->null_complain_profile);
1217 + if (!name)
1218 + kfree(ns->name);
1219 + kfree(ns);
1220 + return NULL;
1221 + }
1222 + ns->null_complain_profile->flags.complain = 1;
1223 + /* null_complain_profile doesn't contribute to ns ref count */
1224 + ns->null_complain_profile->ns = ns;
1225 + }
1226 + return ns;
1227 +}
1228 +
1229 +/**
1230 + * free_aa_namespace - free a profile namespace
1231 + * @namespace: the namespace to free
1232 + *
1233 + * Free a namespace. All references to the namespace must have been put.
1234 + * If the namespace was referenced by a profile confining a task,
1235 + * free_aa_namespace will be called indirectly (through free_aa_profile)
1236 + * from an rcu callback routine, so we must not sleep here.
1237 + */
1238 +void free_aa_namespace(struct aa_namespace *ns)
1239 +{
1240 + AA_DEBUG("%s(%p)\n", __FUNCTION__, ns);
1241 +
1242 + if (!ns)
1243 + return;
1244 +
1245 + /* namespace still contains profiles -- invalid */
1246 + if (!list_empty(&ns->profiles)) {
1247 + AA_ERROR("%s: internal error, "
1248 + "namespace '%s' still contains profiles\n",
1249 + __FUNCTION__,
1250 + ns->name);
1251 + BUG();
1252 + }
1253 + if (!list_empty(&ns->list)) {
1254 + AA_ERROR("%s: internal error, "
1255 + "namespace '%s' still on list\n",
1256 + __FUNCTION__,
1257 + ns->name);
1258 + BUG();
1259 + }
1260 + /* null_complain_profile doesn't contribute to ns ref counting */
1261 + ns->null_complain_profile->ns = NULL;
1262 + aa_put_profile(ns->null_complain_profile);
1263 + kfree(ns->name);
1264 + kfree(ns);
1265 +}
1266 +
1267 +/**
1268 + * free_aa_profile_kref - free aa_profile by kref (called by aa_put_profile)
1269 + * @kref: kref callback for freeing of a profile
1270 + */
1271 +void free_aa_profile_kref(struct kref *kref)
1272 +{
1273 + struct aa_profile *p = container_of(kref, struct aa_profile, count);
1274 +
1275 + free_aa_profile(p);
1276 +}
1277 +
1278 +/**
1279 + * alloc_aa_profile - allocate, initialize and return a new profile
1280 + * Returns NULL on failure.
1281 + */
1282 +struct aa_profile *alloc_aa_profile(void)
1283 +{
1284 + struct aa_profile *profile;
1285 +
1286 + profile = kzalloc(sizeof(*profile), GFP_KERNEL);
1287 + AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
1288 + if (profile) {
1289 + INIT_LIST_HEAD(&profile->list);
1290 + kref_init(&profile->count);
1291 + INIT_LIST_HEAD(&profile->task_contexts);
1292 + spin_lock_init(&profile->lock);
1293 + }
1294 + return profile;
1295 +}
1296 +
1297 +/**
1298 + * free_aa_profile - free a profile
1299 + * @profile: the profile to free
1300 + *
1301 + * Free a profile, its hats and null_profile. All references to the profile,
1302 + * its hats and null_profile must have been put.
1303 + *
1304 + * If the profile was referenced from a task context, free_aa_profile() will
1305 + * be called from an rcu callback routine, so we must not sleep here.
1306 + */
1307 +void free_aa_profile(struct aa_profile *profile)
1308 +{
1309 + AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
1310 +
1311 + if (!profile)
1312 + return;
1313 +
1314 + /* profile is still on profile namespace list -- invalid */
1315 + if (!list_empty(&profile->list)) {
1316 + AA_ERROR("%s: internal error, "
1317 + "profile '%s' still on global list\n",
1318 + __FUNCTION__,
1319 + profile->name);
1320 + BUG();
1321 + }
1322 + aa_put_namespace(profile->ns);
1323 +
1324 + aa_match_free(profile->file_rules);
1325 +
1326 + if (profile->name) {
1327 + AA_DEBUG("%s: %s\n", __FUNCTION__, profile->name);
1328 + kfree(profile->name);
1329 + }
1330 +
1331 + kfree(profile);
1332 +}
1333 +
1334 +/**
1335 + * aa_unconfine_tasks - remove tasks on a profile's task context list
1336 + * @profile: profile to remove tasks from
1337 + *
1338 + * Assumes that @profile lock is held.
1339 + */
1340 +void aa_unconfine_tasks(struct aa_profile *profile)
1341 +{
1342 + while (!list_empty(&profile->task_contexts)) {
1343 + struct task_struct *task =
1344 + list_entry(profile->task_contexts.next,
1345 + struct aa_task_context, list)->task;
1346 + task_lock(task);
1347 + aa_change_task_context(task, NULL, NULL, 0, NULL);
1348 + task_unlock(task);
1349 + }
1350 +}
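For reference, the typed, named element encoding consumed by the aa_is_*() helpers looks like this on the wire. Below is a hand-built example of the "version" element that aa_verify_header() reads; it is illustrative only (real streams come from the apparmor parser), multi-byte values are little-endian, and the name chunk includes its trailing NUL, which is what lets aa_is_nameX() strcmp() against it:

static const unsigned char version_element[] = {
	AA_NAME,		/* tag: a name chunk follows */
	0x08, 0x00,		/* u16 le: length of the name, including its NUL */
	'v', 'e', 'r', 's', 'i', 'o', 'n', '\0',
	AA_U32,			/* typecode of the named element */
	0x05, 0x00, 0x00, 0x00	/* u32 le: interface version 5 */
};

Walking this stream with aa_is_u32(&e, &e->version, "version") consumes all 16 bytes and stores 5, after which aa_verify_header() accepts the header.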