// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor security identifier (secid) manipulation fns
 *
 * Copyright 2009-2017 Canonical Ltd.
 *
 * AppArmor allocates a unique secid for every label used. If a label
 * is replaced it receives the secid of the label it is replacing.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "include/cred.h"
#include "include/lib.h"
#include "include/secid.h"
#include "include/label.h"
#include "include/policy_ns.h"

/*
 * secids - do not pin labels with a refcount. They rely on the label
 * properly updating/freeing them
 */
#define AA_FIRST_SECID 2

static DEFINE_IDR(aa_secids);
static DEFINE_SPINLOCK(secid_lock);

/*
 * TODO: allow policy to reserve a secid range?
 * TODO: add secid pinning
 * TODO: use secid_update in label replace
 */

/**
 * aa_secid_update - update a secid mapping to a new label
 * @secid: secid to update
 * @label: label the secid will now map to
 */
void aa_secid_update(u32 secid, struct aa_label *label)
{
	unsigned long flags;

	spin_lock_irqsave(&secid_lock, flags);
	idr_replace(&aa_secids, label, secid);
	spin_unlock_irqrestore(&secid_lock, flags);
}

/**
 * aa_secid_to_label - look up the label a secid currently maps to
 * @secid: secid of the label to look up
 *
 * See label for the inverse, aa_label_to_secid().
 *
 * Returns: the label mapped to @secid, or NULL if the secid is not in use
 */
struct aa_label *aa_secid_to_label(u32 secid)
{
	struct aa_label *label;

	rcu_read_lock();
	label = idr_find(&aa_secids, secid);
	rcu_read_unlock();

	return label;
}
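
/**
 * apparmor_secid_to_secctx - convert a secid to a secctx string (LSM hook)
 * @secid: secid to convert
 * @secdata: where to store the allocated context string; may be NULL to
 *           only compute the length
 * @seclen: where to store the length of the context string
 *
 * Returns: 0 on success, -EINVAL if @secid has no label mapped to it,
 *          or -ENOMEM if printing the label fails
 */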
int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	/* TODO: cache secctx and ref count so we don't have to recreate */
	struct aa_label *label = aa_secid_to_label(secid);
	int len;

	AA_BUG(!seclen);

	if (!label)
		return -EINVAL;

	if (secdata)
		len = aa_label_asxprint(secdata, root_ns, label,
					FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
					FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT,
					GFP_ATOMIC);
	else
		len = aa_label_snxprint(NULL, 0, root_ns, label,
					FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
					FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT);
	if (len < 0)
		return -ENOMEM;

	*seclen = len;

	return 0;
}
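
/**
 * apparmor_secctx_to_secid - convert a secctx string to a secid (LSM hook)
 * @secdata: context string to parse
 * @seclen: length of @secdata
 * @secid: where to store the secid of the parsed label
 *
 * Returns: 0 on success, or an error code from parsing the label string
 */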
int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	struct aa_label *label;

	label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
				    seclen, GFP_KERNEL, false, false);
	if (IS_ERR(label))
		return PTR_ERR(label);
	*secid = label->secid;

	return 0;
}
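
/**
 * apparmor_release_secctx - free a secctx from apparmor_secid_to_secctx
 * @secdata: context string to free
 * @seclen: length of @secdata (unused)
 */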
void apparmor_release_secctx(char *secdata, u32 seclen)
{
	kfree(secdata);
}

/**
 * aa_alloc_secid - allocate a new secid for a profile
 * @label: the label to allocate a secid for
 * @gfp: memory allocation flags
 *
 * Returns: 0 with @label->secid initialized
 *          <0 returns error with @label->secid set to AA_SECID_INVALID
 */
int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
{
	unsigned long flags;
	int ret;

	idr_preload(gfp);
	spin_lock_irqsave(&secid_lock, flags);
	ret = idr_alloc(&aa_secids, label, AA_FIRST_SECID, 0, GFP_ATOMIC);
	spin_unlock_irqrestore(&secid_lock, flags);
	idr_preload_end();

	if (ret < 0) {
		label->secid = AA_SECID_INVALID;
		return ret;
	}

	AA_BUG(ret == AA_SECID_INVALID);
	label->secid = ret;
	return 0;
}

/**
 * aa_free_secid - free a secid
 * @secid: secid to free
 */
void aa_free_secid(u32 secid)
{
	unsigned long flags;

	spin_lock_irqsave(&secid_lock, flags);
	idr_remove(&aa_secids, secid);
	spin_unlock_irqrestore(&secid_lock, flags);
}
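
/**
 * aa_secids_init - initialize the secid IDR allocator
 *
 * Initializes @aa_secids with AA_FIRST_SECID as its base, matching the
 * start value used by aa_alloc_secid().
 */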
void aa_secids_init(void)
{
	idr_init_base(&aa_secids, AA_FIRST_SECID);
}