// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Maps security context structures to SIDs (security identifiers) and
 * back, with an RCU-friendly reverse hash table and an optional
 * SID -> context-string LRU cache.
 */
0010 #include <linux/errno.h>
0011 #include <linux/kernel.h>
0012 #include <linux/list.h>
0013 #include <linux/rcupdate.h>
0014 #include <linux/slab.h>
0015 #include <linux/sched.h>
0016 #include <linux/spinlock.h>
0017 #include <asm/barrier.h>
0018 #include "flask.h"
0019 #include "security.h"
0020 #include "sidtab.h"
0021
/*
 * One cached SID -> context-string translation. Entries live on the
 * sidtab's LRU list and are freed via RCU (see sidtab_sid2str_put()).
 */
struct sidtab_str_cache {
	struct rcu_head rcu_member;	/* for deferred free via kfree_rcu() */
	struct list_head lru_member;	/* node in sidtab->cache_lru_list */
	struct sidtab_entry *parent;	/* entry this cached string belongs to */
	u32 len;			/* length of str[] in bytes */
	char str[];			/* the cached context string */
};
0029
/*
 * SIDs 1..SECINITSID_NUM are reserved for initial SIDs; dynamically
 * allocated entries get SIDs starting right after them. These convert
 * between a dynamic entry's tree index and its SID.
 */
#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))
0032
0033 int sidtab_init(struct sidtab *s)
0034 {
0035 u32 i;
0036
0037 memset(s->roots, 0, sizeof(s->roots));
0038
0039 for (i = 0; i < SECINITSID_NUM; i++)
0040 s->isids[i].set = 0;
0041
0042 s->frozen = false;
0043 s->count = 0;
0044 s->convert = NULL;
0045 hash_init(s->context_to_sid);
0046
0047 spin_lock_init(&s->lock);
0048
0049 #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
0050 s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
0051 INIT_LIST_HEAD(&s->cache_lru_list);
0052 spin_lock_init(&s->cache_lock);
0053 #endif
0054
0055 return 0;
0056 }
0057
0058 static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
0059 {
0060 struct sidtab_entry *entry;
0061 u32 sid = 0;
0062
0063 rcu_read_lock();
0064 hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
0065 if (entry->hash != hash)
0066 continue;
0067 if (context_cmp(&entry->context, context)) {
0068 sid = entry->sid;
0069 break;
0070 }
0071 }
0072 rcu_read_unlock();
0073 return sid;
0074 }
0075
/*
 * Install the context for initial SID @sid (1..SECINITSID_NUM).
 * Copies @context into the fixed isids[] slot and, if no equal context
 * is already present, also publishes the entry in the reverse
 * hashtable. Returns 0 on success or -EINVAL/-ENOMEM on failure.
 */
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	u32 hash;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	hash = context_compute_hash(context);

	/*
	 * Multiple initial SIDs may map to the same context. Only insert
	 * into the context_to_sid hashtable if this context is not already
	 * represented there, to avoid duplicate entries (and needlessly
	 * long chains on hash collision). Reverse lookups of a shared
	 * context then resolve to whichever SID registered it first.
	 */
	if (!context_to_sid(s, context, hash)) {
		isid->entry.sid = sid;
		isid->entry.hash = hash;
		hash_add(s->context_to_sid, &isid->entry.list, hash);
	}

	return 0;
}
0112
0113 int sidtab_hash_stats(struct sidtab *sidtab, char *page)
0114 {
0115 int i;
0116 int chain_len = 0;
0117 int slots_used = 0;
0118 int entries = 0;
0119 int max_chain_len = 0;
0120 int cur_bucket = 0;
0121 struct sidtab_entry *entry;
0122
0123 rcu_read_lock();
0124 hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
0125 entries++;
0126 if (i == cur_bucket) {
0127 chain_len++;
0128 if (chain_len == 1)
0129 slots_used++;
0130 } else {
0131 cur_bucket = i;
0132 if (chain_len > max_chain_len)
0133 max_chain_len = chain_len;
0134 chain_len = 0;
0135 }
0136 }
0137 rcu_read_unlock();
0138
0139 if (chain_len > max_chain_len)
0140 max_chain_len = chain_len;
0141
0142 return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
0143 "longest chain: %d\n", entries,
0144 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
0145 }
0146
0147 static u32 sidtab_level_from_count(u32 count)
0148 {
0149 u32 capacity = SIDTAB_LEAF_ENTRIES;
0150 u32 level = 0;
0151
0152 while (count > capacity) {
0153 capacity <<= SIDTAB_INNER_SHIFT;
0154 ++level;
0155 }
0156 return level;
0157 }
0158
0159 static int sidtab_alloc_roots(struct sidtab *s, u32 level)
0160 {
0161 u32 l;
0162
0163 if (!s->roots[0].ptr_leaf) {
0164 s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
0165 GFP_ATOMIC);
0166 if (!s->roots[0].ptr_leaf)
0167 return -ENOMEM;
0168 }
0169 for (l = 1; l <= level; ++l)
0170 if (!s->roots[l].ptr_inner) {
0171 s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
0172 GFP_ATOMIC);
0173 if (!s->roots[l].ptr_inner)
0174 return -ENOMEM;
0175 s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
0176 }
0177 return 0;
0178 }
0179
/*
 * Walk the radix tree to the entry at @index. If @alloc is nonzero,
 * missing nodes (including taller roots) are allocated with GFP_ATOMIC
 * along the way; otherwise a missing node makes the lookup fail.
 * Returns the entry pointer or NULL on missing node / allocation
 * failure.
 */
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree, consuming leaf_index bits per level */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}
0220
0221 static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
0222 {
0223
0224 u32 count = smp_load_acquire(&s->count);
0225
0226 if (index >= count)
0227 return NULL;
0228
0229 return sidtab_do_lookup(s, index, 0);
0230 }
0231
0232 static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
0233 {
0234 return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
0235 }
0236
0237 static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
0238 int force)
0239 {
0240 if (sid != 0) {
0241 struct sidtab_entry *entry;
0242
0243 if (sid > SECINITSID_NUM)
0244 entry = sidtab_lookup(s, sid_to_index(sid));
0245 else
0246 entry = sidtab_lookup_initial(s, sid);
0247 if (entry && (!entry->context.len || force))
0248 return entry;
0249 }
0250
0251 return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
0252 }
0253
/* Look up @sid, refusing entries with invalid (string-only) contexts. */
struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}
0258
/* Look up @sid, accepting entries with invalid (string-only) contexts too. */
struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}
0263
/*
 * Map @context to a SID, allocating a new entry if it is not yet in
 * the table. On success *sid is set and 0 is returned; otherwise
 * -ESTALE (table frozen by a policy reload), -EOVERFLOW (table full)
 * or -ENOMEM. If a live policy conversion is in progress, the new
 * entry is also converted and inserted into the target table.
 */
int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count, hash = context_compute_hash(context);
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	/* fast path: lock-free lookup of an existing mapping */
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		goto out_unlock;

	if (unlikely(s->frozen)) {
		/*
		 * This sidtab is now frozen - tell the caller to abort and
		 * get the new one.
		 */
		rc = -ESTALE;
		goto out_unlock;
	}

	count = s->count;
	convert = s->convert;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);
	dst->hash = hash;

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * If we are building a new sidtab (live conversion during policy
	 * reload), we need to convert the context and insert it into the
	 * target table as well, so both tables stay in sync.
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = convert->func(context, &dst_convert->context,
				   convert->args);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		dst_convert->hash = context_compute_hash(&dst_convert->context);
		convert->target->count = count + 1;

		hash_add_rcu(convert->target->context_to_sid,
			     &dst_convert->list, dst_convert->hash);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/*
	 * Write entries before updating count; the store-release pairs with
	 * the acquire load in sidtab_lookup().
	 */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}
0356
0357 static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
0358 {
0359 struct sidtab_entry *entry;
0360 u32 i;
0361
0362 for (i = 0; i < count; i++) {
0363 entry = sidtab_do_lookup(s, i, 0);
0364 entry->sid = index_to_sid(i);
0365 entry->hash = context_compute_hash(&entry->context);
0366
0367 hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
0368 }
0369 }
0370
0371 static int sidtab_convert_tree(union sidtab_entry_inner *edst,
0372 union sidtab_entry_inner *esrc,
0373 u32 *pos, u32 count, u32 level,
0374 struct sidtab_convert_params *convert)
0375 {
0376 int rc;
0377 u32 i;
0378
0379 if (level != 0) {
0380 if (!edst->ptr_inner) {
0381 edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
0382 GFP_KERNEL);
0383 if (!edst->ptr_inner)
0384 return -ENOMEM;
0385 }
0386 i = 0;
0387 while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
0388 rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
0389 &esrc->ptr_inner->entries[i],
0390 pos, count, level - 1,
0391 convert);
0392 if (rc)
0393 return rc;
0394 i++;
0395 }
0396 } else {
0397 if (!edst->ptr_leaf) {
0398 edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
0399 GFP_KERNEL);
0400 if (!edst->ptr_leaf)
0401 return -ENOMEM;
0402 }
0403 i = 0;
0404 while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
0405 rc = convert->func(&esrc->ptr_leaf->entries[i].context,
0406 &edst->ptr_leaf->entries[i].context,
0407 convert->args);
0408 if (rc)
0409 return rc;
0410 (*pos)++;
0411 i++;
0412 }
0413 cond_resched();
0414 }
0415 return 0;
0416 }
0417
/*
 * Convert all existing entries of @s into params->target under the new
 * policy, enabling "live convert" so entries added concurrently are
 * converted too (see sidtab_context_to_sid()). Returns 0, -EBUSY if a
 * conversion is already running, -ENOMEM, or the converter's error.
 */
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/*
	 * Pre-allocate the target tree up to the last existing entry, so
	 * the tree-walking conversion below cannot race with live-convert
	 * allocations for the same slots.
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * (live convert), so take the lock again while populating it.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}
0476
/*
 * Abort an in-progress policy conversion: clear s->convert under the
 * lock so sidtab_context_to_sid() stops live-converting new entries.
 */
void sidtab_cancel_convert(struct sidtab *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->convert = NULL;
	spin_unlock_irqrestore(&s->lock, flags);
}
0486
/*
 * Freeze the sidtab for replacement: take the lock (held until
 * sidtab_freeze_end()), mark it frozen so sidtab_context_to_sid()
 * returns -ESTALE, and disable any live conversion.
 */
void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
{
	spin_lock_irqsave(&s->lock, *flags);
	s->frozen = true;
	s->convert = NULL;
}
/* Release the lock taken by sidtab_freeze_begin(). */
void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
{
	spin_unlock_irqrestore(&s->lock, *flags);
}
0497
/*
 * Free one entry's context and (if configured) its cached string.
 * Called only on the destroy path, where no readers remain, so the
 * RCU-protected cache pointer is fetched with rcu_dereference_raw()
 * and freed directly.
 */
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}
0505
0506 static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
0507 {
0508 u32 i;
0509
0510 if (level != 0) {
0511 struct sidtab_node_inner *node = entry.ptr_inner;
0512
0513 if (!node)
0514 return;
0515
0516 for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
0517 sidtab_destroy_tree(node->entries[i], level - 1);
0518 kfree(node);
0519 } else {
0520 struct sidtab_node_leaf *node = entry.ptr_leaf;
0521
0522 if (!node)
0523 return;
0524
0525 for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
0526 sidtab_destroy_entry(&node->entries[i]);
0527 kfree(node);
0528 }
0529 }
0530
/*
 * Tear down the whole sidtab: destroy all set initial-SID entries,
 * then free the dynamic entry tree from its topmost allocated root.
 */
void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	/* find the highest level that actually has a root allocated */
	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's memory needs no separate freeing:
	 * its nodes are embedded in the entries freed above, and the bucket
	 * array is part of struct sidtab itself.
	 */
}
0550
0551 #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
0552
/*
 * Cache the string form @str (length @str_len, including terminator as
 * supplied by the caller) of @entry's context, maintaining an LRU list
 * bounded by CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE. Readers access
 * entry->cache under RCU, so eviction defers the free via kfree_rcu().
 */
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to front of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* cache full: evict the least-recently-used entry */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		/* detach from its parent entry before the deferred free */
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	/* victim may be NULL here; kfree_rcu() handles that */
	kfree_rcu(victim, rcu_member);
}
0597
/*
 * Fetch the cached string for @entry's context. On hit, *out_len is
 * set and, if @out is non-NULL, *out receives a kmemdup'd copy the
 * caller must free; the entry is then re-put to bump it in the LRU.
 * Returns 0, -ENOENT on miss / invalid context, or -ENOMEM.
 */
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	/* invalid contexts are never cached */
	if (entry->context.len)
		return -ENOENT;

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			/* copy under RCU; cache may be freed after unlock */
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}

	rcu_read_unlock();

	/* successful read with a copy made: refresh LRU position */
	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}
0627
0628 #endif