0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <linux/init.h>
0025 #include <linux/types.h>
0026 #include <linux/rcupdate.h>
0027 #include <linux/list.h>
0028 #include <linux/spinlock.h>
0029 #include <linux/string.h>
0030 #include <linux/jhash.h>
0031 #include <linux/audit.h>
0032 #include <linux/slab.h>
0033 #include <net/ip.h>
0034 #include <net/icmp.h>
0035 #include <net/tcp.h>
0036 #include <net/netlabel.h>
0037 #include <net/cipso_ipv4.h>
0038 #include <linux/atomic.h>
0039 #include <linux/bug.h>
0040 #include <asm/unaligned.h>
0041
0042
0043
0044
0045
0046
/* List of available DOI definitions; protected for writers by
 * cipso_v4_doi_list_lock, readers traverse it under RCU. */
static DEFINE_SPINLOCK(cipso_v4_doi_list_lock);
static LIST_HEAD(cipso_v4_doi_list);

/* Label mapping cache controls (tunable; read with READ_ONCE()) */
int cipso_v4_cache_enabled = 1;
int cipso_v4_cache_bucketsize = 10;
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS)
#define CIPSO_V4_CACHE_REORDERLIMIT 10
/* One hash bucket of the label mapping cache. */
struct cipso_v4_map_cache_bkt {
	spinlock_t lock;	/* protects @size and @list */
	u32 size;		/* current number of entries in @list */
	struct list_head list;	/* entries, most active kept near the head */
};

/* One cached mapping from a raw CIPSO option to LSM security attributes. */
struct cipso_v4_map_cache_entry {
	u32 hash;		/* hash of @key, used for bucket selection */
	unsigned char *key;	/* copy of the raw CIPSO option bytes */
	size_t key_len;

	struct netlbl_lsm_cache *lsm_data;	/* refcounted LSM attr cache */

	u32 activity;		/* hit counter used for in-bucket reordering */
	struct list_head list;
};

static struct cipso_v4_map_cache_bkt *cipso_v4_cache;

/* Restricted bitmap tag behavior controls (tunable; read with READ_ONCE()) */
int cipso_v4_rbm_optfmt;
int cipso_v4_rbm_strictvalid = 1;
0078
0079
0080
0081
0082
0083
0084
/* Maximum size of an IPv4 option (and therefore a CIPSO option) in bytes */
#define CIPSO_V4_OPT_LEN_MAX 40

/* CIPSO option header length: type (1) + length (1) + DOI (4) */
#define CIPSO_V4_HDR_LEN 6

/* Base (minimum) length of a restricted bitmap tag */
#define CIPSO_V4_TAG_RBM_BLEN 4

/* Base (minimum) length of an enumerated tag */
#define CIPSO_V4_TAG_ENUM_BLEN 4

/* Base (minimum) length of a ranged categories tag */
#define CIPSO_V4_TAG_RNG_BLEN 4

/* Maximum number of category ranges that fit in a single ranged tag
 * (each range is a 16-bit top value plus optional 16-bit bottom value) */
#define CIPSO_V4_TAG_RNG_CAT_MAX 8

/* Exact length of a local tag: type (1) + length (1) + 32-bit secid (4).
 * NOTE(review): only used on loopback traffic per cipso_v4_validate(). */
#define CIPSO_V4_TAG_LOC_BLEN 6
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
0132 static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry)
0133 {
0134 if (entry->lsm_data)
0135 netlbl_secattr_cache_free(entry->lsm_data);
0136 kfree(entry->key);
0137 kfree(entry);
0138 }
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
/*
 * Hash the raw CIPSO option bytes for bucket selection and fast compare.
 * Uses the Jenkins hash with a zero initial value.
 */
static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
{
	return jhash(key, key_len, 0);
}
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167 static int __init cipso_v4_cache_init(void)
0168 {
0169 u32 iter;
0170
0171 cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS,
0172 sizeof(struct cipso_v4_map_cache_bkt),
0173 GFP_KERNEL);
0174 if (!cipso_v4_cache)
0175 return -ENOMEM;
0176
0177 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
0178 spin_lock_init(&cipso_v4_cache[iter].lock);
0179 cipso_v4_cache[iter].size = 0;
0180 INIT_LIST_HEAD(&cipso_v4_cache[iter].list);
0181 }
0182
0183 return 0;
0184 }
0185
0186
0187
0188
0189
0190
0191
0192
0193 void cipso_v4_cache_invalidate(void)
0194 {
0195 struct cipso_v4_map_cache_entry *entry, *tmp_entry;
0196 u32 iter;
0197
0198 for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) {
0199 spin_lock_bh(&cipso_v4_cache[iter].lock);
0200 list_for_each_entry_safe(entry,
0201 tmp_entry,
0202 &cipso_v4_cache[iter].list, list) {
0203 list_del(&entry->list);
0204 cipso_v4_cache_entry_free(entry);
0205 }
0206 cipso_v4_cache[iter].size = 0;
0207 spin_unlock_bh(&cipso_v4_cache[iter].lock);
0208 }
0209 }
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
/**
 * cipso_v4_cache_check - Check the CIPSO mapping cache for a match
 * @key: the raw CIPSO option bytes to look up
 * @key_len: length of @key in bytes
 * @secattr: the security attribute struct to populate on a hit
 *
 * Description:
 * Checks the cache for an existing label mapping for @key.  On a hit the
 * entry's LSM cache data is referenced, @secattr is filled in, and the
 * bucket is lightly re-ordered: the hit entry's activity counter is
 * incremented, the preceding entry's counter is decremented, and when the
 * difference exceeds CIPSO_V4_CACHE_REORDERLIMIT the hit entry is moved in
 * front of its predecessor, so hot entries migrate toward the bucket head.
 * Returns zero on a hit, -ENOENT on a miss or when the cache is disabled.
 *
 */
static int cipso_v4_cache_check(const unsigned char *key,
				u32 key_len,
				struct netlbl_lsm_secattr *secattr)
{
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry;
	struct cipso_v4_map_cache_entry *prev_entry = NULL;
	u32 hash;

	if (!READ_ONCE(cipso_v4_cache_enabled))
		return -ENOENT;

	hash = cipso_v4_map_cache_hash(key, key_len);
	bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
		if (entry->hash == hash &&
		    entry->key_len == key_len &&
		    memcmp(entry->key, key, key_len) == 0) {
			entry->activity += 1;
			/* take a reference before publishing to @secattr */
			refcount_inc(&entry->lsm_data->refcount);
			secattr->cache = entry->lsm_data;
			secattr->flags |= NETLBL_SECATTR_CACHE;
			secattr->type = NETLBL_NLTYPE_CIPSOV4;
			/* already at the bucket head, nothing to reorder */
			if (!prev_entry) {
				spin_unlock_bh(&cipso_v4_cache[bkt].lock);
				return 0;
			}

			if (prev_entry->activity > 0)
				prev_entry->activity -= 1;
			if (entry->activity > prev_entry->activity &&
			    entry->activity - prev_entry->activity >
			    CIPSO_V4_CACHE_REORDERLIMIT) {
				/* swap the entry in front of its predecessor */
				__list_del(entry->list.prev, entry->list.next);
				__list_add(&entry->list,
					   prev_entry->list.prev,
					   &prev_entry->list);
			}

			spin_unlock_bh(&cipso_v4_cache[bkt].lock);
			return 0;
		}
		prev_entry = entry;
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return -ENOENT;
}
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
/**
 * cipso_v4_cache_add - Add an entry to the CIPSO mapping cache
 * @cipso_ptr: pointer to the start of the CIPSO option (assumed valid;
 *             NOTE(review): the length byte cipso_ptr[1] is trusted here,
 *             so callers must only pass options that already passed
 *             cipso_v4_validate())
 * @secattr: the parsed security attributes; @secattr->cache must be set
 *
 * Description:
 * Copies the raw option bytes as the cache key and stores a referenced
 * pointer to @secattr->cache.  If the bucket is full the oldest entry
 * (bucket tail) is evicted.  Returns zero on success (including the
 * no-op case when the cache is disabled), negative values on failure.
 *
 */
int cipso_v4_cache_add(const unsigned char *cipso_ptr,
		       const struct netlbl_lsm_secattr *secattr)
{
	int bkt_size = READ_ONCE(cipso_v4_cache_bucketsize);
	int ret_val = -EPERM;
	u32 bkt;
	struct cipso_v4_map_cache_entry *entry = NULL;
	struct cipso_v4_map_cache_entry *old_entry = NULL;
	u32 cipso_ptr_len;

	if (!READ_ONCE(cipso_v4_cache_enabled) || bkt_size <= 0)
		return 0;

	/* option length byte covers the whole option including the header */
	cipso_ptr_len = cipso_ptr[1];

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;
	entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC);
	if (!entry->key) {
		ret_val = -ENOMEM;
		goto cache_add_failure;
	}
	entry->key_len = cipso_ptr_len;
	entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
	refcount_inc(&secattr->cache->refcount);
	entry->lsm_data = secattr->cache;

	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
	spin_lock_bh(&cipso_v4_cache[bkt].lock);
	if (cipso_v4_cache[bkt].size < bkt_size) {
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache[bkt].size += 1;
	} else {
		/* bucket full: evict the least recently promoted (tail) entry */
		old_entry = list_entry(cipso_v4_cache[bkt].list.prev,
				       struct cipso_v4_map_cache_entry, list);
		list_del(&old_entry->list);
		list_add(&entry->list, &cipso_v4_cache[bkt].list);
		cipso_v4_cache_entry_free(old_entry);
	}
	spin_unlock_bh(&cipso_v4_cache[bkt].lock);

	return 0;

cache_add_failure:
	if (entry)
		cipso_v4_cache_entry_free(entry);
	return ret_val;
}
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359 static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
0360 {
0361 struct cipso_v4_doi *iter;
0362
0363 list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list)
0364 if (iter->doi == doi && refcount_read(&iter->refcount))
0365 return iter;
0366 return NULL;
0367 }
0368
0369
0370
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381
/**
 * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine
 * @doi_def: the DOI structure
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Validates the DOI definition (DOI value, per-mapping-type allowed tags)
 * and adds it to the DOI list.  An audit record is emitted for both the
 * success and failure case.  Returns zero on success, -EEXIST when the DOI
 * is already defined, and -EINVAL on a malformed definition.
 *
 */
int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
		     struct netlbl_audit *audit_info)
{
	int ret_val = -EINVAL;
	u32 iter;
	u32 doi;
	u32 doi_type;
	struct audit_buffer *audit_buf;

	doi = doi_def->doi;
	doi_type = doi_def->type;

	if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
		goto doi_add_return;
	/* each tag must be compatible with the mapping type; the tag list
	 * may be terminated early by CIPSO_V4_TAG_INVALID, but not empty */
	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			break;
		case CIPSO_V4_TAG_RANGE:
		case CIPSO_V4_TAG_ENUM:
			if (doi_def->type != CIPSO_V4_MAP_PASS)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_LOCAL:
			if (doi_def->type != CIPSO_V4_MAP_LOCAL)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_INVALID:
			if (iter == 0)
				goto doi_add_return;
			break;
		default:
			goto doi_add_return;
		}
	}

	refcount_set(&doi_def->refcount, 1);

	spin_lock(&cipso_v4_doi_list_lock);
	if (cipso_v4_doi_search(doi_def->doi)) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EEXIST;
		goto doi_add_return;
	}
	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
	spin_unlock(&cipso_v4_doi_list_lock);
	ret_val = 0;

doi_add_return:
	/* audit the attempt regardless of outcome; res=1 means success */
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
	if (audit_buf) {
		const char *type_str;
		switch (doi_type) {
		case CIPSO_V4_MAP_TRANS:
			type_str = "trans";
			break;
		case CIPSO_V4_MAP_PASS:
			type_str = "pass";
			break;
		case CIPSO_V4_MAP_LOCAL:
			type_str = "local";
			break;
		default:
			type_str = "(unknown)";
		}
		audit_log_format(audit_buf,
				 " cipso_doi=%u cipso_type=%s res=%u",
				 doi, type_str, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464 void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
0465 {
0466 if (!doi_def)
0467 return;
0468
0469 switch (doi_def->type) {
0470 case CIPSO_V4_MAP_TRANS:
0471 kfree(doi_def->map.std->lvl.cipso);
0472 kfree(doi_def->map.std->lvl.local);
0473 kfree(doi_def->map.std->cat.cipso);
0474 kfree(doi_def->map.std->cat.local);
0475 kfree(doi_def->map.std);
0476 break;
0477 }
0478 kfree(doi_def);
0479 }
0480
0481
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491 static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
0492 {
0493 struct cipso_v4_doi *doi_def;
0494
0495 doi_def = container_of(entry, struct cipso_v4_doi, rcu);
0496 cipso_v4_doi_free(doi_def);
0497 }
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
/**
 * cipso_v4_doi_remove - Remove a DOI from the CIPSO protocol engine
 * @doi: the DOI value
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Unlinks the DOI definition from the DOI list and drops the list's
 * reference via cipso_v4_doi_putdef(); the definition is actually freed
 * (after an RCU grace period) once the last reference is gone.  An audit
 * record is emitted on both success and failure.  Returns zero on
 * success, -ENOENT when the DOI is not defined.
 *
 */
int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info)
{
	int ret_val;
	struct cipso_v4_doi *doi_def;
	struct audit_buffer *audit_buf;

	spin_lock(&cipso_v4_doi_list_lock);
	doi_def = cipso_v4_doi_search(doi);
	if (!doi_def) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -ENOENT;
		goto doi_remove_return;
	}
	list_del_rcu(&doi_def->list);
	spin_unlock(&cipso_v4_doi_list_lock);

	cipso_v4_doi_putdef(doi_def);
	ret_val = 0;

doi_remove_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info);
	if (audit_buf) {
		audit_log_format(audit_buf,
				 " cipso_doi=%u res=%u",
				 doi, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552 struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi)
0553 {
0554 struct cipso_v4_doi *doi_def;
0555
0556 rcu_read_lock();
0557 doi_def = cipso_v4_doi_search(doi);
0558 if (!doi_def)
0559 goto doi_getdef_return;
0560 if (!refcount_inc_not_zero(&doi_def->refcount))
0561 doi_def = NULL;
0562
0563 doi_getdef_return:
0564 rcu_read_unlock();
0565 return doi_def;
0566 }
0567
0568
0569
0570
0571
0572
0573
0574
0575
0576 void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def)
0577 {
0578 if (!doi_def)
0579 return;
0580
0581 if (!refcount_dec_and_test(&doi_def->refcount))
0582 return;
0583
0584 cipso_v4_cache_invalidate();
0585 call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu);
0586 }
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
/**
 * cipso_v4_doi_walk - Iterate through the DOI definitions
 * @skip_cnt: on entry, number of definitions to skip; on exit, the index
 *            of the last definition handed to @callback
 * @callback: callback invoked for each (live) DOI definition
 * @cb_arg: opaque argument passed through to @callback
 *
 * Description:
 * Walks the DOI list under RCU, skipping the first *@skip_cnt live
 * entries, and invokes @callback on the rest.  Iteration stops early if
 * @callback returns a negative value, which is then returned; -ENOENT is
 * returned when no entry was visited at all.
 *
 */
int cipso_v4_doi_walk(u32 *skip_cnt,
		     int (*callback) (struct cipso_v4_doi *doi_def, void *arg),
		     void *cb_arg)
{
	int ret_val = -ENOENT;
	u32 doi_cnt = 0;
	struct cipso_v4_doi *iter_doi;

	rcu_read_lock();
	list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list)
		if (refcount_read(&iter_doi->refcount) > 0) {
			if (doi_cnt++ < *skip_cnt)
				continue;
			ret_val = callback(iter_doi, cb_arg);
			if (ret_val < 0) {
				/* don't count the entry that failed */
				doi_cnt--;
				goto doi_walk_return;
			}
		}

doi_walk_return:
	rcu_read_unlock();
	*skip_cnt = doi_cnt;
	return ret_val;
}
0626
0627
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642 static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
0643 {
0644 switch (doi_def->type) {
0645 case CIPSO_V4_MAP_PASS:
0646 return 0;
0647 case CIPSO_V4_MAP_TRANS:
0648 if ((level < doi_def->map.std->lvl.cipso_size) &&
0649 (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
0650 return 0;
0651 break;
0652 }
0653
0654 return -EFAULT;
0655 }
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669 static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def,
0670 u32 host_lvl,
0671 u32 *net_lvl)
0672 {
0673 switch (doi_def->type) {
0674 case CIPSO_V4_MAP_PASS:
0675 *net_lvl = host_lvl;
0676 return 0;
0677 case CIPSO_V4_MAP_TRANS:
0678 if (host_lvl < doi_def->map.std->lvl.local_size &&
0679 doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) {
0680 *net_lvl = doi_def->map.std->lvl.local[host_lvl];
0681 return 0;
0682 }
0683 return -EPERM;
0684 }
0685
0686 return -EINVAL;
0687 }
0688
0689
0690
0691
0692
0693
0694
0695
0696
0697
0698
0699
0700
0701 static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def,
0702 u32 net_lvl,
0703 u32 *host_lvl)
0704 {
0705 struct cipso_v4_std_map_tbl *map_tbl;
0706
0707 switch (doi_def->type) {
0708 case CIPSO_V4_MAP_PASS:
0709 *host_lvl = net_lvl;
0710 return 0;
0711 case CIPSO_V4_MAP_TRANS:
0712 map_tbl = doi_def->map.std;
0713 if (net_lvl < map_tbl->lvl.cipso_size &&
0714 map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) {
0715 *host_lvl = doi_def->map.std->lvl.cipso[net_lvl];
0716 return 0;
0717 }
0718 return -EPERM;
0719 }
0720
0721 return -EINVAL;
0722 }
0723
0724
0725
0726
0727
0728
0729
0730
0731
0732
0733
0734
0735
/**
 * cipso_v4_map_cat_rbm_valid - Check the restricted bitmap categories
 * @doi_def: the DOI definition
 * @bitmap: the category bitmap from the packet
 * @bitmap_len: bitmap length in bytes
 *
 * Description:
 * Checks that every category set in @bitmap is valid for @doi_def.
 * Pass-through mappings accept anything; translated mappings require
 * each set bit to have a valid translation entry.  Returns zero if
 * valid, -EFAULT otherwise.
 *
 */
static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *bitmap,
				      u32 bitmap_len)
{
	int cat = -1;
	u32 bitmap_len_bits = bitmap_len * 8;
	u32 cipso_cat_size;
	u32 *cipso_array;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_PASS:
		return 0;
	case CIPSO_V4_MAP_TRANS:
		cipso_cat_size = doi_def->map.std->cat.cipso_size;
		cipso_array = doi_def->map.std->cat.cipso;
		for (;;) {
			/* walk to the next set bit at or after cat + 1;
			 * a negative return ends the walk */
			cat = netlbl_bitmap_walk(bitmap,
						 bitmap_len_bits,
						 cat + 1,
						 1);
			if (cat < 0)
				break;
			if (cat >= cipso_cat_size ||
			    cipso_array[cat] >= CIPSO_V4_INV_CAT)
				return -EFAULT;
		}

		/* -1 means the walk finished cleanly; other negatives
		 * indicate a walk error and fall through to -EFAULT */
		if (cat == -1)
			return 0;
		break;
	}

	return -EFAULT;
}
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
0783
/**
 * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes (source of the host category map)
 * @net_cat: the zero-initialized buffer for the network category bitmap
 * @net_cat_len: buffer length in bytes
 *
 * Description:
 * Walks the host category map in @secattr and sets the corresponding
 * (possibly translated) bits in @net_cat.  Returns the number of bytes
 * actually used by the bitmap (rounded up to a whole byte) on success,
 * -EPERM for an untranslatable category, -ENOSPC when the buffer is too
 * small.
 *
 */
static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int host_spot = -1;
	u32 net_spot = CIPSO_V4_INV_CAT;
	u32 net_spot_max = 0;
	u32 net_clen_bits = net_cat_len * 8;
	u32 host_cat_size = 0;
	u32 *host_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		host_cat_size = doi_def->map.std->cat.local_size;
		host_cat_array = doi_def->map.std->cat.local;
	}

	for (;;) {
		host_spot = netlbl_catmap_walk(secattr->attr.mls.cat,
					       host_spot + 1);
		if (host_spot < 0)
			break;

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			net_spot = host_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (host_spot >= host_cat_size)
				return -EPERM;
			net_spot = host_cat_array[host_spot];
			if (net_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		if (net_spot >= net_clen_bits)
			return -ENOSPC;
		netlbl_bitmap_setbit(net_cat, net_spot, 1);

		if (net_spot > net_spot_max)
			net_spot_max = net_spot;
	}

	/* return the byte count covering the highest bit set */
	if (++net_spot_max % 8)
		return net_spot_max / 8 + 1;
	return net_spot_max / 8;
}
0831
0832
0833
0834
0835
0836
0837
0838
0839
0840
0841
0842
0843
0844
/**
 * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the network category bitmap from the packet
 * @net_cat_len: bitmap length in bytes
 * @secattr: the security attributes to populate
 *
 * Description:
 * Walks the set bits of @net_cat, translates each according to
 * @doi_def, and records them in @secattr's category map.  Returns zero
 * on success, -EPERM for an untranslatable category, -EFAULT for a
 * bitmap walk error, other negatives from the catmap setter.
 *
 */
static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	int net_spot = -1;
	u32 host_spot = CIPSO_V4_INV_CAT;
	u32 net_clen_bits = net_cat_len * 8;
	u32 net_cat_size = 0;
	u32 *net_cat_array = NULL;

	if (doi_def->type == CIPSO_V4_MAP_TRANS) {
		net_cat_size = doi_def->map.std->cat.cipso_size;
		net_cat_array = doi_def->map.std->cat.cipso;
	}

	for (;;) {
		net_spot = netlbl_bitmap_walk(net_cat,
					      net_clen_bits,
					      net_spot + 1,
					      1);
		if (net_spot < 0) {
			/* -2 signals a walk error; any other negative
			 * means the bitmap is exhausted */
			if (net_spot == -2)
				return -EFAULT;
			return 0;
		}

		switch (doi_def->type) {
		case CIPSO_V4_MAP_PASS:
			host_spot = net_spot;
			break;
		case CIPSO_V4_MAP_TRANS:
			if (net_spot >= net_cat_size)
				return -EPERM;
			host_spot = net_cat_array[net_spot];
			if (host_spot >= CIPSO_V4_INV_CAT)
				return -EPERM;
			break;
		}
		ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
					       host_spot,
					       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return -EINVAL;
}
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905
0906
0907 static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
0908 const unsigned char *enumcat,
0909 u32 enumcat_len)
0910 {
0911 u16 cat;
0912 int cat_prev = -1;
0913 u32 iter;
0914
0915 if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01)
0916 return -EFAULT;
0917
0918 for (iter = 0; iter < enumcat_len; iter += 2) {
0919 cat = get_unaligned_be16(&enumcat[iter]);
0920 if (cat <= cat_prev)
0921 return -EFAULT;
0922 cat_prev = cat;
0923 }
0924
0925 return 0;
0926 }
0927
0928
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
0939
0940
0941
0942 static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def,
0943 const struct netlbl_lsm_secattr *secattr,
0944 unsigned char *net_cat,
0945 u32 net_cat_len)
0946 {
0947 int cat = -1;
0948 u32 cat_iter = 0;
0949
0950 for (;;) {
0951 cat = netlbl_catmap_walk(secattr->attr.mls.cat, cat + 1);
0952 if (cat < 0)
0953 break;
0954 if ((cat_iter + 2) > net_cat_len)
0955 return -ENOSPC;
0956
0957 *((__be16 *)&net_cat[cat_iter]) = htons(cat);
0958 cat_iter += 2;
0959 }
0960
0961 return cat_iter;
0962 }
0963
0964
0965
0966
0967
0968
0969
0970
0971
0972
0973
0974
0975
0976
0977 static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
0978 const unsigned char *net_cat,
0979 u32 net_cat_len,
0980 struct netlbl_lsm_secattr *secattr)
0981 {
0982 int ret_val;
0983 u32 iter;
0984
0985 for (iter = 0; iter < net_cat_len; iter += 2) {
0986 ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat,
0987 get_unaligned_be16(&net_cat[iter]),
0988 GFP_ATOMIC);
0989 if (ret_val != 0)
0990 return ret_val;
0991 }
0992
0993 return 0;
0994 }
0995
0996
0997
0998
0999
1000
1001
1002
1003
1004
1005
1006
1007
/**
 * cipso_v4_map_cat_rng_valid - Check the ranged categories from a packet
 * @doi_def: the DOI definition
 * @rngcat: the ranged category list (pairs of big-endian 16-bit values)
 * @rngcat_len: list length in bytes
 *
 * Description:
 * Checks that the ranged categories are valid: the DOI must be a
 * pass-through mapping, the length must be a whole number of 16-bit
 * values, and the ranges must be non-overlapping and in descending
 * order.  The final range may omit its low value, which is then taken
 * as zero.  Returns zero if valid, -EFAULT otherwise.
 *
 */
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *rngcat,
				      u32 rngcat_len)
{
	u16 cat_high;
	u16 cat_low;
	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
	u32 iter;

	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < rngcat_len; iter += 4) {
		cat_high = get_unaligned_be16(&rngcat[iter]);
		/* a trailing unpaired "high" value implies a low of zero */
		if ((iter + 4) <= rngcat_len)
			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
		else
			cat_low = 0;

		/* each range must sit strictly below the previous one */
		if (cat_high > cat_prev)
			return -EFAULT;

		cat_prev = cat_low;
	}

	return 0;
}
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
/**
 * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes (source of the host category map)
 * @net_cat: the buffer for the ranged category list
 * @net_cat_len: buffer length in bytes
 *
 * Description:
 * Converts the host category map into a ranged category tag payload.
 * Ranges are first collected low-to-high as (low, high) pairs in
 * @array, then emitted in reverse so the wire format is high-to-low
 * (high value first, low value second) as cipso_v4_map_cat_rng_valid()
 * expects; a final low value of zero is omitted.  Returns the number of
 * bytes written on success, -ENOSPC when the buffer (or the maximum
 * option size) is exceeded, -EFAULT on a catmap walk error.
 *
 */
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int iter = -1;
	u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
	u32 array_cnt = 0;
	u32 cat_size = 0;

	/* make sure we don't overflow the 40-byte option ceiling */
	if (net_cat_len >
	    (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
		return -ENOSPC;

	for (;;) {
		iter = netlbl_catmap_walk(secattr->attr.mls.cat, iter + 1);
		if (iter < 0)
			break;
		/* a low value of zero takes no space on the wire */
		cat_size += (iter == 0 ? 0 : sizeof(u16));
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;

		iter = netlbl_catmap_walkrng(secattr->attr.mls.cat, iter);
		if (iter < 0)
			return -EFAULT;
		cat_size += sizeof(u16);
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;
	}

	/* emit in reverse: high value first, then low (unless zero) */
	for (iter = 0; array_cnt > 0;) {
		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
		iter += 2;
		array_cnt--;
		if (array[array_cnt] != 0) {
			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
			iter += 2;
		}
	}

	return cat_size;
}
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109 static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
1110 const unsigned char *net_cat,
1111 u32 net_cat_len,
1112 struct netlbl_lsm_secattr *secattr)
1113 {
1114 int ret_val;
1115 u32 net_iter;
1116 u16 cat_low;
1117 u16 cat_high;
1118
1119 for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
1120 cat_high = get_unaligned_be16(&net_cat[net_iter]);
1121 if ((net_iter + 4) <= net_cat_len)
1122 cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
1123 else
1124 cat_low = 0;
1125
1126 ret_val = netlbl_catmap_setrng(&secattr->attr.mls.cat,
1127 cat_low,
1128 cat_high,
1129 GFP_ATOMIC);
1130 if (ret_val != 0)
1131 return ret_val;
1132 }
1133
1134 return 0;
1135 }
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151 static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
1152 unsigned char *buf,
1153 u32 len)
1154 {
1155 buf[0] = IPOPT_CIPSO;
1156 buf[1] = CIPSO_V4_HDR_LEN + len;
1157 put_unaligned_be32(doi_def->doi, &buf[2]);
1158 }
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes to encode
 * @buffer: the zero-initialized tag buffer
 * @buffer_len: buffer length in bytes
 *
 * Description:
 * Encodes the level and (when present) category bitmap from @secattr
 * into a restricted bitmap tag.  When the cipso_v4_rbm_optfmt sysctl is
 * set, short bitmaps are padded out to a fixed 10-byte (14 bytes total)
 * tag for interoperability.  Returns the tag length on success,
 * -EPERM when no MLS level is present, other negatives on mapping
 * failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* optfmt: pad bitmaps of up to 10 bytes to a fixed-size
		 * 14-byte tag (the buffer was zeroed by the caller) */
		if (READ_ONCE(cipso_v4_rbm_optfmt) && ret_val > 0 &&
		    ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230 static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
1231 const unsigned char *tag,
1232 struct netlbl_lsm_secattr *secattr)
1233 {
1234 int ret_val;
1235 u8 tag_len = tag[1];
1236 u32 level;
1237
1238 ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
1239 if (ret_val != 0)
1240 return ret_val;
1241 secattr->attr.mls.lvl = level;
1242 secattr->flags |= NETLBL_SECATTR_MLS_LVL;
1243
1244 if (tag_len > 4) {
1245 ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
1246 &tag[4],
1247 tag_len - 4,
1248 secattr);
1249 if (ret_val != 0) {
1250 netlbl_catmap_free(secattr->attr.mls.cat);
1251 return ret_val;
1252 }
1253
1254 if (secattr->attr.mls.cat)
1255 secattr->flags |= NETLBL_SECATTR_MLS_CAT;
1256 }
1257
1258 return 0;
1259 }
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273 static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
1274 const struct netlbl_lsm_secattr *secattr,
1275 unsigned char *buffer,
1276 u32 buffer_len)
1277 {
1278 int ret_val;
1279 u32 tag_len;
1280 u32 level;
1281
1282 if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
1283 return -EPERM;
1284
1285 ret_val = cipso_v4_map_lvl_hton(doi_def,
1286 secattr->attr.mls.lvl,
1287 &level);
1288 if (ret_val != 0)
1289 return ret_val;
1290
1291 if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
1292 ret_val = cipso_v4_map_cat_enum_hton(doi_def,
1293 secattr,
1294 &buffer[4],
1295 buffer_len - 4);
1296 if (ret_val < 0)
1297 return ret_val;
1298
1299 tag_len = 4 + ret_val;
1300 } else
1301 tag_len = 4;
1302
1303 buffer[0] = CIPSO_V4_TAG_ENUM;
1304 buffer[1] = tag_len;
1305 buffer[3] = level;
1306
1307 return tag_len;
1308 }
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322 static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
1323 const unsigned char *tag,
1324 struct netlbl_lsm_secattr *secattr)
1325 {
1326 int ret_val;
1327 u8 tag_len = tag[1];
1328 u32 level;
1329
1330 ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
1331 if (ret_val != 0)
1332 return ret_val;
1333 secattr->attr.mls.lvl = level;
1334 secattr->flags |= NETLBL_SECATTR_MLS_LVL;
1335
1336 if (tag_len > 4) {
1337 ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
1338 &tag[4],
1339 tag_len - 4,
1340 secattr);
1341 if (ret_val != 0) {
1342 netlbl_catmap_free(secattr->attr.mls.cat);
1343 return ret_val;
1344 }
1345
1346 secattr->flags |= NETLBL_SECATTR_MLS_CAT;
1347 }
1348
1349 return 0;
1350 }
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364 static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
1365 const struct netlbl_lsm_secattr *secattr,
1366 unsigned char *buffer,
1367 u32 buffer_len)
1368 {
1369 int ret_val;
1370 u32 tag_len;
1371 u32 level;
1372
1373 if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
1374 return -EPERM;
1375
1376 ret_val = cipso_v4_map_lvl_hton(doi_def,
1377 secattr->attr.mls.lvl,
1378 &level);
1379 if (ret_val != 0)
1380 return ret_val;
1381
1382 if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
1383 ret_val = cipso_v4_map_cat_rng_hton(doi_def,
1384 secattr,
1385 &buffer[4],
1386 buffer_len - 4);
1387 if (ret_val < 0)
1388 return ret_val;
1389
1390 tag_len = 4 + ret_val;
1391 } else
1392 tag_len = 4;
1393
1394 buffer[0] = CIPSO_V4_TAG_RANGE;
1395 buffer[1] = tag_len;
1396 buffer[3] = level;
1397
1398 return tag_len;
1399 }
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412 static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
1413 const unsigned char *tag,
1414 struct netlbl_lsm_secattr *secattr)
1415 {
1416 int ret_val;
1417 u8 tag_len = tag[1];
1418 u32 level;
1419
1420 ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
1421 if (ret_val != 0)
1422 return ret_val;
1423 secattr->attr.mls.lvl = level;
1424 secattr->flags |= NETLBL_SECATTR_MLS_LVL;
1425
1426 if (tag_len > 4) {
1427 ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
1428 &tag[4],
1429 tag_len - 4,
1430 secattr);
1431 if (ret_val != 0) {
1432 netlbl_catmap_free(secattr->attr.mls.cat);
1433 return ret_val;
1434 }
1435
1436 if (secattr->attr.mls.cat)
1437 secattr->flags |= NETLBL_SECATTR_MLS_CAT;
1438 }
1439
1440 return 0;
1441 }
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455 static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
1456 const struct netlbl_lsm_secattr *secattr,
1457 unsigned char *buffer,
1458 u32 buffer_len)
1459 {
1460 if (!(secattr->flags & NETLBL_SECATTR_SECID))
1461 return -EPERM;
1462
1463 buffer[0] = CIPSO_V4_TAG_LOCAL;
1464 buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
1465 *(u32 *)&buffer[2] = secattr->attr.secid;
1466
1467 return CIPSO_V4_TAG_LOC_BLEN;
1468 }
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481 static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
1482 const unsigned char *tag,
1483 struct netlbl_lsm_secattr *secattr)
1484 {
1485 secattr->attr.secid = *(u32 *)&tag[2];
1486 secattr->flags |= NETLBL_SECATTR_SECID;
1487
1488 return 0;
1489 }
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
/**
 * cipso_v4_optptr - Find the CIPSO option in the packet
 * @skb: the packet
 *
 * Description:
 * Walks the IPv4 option area of @skb looking for a CIPSO option.
 * Returns a pointer to the start of the CIPSO option on success, NULL
 * when there is no CIPSO option or the option area is malformed (zero
 * or overlong tag length).
 *
 */
unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	/* options start immediately after the fixed IPv4 header */
	unsigned char *optptr = (unsigned char *)&(ip_hdr(skb)[1]);
	int optlen;
	int taglen;

	for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
		switch (optptr[0]) {
		case IPOPT_END:
			return NULL;
		case IPOPT_NOOP:
			/* single-byte padding option */
			taglen = 1;
			break;
		default:
			taglen = optptr[1];
		}
		/* a zero or out-of-bounds length means a corrupt option area */
		if (!taglen || taglen > optlen)
			return NULL;
		if (optptr[0] == IPOPT_CIPSO)
			return optptr;

		optlen -= taglen;
		optptr += taglen;
	}

	return NULL;
}
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet (may be NULL; NOTE(review): a NULL @skb causes local
 *       tags to be rejected, see the CIPSO_V4_TAG_LOCAL case)
 * @option: in: the start of the CIPSO option; out: advanced to the
 *          offending byte when validation fails
 *
 * Description:
 * Validates the CIPSO option in-place: overall length, known DOI, tag
 * types permitted by the DOI definition, per-tag length bounds, and
 * (depending on tag type and the cipso_v4_rbm_strictvalid sysctl) the
 * level and category contents.  Returns zero on success and the offset
 * of the invalid byte within the option on failure — the return value
 * doubles as the err_offset, which is why every failure path sets
 * err_offset to a non-zero option-relative offset.
 *
 */
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
	unsigned char *opt = *option;
	unsigned char *tag;
	unsigned char opt_iter;
	unsigned char err_offset = 0;
	u8 opt_len;
	u8 tag_len;
	struct cipso_v4_doi *doi_def = NULL;
	u32 tag_iter;

	/* minimum option: 6-byte header plus at least a minimal tag */
	opt_len = opt[1];
	if (opt_len < 8) {
		err_offset = 1;
		goto validate_return;
	}

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
	if (!doi_def) {
		err_offset = 2;
		goto validate_return_locked;
	}

	opt_iter = CIPSO_V4_HDR_LEN;
	tag = opt + opt_iter;
	while (opt_iter < opt_len) {
		/* the tag type must appear in the DOI's permitted tag list */
		for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
			if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
			    ++tag_iter == CIPSO_V4_TAG_MAXCNT) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}

		/* need at least the tag length byte before reading it */
		if (opt_iter + 1 == opt_len) {
			err_offset = opt_iter;
			goto validate_return_locked;
		}
		tag_len = tag[1];
		if (tag_len > (opt_len - opt_iter)) {
			err_offset = opt_iter + 1;
			goto validate_return_locked;
		}

		switch (tag[0]) {
		case CIPSO_V4_TAG_RBITMAP:
			if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			/* level/category checks for bitmap tags are only
			 * done in strict mode (cipso_v4_rbm_strictvalid) */
			if (READ_ONCE(cipso_v4_rbm_strictvalid)) {
				if (cipso_v4_map_lvl_valid(doi_def,
							   tag[3]) < 0) {
					err_offset = opt_iter + 3;
					goto validate_return_locked;
				}
				if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
				    cipso_v4_map_cat_rbm_valid(doi_def,
							    &tag[4],
							    tag_len - 4) < 0) {
					err_offset = opt_iter + 4;
					goto validate_return_locked;
				}
			}
			break;
		case CIPSO_V4_TAG_ENUM:
			if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
			    cipso_v4_map_cat_enum_valid(doi_def,
							&tag[4],
							tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_RANGE:
			if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
			    cipso_v4_map_cat_rng_valid(doi_def,
						       &tag[4],
						       tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_LOCAL:
			/* local tags carry a raw LSM secid and are only
			 * trusted on loopback interfaces */
			if (!skb || !(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
			if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}
			break;
		default:
			err_offset = opt_iter;
			goto validate_return_locked;
		}

		tag += tag_len;
		opt_iter += tag_len;
	}

validate_return_locked:
	rcu_read_unlock();
validate_return:
	*option = opt + err_offset;
	return err_offset;
}
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719 void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
1720 {
1721 unsigned char optbuf[sizeof(struct ip_options) + 40];
1722 struct ip_options *opt = (struct ip_options *)optbuf;
1723 int res;
1724
1725 if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
1726 return;
1727
1728
1729
1730
1731
1732
1733 memset(opt, 0, sizeof(struct ip_options));
1734 opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
1735 rcu_read_lock();
1736 res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
1737 rcu_read_unlock();
1738
1739 if (res)
1740 return;
1741
1742 if (gateway)
1743 __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
1744 else
1745 __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
1746 }
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761 static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
1762 const struct cipso_v4_doi *doi_def,
1763 const struct netlbl_lsm_secattr *secattr)
1764 {
1765 int ret_val;
1766 u32 iter;
1767
1768 if (buf_len <= CIPSO_V4_HDR_LEN)
1769 return -ENOSPC;
1770
1771
1772
1773
1774 iter = 0;
1775 do {
1776 memset(buf, 0, buf_len);
1777 switch (doi_def->tags[iter]) {
1778 case CIPSO_V4_TAG_RBITMAP:
1779 ret_val = cipso_v4_gentag_rbm(doi_def,
1780 secattr,
1781 &buf[CIPSO_V4_HDR_LEN],
1782 buf_len - CIPSO_V4_HDR_LEN);
1783 break;
1784 case CIPSO_V4_TAG_ENUM:
1785 ret_val = cipso_v4_gentag_enum(doi_def,
1786 secattr,
1787 &buf[CIPSO_V4_HDR_LEN],
1788 buf_len - CIPSO_V4_HDR_LEN);
1789 break;
1790 case CIPSO_V4_TAG_RANGE:
1791 ret_val = cipso_v4_gentag_rng(doi_def,
1792 secattr,
1793 &buf[CIPSO_V4_HDR_LEN],
1794 buf_len - CIPSO_V4_HDR_LEN);
1795 break;
1796 case CIPSO_V4_TAG_LOCAL:
1797 ret_val = cipso_v4_gentag_loc(doi_def,
1798 secattr,
1799 &buf[CIPSO_V4_HDR_LEN],
1800 buf_len - CIPSO_V4_HDR_LEN);
1801 break;
1802 default:
1803 return -EPERM;
1804 }
1805
1806 iter++;
1807 } while (ret_val < 0 &&
1808 iter < CIPSO_V4_TAG_MAXCNT &&
1809 doi_def->tags[iter] != CIPSO_V4_TAG_INVALID);
1810 if (ret_val < 0)
1811 return ret_val;
1812 cipso_v4_gentag_hdr(doi_def, buf, ret_val);
1813 return CIPSO_V4_HDR_LEN + ret_val;
1814 }
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
/**
 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
 * @sk: the socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given socket using the DOI definition and
 * security attributes passed to the function.  The inet_opt pointer is
 * updated under lockdep_sock_is_held(), so the caller must hold the socket
 * lock (or otherwise own the socket exclusively).  A NULL @sk is tolerated
 * and treated as success — presumably sockets still being created get
 * labeled later; confirm against callers.  Returns zero on success and
 * negative values on failure.
 *
 */
int cipso_v4_sock_setattr(struct sock *sk,
			  const struct cipso_v4_doi *doi_def,
			  const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *old, *opt = NULL;
	struct inet_sock *sk_inet;
	struct inet_connection_sock *sk_conn;

	/* No socket, nothing to label; report success. */
	if (!sk)
		return 0;

	/* Allocate the maximum option size up front; slightly wasteful but
	 * it keeps the generation step simple and it is at most 40 bytes. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto socket_setattr_failure;
	buf_len = ret_val;

	/* Round the option block up to a multiple of 4 bytes as required
	 * for IPv4 options, then build the ip_options_rcu structure that
	 * will be published on the socket. */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (!opt) {
		ret_val = -ENOMEM;
		goto socket_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	sk_inet = inet_sk(sk);

	old = rcu_dereference_protected(sk_inet->inet_opt,
					lockdep_sock_is_held(sk));
	if (sk_inet->is_icsk) {
		/* Keep the connection's cached extension-header length in
		 * sync with the option swap and recompute the MSS. */
		sk_conn = inet_csk(sk);
		if (old)
			sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
		sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
	/* Publish the new options, then free the old block after a grace
	 * period so concurrent RCU readers remain safe. */
	rcu_assign_pointer(sk_inet->inet_opt, opt);
	if (old)
		kfree_rcu(old, rcu);

	return 0;

socket_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
/**
 * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket
 * @req: the connection request socket
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given request socket using the DOI definition
 * and security attributes passed to the function.  Any previously installed
 * option block is swapped out atomically via xchg() and freed after an RCU
 * grace period.  Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_req_setattr(struct request_sock *req,
			 const struct cipso_v4_doi *doi_def,
			 const struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -EPERM;
	unsigned char *buf = NULL;
	u32 buf_len;
	u32 opt_len;
	struct ip_options_rcu *opt = NULL;
	struct inet_request_sock *req_inet;

	/* Allocate the maximum option size up front (40 bytes at most);
	 * this keeps the generation step simple. */
	buf_len = CIPSO_V4_OPT_LEN_MAX;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}

	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		goto req_setattr_failure;
	buf_len = ret_val;

	/* Round the option block up to a multiple of 4 bytes as required
	 * for IPv4 options and build the ip_options_rcu structure. */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (!opt) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->opt.__data, buf, buf_len);
	opt->opt.optlen = opt_len;
	opt->opt.cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	/* Swap the new options in; after the xchg() "opt" holds the old
	 * block (possibly NULL) which is freed after a grace period. */
	req_inet = inet_rsk(req);
	opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
	if (opt)
		kfree_rcu(opt, rcu);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980 static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
1981 {
1982 struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
1983 int hdr_delta = 0;
1984
1985 if (!opt || opt->opt.cipso == 0)
1986 return 0;
1987 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
1988 u8 cipso_len;
1989 u8 cipso_off;
1990 unsigned char *cipso_ptr;
1991 int iter;
1992 int optlen_new;
1993
1994 cipso_off = opt->opt.cipso - sizeof(struct iphdr);
1995 cipso_ptr = &opt->opt.__data[cipso_off];
1996 cipso_len = cipso_ptr[1];
1997
1998 if (opt->opt.srr > opt->opt.cipso)
1999 opt->opt.srr -= cipso_len;
2000 if (opt->opt.rr > opt->opt.cipso)
2001 opt->opt.rr -= cipso_len;
2002 if (opt->opt.ts > opt->opt.cipso)
2003 opt->opt.ts -= cipso_len;
2004 if (opt->opt.router_alert > opt->opt.cipso)
2005 opt->opt.router_alert -= cipso_len;
2006 opt->opt.cipso = 0;
2007
2008 memmove(cipso_ptr, cipso_ptr + cipso_len,
2009 opt->opt.optlen - cipso_off - cipso_len);
2010
2011
2012
2013
2014
2015
2016 iter = 0;
2017 optlen_new = 0;
2018 while (iter < opt->opt.optlen)
2019 if (opt->opt.__data[iter] != IPOPT_NOP) {
2020 iter += opt->opt.__data[iter + 1];
2021 optlen_new = iter;
2022 } else
2023 iter++;
2024 hdr_delta = opt->opt.optlen;
2025 opt->opt.optlen = (optlen_new + 3) & ~3;
2026 hdr_delta -= opt->opt.optlen;
2027 } else {
2028
2029
2030 *opt_ptr = NULL;
2031 hdr_delta = opt->opt.optlen;
2032 kfree_rcu(opt, rcu);
2033 }
2034
2035 return hdr_delta;
2036 }
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046 void cipso_v4_sock_delattr(struct sock *sk)
2047 {
2048 struct inet_sock *sk_inet;
2049 int hdr_delta;
2050
2051 sk_inet = inet_sk(sk);
2052
2053 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
2054 if (sk_inet->is_icsk && hdr_delta > 0) {
2055 struct inet_connection_sock *sk_conn = inet_csk(sk);
2056 sk_conn->icsk_ext_hdr_len -= hdr_delta;
2057 sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
2058 }
2059 }
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069 void cipso_v4_req_delattr(struct request_sock *req)
2070 {
2071 cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
2072 }
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084 int cipso_v4_getattr(const unsigned char *cipso,
2085 struct netlbl_lsm_secattr *secattr)
2086 {
2087 int ret_val = -ENOMSG;
2088 u32 doi;
2089 struct cipso_v4_doi *doi_def;
2090
2091 if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
2092 return 0;
2093
2094 doi = get_unaligned_be32(&cipso[2]);
2095 rcu_read_lock();
2096 doi_def = cipso_v4_doi_search(doi);
2097 if (!doi_def)
2098 goto getattr_return;
2099
2100
2101
2102 switch (cipso[6]) {
2103 case CIPSO_V4_TAG_RBITMAP:
2104 ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
2105 break;
2106 case CIPSO_V4_TAG_ENUM:
2107 ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
2108 break;
2109 case CIPSO_V4_TAG_RANGE:
2110 ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
2111 break;
2112 case CIPSO_V4_TAG_LOCAL:
2113 ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
2114 break;
2115 }
2116 if (ret_val == 0)
2117 secattr->type = NETLBL_NLTYPE_CIPSOV4;
2118
2119 getattr_return:
2120 rcu_read_unlock();
2121 return ret_val;
2122 }
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136 int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
2137 {
2138 struct ip_options_rcu *opt;
2139 int res = -ENOMSG;
2140
2141 rcu_read_lock();
2142 opt = rcu_dereference(inet_sk(sk)->inet_opt);
2143 if (opt && opt->opt.cipso)
2144 res = cipso_v4_getattr(opt->opt.__data +
2145 opt->opt.cipso -
2146 sizeof(struct iphdr),
2147 secattr);
2148 rcu_read_unlock();
2149 return res;
2150 }
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes,
 * rewriting the IP header in place (growing or NOP-padding the option area
 * as needed) and recomputing the checksum.  Returns zero on success and
 * negative values on failure.
 *
 */
int cipso_v4_skbuff_setattr(struct sk_buff *skb,
			    const struct cipso_v4_doi *doi_def,
			    const struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	struct iphdr *iph;
	struct ip_options *opt = &IPCB(skb)->opt;
	unsigned char buf[CIPSO_V4_OPT_LEN_MAX];
	u32 buf_len = CIPSO_V4_OPT_LEN_MAX;
	u32 opt_len;
	int len_delta;

	/* Generate the new option into a local buffer first; the 4-byte
	 * rounded length is what actually goes into the header. */
	ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr);
	if (ret_val < 0)
		return ret_val;
	buf_len = ret_val;
	opt_len = (buf_len + 3) & ~3;

	/* len_delta > 0 means the header must grow; < 0 means the new
	 * option block is smaller than the existing one. */
	len_delta = opt_len - opt->optlen;

	/* Make sure we own the data and have headroom for any growth
	 * before we start mangling the packet. */
	ret_val = skb_cow(skb, skb_headroom(skb) + len_delta);
	if (ret_val < 0)
		return ret_val;

	if (len_delta > 0) {
		/* Grow: push the IP header down and move it into place,
		 * then refresh the header pointer. */
		iph = ip_hdr(skb);
		skb_push(skb, len_delta);
		memmove((char *)iph - len_delta, iph, iph->ihl << 2);
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
	} else if (len_delta < 0) {
		/* Shrink: blank the existing option area with NOPs before
		 * writing the new, shorter option set over it. */
		iph = ip_hdr(skb);
		memset(iph + 1, IPOPT_NOP, opt->optlen);
	} else
		iph = ip_hdr(skb);

	/* Reset the control-block option state to describe only the new
	 * CIPSO option. */
	if (opt->optlen > 0)
		memset(opt, 0, sizeof(*opt));
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	opt->is_changed = 1;

	/* Copy the option in right after the IP header and zero-pad up to
	 * the 4-byte aligned length; if the header size changed, rewrite
	 * ihl/tot_len to match. */
	memcpy(iph + 1, buf, buf_len);
	if (opt_len > buf_len)
		memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len);
	if (len_delta != 0) {
		iph->ihl = 5 + (opt_len >> 2);
		iph->tot_len = htons(skb->len);
	}
	ip_send_check(iph);

	return 0;
}
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241 int cipso_v4_skbuff_delattr(struct sk_buff *skb)
2242 {
2243 int ret_val;
2244 struct iphdr *iph;
2245 struct ip_options *opt = &IPCB(skb)->opt;
2246 unsigned char *cipso_ptr;
2247
2248 if (opt->cipso == 0)
2249 return 0;
2250
2251
2252 ret_val = skb_cow(skb, skb_headroom(skb));
2253 if (ret_val < 0)
2254 return ret_val;
2255
2256
2257
2258
2259
2260 iph = ip_hdr(skb);
2261 cipso_ptr = (unsigned char *)iph + opt->cipso;
2262 memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]);
2263 opt->cipso = 0;
2264 opt->is_changed = 1;
2265
2266 ip_send_check(iph);
2267
2268 return 0;
2269 }
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283 static int __init cipso_v4_init(void)
2284 {
2285 int ret_val;
2286
2287 ret_val = cipso_v4_cache_init();
2288 if (ret_val != 0)
2289 panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n",
2290 ret_val);
2291
2292 return 0;
2293 }
2294
2295 subsys_initcall(cipso_v4_init);