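// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Filesystem management and hooks
 */
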
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/landlock.h>

#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"
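
/* Underlying object management */
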
static void release_inode(struct landlock_object *const object)
	__releases(object->lock)
{
	struct inode *const inode = object->underobj;
	struct super_block *sb;

	if (!inode) {
		spin_unlock(&object->lock);
		return;
	}

	/*
	 * Protects against concurrent use by hook_sb_delete() of the
	 * reference to the underlying inode.
	 */
	object->underobj = NULL;
	/*
	 * Makes sure that if the filesystem is concurrently unmounted,
	 * hook_sb_delete() will wait for us to finish iput().
	 */
	sb = inode->i_sb;
	atomic_long_inc(&landlock_superblock(sb)->inode_refs);
	spin_unlock(&object->lock);
	/*
	 * Because object->underobj was not NULL, hook_sb_delete() and
	 * get_inode_object() guarantee that it is safe to reset
	 * landlock_inode(inode)->object while it is not NULL.  It is
	 * therefore not necessary to lock inode->i_lock.
	 */
	rcu_assign_pointer(landlock_inode(inode)->object, NULL);

	/*
	 * Now, new rules can safely be tied to @inode with get_inode_object().
	 */
	iput(inode);
	if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
		wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
	.release = release_inode
};
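
/* Ruleset management */
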
static struct landlock_object *get_inode_object(struct inode *const inode)
{
	struct landlock_object *object, *new_object;
	struct landlock_inode_security *inode_sec = landlock_inode(inode);

	rcu_read_lock();
retry:
	object = rcu_dereference(inode_sec->object);
	if (object) {
		if (likely(refcount_inc_not_zero(&object->usage))) {
			rcu_read_unlock();
			return object;
		}
		/*
		 * We are racing with release_inode(): the object is being
		 * freed.  Wait for release_inode(), then retry.
		 */
		spin_lock(&object->lock);
		spin_unlock(&object->lock);
		goto retry;
	}
	rcu_read_unlock();

	/*
	 * If there is no object tied to @inode, then create a new one (without
	 * holding any locks).
	 */
	new_object = landlock_create_object(&landlock_fs_underops, inode);
	if (IS_ERR(new_object))
		return new_object;

	/*
	 * Protects against concurrent calls to get_inode_object() or
	 * hook_sb_delete().
	 */
	spin_lock(&inode->i_lock);
	if (unlikely(rcu_access_pointer(inode_sec->object))) {
		/* Someone else just created the object, bail out and retry. */
		spin_unlock(&inode->i_lock);
		kfree(new_object);

		rcu_read_lock();
		goto retry;
	}

	/*
	 * @inode will be released by hook_sb_delete() on its superblock
	 * shutdown, or by release_inode() when no more ruleset references the
	 * related object.
	 */
	ihold(inode);
	rcu_assign_pointer(inode_sec->object, new_object);
	spin_unlock(&inode->i_lock);
	return new_object;
}
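
/* All access rights that can be tied to files. */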
#define ACCESS_FILE ( \
	LANDLOCK_ACCESS_FS_EXECUTE | \
	LANDLOCK_ACCESS_FS_WRITE_FILE | \
	LANDLOCK_ACCESS_FS_READ_FILE)
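
/*
 * All access rights that are denied by default whether they are handled or
 * not by a ruleset/layer.  This must be ORed with all
 * ruleset->fs_access_masks[] entries when we need to get the absolute
 * handled access masks.
 */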
#define ACCESS_INITIALLY_DENIED ( \
	LANDLOCK_ACCESS_FS_REFER)
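
/*
 * @path: Should have been checked by get_path_from_fd().
 */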
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
			    const struct path *const path,
			    access_mask_t access_rights)
{
	int err;
	struct landlock_object *object;

	/* Files only get access rights that make sense. */
	if (!d_is_dir(path->dentry) &&
	    (access_rights | ACCESS_FILE) != ACCESS_FILE)
		return -EINVAL;
	if (WARN_ON_ONCE(ruleset->num_layers != 1))
		return -EINVAL;

	/* Transforms relative access rights to absolute ones. */
	access_rights |=
		LANDLOCK_MASK_ACCESS_FS &
		~(ruleset->fs_access_masks[0] | ACCESS_INITIALLY_DENIED);
	object = get_inode_object(d_backing_inode(path->dentry));
	if (IS_ERR(object))
		return PTR_ERR(object);
	mutex_lock(&ruleset->lock);
	err = landlock_insert_rule(ruleset, object, access_rights);
	mutex_unlock(&ruleset->lock);
	/*
	 * No need to check for an error: landlock_insert_rule() increments
	 * the refcount for the new object if needed.
	 */
	landlock_put_object(object);
	return err;
}
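
/* Access-control management */

/*
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule matches.
 */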
static inline const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
	  const struct dentry *const dentry)
{
	const struct landlock_rule *rule;
	const struct inode *inode;

	/* Ignores nonexistent leafs. */
	if (d_is_negative(dentry))
		return NULL;

	inode = d_backing_inode(dentry);
	rcu_read_lock();
	rule = landlock_find_rule(
		domain, rcu_dereference(landlock_inode(inode)->object));
	rcu_read_unlock();
	return rule;
}
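
/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */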
static inline bool
unmask_layers(const struct landlock_rule *const rule,
	      const access_mask_t access_request,
	      layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	size_t layer_level;

	if (!access_request || !layer_masks)
		return true;
	if (!rule)
		return false;

	/*
	 * An access is granted if, for each policy layer, at least one rule
	 * encountered on the pathwalk grants the access, regardless of its
	 * position in the layer stack.  We must then check the remaining
	 * layers for each inode, from the first added layer to the last one,
	 * as recorded by rule->layers.  @layer_masks only contains the bits
	 * of the layers handling each access, cf. init_layer_masks().
	 */
	for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
		const struct landlock_layer *const layer =
			&rule->layers[layer_level];
		const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
		const unsigned long access_req = access_request;
		unsigned long access_bit;
		bool is_empty;

		/*
		 * Records in @layer_masks which layers grant each requested
		 * access: the request is allowed once every mask is empty.
		 */
		is_empty = true;
		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(*layer_masks)) {
			if (layer->access & BIT_ULL(access_bit))
				(*layer_masks)[access_bit] &= ~layer_bit;
			is_empty = is_empty && !(*layer_masks)[access_bit];
		}
		if (is_empty)
			return true;
	}
	return false;
}
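
/*
 * Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but can still be reachable through
 * /proc/<pid>/fd/<file-descriptor>
 */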
static inline bool is_nouser_or_private(const struct dentry *dentry)
{
	return (dentry->d_sb->s_flags & SB_NOUSER) ||
	       (d_is_positive(dentry) &&
		unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}

static inline access_mask_t
get_handled_accesses(const struct landlock_ruleset *const domain)
{
	access_mask_t access_dom = ACCESS_INITIALLY_DENIED;
	size_t layer_level;

	for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
		access_dom |= domain->fs_access_masks[layer_level];
	return access_dom & LANDLOCK_MASK_ACCESS_FS;
}

static inline access_mask_t
init_layer_masks(const struct landlock_ruleset *const domain,
		 const access_mask_t access_request,
		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	access_mask_t handled_accesses = 0;
	size_t layer_level;

	memset(layer_masks, 0, sizeof(*layer_masks));

	if (!access_request)
		return 0;

	/* Saves all handled accesses per layer. */
	for (layer_level = 0; layer_level < domain->num_layers; layer_level++) {
		const unsigned long access_req = access_request;
		unsigned long access_bit;

		for_each_set_bit(access_bit, &access_req,
				 ARRAY_SIZE(*layer_masks)) {
			/*
			 * Artificially handles all initially denied by
			 * default access rights.
			 */
			if (BIT_ULL(access_bit) &
			    (domain->fs_access_masks[layer_level] |
			     ACCESS_INITIALLY_DENIED)) {
				(*layer_masks)[access_bit] |=
					BIT_ULL(layer_level);
				handled_accesses |= BIT_ULL(access_bit);
			}
		}
	}
	return handled_accesses;
}
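
/*
 * Check that a destination file hierarchy has more restrictions than a source
 * file hierarchy.  This is only used for link and rename actions.
 *
 * @layer_masks_child2: Optional child masks.
 */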
static inline bool no_more_access(
	const layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS],
	const bool child1_is_directory,
	const layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const layer_mask_t (*const layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS],
	const bool child2_is_directory)
{
	unsigned long access_bit;

	for (access_bit = 0; access_bit < ARRAY_SIZE(*layer_masks_parent2);
	     access_bit++) {
		/* Ignores accesses that only make sense for directories. */
		const bool is_file_access =
			!!(BIT_ULL(access_bit) & ACCESS_FILE);

		if (child1_is_directory || is_file_access) {
			/*
			 * Checks if the destination restrictions are a
			 * superset of the source ones (i.e. inherited access
			 * rights without child exceptions):
			 * restrictions(parent2) >= restrictions(child1 +
			 * parent1)
			 */
			if ((((*layer_masks_parent1)[access_bit] &
			      (*layer_masks_child1)[access_bit]) |
			     (*layer_masks_parent2)[access_bit]) !=
			    (*layer_masks_parent2)[access_bit])
				return false;
		}

		if (!layer_masks_child2)
			continue;
		if (child2_is_directory || is_file_access) {
			/*
			 * Checks if the source restrictions are a superset of
			 * the destination ones (i.e. inherited access rights
			 * without child exceptions):
			 * restrictions(parent1) >= restrictions(child2 +
			 * parent2)
			 */
			if ((((*layer_masks_parent2)[access_bit] &
			      (*layer_masks_child2)[access_bit]) |
			     (*layer_masks_parent1)[access_bit]) !=
			    (*layer_masks_parent1)[access_bit])
				return false;
		}
	}
	return true;
}
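
/*
 * Removes @layer_masks accesses that are not requested.
 *
 * Returns true if the request is allowed, false otherwise.
 */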
static inline bool
scope_to_request(const access_mask_t access_request,
		 layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
	const unsigned long access_req = access_request;
	unsigned long access_bit;

	if (WARN_ON_ONCE(!layer_masks))
		return true;

	for_each_clear_bit(access_bit, &access_req, ARRAY_SIZE(*layer_masks))
		(*layer_masks)[access_bit] = 0;
	return !memchr_inv(layer_masks, 0, sizeof(*layer_masks));
}
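
/*
 * Returns true if there is at least one access right different than
 * LANDLOCK_ACCESS_FS_REFER.
 */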
static inline bool
is_eacces(const layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS],
	  const access_mask_t access_request)
{
	unsigned long access_bit;
	/* LANDLOCK_ACCESS_FS_REFER alone must return -EXDEV. */
	const unsigned long access_check = access_request &
					   ~LANDLOCK_ACCESS_FS_REFER;

	if (!layer_masks)
		return false;

	for_each_set_bit(access_bit, &access_check, ARRAY_SIZE(*layer_masks)) {
		if ((*layer_masks)[access_bit])
			return true;
	}
	return false;
}
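
/**
 * check_access_path_dual - Check accesses for requests with a common path
 *
 * @domain: Domain to check against.
 * @path: File hierarchy to walk through.
 * @access_request_parent1: Accesses to check, once @layer_masks_parent1 is
 *     equal to @layer_masks_parent2 (if any).  This is tied to the unique
 *     requested path for most actions, or the source in case of a refer
 *     action (i.e. rename or link), or the source and destination in case of
 *     RENAME_EXCHANGE.
 * @layer_masks_parent1: Pointer to a matrix of layer masks per access
 *     masks, identifying the layers that forbid a specific access.  Bits from
 *     this matrix can be unset according to the walk along @path.  An empty
 *     matrix means that @domain allows all possible Landlock accesses (i.e.
 *     not only those identified by @access_request_parent1).
 * @dentry_child1: Dentry to the initial child of the parent1 path.  This
 *     pointer must be NULL for non-refer actions (i.e. not link nor rename).
 * @access_request_parent2: Similar to @access_request_parent1 but for a
 *     request involving a source and a destination.  This must be equal to
 *     @access_request_parent1 if @dentry_child1 is NULL.
 * @layer_masks_parent2: Similar to @layer_masks_parent1 but for a refer
 *     action.  This must be NULL otherwise.
 * @dentry_child2: Dentry to the initial child of the parent2 path.  This
 *     pointer is only set for RENAME_EXCHANGE actions and must be NULL
 *     otherwise.
 *
 * This helper first checks that the destination has a superset of
 * restrictions compared to the source (if any) for a common path.  Because of
 * RENAME_EXCHANGE actions, source and destination may be swapped.  It then
 * checks that the collected accesses and the remaining ones are enough to
 * allow the request.
 *
 * Returns:
 * - 0 if the access request is granted;
 * - -EACCES if it is denied because of access right other than
 *   LANDLOCK_ACCESS_FS_REFER;
 * - -EXDEV if the renaming or linking would be a privileged escalation
 *   (according to the layered policies), or if LANDLOCK_ACCESS_FS_REFER is
 *   not allowed by the source or the destination.
 */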
static int check_access_path_dual(
	const struct landlock_ruleset *const domain,
	const struct path *const path,
	const access_mask_t access_request_parent1,
	layer_mask_t (*const layer_masks_parent1)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child1,
	const access_mask_t access_request_parent2,
	layer_mask_t (*const layer_masks_parent2)[LANDLOCK_NUM_ACCESS_FS],
	const struct dentry *const dentry_child2)
{
	bool allowed_parent1 = false, allowed_parent2 = false, is_dom_check,
	     child1_is_directory = true, child2_is_directory = true;
	struct path walker_path;
	access_mask_t access_masked_parent1, access_masked_parent2;
	layer_mask_t _layer_masks_child1[LANDLOCK_NUM_ACCESS_FS],
		_layer_masks_child2[LANDLOCK_NUM_ACCESS_FS];
	layer_mask_t(*layer_masks_child1)[LANDLOCK_NUM_ACCESS_FS] = NULL,
		(*layer_masks_child2)[LANDLOCK_NUM_ACCESS_FS] = NULL;

	if (!access_request_parent1 && !access_request_parent2)
		return 0;
	if (WARN_ON_ONCE(!domain || !path))
		return 0;
	if (is_nouser_or_private(path->dentry))
		return 0;
	if (WARN_ON_ONCE(domain->num_layers < 1 || !layer_masks_parent1))
		return -EACCES;

	if (unlikely(layer_masks_parent2)) {
		if (WARN_ON_ONCE(!dentry_child1))
			return -EACCES;
		/*
		 * For a double request, first check for potential privilege
		 * escalation by looking at domain handled accesses (which are
		 * a superset of the meaningful requested accesses).
		 */
		access_masked_parent1 = access_masked_parent2 =
			get_handled_accesses(domain);
		is_dom_check = true;
	} else {
		if (WARN_ON_ONCE(dentry_child1 || dentry_child2))
			return -EACCES;
		/* For a simple request, only check the requested accesses. */
		access_masked_parent1 = access_request_parent1;
		access_masked_parent2 = access_request_parent2;
		is_dom_check = false;
	}

	if (unlikely(dentry_child1)) {
		unmask_layers(find_rule(domain, dentry_child1),
			      init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
					       &_layer_masks_child1),
			      &_layer_masks_child1);
		layer_masks_child1 = &_layer_masks_child1;
		child1_is_directory = d_is_dir(dentry_child1);
	}
	if (unlikely(dentry_child2)) {
		unmask_layers(find_rule(domain, dentry_child2),
			      init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
					       &_layer_masks_child2),
			      &_layer_masks_child2);
		layer_masks_child2 = &_layer_masks_child2;
		child2_is_directory = d_is_dir(dentry_child2);
	}

	walker_path = *path;
	path_get(&walker_path);
	/*
	 * We need to walk through all the hierarchy to not miss any relevant
	 * restriction.
	 */
	while (true) {
		struct dentry *parent_dentry;
		const struct landlock_rule *rule;

		/*
		 * If at least all accesses allowed on the destination are
		 * already allowed on the source, respectively if there is at
		 * least as much as restrictions on the destination than on
		 * the source, then we can safely refer files from the source
		 * to the destination without risking a privilege escalation.
		 * This also applies in the case of RENAME_EXCHANGE, which
		 * implies checks on both directions.  This is crucial for
		 * standalone multilayered security policies.  Furthermore,
		 * this helps avoid policy writers to shoot themselves in the
		 * foot.
		 */
		if (unlikely(is_dom_check &&
			     no_more_access(
				     layer_masks_parent1, layer_masks_child1,
				     child1_is_directory, layer_masks_parent2,
				     layer_masks_child2,
				     child2_is_directory))) {
			allowed_parent1 = scope_to_request(
				access_request_parent1, layer_masks_parent1);
			allowed_parent2 = scope_to_request(
				access_request_parent2, layer_masks_parent2);

			/* Stops when all accesses are granted. */
			if (allowed_parent1 && allowed_parent2)
				break;

			/*
			 * Now, downgrades the remaining checks from domain
			 * handled accesses to requested accesses.
			 */
			is_dom_check = false;
			access_masked_parent1 = access_request_parent1;
			access_masked_parent2 = access_request_parent2;
		}

		rule = find_rule(domain, walker_path.dentry);
		allowed_parent1 = unmask_layers(rule, access_masked_parent1,
						layer_masks_parent1);
		allowed_parent2 = unmask_layers(rule, access_masked_parent2,
						layer_masks_parent2);

		/* Stops when a rule from each layer grants access. */
		if (allowed_parent1 && allowed_parent2)
			break;

jump_up:
		if (walker_path.dentry == walker_path.mnt->mnt_root) {
			if (follow_up(&walker_path)) {
				/* Ignores hidden mount points. */
				goto jump_up;
			} else {
				/*
				 * Stops at the real root.  Denies access
				 * because not all layers have granted access.
				 */
				break;
			}
		}
		if (unlikely(IS_ROOT(walker_path.dentry))) {
			/*
			 * Stops at disconnected root directories.  Only allows
			 * access to internal filesystems (e.g. nsfs, which is
			 * reachable through /proc/<pid>/ns/<namespace>).
			 */
			allowed_parent1 = allowed_parent2 =
				!!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
			break;
		}
		parent_dentry = dget_parent(walker_path.dentry);
		dput(walker_path.dentry);
		walker_path.dentry = parent_dentry;
	}
	path_put(&walker_path);

	if (allowed_parent1 && allowed_parent2)
		return 0;

	/*
	 * This prioritizes EACCES over EXDEV for all actions, including
	 * renames with RENAME_EXCHANGE.
	 */
	if (likely(is_eacces(layer_masks_parent1, access_request_parent1) ||
		   is_eacces(layer_masks_parent2, access_request_parent2)))
		return -EACCES;

	/*
	 * Gracefully forbids reparenting if the destination directory
	 * hierarchy is not a superset of restrictions of the source directory
	 * hierarchy, or if LANDLOCK_ACCESS_FS_REFER is not allowed by the
	 * source or the destination.
	 */
	return -EXDEV;
}

static inline int check_access_path(const struct landlock_ruleset *const domain,
				    const struct path *const path,
				    access_mask_t access_request)
{
	layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};

	access_request = init_layer_masks(domain, access_request, &layer_masks);
	return check_access_path_dual(domain, path, access_request,
				      &layer_masks, NULL, 0, NULL, NULL);
}

static inline int current_check_access_path(const struct path *const path,
					    const access_mask_t access_request)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, path, access_request);
}

static inline access_mask_t get_mode_access(const umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFLNK:
		return LANDLOCK_ACCESS_FS_MAKE_SYM;
	case 0:
		/* A zero mode translates to S_IFREG. */
	case S_IFREG:
		return LANDLOCK_ACCESS_FS_MAKE_REG;
	case S_IFDIR:
		return LANDLOCK_ACCESS_FS_MAKE_DIR;
	case S_IFCHR:
		return LANDLOCK_ACCESS_FS_MAKE_CHAR;
	case S_IFBLK:
		return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
	case S_IFIFO:
		return LANDLOCK_ACCESS_FS_MAKE_FIFO;
	case S_IFSOCK:
		return LANDLOCK_ACCESS_FS_MAKE_SOCK;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline access_mask_t maybe_remove(const struct dentry *const dentry)
{
	if (d_is_negative(dentry))
		return 0;
	return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
				  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}
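
/**
 * collect_domain_accesses - Walk through a file path and collect accesses
 *
 * @domain: Domain to check against.
 * @mnt_root: Last directory to check.
 * @dir: Directory to start the walk from.
 * @layer_masks_dom: Where to store the domain access rights collected on the
 *     walk.
 *
 * This helper walks from @dir up to and including @mnt_root (a mount point),
 * filling @layer_masks_dom with all the domain handled accesses and clearing
 * the layer bits granted by the encountered rules.
 *
 * Returns true if all the domain access rights are allowed for @dir according
 * to the walked hierarchy, false otherwise.
 */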
static bool collect_domain_accesses(
	const struct landlock_ruleset *const domain,
	const struct dentry *const mnt_root, struct dentry *dir,
	layer_mask_t (*const layer_masks_dom)[LANDLOCK_NUM_ACCESS_FS])
{
	unsigned long access_dom;
	bool ret = false;

	if (WARN_ON_ONCE(!domain || !mnt_root || !dir || !layer_masks_dom))
		return true;
	if (is_nouser_or_private(dir))
		return true;

	access_dom = init_layer_masks(domain, LANDLOCK_MASK_ACCESS_FS,
				      layer_masks_dom);

	dget(dir);
	while (true) {
		struct dentry *parent_dentry;

		/* Gets all layers allowing all domain accesses. */
		if (unmask_layers(find_rule(domain, dir), access_dom,
				  layer_masks_dom)) {
			/*
			 * Stops when all handled accesses are allowed by at
			 * least one rule in each layer.
			 */
			ret = true;
			break;
		}

		/* We should not reach a root other than @mnt_root. */
		if (dir == mnt_root || WARN_ON_ONCE(IS_ROOT(dir)))
			break;

		parent_dentry = dget_parent(dir);
		dput(dir);
		dir = parent_dentry;
	}
	dput(dir);
	return ret;
}
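
/**
 * current_check_refer_path - Check if a rename or link action is allowed
 *
 * @old_dentry: File or directory requested to be moved or linked.
 * @new_dir: Destination parent directory.
 * @new_dentry: Destination file or directory.
 * @removable: Sets to true if it is a rename operation.
 * @exchange: Sets to true if it is a rename operation with RENAME_EXCHANGE.
 *
 * Because of its unprivileged constraints, Landlock relies on file
 * hierarchies (and not only inodes) to tie access rights to files.  Moving or
 * linking a file ties it to a new parent, which may grant it access rights
 * inherited from this new hierarchy.  To avoid such privilege escalations, a
 * reparenting (i.e. a rename or link to a different parent directory)
 * requires LANDLOCK_ACCESS_FS_REFER on both the source and the destination
 * parent hierarchies, and the destination hierarchy must not grant the moved
 * file more access rights than the source hierarchy did.  Renaming or linking
 * to the same directory is always allowed with regard to
 * LANDLOCK_ACCESS_FS_REFER because it does not change the file hierarchy.
 *
 * Returns:
 * - 0 if the access request is granted;
 * - -EACCES if it is denied because of access right other than
 *   LANDLOCK_ACCESS_FS_REFER;
 * - -EXDEV if the renaming or linking would be a privileged escalation
 *   (according to the layered policies), or if LANDLOCK_ACCESS_FS_REFER is
 *   not allowed by the source or the destination.
 */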
static int current_check_refer_path(struct dentry *const old_dentry,
				    const struct path *const new_dir,
				    struct dentry *const new_dentry,
				    const bool removable, const bool exchange)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();
	bool allow_parent1, allow_parent2;
	access_mask_t access_request_parent1, access_request_parent2;
	struct path mnt_dir;
	layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS],
		layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS];

	if (!dom)
		return 0;
	if (WARN_ON_ONCE(dom->num_layers < 1))
		return -EACCES;
	if (unlikely(d_is_negative(old_dentry)))
		return -ENOENT;
	if (exchange) {
		if (unlikely(d_is_negative(new_dentry)))
			return -ENOENT;
		access_request_parent1 =
			get_mode_access(d_backing_inode(new_dentry)->i_mode);
	} else {
		access_request_parent1 = 0;
	}
	access_request_parent2 =
		get_mode_access(d_backing_inode(old_dentry)->i_mode);
	if (removable) {
		access_request_parent1 |= maybe_remove(old_dentry);
		access_request_parent2 |= maybe_remove(new_dentry);
	}

	/* The mount points are the same for old and new paths, cf. EXDEV. */
	if (old_dentry->d_parent == new_dir->dentry) {
		/*
		 * The LANDLOCK_ACCESS_FS_REFER access right is not required
		 * for same-directory referer (i.e. no reparenting).
		 */
		access_request_parent1 = init_layer_masks(
			dom, access_request_parent1 | access_request_parent2,
			&layer_masks_parent1);
		return check_access_path_dual(dom, new_dir,
					      access_request_parent1,
					      &layer_masks_parent1, NULL, 0,
					      NULL, NULL);
	}

	access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
	access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;

	/* Saves the common mount point. */
	mnt_dir.mnt = new_dir->mnt;
	mnt_dir.dentry = new_dir->mnt->mnt_root;

	/* new_dir->dentry is equal to new_dentry->d_parent */
	allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
						old_dentry->d_parent,
						&layer_masks_parent1);
	allow_parent2 = collect_domain_accesses(
		dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);

	if (allow_parent1 && allow_parent2)
		return 0;

	/*
	 * To be able to compare source and destination domain access rights,
	 * take into account the @old_dentry access rights aggregated with its
	 * parent access rights.  This will be useful to compare with the
	 * destination parent access rights.
	 */
	return check_access_path_dual(dom, &mnt_dir, access_request_parent1,
				      &layer_masks_parent1, old_dentry,
				      access_request_parent2,
				      &layer_masks_parent2,
				      exchange ? new_dentry : NULL);
}
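
/* Inode hooks */
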
static void hook_inode_free_security(struct inode *const inode)
{
	/*
	 * All inodes must already be untied from any object by
	 * release_inode() or hook_sb_delete().
	 */
	WARN_ON_ONCE(landlock_inode(inode)->object);
}
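
/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */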
static void hook_sb_delete(struct super_block *const sb)
{
	struct inode *inode, *prev_inode = NULL;

	if (!landlock_initialized)
		return;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct landlock_object *object;

		/* Only handles referenced inodes. */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * Protects against concurrent modification of inode (e.g.
		 * from get_inode_object()).
		 */
		spin_lock(&inode->i_lock);
		/*
		 * Checks I_FREEING and I_WILL_FREE to protect against a race
		 * condition when release_inode() just called iput(), which
		 * could lead to a NULL dereference of inode->security or a
		 * second call to iput() for the same Landlock object.  Also
		 * checks I_NEW because such inode cannot be tied to an object.
		 */
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		rcu_read_lock();
		object = rcu_dereference(landlock_inode(inode)->object);
		if (!object) {
			rcu_read_unlock();
			spin_unlock(&inode->i_lock);
			continue;
		}
		/* Keeps a reference to this inode until the next loop walk. */
		__iget(inode);
		spin_unlock(&inode->i_lock);

		/*
		 * If there is no concurrent release_inode() ongoing, then we
		 * are in charge of calling iput() on this inode, otherwise we
		 * will just wait for it to finish.
		 */
		spin_lock(&object->lock);
		if (object->underobj == inode) {
			object->underobj = NULL;
			spin_unlock(&object->lock);
			rcu_read_unlock();

			/*
			 * Because object->underobj was not NULL,
			 * release_inode() and get_inode_object() guarantee
			 * that it is safe to reset
			 * landlock_inode(inode)->object while it is not NULL.
			 * It is therefore not necessary to lock
			 * inode->i_lock.
			 */
			rcu_assign_pointer(landlock_inode(inode)->object, NULL);
			/*
			 * At this point, we own the ihold() reference that was
			 * originally set up by get_inode_object() and the
			 * __iget() reference that we just set in this loop
			 * walk.  Therefore the following call to iput() will
			 * not sleep nor delete the inode.
			 */
			iput(inode);
		} else {
			spin_unlock(&object->lock);
			rcu_read_unlock();
		}

		if (prev_inode) {
			/*
			 * At this point, we still own the __iget() reference
			 * that we just set in this loop walk.  Therefore we
			 * can drop the list lock and know that the inode won't
			 * disappear from under us until the next loop walk.
			 */
			spin_unlock(&sb->s_inode_list_lock);
			/*
			 * We can now actually put the inode reference from the
			 * previous loop walk, which is not needed anymore.
			 */
			iput(prev_inode);
			cond_resched();
			spin_lock(&sb->s_inode_list_lock);
		}
		prev_inode = inode;
	}
	spin_unlock(&sb->s_inode_list_lock);

	/* Puts the inode reference from the last loop walk, if any. */
	if (prev_inode)
		iput(prev_inode);
	/* Waits for pending iput() in release_inode(). */
	wait_var_event(&landlock_superblock(sb)->inode_refs,
		       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}
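
/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes.  Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a
 * system-wide access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process.  Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of the new mount
 * point.  However, it would also require to make all the child domains
 * dynamically inherit these new constraints.  Anyway, for backward
 * compatibility reasons, a dedicated user space option would be required
 * (e.g. as a ruleset flag).
 */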
static int hook_sb_mount(const char *const dev_name,
			 const struct path *const path, const char *const type,
			 const unsigned long flags, void *const data)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
			   const struct path *const to_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}
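
/*
 * Removing a mount point may reveal a previously hidden file hierarchy, which
 * would change the set of files reachable by a landlocked process, hence deny
 * it as well.
 */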
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}
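
/*
 * pivot_root(2), like mount(2), changes the current mount namespace.  It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process.  Moreover, it can be used to restrict the
 * view of the filesystem.
 */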
static int hook_sb_pivotroot(const struct path *const old_path,
			     const struct path *const new_path)
{
	if (!landlock_get_current_domain())
		return 0;
	return -EPERM;
}
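
/* Path hooks */
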
static int hook_path_link(struct dentry *const old_dentry,
			  const struct path *const new_dir,
			  struct dentry *const new_dentry)
{
	return current_check_refer_path(old_dentry, new_dir, new_dentry, false,
					false);
}

static int hook_path_rename(const struct path *const old_dir,
			    struct dentry *const old_dentry,
			    const struct path *const new_dir,
			    struct dentry *const new_dentry,
			    const unsigned int flags)
{
	/* old_dir refers to old_dentry->d_parent and new_dir->mnt */
	return current_check_refer_path(old_dentry, new_dir, new_dentry, true,
					!!(flags & RENAME_EXCHANGE));
}

static int hook_path_mkdir(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
			   struct dentry *const dentry, const umode_t mode,
			   const unsigned int dev)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	return check_access_path(dom, dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
			     struct dentry *const dentry,
			     const char *const old_name)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
			    struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
			   struct dentry *const dentry)
{
	return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}
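
/* File hooks */
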
static inline access_mask_t get_file_access(const struct file *const file)
{
	access_mask_t access = 0;

	if (file->f_mode & FMODE_READ) {
		/* A directory can only be opened in read mode. */
		if (S_ISDIR(file_inode(file)->i_mode))
			return LANDLOCK_ACCESS_FS_READ_DIR;
		access = LANDLOCK_ACCESS_FS_READ_FILE;
	}
	if (file->f_mode & FMODE_WRITE)
		access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
	/* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
	if (file->f_flags & __FMODE_EXEC)
		access |= LANDLOCK_ACCESS_FS_EXECUTE;
	return access;
}

static int hook_file_open(struct file *const file)
{
	const struct landlock_ruleset *const dom =
		landlock_get_current_domain();

	if (!dom)
		return 0;
	/*
	 * Because a file may be opened with O_PATH, get_file_access() may
	 * return 0.  This case will be handled with a future Landlock
	 * evolution.
	 */
	return check_access_path(dom, &file->f_path, get_file_access(file));
}

static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),

	LSM_HOOK_INIT(sb_delete, hook_sb_delete),
	LSM_HOOK_INIT(sb_mount, hook_sb_mount),
	LSM_HOOK_INIT(move_mount, hook_move_mount),
	LSM_HOOK_INIT(sb_umount, hook_sb_umount),
	LSM_HOOK_INIT(sb_remount, hook_sb_remount),
	LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

	LSM_HOOK_INIT(path_link, hook_path_link),
	LSM_HOOK_INIT(path_rename, hook_path_rename),
	LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
	LSM_HOOK_INIT(path_mknod, hook_path_mknod),
	LSM_HOOK_INIT(path_symlink, hook_path_symlink),
	LSM_HOOK_INIT(path_unlink, hook_path_unlink),
	LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),

	LSM_HOOK_INIT(file_open, hook_file_open),
};

__init void landlock_add_fs_hooks(void)
{
	security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
			   LANDLOCK_NAME);
}