0001
0002
0003
0004
0005
0006
0007
0008
0009 #include <linux/bits.h>
0010 #include <linux/bug.h>
0011 #include <linux/compiler_types.h>
0012 #include <linux/err.h>
0013 #include <linux/errno.h>
0014 #include <linux/kernel.h>
0015 #include <linux/lockdep.h>
0016 #include <linux/overflow.h>
0017 #include <linux/rbtree.h>
0018 #include <linux/refcount.h>
0019 #include <linux/slab.h>
0020 #include <linux/spinlock.h>
0021 #include <linux/workqueue.h>
0022
0023 #include "limits.h"
0024 #include "object.h"
0025 #include "ruleset.h"
0026
0027 static struct landlock_ruleset *create_ruleset(const u32 num_layers)
0028 {
0029 struct landlock_ruleset *new_ruleset;
0030
0031 new_ruleset =
0032 kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers),
0033 GFP_KERNEL_ACCOUNT);
0034 if (!new_ruleset)
0035 return ERR_PTR(-ENOMEM);
0036 refcount_set(&new_ruleset->usage, 1);
0037 mutex_init(&new_ruleset->lock);
0038 new_ruleset->root = RB_ROOT;
0039 new_ruleset->num_layers = num_layers;
0040
0041
0042
0043
0044
0045 return new_ruleset;
0046 }
0047
0048 struct landlock_ruleset *
0049 landlock_create_ruleset(const access_mask_t fs_access_mask)
0050 {
0051 struct landlock_ruleset *new_ruleset;
0052
0053
0054 if (!fs_access_mask)
0055 return ERR_PTR(-ENOMSG);
0056 new_ruleset = create_ruleset(1);
0057 if (!IS_ERR(new_ruleset))
0058 new_ruleset->fs_access_masks[0] = fs_access_mask;
0059 return new_ruleset;
0060 }
0061
/*
 * Compile-time check that the num_layers field of struct landlock_rule is
 * wide enough to store LANDLOCK_MAX_NUM_LAYERS (all-ones must reach the
 * limit).  Called from create_rule() so the check is always emitted.
 */
static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}
0070
/*
 * create_rule - Allocates a rule for @object, copying @num_layers entries
 * from @layers and, if @new_layer is not NULL, appending it as the rule's
 * last layer.
 *
 * Takes a new reference on @object, owned by the returned rule.
 *
 * Returns the new rule, or an ERR_PTR: -E2BIG if adding @new_layer would
 * exceed LANDLOCK_MAX_NUM_LAYERS (with a one-time warning), -ENOMEM on
 * allocation failure.
 */
static struct landlock_rule *
create_rule(struct landlock_object *const object,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Makes room for one more layer on top of @layers. */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	landlock_get_object(object);
	new_rule->object = object;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on top of the copied stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}
0104
0105 static void free_rule(struct landlock_rule *const rule)
0106 {
0107 might_sleep();
0108 if (!rule)
0109 return;
0110 landlock_put_object(rule->object);
0111 kfree(rule);
0112 }
0113
/*
 * Compile-time checks that struct landlock_ruleset fields are wide enough
 * for the limits defined in limits.h: num_rules and num_layers must reach
 * their respective maxima, and the fs_access_masks[] element type must
 * hold every filesystem access bit.
 */
static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};
	typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
}
0126
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
/*
 * insert_rule - Attaches @object, with @layers, to @ruleset
 *
 * Walks the red-black tree keyed by object address.  If no rule exists
 * for @object, a new single rule is created and linked at the leaf found
 * by the walk.  If a rule already exists, it is either merged in place
 * (layer level 0, i.e. the ruleset is not yet part of a domain) or
 * replaced by a copy extended with the new layer (domain stacking).
 *
 * Returns 0 on success, -E2BIG if the ruleset already holds
 * LANDLOCK_MAX_NUM_RULES rules, -ENOMEM on allocation failure, or
 * -ENOENT/-EINVAL on inconsistent input (each with a one-time warning).
 *
 * The caller must hold @ruleset->lock; may sleep because of allocations.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       struct landlock_object *const object,
		       const struct landlock_layer (*const layers)[],
		       size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!object || !layers))
		return -ENOENT;
	walker_node = &(ruleset->root.rb_node);
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->object != object) {
			/* Walks down the tree, ordered by object address. */
			parent_node = *walker_node;
			if (this->object < object)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* A rule already ties @object: only one layer at a time. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* A level of 0 means the ruleset is not a domain layer yet. */
		if ((*layers)[0].level == 0) {
			/*
			 * Merging accesses into a standalone ruleset: the
			 * existing rule must also be a single level-0 layer
			 * (anything else would mix domain state in), then
			 * the new access bits are OR-ed in place.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		/* A domain rule should not carry a level-0 (unset) layer. */
		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Replaces the existing rule with a copy extended with the
		 * new layer; rb_replace_node() keeps the tree consistent
		 * without rebalancing since the key (object) is unchanged.
		 */
		new_rule = create_rule(object, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
		free_rule(this);
		return 0;
	}

	/* No match found: inserts a brand new rule at the found leaf. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(object, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, &ruleset->root);
	ruleset->num_rules++;
	return 0;
}
0218
/*
 * Compile-time checks that struct landlock_layer fields are wide enough:
 * level must reach LANDLOCK_MAX_NUM_LAYERS and access must hold every
 * filesystem access bit.
 */
static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}
0229
0230
0231 int landlock_insert_rule(struct landlock_ruleset *const ruleset,
0232 struct landlock_object *const object,
0233 const access_mask_t access)
0234 {
0235 struct landlock_layer layers[] = { {
0236 .access = access,
0237
0238 .level = 0,
0239 } };
0240
0241 build_check_layer();
0242 return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
0243 }
0244
0245 static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
0246 {
0247 if (hierarchy)
0248 refcount_inc(&hierarchy->usage);
0249 }
0250
0251 static void put_hierarchy(struct landlock_hierarchy *hierarchy)
0252 {
0253 while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
0254 const struct landlock_hierarchy *const freeme = hierarchy;
0255
0256 hierarchy = hierarchy->parent;
0257 kfree(freeme);
0258 }
0259 }
0260
/*
 * merge_ruleset - Stacks the single-layer ruleset @src as the last layer
 * of the domain @dst.
 *
 * Copies @src's access mask into @dst's last fs_access_masks[] slot, then
 * re-inserts every rule of @src into @dst with that last layer's level.
 *
 * Returns 0 on success, or -EINVAL on inconsistent input (with one-time
 * warnings), or an insert_rule() error.  May sleep; takes both locks.
 */
static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	/* Should already be checked by the caller. */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Should already be checked by the caller. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer: @src must be a standalone ruleset. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];

	/* Merges the @src rule tree into @dst. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };

		/* Each @src rule must be a single level-0 layer. */
		if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
			err = -EINVAL;
			goto out_unlock;
		}
		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
			err = -EINVAL;
			goto out_unlock;
		}
		layers[0].access = walker_rule->layers[0].access;
		err = insert_rule(dst, walker_rule->object, &layers,
				  ARRAY_SIZE(layers));
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}
0313
/*
 * inherit_ruleset - Copies @parent's rules, layer masks and hierarchy link
 * into the freshly created @child domain.
 *
 * @child must have been created with one more layer than @parent (checked
 * below); the extra slot is filled later by merge_ruleset().
 *
 * Returns 0 on success (also when @parent is NULL, i.e. nothing to
 * inherit), -EINVAL on inconsistent state (with one-time warnings), or an
 * insert_rule() error.  May sleep; takes both locks.
 */
static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent rule tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     &parent->root, node) {
		err = insert_rule(child, walker_rule->object,
				  &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			goto out_unlock;
	}

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer masks; the last slot stays for the new layer. */
	memcpy(child->fs_access_masks, parent->fs_access_masks,
	       flex_array_size(parent, fs_access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Links @child below @parent in the domain hierarchy. */
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}
0358
/*
 * Frees every rule of @ruleset, drops its hierarchy reference, then frees
 * the ruleset itself.  The postorder walk allows freeing nodes while
 * iterating without rebalancing the tree.  May sleep (free_rule()).
 */
static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node)
		free_rule(freeme);
	put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}
0369
0370 void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
0371 {
0372 might_sleep();
0373 if (ruleset && refcount_dec_and_test(&ruleset->usage))
0374 free_ruleset(ruleset);
0375 }
0376
0377 static void free_ruleset_work(struct work_struct *const work)
0378 {
0379 struct landlock_ruleset *ruleset;
0380
0381 ruleset = container_of(work, struct landlock_ruleset, work_free);
0382 free_ruleset(ruleset);
0383 }
0384
0385 void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
0386 {
0387 if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
0388 INIT_WORK(&ruleset->work_free, free_ruleset_work);
0389 schedule_work(&ruleset->work_free);
0390 }
0391 }
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain, or NULL for the first restriction.
 * @ruleset: New ruleset to be merged; must not be NULL nor equal to
 *           @parent (warned and refused with -EINVAL).
 *
 * Creates a new domain inheriting all of @parent's layers and rules, then
 * stacks @ruleset on top as one extra layer.  The caller owns a reference
 * on the returned domain.
 *
 * Returns the new domain, or an ERR_PTR: -E2BIG if @parent already has
 * LANDLOCK_MAX_NUM_LAYERS layers, -ENOMEM on allocation failure, or an
 * inherit/merge error.  May sleep.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates the new domain with room for one extra layer. */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;
	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy) {
		err = -ENOMEM;
		goto out_put_dom;
	}
	refcount_set(&new_dom->hierarchy->usage, 1);

	/* Pulls in @parent's rules, layer masks and hierarchy link. */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		goto out_put_dom;

	/* Stacks @ruleset as the last layer of the new domain. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		goto out_put_dom;

	return new_dom;

out_put_dom:
	landlock_put_ruleset(new_dom);
	return ERR_PTR(err);
}
0450
0451
0452
0453
0454 const struct landlock_rule *
0455 landlock_find_rule(const struct landlock_ruleset *const ruleset,
0456 const struct landlock_object *const object)
0457 {
0458 const struct rb_node *node;
0459
0460 if (!object)
0461 return NULL;
0462 node = ruleset->root.rb_node;
0463 while (node) {
0464 struct landlock_rule *this =
0465 rb_entry(node, struct landlock_rule, node);
0466
0467 if (this->object == object)
0468 return this;
0469 if (this->object < object)
0470 node = node->rb_right;
0471 else
0472 node = node->rb_left;
0473 }
0474 return NULL;
0475 }