#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/stringhash.h>

#include "fanotify.h"

static bool fanotify_path_equal(struct path *p1, struct path *p2)
{
	return p1->mnt == p2->mnt && p1->dentry == p2->dentry;
}

static unsigned int fanotify_hash_path(const struct path *path)
{
	return hash_ptr(path->dentry, FANOTIFY_EVENT_HASH_BITS) ^
		hash_ptr(path->mnt, FANOTIFY_EVENT_HASH_BITS);
}

static inline bool fanotify_fsid_equal(__kernel_fsid_t *fsid1,
				       __kernel_fsid_t *fsid2)
{
	return fsid1->val[0] == fsid2->val[0] && fsid1->val[1] == fsid2->val[1];
}

static unsigned int fanotify_hash_fsid(__kernel_fsid_t *fsid)
{
	return hash_32(fsid->val[0], FANOTIFY_EVENT_HASH_BITS) ^
		hash_32(fsid->val[1], FANOTIFY_EVENT_HASH_BITS);
}

static bool fanotify_fh_equal(struct fanotify_fh *fh1,
			      struct fanotify_fh *fh2)
{
	if (fh1->type != fh2->type || fh1->len != fh2->len)
		return false;

	return !fh1->len ||
		!memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len);
}

static unsigned int fanotify_hash_fh(struct fanotify_fh *fh)
{
	long salt = (long)fh->type | (long)fh->len << 8;

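	/*
	 * full_name_hash() works long by long, so it handles fh buf optimally.
	 */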
	return full_name_hash((void *)salt, fanotify_fh_buf(fh), fh->len);
}

static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1,
				     struct fanotify_fid_event *ffe2)
{
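	/* Do not merge fid events without object fh */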
	if (!ffe1->object_fh.len)
		return false;

	return fanotify_fsid_equal(&ffe1->fsid, &ffe2->fsid) &&
		fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh);
}

static bool fanotify_info_equal(struct fanotify_info *info1,
				struct fanotify_info *info2)
{
	if (info1->dir_fh_totlen != info2->dir_fh_totlen ||
	    info1->dir2_fh_totlen != info2->dir2_fh_totlen ||
	    info1->file_fh_totlen != info2->file_fh_totlen ||
	    info1->name_len != info2->name_len ||
	    info1->name2_len != info2->name2_len)
		return false;

	if (info1->dir_fh_totlen &&
	    !fanotify_fh_equal(fanotify_info_dir_fh(info1),
			       fanotify_info_dir_fh(info2)))
		return false;

	if (info1->dir2_fh_totlen &&
	    !fanotify_fh_equal(fanotify_info_dir2_fh(info1),
			       fanotify_info_dir2_fh(info2)))
		return false;

	if (info1->file_fh_totlen &&
	    !fanotify_fh_equal(fanotify_info_file_fh(info1),
			       fanotify_info_file_fh(info2)))
		return false;

	if (info1->name_len &&
	    memcmp(fanotify_info_name(info1), fanotify_info_name(info2),
		   info1->name_len))
		return false;

	return !info1->name2_len ||
		!memcmp(fanotify_info_name2(info1), fanotify_info_name2(info2),
			info1->name2_len);
}

static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
				      struct fanotify_name_event *fne2)
{
	struct fanotify_info *info1 = &fne1->info;
	struct fanotify_info *info2 = &fne2->info;

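	/* Do not merge name events without dir fh */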
	if (!info1->dir_fh_totlen)
		return false;

	if (!fanotify_fsid_equal(&fne1->fsid, &fne2->fsid))
		return false;

	return fanotify_info_equal(info1, info2);
}

static bool fanotify_error_event_equal(struct fanotify_error_event *fee1,
				       struct fanotify_error_event *fee2)
{
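	/* Error events against the same file system are always merged. */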
	if (!fanotify_fsid_equal(&fee1->fsid, &fee2->fsid))
		return false;

	return true;
}

static bool fanotify_should_merge(struct fanotify_event *old,
				  struct fanotify_event *new)
{
	pr_debug("%s: old=%p new=%p\n", __func__, old, new);

	if (old->hash != new->hash ||
	    old->type != new->type || old->pid != new->pid)
		return false;

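	/*
	 * We want to merge many dirent events in the same dir (i.e.
	 * creates/unlinks/renames), but we do not want to merge dirent
	 * events referring to subdirs with dirent events referring to
	 * non subdirs, otherwise, user won't be able to tell from a
	 * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+
	 * unlink pair or rmdir+create pair of events.
	 */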
	if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
		return false;

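	/*
	 * FAN_RENAME event is reported with special info record types,
	 * so we cannot merge it with other events.
	 */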
	if ((old->mask & FAN_RENAME) != (new->mask & FAN_RENAME))
		return false;

	switch (old->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		return fanotify_path_equal(fanotify_event_path(old),
					   fanotify_event_path(new));
	case FANOTIFY_EVENT_TYPE_FID:
		return fanotify_fid_event_equal(FANOTIFY_FE(old),
						FANOTIFY_FE(new));
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		return fanotify_name_event_equal(FANOTIFY_NE(old),
						 FANOTIFY_NE(new));
	case FANOTIFY_EVENT_TYPE_FS_ERROR:
		return fanotify_error_event_equal(FANOTIFY_EE(old),
						  FANOTIFY_EE(new));
	default:
		WARN_ON_ONCE(1);
	}

	return false;
}

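/* Limit event merges to limit CPU overhead per new event */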
#define FANOTIFY_MAX_MERGE_EVENTS 128

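/* Called under group->notification_lock, which protects the merge hash */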
static int fanotify_merge(struct fsnotify_group *group,
			  struct fsnotify_event *event)
{
	struct fanotify_event *old, *new = FANOTIFY_E(event);
	unsigned int bucket = fanotify_event_hash_bucket(group, new);
	struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket];
	int i = 0;

	pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
		 group, event, bucket);

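	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */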
	if (fanotify_is_perm_event(new->mask))
		return 0;

	hlist_for_each_entry(old, hlist, merge_list) {
		if (++i > FANOTIFY_MAX_MERGE_EVENTS)
			break;
		if (fanotify_should_merge(old, new)) {
			old->mask |= new->mask;

			if (fanotify_is_error_event(old->mask))
				FANOTIFY_EE(old)->err_count++;

			return 1;
		}
	}

	return 0;
}

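/*
 * Wait for the response to a permission event. The function also takes care of
 * freeing the permission event (or offloads that in case the wait is canceled
 * by a signal). The function returns 0 in case access got allowed by userspace,
 * -EPERM in case userspace disallowed the access, and -ERESTARTSYS in case
 * the wait got interrupted by a signal.
 */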
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = wait_event_killable(group->fanotify_data.access_waitq,
				  event->state == FAN_EVENT_ANSWERED);
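	/* Signal pending? */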
	if (ret < 0) {
		spin_lock(&group->notification_lock);
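		/* Event reported to userspace and no answer yet? */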
		if (event->state == FAN_EVENT_REPORTED) {
			/* Event will get freed once userspace answers to it */
			event->state = FAN_EVENT_CANCELED;
			spin_unlock(&group->notification_lock);
			return ret;
		}
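		/* Event not yet reported? Just remove it. */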
		if (event->state == FAN_EVENT_INIT) {
			fsnotify_remove_queued_event(group, &event->fae.fse);
			/* Permission events are not supposed to be hashed */
			WARN_ON_ONCE(!hlist_unhashed(&event->fae.merge_list));
		}
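		/*
		 * Event may be also answered in case signal delivery raced
		 * with wakeup. In that case we have nothing to do besides
		 * freeing the event and reporting error.
		 */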
		spin_unlock(&group->notification_lock);
		goto out;
	}

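	/* userspace responded, convert to something usable */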
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

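	/* Check if the response should be audited */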
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);
out:
	fsnotify_destroy_event(group, &event->fae.fse);

	return ret;
}

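/*
 * This function returns a mask for an event that only contains the flags
 * that have been specifically requested by the user. Flags that may have
 * been included within the event mask, but have not been explicitly
 * requested by the user, will not be present in the returned mask.
 */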
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
				     struct fsnotify_iter_info *iter_info,
				     u32 *match_mask, u32 event_mask,
				     const void *data, int data_type,
				     struct inode *dir)
{
	__u32 marks_mask = 0, marks_ignore_mask = 0;
	__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS |
				     FANOTIFY_EVENT_FLAGS;
	const struct path *path = fsnotify_data_path(data, data_type);
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	struct fsnotify_mark *mark;
	bool ondir = event_mask & FAN_ONDIR;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	if (!fid_mode) {
		/* Do we have path to open a file descriptor? */
		if (!path)
			return 0;
		/* Path type events are only relevant for files and dirs */
		if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
			return 0;
	} else if (!(fid_mode & FAN_REPORT_FID)) {
		/* Do we have a directory inode to report? */
		if (!dir && !ondir)
			return 0;
	}

	fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
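		/*
		 * Apply ignore mask depending on event flags in ignore mask.
		 */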
		marks_ignore_mask |=
			fsnotify_effective_ignore_mask(mark, ondir, type);

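		/*
		 * Send the event depending on event flags in mark mask.
		 */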
		if (!fsnotify_mask_applicable(mark->mask, ondir, type))
			continue;

		marks_mask |= mark->mask;

		/* Record the mark types of this group that matched the event */
		*match_mask |= 1U << type;
	}

	test_mask = event_mask & marks_mask & ~marks_ignore_mask;

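	/*
	 * For dirent modification events (create/delete/move) that do not carry
	 * the child entry name information, we report FAN_ONDIR for mkdir/rmdir
	 * so user can differentiate them from creat/unlink.
	 *
	 * For backward compatibility and consistency, do not report FAN_ONDIR
	 * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR
	 * to user in fid mode for all event types.
	 *
	 * We never report FAN_EVENT_ON_CHILD to user, but we do pass it in to
	 * the event allocation as indication that the event happened on a child.
	 */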
	if (fid_mode) {
		/* Do not report event flags without any event */
		if (!(test_mask & ~FANOTIFY_EVENT_FLAGS))
			return 0;
	} else {
		user_mask &= ~FANOTIFY_EVENT_FLAGS;
	}

	return test_mask & user_mask;
}

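/*
 * Check size needed to encode fanotify_fh.
 *
 * Return size of encoded fh without fanotify_fh header size.
 */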
static int fanotify_encode_fh_len(struct inode *inode)
{
	int dwords = 0;
	int fh_len;

	if (!inode)
		return 0;

	exportfs_encode_inode_fh(inode, NULL, &dwords, NULL);
	fh_len = dwords << 2;

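	/*
	 * struct fanotify_error_event might be preallocated and is
	 * limited to MAX_HANDLE_SZ.  This should never happen, but
	 * safeguard by forcing an invalid file handle.
	 */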
	if (WARN_ON_ONCE(fh_len > MAX_HANDLE_SZ))
		return 0;

	return fh_len;
}

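/*
 * Encode fanotify_fh.
 *
 * Return total size of encoded fh including fanotify_fh header.
 * Return 0 on failure to encode.
 */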
static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
			      unsigned int fh_len, unsigned int *hash,
			      gfp_t gfp)
{
	int dwords, type = 0;
	char *ext_buf = NULL;
	void *buf = fh->buf;
	int err;

	fh->type = FILEID_ROOT;
	fh->len = 0;
	fh->flags = 0;

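	/*
	 * Invalid FHs are used by FAN_FS_ERROR for errors not
	 * linked to any inode. The f_handle won't be reported
	 * back to userspace.
	 */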
	if (!inode)
		goto out;

	err = -ENOENT;
	if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4) || fh_len > MAX_HANDLE_SZ)
		goto out_err;

	/* No external buffer in a variable size allocated fh */
	if (gfp && fh_len > FANOTIFY_INLINE_FH_LEN) {
		/* Treat failure to allocate fh as failure to encode fh */
		err = -ENOMEM;
		ext_buf = kmalloc(fh_len, gfp);
		if (!ext_buf)
			goto out_err;

		*fanotify_fh_ext_buf_ptr(fh) = ext_buf;
		buf = ext_buf;
		fh->flags |= FANOTIFY_FH_FLAG_EXT_BUF;
	}

	dwords = fh_len >> 2;
	type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL);
	err = -EINVAL;
	if (!type || type == FILEID_INVALID || fh_len != dwords << 2)
		goto out_err;

	fh->type = type;
	fh->len = fh_len;

out:
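	/*
	 * Mix fh into event merge key.  Hash might be NULL in case of
	 * unhashed FID events (i.e. FAN_FS_ERROR).
	 */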
	if (hash)
		*hash ^= fanotify_hash_fh(fh);

	return FANOTIFY_FH_HDR_LEN + fh_len;

out_err:
	pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n",
			    type, fh_len, err);
	kfree(ext_buf);
	*fanotify_fh_ext_buf_ptr(fh) = NULL;
	/* Report the event without a file identifier on encode error */
	fh->type = FILEID_INVALID;
	fh->len = 0;
	return 0;
}

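/*
 * FAN_REPORT_FID is ambiguous in that it reports the fid of the child for
 * some events and the fid of the parent for create/delete/move events.
 *
 * With the FAN_REPORT_TARGET_FID flag, the fid of the child is reported
 * also in create/delete/move events in addition to the fid of the parent
 * and the name of the child.
 */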
static inline bool fanotify_report_child_fid(unsigned int fid_mode, u32 mask)
{
	if (mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return (fid_mode & FAN_REPORT_TARGET_FID);

	return (fid_mode & FAN_REPORT_FID) && !(mask & FAN_ONDIR);
}

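/*
 * The inode to use as identifier when reporting fid depends on the event
 * and the group flags.
 *
 * With the group flag FAN_REPORT_TARGET_FID, always report the child fid.
 *
 * Without the group flag FAN_REPORT_TARGET_FID, report the modified directory
 * fid on dirent events and the child fid otherwise.
 *
 * For example:
 * FS_ATTRIB reports the child fid even if reported on a watched parent.
 * FS_CREATE reports the modified dir fid without FAN_REPORT_TARGET_FID
 * and reports the created child fid with FAN_REPORT_TARGET_FID.
 */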
static struct inode *fanotify_fid_inode(u32 event_mask, const void *data,
					int data_type, struct inode *dir,
					unsigned int fid_mode)
{
	if ((event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) &&
	    !(fid_mode & FAN_REPORT_TARGET_FID))
		return dir;

	return fsnotify_data_inode(data, data_type);
}

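/*
 * The inode to use as identifier when reporting dir fid depends on the event.
 * Report the modified directory inode on dirent modification events.
 * Report the "victim" inode if "victim" is a directory.
 * Report the parent inode if "victim" is not a directory and event is
 * reported to parent.
 * Otherwise, do not report dir fid.
 */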
static struct inode *fanotify_dfid_inode(u32 event_mask, const void *data,
					 int data_type, struct inode *dir)
{
	struct inode *inode = fsnotify_data_inode(data, data_type);

	if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
		return dir;

	if (inode && S_ISDIR(inode->i_mode))
		return inode;

	return dir;
}

static struct fanotify_event *fanotify_alloc_path_event(const struct path *path,
							unsigned int *hash,
							gfp_t gfp)
{
	struct fanotify_path_event *pevent;

	pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp);
	if (!pevent)
		return NULL;

	pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH;
	pevent->path = *path;
	*hash ^= fanotify_hash_path(path);
	path_get(path);

	return &pevent->fae;
}

static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path,
							 gfp_t gfp)
{
	struct fanotify_perm_event *pevent;

	pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
	if (!pevent)
		return NULL;

	pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH_PERM;
	pevent->response = 0;
	pevent->state = FAN_EVENT_INIT;
	pevent->path = *path;
	path_get(path);

	return &pevent->fae;
}

static struct fanotify_event *fanotify_alloc_fid_event(struct inode *id,
							__kernel_fsid_t *fsid,
							unsigned int *hash,
							gfp_t gfp)
{
	struct fanotify_fid_event *ffe;

	ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp);
	if (!ffe)
		return NULL;

	ffe->fae.type = FANOTIFY_EVENT_TYPE_FID;
	ffe->fsid = *fsid;
	*hash ^= fanotify_hash_fsid(fsid);
	fanotify_encode_fh(&ffe->object_fh, id, fanotify_encode_fh_len(id),
			   hash, gfp);

	return &ffe->fae;
}

static struct fanotify_event *fanotify_alloc_name_event(struct inode *dir,
							__kernel_fsid_t *fsid,
							const struct qstr *name,
							struct inode *child,
							struct dentry *moved,
							unsigned int *hash,
							gfp_t gfp)
{
	struct fanotify_name_event *fne;
	struct fanotify_info *info;
	struct fanotify_fh *dfh, *ffh;
	struct inode *dir2 = moved ? d_inode(moved->d_parent) : NULL;
	const struct qstr *name2 = moved ? &moved->d_name : NULL;
	unsigned int dir_fh_len = fanotify_encode_fh_len(dir);
	unsigned int dir2_fh_len = fanotify_encode_fh_len(dir2);
	unsigned int child_fh_len = fanotify_encode_fh_len(child);
	unsigned long name_len = name ? name->len : 0;
	unsigned long name2_len = name2 ? name2->len : 0;
	unsigned int len, size;

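	/* Reserve terminating null bytes even for empty names */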
	size = sizeof(*fne) + name_len + name2_len + 2;
	if (dir_fh_len)
		size += FANOTIFY_FH_HDR_LEN + dir_fh_len;
	if (dir2_fh_len)
		size += FANOTIFY_FH_HDR_LEN + dir2_fh_len;
	if (child_fh_len)
		size += FANOTIFY_FH_HDR_LEN + child_fh_len;
	fne = kmalloc(size, gfp);
	if (!fne)
		return NULL;

	fne->fae.type = FANOTIFY_EVENT_TYPE_FID_NAME;
	fne->fsid = *fsid;
	*hash ^= fanotify_hash_fsid(fsid);
	info = &fne->info;
	fanotify_info_init(info);
	if (dir_fh_len) {
		dfh = fanotify_info_dir_fh(info);
		len = fanotify_encode_fh(dfh, dir, dir_fh_len, hash, 0);
		fanotify_info_set_dir_fh(info, len);
	}
	if (dir2_fh_len) {
		dfh = fanotify_info_dir2_fh(info);
		len = fanotify_encode_fh(dfh, dir2, dir2_fh_len, hash, 0);
		fanotify_info_set_dir2_fh(info, len);
	}
	if (child_fh_len) {
		ffh = fanotify_info_file_fh(info);
		len = fanotify_encode_fh(ffh, child, child_fh_len, hash, 0);
		fanotify_info_set_file_fh(info, len);
	}
	if (name_len) {
		fanotify_info_copy_name(info, name);
		*hash ^= full_name_hash((void *)name_len, name->name, name_len);
	}
	if (name2_len) {
		fanotify_info_copy_name2(info, name2);
		*hash ^= full_name_hash((void *)name2_len, name2->name,
					name2_len);
	}

	pr_debug("%s: size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n",
		 __func__, size, dir_fh_len, child_fh_len,
		 info->name_len, info->name_len, fanotify_info_name(info));

	if (dir2_fh_len) {
		pr_debug("%s: dir2_fh_len=%u name2_len=%u name2='%.*s'\n",
			 __func__, dir2_fh_len, info->name2_len,
			 info->name2_len, fanotify_info_name2(info));
	}

	return &fne->fae;
}

static struct fanotify_event *fanotify_alloc_error_event(
						struct fsnotify_group *group,
						__kernel_fsid_t *fsid,
						const void *data, int data_type,
						unsigned int *hash)
{
	struct fs_error_report *report =
			fsnotify_data_error_report(data, data_type);
	struct inode *inode;
	struct fanotify_error_event *fee;
	int fh_len;

	if (WARN_ON_ONCE(!report))
		return NULL;

	fee = mempool_alloc(&group->fanotify_data.error_events_pool, GFP_NOFS);
	if (!fee)
		return NULL;

	fee->fae.type = FANOTIFY_EVENT_TYPE_FS_ERROR;
	fee->error = report->error;
	fee->err_count = 1;
	fee->fsid = *fsid;

	inode = report->inode;
	fh_len = fanotify_encode_fh_len(inode);

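	/* Bad fh_len. Fallback to using an invalid fh. Should never happen. */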
	if (!fh_len && inode)
		inode = NULL;

	fanotify_encode_fh(&fee->object_fh, inode, fh_len, NULL, 0);

	*hash ^= fanotify_hash_fsid(fsid);

	return &fee->fae;
}

static struct fanotify_event *fanotify_alloc_event(
				struct fsnotify_group *group,
				u32 mask, const void *data, int data_type,
				struct inode *dir, const struct qstr *file_name,
				__kernel_fsid_t *fsid, u32 match_mask)
{
	struct fanotify_event *event = NULL;
	gfp_t gfp = GFP_KERNEL_ACCOUNT;
	unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
	struct inode *id = fanotify_fid_inode(mask, data, data_type, dir,
					      fid_mode);
	struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir);
	const struct path *path = fsnotify_data_path(data, data_type);
	struct mem_cgroup *old_memcg;
	struct dentry *moved = NULL;
	struct inode *child = NULL;
	bool name_event = false;
	unsigned int hash = 0;
	bool ondir = mask & FAN_ONDIR;
	struct pid *pid;

	if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) {
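		/*
		 * For certain events and group flags, report the child fid
		 * in addition to reporting the parent fid and maybe child name.
		 */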
		if (fanotify_report_child_fid(fid_mode, mask) && id != dirid)
			child = id;

		id = dirid;

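		/*
		 * We record file name only in a group with FAN_REPORT_NAME
		 * and when we have a directory inode to report.
		 *
		 * For directory entry modification event, we record the fid of
		 * the directory and the name of the modified entry.
		 *
		 * For event on non-directory that is reported to parent, we
		 * record the fid of the parent and the name of the child.
		 *
		 * Even if not reporting name, we need a variable length
		 * fanotify_name_event if reporting both parent and child fids.
		 */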
		if (!(fid_mode & FAN_REPORT_NAME)) {
			name_event = !!child;
			file_name = NULL;
		} else if ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) || !ondir) {
			name_event = true;
		}

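		/*
		 * In the special case of FAN_RENAME event, use the match_mask
		 * to determine if we need to report only the old parent+name,
		 * only the new parent+name or both.
		 * 'dirid' and 'file_name' are the old parent+name and
		 * 'moved' has the new parent+name.
		 */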
		if (mask & FAN_RENAME) {
			bool report_old, report_new;

			if (WARN_ON_ONCE(!match_mask))
				return NULL;

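			/* Report both old and new parent+name if sb watching */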
			report_old = report_new =
				match_mask & (1U << FSNOTIFY_ITER_TYPE_SB);
			report_old |=
				match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE);
			report_new |=
				match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE2);

			if (!report_old) {
				/* Do not report old parent+name */
				dirid = NULL;
				file_name = NULL;
			}
			if (report_new) {
				/* Report new parent+name */
				moved = fsnotify_data_dentry(data, data_type);
			}
		}
	}

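	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short. For the limited size queues, avoid OOM killer in the
	 * target monitoring memcg as it may have security repercussion.
	 */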
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;
	else
		gfp |= __GFP_RETRY_MAYFAIL;

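	/* Whoever is interested in the event pays for the allocation. */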
	old_memcg = set_active_memcg(group->memcg);

	if (fanotify_is_perm_event(mask)) {
		event = fanotify_alloc_perm_event(path, gfp);
	} else if (fanotify_is_error_event(mask)) {
		event = fanotify_alloc_error_event(group, fsid, data,
						   data_type, &hash);
	} else if (name_event && (file_name || moved || child)) {
		event = fanotify_alloc_name_event(dirid, fsid, file_name, child,
						  moved, &hash, gfp);
	} else if (fid_mode) {
		event = fanotify_alloc_fid_event(id, fsid, &hash, gfp);
	} else {
		event = fanotify_alloc_path_event(path, &hash, gfp);
	}

	if (!event)
		goto out;

	if (FAN_GROUP_FLAG(group, FAN_REPORT_TID))
		pid = get_pid(task_pid(current));
	else
		pid = get_pid(task_tgid(current));

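	/* Mix event info, FAN_ONDIR flag and pid into event merge key */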
	hash ^= hash_long((unsigned long)pid | ondir, FANOTIFY_EVENT_HASH_BITS);
	fanotify_init_event(event, hash, mask);
	event->pid = pid;

out:
	set_active_memcg(old_memcg);
	return event;
}

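/*
 * Get cached fsid of the filesystem containing the object from any connector.
 * All connectors are supposed to have the same fsid, but we do not verify that
 * here.
 */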
static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_mark *mark;
	int type;
	__kernel_fsid_t fsid = {};

	fsnotify_foreach_iter_mark_type(iter_info, mark, type) {
		struct fsnotify_mark_connector *conn;

		conn = READ_ONCE(mark->connector);
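		/* Mark is just getting destroyed or created? */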
		if (!conn)
			continue;
		if (!(conn->flags & FSNOTIFY_CONN_FLAG_HAS_FSID))
			continue;
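		/* Pairs with smp_wmb() on the side that sets HAS_FSID */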
		smp_rmb();
		fsid = conn->fsid;
		if (WARN_ON_ONCE(!fsid.val[0] && !fsid.val[1]))
			continue;
		return fsid;
	}

	return fsid;
}

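/*
 * Add an event to the hash table for faster merge.
 */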
static void fanotify_insert_event(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event = FANOTIFY_E(fsn_event);
	unsigned int bucket = fanotify_event_hash_bucket(group, event);
	struct hlist_head *hlist = &group->fanotify_data.merge_hash[bucket];

	assert_spin_locked(&group->notification_lock);

	if (!fanotify_is_hashed_event(event->mask))
		return;

	pr_debug("%s: group=%p event=%p bucket=%u\n", __func__,
		 group, event, bucket);

	hlist_add_head(&event->merge_list, hlist);
}

static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
				 const void *data, int data_type,
				 struct inode *dir,
				 const struct qstr *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event *event;
	struct fsnotify_event *fsn_event;
	__kernel_fsid_t fsid = {};
	u32 match_mask = 0;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(FAN_CREATE != FS_CREATE);
	BUILD_BUG_ON(FAN_DELETE != FS_DELETE);
	BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
	BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
	BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
	BUILD_BUG_ON(FAN_FS_ERROR != FS_ERROR);
	BUILD_BUG_ON(FAN_RENAME != FS_RENAME);

	BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 21);

	mask = fanotify_group_event_mask(group, iter_info, &match_mask,
					 mask, data, data_type, dir);
	if (!mask)
		return 0;

	pr_debug("%s: group=%p mask=%x report_mask=%x\n", __func__,
		 group, mask, match_mask);

	if (fanotify_is_perm_event(mask)) {
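		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion.  Just let the operation pass in that case.
		 */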
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) {
		fsid = fanotify_get_fsid(iter_info);
		/* Racing with mark destruction or creation? */
		if (!fsid.val[0] && !fsid.val[1])
			return 0;
	}

	event = fanotify_alloc_event(group, mask, data, data_type, dir,
				     file_name, &fsid, match_mask);
	ret = -ENOMEM;
	if (unlikely(!event)) {
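		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event matters.
		 */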
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_insert_event(group, fsn_event, fanotify_merge,
				    fanotify_insert_event);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FANOTIFY_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PERM(event),
					    iter_info);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}

static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	kfree(group->fanotify_data.merge_hash);
	if (group->fanotify_data.ucounts)
		dec_ucount(group->fanotify_data.ucounts,
			   UCOUNT_FANOTIFY_GROUPS);

	if (mempool_initialized(&group->fanotify_data.error_events_pool))
		mempool_exit(&group->fanotify_data.error_events_pool);
}

static void fanotify_free_path_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_path_event_cachep, FANOTIFY_PE(event));
}

static void fanotify_free_perm_event(struct fanotify_event *event)
{
	path_put(fanotify_event_path(event));
	kmem_cache_free(fanotify_perm_event_cachep, FANOTIFY_PERM(event));
}

static void fanotify_free_fid_event(struct fanotify_event *event)
{
	struct fanotify_fid_event *ffe = FANOTIFY_FE(event);

	if (fanotify_fh_has_ext_buf(&ffe->object_fh))
		kfree(fanotify_fh_ext_buf(&ffe->object_fh));
	kmem_cache_free(fanotify_fid_event_cachep, ffe);
}

static void fanotify_free_name_event(struct fanotify_event *event)
{
	kfree(FANOTIFY_NE(event));
}

static void fanotify_free_error_event(struct fsnotify_group *group,
				      struct fanotify_event *event)
{
	struct fanotify_error_event *fee = FANOTIFY_EE(event);

	mempool_free(fee, &group->fanotify_data.error_events_pool);
}

static void fanotify_free_event(struct fsnotify_group *group,
				struct fsnotify_event *fsn_event)
{
	struct fanotify_event *event;

	event = FANOTIFY_E(fsn_event);
	put_pid(event->pid);
	switch (event->type) {
	case FANOTIFY_EVENT_TYPE_PATH:
		fanotify_free_path_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_PATH_PERM:
		fanotify_free_perm_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID:
		fanotify_free_fid_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_FID_NAME:
		fanotify_free_name_event(event);
		break;
	case FANOTIFY_EVENT_TYPE_OVERFLOW:
		kfree(event);
		break;
	case FANOTIFY_EVENT_TYPE_FS_ERROR:
		fanotify_free_error_event(group, event);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

static void fanotify_freeing_mark(struct fsnotify_mark *mark,
				  struct fsnotify_group *group)
{
	if (!FAN_GROUP_FLAG(group, FAN_UNLIMITED_MARKS))
		dec_ucount(group->fanotify_data.ucounts, UCOUNT_FANOTIFY_MARKS);
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.freeing_mark = fanotify_freeing_mark,
	.free_mark = fanotify_free_mark,
};