0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073 #include <linux/compat.h>
0074 #include <linux/slab.h>
0075 #include <linux/spinlock.h>
0076 #include <linux/init.h>
0077 #include <linux/proc_fs.h>
0078 #include <linux/time.h>
0079 #include <linux/security.h>
0080 #include <linux/syscalls.h>
0081 #include <linux/audit.h>
0082 #include <linux/capability.h>
0083 #include <linux/seq_file.h>
0084 #include <linux/rwsem.h>
0085 #include <linux/nsproxy.h>
0086 #include <linux/ipc_namespace.h>
0087 #include <linux/sched/wake_q.h>
0088 #include <linux/nospec.h>
0089 #include <linux/rhashtable.h>
0090
0091 #include <linux/uaccess.h>
0092 #include "util.h"
0093
0094
/* One semaphore structure for each semaphore in the system. */
struct sem {
	int	semval;		/* current value */
	/*
	 * PID of the process that last modified the semaphore. For
	 * Linux, specifically these are:
	 *  - semop
	 *  - semctl, via SETVAL and SETALL.
	 *  - at task exit when performing undo adjustments (see exit_sem).
	 */
	struct pid *sempid;
	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
	struct list_head pending_alter; /* pending single-sop operations
					 * that alter the semaphore */
	struct list_head pending_const; /* pending single-sop operations
					 * that do not alter the semaphore*/
	time64_t	 sem_otime;	/* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
0112
0113
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
	struct kern_ipc_perm	sem_perm;	/* permissions .. see ipc.h */
	time64_t		sem_ctime;	/* create/last semctl() time */
	struct list_head	pending_alter;	/* pending operations */
						/* that alter the array */
	struct list_head	pending_const;	/* pending complex operations */
						/* that do not alter semvals */
	struct list_head	list_id;	/* undo requests on this array */
	int			sem_nsems;	/* no. of semaphores in array */
	int			complex_count;	/* pending complex operations */
	unsigned int		use_global_lock;/* >0: global lock required */

	struct sem		sems[];		/* flexible array of semaphores */
} __randomize_layout;
0128
0129
/* One queue for each sleeping process in the system. */
struct sem_queue {
	struct list_head	list;	 /* queue of pending operations */
	struct task_struct	*sleeper; /* this process */
	struct sem_undo		*undo;	 /* undo structure */
	struct pid		*pid;	 /* process id of requesting process */
	int			status;	 /* completion status of operation */
	struct sembuf		*sops;	 /* array of pending operations */
	struct sembuf		*blocking; /* the operation that blocked */
	int			nsops;	 /* number of operations */
	bool			alter;	 /* does *sops alter the array? */
	bool			dupsop;	 /* sops on more than one sem_num */
};
0142
0143
0144
0145
/* Each task has a list of undo requests. They are executed automatically
 * when the process exits.
 */
struct sem_undo {
	struct list_head	list_proc;	/* per-process list: *
						 * all undos from one process
						 * rcu protected */
	struct rcu_head		rcu;		/* rcu struct for sem_undo */
	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
	struct list_head	list_id;	/* per semaphore array list:
						 * all undos for one array */
	int			semid;		/* semaphore set identifier */
	short			*semadj;	/* array of adjustments */
						/* one per semaphore */
};
0158
0159
0160
0161
/* sem_undo_list controls shared access to the list of sem_undo structures
 * that may be shared among all a CLONE_SYSVSEM task group.
 */
struct sem_undo_list {
	refcount_t		refcnt;		/* shared by CLONE_SYSVSEM tasks */
	spinlock_t		lock;		/* protects list_proc */
	struct list_head	list_proc;	/* all sem_undos of this group */
};
0167
0168
0169 #define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
0170
0171 static int newary(struct ipc_namespace *, struct ipc_params *);
0172 static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
0173 #ifdef CONFIG_PROC_FS
0174 static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
0175 #endif
0176
0177 #define SEMMSL_FAST 256
0178 #define SEMOPM_FAST 64
0179
0180
0181
0182
0183
0184
0185 #define USE_GLOBAL_LOCK_HYSTERESIS 10
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
0240
0241
0242
0243
0244 #define sc_semmsl sem_ctls[0]
0245 #define sc_semmns sem_ctls[1]
0246 #define sc_semopm sem_ctls[2]
0247 #define sc_semmni sem_ctls[3]
0248
0249 void sem_init_ns(struct ipc_namespace *ns)
0250 {
0251 ns->sc_semmsl = SEMMSL;
0252 ns->sc_semmns = SEMMNS;
0253 ns->sc_semopm = SEMOPM;
0254 ns->sc_semmni = SEMMNI;
0255 ns->used_sems = 0;
0256 ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
0257 }
0258
0259 #ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	/* Free every remaining semaphore set first, then the id tables. */
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SEM_IDS].key_ht);
}
0266 #endif
0267
void __init sem_init(void)
{
	/* Initialize the initial namespace, then register /proc/sysvipc/sem. */
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}
0275
0276
0277
0278
0279
0280
0281
0282
/**
 * unmerge_queues - unmerge queues, if possible.
 * @sma: semaphore array
 *
 * The function unmerges the wait queues if complex_count is 0.
 * It must be called prior to dropping the global semaphore array lock.
 */
static void unmerge_queues(struct sem_array *sma)
{
	struct sem_queue *q, *tq;

	/* complex operations still around? */
	if (sma->complex_count)
		return;
	/*
	 * We will switch back to simple mode.
	 * Move all pending operation back into the per-semaphore
	 * queues.
	 */
	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		struct sem *curr;
		curr = &sma->sems[q->sops[0].sem_num];

		list_add_tail(&q->list, &curr->pending_alter);
	}
	INIT_LIST_HEAD(&sma->pending_alter);
}
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313 static void merge_queues(struct sem_array *sma)
0314 {
0315 int i;
0316 for (i = 0; i < sma->sem_nsems; i++) {
0317 struct sem *sem = &sma->sems[i];
0318
0319 list_splice_init(&sem->pending_alter, &sma->pending_alter);
0320 }
0321 }
0322
0323 static void sem_rcu_free(struct rcu_head *head)
0324 {
0325 struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
0326 struct sem_array *sma = container_of(p, struct sem_array, sem_perm);
0327
0328 security_sem_free(&sma->sem_perm);
0329 kvfree(sma);
0330 }
0331
0332
0333
0334
0335
/*
 * Enter the mode suitable for non-simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_enter(struct sem_array *sma)
{
	int i;
	struct sem *sem;

	if (sma->use_global_lock > 0) {
		/*
		 * We are already in global lock mode.
		 * Nothing to do, just reset the
		 * counter until we return to simple mode.
		 */
		WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);
		return;
	}
	WRITE_ONCE(sma->use_global_lock, USE_GLOBAL_LOCK_HYSTERESIS);

	/*
	 * Wait for all currently running simple ops: once each
	 * per-semaphore lock has been acquired and dropped, no simple
	 * op that missed the use_global_lock update can still be inside
	 * its critical section.
	 */
	for (i = 0; i < sma->sem_nsems; i++) {
		sem = &sma->sems[i];
		spin_lock(&sem->lock);
		spin_unlock(&sem->lock);
	}
}
0358
0359
0360
0361
0362
/*
 * Try to leave the mode that disallows simple operations:
 * Caller must own sem_perm.lock.
 */
static void complexmode_tryleave(struct sem_array *sma)
{
	if (sma->complex_count) {
		/* Complex ops are sleeping.
		 * We must stay in complex mode
		 */
		return;
	}
	if (sma->use_global_lock == 1) {
		/*
		 * Immediately after setting use_global_lock to 0,
		 * a simple op can start. Thus: all memory writes
		 * performed by the current operation must be visible
		 * before we set use_global_lock to 0.
		 */
		smp_store_release(&sma->use_global_lock, 0);
	} else {
		/* still in hysteresis: just count down */
		WRITE_ONCE(sma->use_global_lock,
				sma->use_global_lock-1);
	}
}
0380
0381 #define SEM_GLOBAL_LOCK (-1)
0382
0383
0384
0385
0386
0387
0388
/*
 * If the request contains only one semaphore operation, and there are
 * no complex transactions pending, lock only the semaphore involved.
 * Otherwise, lock the entire semaphore array, since we either have
 * multiple semaphores in our own semops, or we need to look at
 * semaphores from other pending complex operations.
 *
 * Returns the index of the per-semaphore lock that was taken, or
 * SEM_GLOBAL_LOCK if the whole array was locked.
 */
static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
			      int nsops)
{
	struct sem *sem;
	int idx;

	if (nsops != 1) {
		/* Complex operation - acquire a full lock */
		ipc_lock_object(&sma->sem_perm);

		/* Prevent parallel simple ops */
		complexmode_enter(sma);
		return SEM_GLOBAL_LOCK;
	}

	/*
	 * Only one semaphore affected - try to optimize locking.
	 * Optimized locking is possible if no complex operation
	 * is either enqueued or processed right now.
	 *
	 * Both facts are tracked by use_global_lock.
	 */
	idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
	sem = &sma->sems[idx];

	/*
	 * Initial check for use_global_lock. Just an optimization,
	 * no locking, no memory barrier.
	 */
	if (!READ_ONCE(sma->use_global_lock)) {
		/*
		 * It appears that no complex operation is around.
		 * Acquire the per-semaphore lock.
		 */
		spin_lock(&sem->lock);

		/* pairs with smp_store_release() in complexmode_tryleave() */
		if (!smp_load_acquire(&sma->use_global_lock)) {
			/* fast path successful! */
			return sops->sem_num;
		}
		spin_unlock(&sem->lock);
	}

	/* slow path: acquire the full lock */
	ipc_lock_object(&sma->sem_perm);

	if (sma->use_global_lock == 0) {
		/*
		 * The use_global_lock mode ended while we waited for
		 * sma->sem_perm.lock. Thus we must switch to locking
		 * with sem->lock.
		 * Unlike in the fast path, there is no need to recheck
		 * sma->use_global_lock after we have acquired sem->lock:
		 * We own sma->sem_perm.lock, thus use_global_lock cannot
		 * change.
		 */
		spin_lock(&sem->lock);

		ipc_unlock_object(&sma->sem_perm);
		return sops->sem_num;
	} else {
		/*
		 * Not a false alarm, thus continue to use the global lock
		 * mode. No need for complexmode_enter(), this was done by
		 * the caller that has set use_global_lock to non-zero.
		 */
		return SEM_GLOBAL_LOCK;
	}
}
0459
/*
 * Drop the lock taken by sem_lock(). For the global lock, this first
 * moves pending simple ops back to the per-semaphore queues and tries
 * to leave complex mode - both must happen before the lock is released.
 */
static inline void sem_unlock(struct sem_array *sma, int locknum)
{
	if (locknum == SEM_GLOBAL_LOCK) {
		unmerge_queues(sma);
		complexmode_tryleave(sma);
		ipc_unlock_object(&sma->sem_perm);
	} else {
		/* locknum is the index of the per-semaphore lock held */
		struct sem *sem = &sma->sems[locknum];
		spin_unlock(&sem->lock);
	}
}
0471
0472
0473
0474
0475
0476
0477
0478 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
0479 {
0480 struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
0481
0482 if (IS_ERR(ipcp))
0483 return ERR_CAST(ipcp);
0484
0485 return container_of(ipcp, struct sem_array, sem_perm);
0486 }
0487
0488 static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
0489 int id)
0490 {
0491 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
0492
0493 if (IS_ERR(ipcp))
0494 return ERR_CAST(ipcp);
0495
0496 return container_of(ipcp, struct sem_array, sem_perm);
0497 }
0498
/*
 * Re-acquire the global lock on an array that was kept alive only by a
 * reference taken with ipc_rcu_getref(), then drop that reference.
 * The lock must be taken before the putref: the array may become
 * invalid, which is checked by the caller after this returns.
 */
static inline void sem_lock_and_putref(struct sem_array *sma)
{
	sem_lock(sma, NULL, -1);
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
0504
/* Remove the set from the namespace's id tables (idr + key hash). */
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
0509
0510 static struct sem_array *sem_alloc(size_t nsems)
0511 {
0512 struct sem_array *sma;
0513
0514 if (nsems > (INT_MAX - sizeof(*sma)) / sizeof(sma->sems[0]))
0515 return NULL;
0516
0517 sma = kvzalloc(struct_size(sma, sems, nsems), GFP_KERNEL_ACCOUNT);
0518 if (unlikely(!sma))
0519 return NULL;
0520
0521 return sma;
0522 }
0523
0524
0525
0526
0527
0528
0529
0530
/**
 * newary - Create new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rwsem held (as a writer)
 */
static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int retval;
	struct sem_array *sma;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	sma = sem_alloc(nsems);
	if (!sma)
		return -ENOMEM;

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	/* The LSM hook allocates sem_perm.security on success. */
	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(&sma->sem_perm);
	if (retval) {
		kvfree(sma);
		return retval;
	}

	for (i = 0; i < nsems; i++) {
		INIT_LIST_HEAD(&sma->sems[i].pending_alter);
		INIT_LIST_HEAD(&sma->sems[i].pending_const);
		spin_lock_init(&sma->sems[i].lock);
	}

	sma->complex_count = 0;
	sma->use_global_lock = USE_GLOBAL_LOCK_HYSTERESIS;
	INIT_LIST_HEAD(&sma->pending_alter);
	INIT_LIST_HEAD(&sma->pending_const);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = ktime_get_real_seconds();

	/* ipc_addid() locks sma upon success. */
	retval = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (retval < 0) {
		/* on failure, drop the reference; sem_rcu_free frees sma */
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return retval;
	}
	ns->used_sems += nsems;

	sem_unlock(sma, -1);
	rcu_read_unlock();

	return sma->sem_perm.id;
}
0586
0587
0588
0589
0590
0591 static int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params)
0592 {
0593 struct sem_array *sma;
0594
0595 sma = container_of(ipcp, struct sem_array, sem_perm);
0596 if (params->u.nsems > sma->sem_nsems)
0597 return -EINVAL;
0598
0599 return 0;
0600 }
0601
0602 long ksys_semget(key_t key, int nsems, int semflg)
0603 {
0604 struct ipc_namespace *ns;
0605 static const struct ipc_ops sem_ops = {
0606 .getnew = newary,
0607 .associate = security_sem_associate,
0608 .more_checks = sem_more_checks,
0609 };
0610 struct ipc_params sem_params;
0611
0612 ns = current->nsproxy->ipc_ns;
0613
0614 if (nsems < 0 || nsems > ns->sc_semmsl)
0615 return -EINVAL;
0616
0617 sem_params.key = key;
0618 sem_params.flg = semflg;
0619 sem_params.u.nsems = nsems;
0620
0621 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
0622 }
0623
/* semget(2) syscall: thin wrapper around ksys_semget(). */
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	return ksys_semget(key, nsems, semflg);
}
0628
0629
0630
0631
0632
0633
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643
0644
0645
/**
 * perform_atomic_semop_slow - Attempt to perform semaphore
 *                             operations on a given array.
 * @sma: semaphore array
 * @q: struct sem_queue that describes the operation
 *
 * Caller blocking are as follows, based the value
 * indicated by the semaphore operation (sem_op):
 *
 * (1) >0 never blocks.
 * (2)  0 (wait-for-zero operation): semval is non-zero.
 * (3) <0 attempting to decrement semval to a value smaller than zero.
 *
 * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Returns <0 for error codes.
 */
static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct pid *pid;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	/* Apply each op in order, rolling everything back on failure. */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
			un->semadj[sop->sem_num] = undo;
		}

		curr->semval = result;
	}

	/* All ops succeeded: record the caller's pid on each semaphore. */
	sop--;
	pid = q->pid;
	while (sop >= sops) {
		ipc_update_pid(&sma->sems[sop->sem_num].sempid, pid);
		sop--;
	}

	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	q->blocking = sop;

	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	/* Roll back every op already applied, including semadj updates. */
	sop--;
	while (sop >= sops) {
		sem_op = sop->sem_op;
		sma->sems[sop->sem_num].semval -= sem_op;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] += sem_op;
		sop--;
	}

	return result;
}
0718
/*
 * Fast variant of perform_atomic_semop_slow(): valid only when no
 * semaphore number appears twice in the sop array (!q->dupsop), which
 * allows a check pass followed by an apply pass with no rollback.
 * Same return convention: 0 success, 1 must sleep, <0 error.
 */
static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
	int result, sem_op, nsops;
	struct sembuf *sop;
	struct sem *curr;
	struct sembuf *sops;
	struct sem_undo *un;

	sops = q->sops;
	nsops = q->nsops;
	un = q->undo;

	if (unlikely(q->dupsop))
		return perform_atomic_semop_slow(sma, q);

	/*
	 * We scan the semaphore set twice, first to ensure that the entire
	 * operation can succeed, therefore avoiding any pointless writes
	 * to shared memory and having to undo such changes in order to block
	 * until the operations can go through.
	 */
	for (sop = sops; sop < sops + nsops; sop++) {
		int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);

		curr = &sma->sems[idx];
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block; /* wait-for-zero */

		result += sem_op;
		if (result < 0)
			goto would_block;

		if (result > SEMVMX)
			return -ERANGE;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			/* Exceeding the undo range is an error. */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				return -ERANGE;
		}
	}

	/* Second pass: guaranteed to succeed, apply all changes. */
	for (sop = sops; sop < sops + nsops; sop++) {
		curr = &sma->sems[sop->sem_num];
		sem_op = sop->sem_op;

		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;

			un->semadj[sop->sem_num] = undo;
		}
		curr->semval += sem_op;
		ipc_update_pid(&curr->sempid, q->pid);
	}

	return 0;

would_block:
	q->blocking = sop;
	return sop->sem_flg & IPC_NOWAIT ? -EAGAIN : 1;
}
0785
/*
 * Queue @q's sleeper for wakeup with result @error. The task reference
 * must be taken before the status store: once q->status is written the
 * sleeper may exit the syscall and q may be freed by its owner.
 */
static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error,
					     struct wake_q_head *wake_q)
{
	struct task_struct *sleeper;

	sleeper = get_task_struct(q->sleeper);

	/* see SEM_BARRIER_2 for purpose/pairing */
	smp_store_release(&q->status, error);

	wake_q_add_safe(wake_q, sleeper);
}
0798
/* Remove @q from its pending list; complex ops also drop complex_count. */
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops > 1)
		sma->complex_count--;
}
0805
0806
0807
0808
0809
0810
0811
0812
0813
0814
0815
0816 static inline int check_restart(struct sem_array *sma, struct sem_queue *q)
0817 {
0818
0819 if (!list_empty(&sma->pending_alter))
0820 return 1;
0821
0822
0823 if (q->nsops > 1)
0824 return 1;
0825
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835
0836
0837 return 0;
0838 }
0839
0840
0841
0842
0843
0844
0845
0846
0847
0848
0849
0850
0851
0852
0853
/**
 * wake_const_ops - wake up non-alter tasks
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @wake_q: lockless wake-queue head.
 *
 * wake_const_ops must be called after a semaphore in a semaphore array
 * was set to 0. If complex const operations are pending, wake_const_ops must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int wake_const_ops(struct sem_array *sma, int semnum,
			  struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_const;
	else
		pending_list = &sma->sems[semnum].pending_const;

	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error = perform_atomic_semop(sma, q);

		/* still blocked: leave queued */
		if (error > 0)
			continue;
		/* operation completed, remove from queue & wakeup */
		unlink_queue(sma, q);

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (error == 0)
			semop_completed = 1;
	}

	return semop_completed;
}
0881
0882
0883
0884
0885
0886
0887
0888
0889
0890
0891
0892
/**
 * do_smart_wakeup_zero - wakeup all wait for zero tasks
 * @sma: semaphore array
 * @sops: operations that were performed, may be NULL
 * @nsops: number of operations
 * @wake_q: lockless wake-queue head
 *
 * Checks all required queues for wait-for-zero operations, based
 * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
				int nsops, struct wake_q_head *wake_q)
{
	int i;
	int semop_completed = 0;
	int got_zero = 0;

	/* first: the per-semaphore queues, if known */
	if (sops) {
		for (i = 0; i < nsops; i++) {
			int num = sops[i].sem_num;

			if (sma->sems[num].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, num, wake_q);
			}
		}
	} else {
		/*
		 * No sops means modified semaphores not known.
		 * Assume all were changed.
		 */
		for (i = 0; i < sma->sem_nsems; i++) {
			if (sma->sems[i].semval == 0) {
				got_zero = 1;
				semop_completed |= wake_const_ops(sma, i, wake_q);
			}
		}
	}
	/*
	 * If one of the modified semaphores got 0,
	 * then check the global queue, too.
	 */
	if (got_zero)
		semop_completed |= wake_const_ops(sma, -1, wake_q);

	return semop_completed;
}
0931
0932
0933
0934
0935
0936
0937
0938
0939
0940
0941
0942
0943
0944
0945
0946
0947
0948
/**
 * update_queue - look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified, or -1 for the global queue.
 * @wake_q: lockless wake-queue head.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, update_queue must
 * be called with semnum = -1, as well as with the number of each modified
 * semaphore.
 * The tasks that must be woken up are added to @wake_q. The return code
 * is stored in q->status.
 * The function internally checks if const operations can now succeed.
 *
 * The function return 1 if at least one semop was completed successfully.
 */
static int update_queue(struct sem_array *sma, int semnum, struct wake_q_head *wake_q)
{
	struct sem_queue *q, *tmp;
	struct list_head *pending_list;
	int semop_completed = 0;

	if (semnum == -1)
		pending_list = &sma->pending_alter;
	else
		pending_list = &sma->sems[semnum].pending_alter;

again:
	list_for_each_entry_safe(q, tmp, pending_list, list) {
		int error, restart;

		/*
		 * If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan further: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sems[semnum].semval == 0)
			break;

		error = perform_atomic_semop(sma, q);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		if (error) {
			restart = 0;
		} else {
			semop_completed = 1;
			do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q);
			restart = check_restart(sma, q);
		}

		wake_up_sem_queue_prepare(q, error, wake_q);
		if (restart)
			goto again;
	}
	return semop_completed;
}
0996
0997
0998
0999
1000
1001
1002
1003
1004
1005 static void set_semotime(struct sem_array *sma, struct sembuf *sops)
1006 {
1007 if (sops == NULL) {
1008 sma->sems[0].sem_otime = ktime_get_real_seconds();
1009 } else {
1010 sma->sems[sops[0].sem_num].sem_otime =
1011 ktime_get_real_seconds();
1012 }
1013 }
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
/**
 * do_smart_update - optimized update_queue
 * @sma: semaphore array
 * @sops: operations that were performed, may be NULL
 * @nsops: number of operations
 * @otime: force setting otime
 * @wake_q: lockless wake-queue head
 *
 * do_smart_update() does the required calls to update_queue and
 * do_smart_wakeup_zero, based on the actual changes that were performed
 * on the semaphore array.
 * Note that the function does not do the actual wake-up: the caller is
 * responsible for calling wake_up_q().
 * It is safe to perform this call after dropping all locks.
 */
static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
			    int otime, struct wake_q_head *wake_q)
{
	int i;

	otime |= do_smart_wakeup_zero(sma, sops, nsops, wake_q);

	if (!list_empty(&sma->pending_alter)) {
		/* semaphore array uses the global queue - just process it. */
		otime |= update_queue(sma, -1, wake_q);
	} else {
		if (!sops) {
			/*
			 * No sops, thus the modified semaphores are not
			 * known. Check all.
			 */
			for (i = 0; i < sma->sem_nsems; i++)
				otime |= update_queue(sma, i, wake_q);
		} else {
			/*
			 * Check the semaphores that were increased:
			 * - No complex ops, thus all sleeping ops are
			 *   decrease.
			 * - if we decreased the value, then any sleeping
			 *   semaphore ops would still block.
			 */
			for (i = 0; i < nsops; i++) {
				if (sops[i].sem_op > 0) {
					otime |= update_queue(sma,
							      sops[i].sem_num, wake_q);
				}
			}
		}
	}
	if (otime)
		set_semotime(sma, sops);
}
1068
1069
1070
1071
/*
 * check_qop: Test if a queued operation sleeps on the semaphore semnum
 */
static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
		     bool count_zero)
{
	struct sembuf *sop = q->blocking;

	/*
	 * Linux always (since 0.99.10) reported a task as sleeping on all
	 * semaphores. This violates SUS, therefore it was changed to the
	 * standard compliant behavior: only the operation that blocked the
	 * task counts. Warn once, in case some application relied on the
	 * old behavior.
	 */
	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
			current->comm, task_pid_nr(current));

	if (sop->sem_num != semnum)
		return 0;

	if (count_zero && sop->sem_op == 0)
		return 1;
	if (!count_zero && sop->sem_op < 0)
		return 1;

	return 0;
}
1098
1099
1100
1101
1102
1103
1104
1105
1106 static int count_semcnt(struct sem_array *sma, ushort semnum,
1107 bool count_zero)
1108 {
1109 struct list_head *l;
1110 struct sem_queue *q;
1111 int semcnt;
1112
1113 semcnt = 0;
1114
1115 if (count_zero)
1116 l = &sma->sems[semnum].pending_const;
1117 else
1118 l = &sma->sems[semnum].pending_alter;
1119
1120 list_for_each_entry(q, l, list) {
1121
1122
1123
1124 semcnt++;
1125 }
1126
1127
1128 list_for_each_entry(q, &sma->pending_alter, list) {
1129 semcnt += check_qop(sma, semnum, q, count_zero);
1130 }
1131 if (count_zero) {
1132 list_for_each_entry(q, &sma->pending_const, list) {
1133 semcnt += check_qop(sma, semnum, q, count_zero);
1134 }
1135 }
1136 return semcnt;
1137 }
1138
1139
1140
1141
1142
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
 * as a writer and the spinlock for this semaphore set hold.
 * sem_ids.rwsem remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	int i;
	DEFINE_WAKE_Q(wake_q);

	/* Free the existing undo structures for this semaphore set.  */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		kvfree_rcu(un, rcu);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}

	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
	}
	for (i = 0; i < sma->sem_nsems; i++) {
		struct sem *sem = &sma->sems[i];
		list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
			unlink_queue(sma, q);
			wake_up_sem_queue_prepare(q, -EIDRM, &wake_q);
		}
		ipc_update_pid(&sem->sempid, NULL);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma, -1);
	rcu_read_unlock();

	wake_up_q(&wake_q);
	ns->used_sems -= sma->sem_nsems;
	ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
}
1194
/*
 * Copy a semid64_ds to userspace in the layout the caller's ABI
 * version expects (IPC_64 native, or the legacy IPC_OLD semid_ds).
 * Returns the number of bytes NOT copied (copy_to_user convention),
 * or -EINVAL for an unknown version.
 */
static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct semid_ds out;

		/* zero padding so no kernel stack data leaks to userspace */
		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime = in->sem_otime;
		out.sem_ctime = in->sem_ctime;
		out.sem_nsems = in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
1218
1219 static time64_t get_semotime(struct sem_array *sma)
1220 {
1221 int i;
1222 time64_t res;
1223
1224 res = sma->sems[0].sem_otime;
1225 for (i = 1; i < sma->sem_nsems; i++) {
1226 time64_t to = sma->sems[i].sem_otime;
1227
1228 if (to > res)
1229 res = to;
1230 }
1231 return res;
1232 }
1233
/*
 * Handle IPC_STAT, SEM_STAT and SEM_STAT_ANY: fill @semid64 from the
 * set identified by @semid. For IPC_STAT @semid is an id and 0 is
 * returned; for SEM_STAT(_ANY) @semid is an index and the ipc id is
 * returned. SEM_STAT_ANY skips the read-permission check (audited).
 */
static int semctl_stat(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	time64_t semotime;
	int err;

	memset(semid64, 0, sizeof(*semid64));

	rcu_read_lock();
	if (cmd == SEM_STAT || cmd == SEM_STAT_ANY) {
		/* index-based lookup, no sequence-number check */
		sma = sem_obtain_object(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	} else { /* IPC_STAT */
		sma = sem_obtain_object_check(ns, semid);
		if (IS_ERR(sma)) {
			err = PTR_ERR(sma);
			goto out_unlock;
		}
	}

	/* see comment for SHM_STAT_ANY */
	if (cmd == SEM_STAT_ANY)
		audit_ipc_obj(&sma->sem_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semotime = get_semotime(sma);
	semid64->sem_otime = semotime;
	semid64->sem_ctime = sma->sem_ctime;
#ifndef CONFIG_64BIT
	semid64->sem_otime_high = semotime >> 32;
	semid64->sem_ctime_high = sma->sem_ctime >> 32;
#endif
	semid64->sem_nsems = sma->sem_nsems;

	if (cmd == IPC_STAT) {
		/*
		 * As defined in SUS:
		 * Return 0 on success
		 */
		err = 0;
	} else {
		/*
		 * SEM_STAT and SEM_STAT_ANY (both Linux specific)
		 * Return the full id, including the sequence number
		 */
		err = sma->sem_perm.id;
	}
	ipc_unlock_object(&sma->sem_perm);
out_unlock:
	rcu_read_unlock();
	return err;
}
1307
/*
 * Handle IPC_INFO and SEM_INFO: copy the namespace limits (and, for
 * SEM_INFO, current usage) to userspace. Returns the highest in-use
 * index on success (0 if none), negative error otherwise.
 */
static int semctl_info(struct ipc_namespace *ns, int semid,
			 int cmd, void __user *p)
{
	struct seminfo seminfo;
	int max_idx;
	int err;

	err = security_sem_semctl(NULL, cmd);
	if (err)
		return err;

	memset(&seminfo, 0, sizeof(seminfo));
	seminfo.semmni = ns->sc_semmni;
	seminfo.semmns = ns->sc_semmns;
	seminfo.semmsl = ns->sc_semmsl;
	seminfo.semopm = ns->sc_semopm;
	seminfo.semvmx = SEMVMX;
	seminfo.semmnu = SEMMNU;
	seminfo.semmap = SEMMAP;
	seminfo.semume = SEMUME;
	down_read(&sem_ids(ns).rwsem);
	if (cmd == SEM_INFO) {
		/* SEM_INFO reports live usage in semusz/semaem */
		seminfo.semusz = sem_ids(ns).in_use;
		seminfo.semaem = ns->used_sems;
	} else {
		seminfo.semusz = SEMUSZ;
		seminfo.semaem = SEMAEM;
	}
	max_idx = ipc_get_maxidx(&sem_ids(ns));
	up_read(&sem_ids(ns).rwsem);
	if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
		return -EFAULT;
	return (max_idx < 0) ? 0 : max_idx;
}
1342
/*
 * Handle SETVAL: set semaphore @semnum of set @semid to @val, clear all
 * undo adjustments for that semaphore and wake any operations that can
 * now complete.
 */
static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
		int val)
{
	struct sem_undo *un;
	struct sem_array *sma;
	struct sem *curr;
	int err;
	DEFINE_WAKE_Q(wake_q);

	if (val > SEMVMX || val < 0)
		return -ERANGE;

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	if (semnum < 0 || semnum >= sma->sem_nsems) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* write permission is required to set a value */
	if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
		rcu_read_unlock();
		return -EACCES;
	}

	err = security_sem_semctl(&sma->sem_perm, SETVAL);
	if (err) {
		rcu_read_unlock();
		return -EACCES;
	}

	sem_lock(sma, NULL, -1);

	if (!ipc_valid_object(&sma->sem_perm)) {
		sem_unlock(sma, -1);
		rcu_read_unlock();
		return -EIDRM;
	}

	semnum = array_index_nospec(semnum, sma->sem_nsems);
	curr = &sma->sems[semnum];

	/* clear undo adjustments for this semaphore in every undo struct */
	ipc_assert_locked_object(&sma->sem_perm);
	list_for_each_entry(un, &sma->list_id, list_id)
		un->semadj[semnum] = 0;

	curr->semval = val;
	ipc_update_pid(&curr->sempid, task_tgid(current));
	sma->sem_ctime = ktime_get_real_seconds();
	/* maybe some queued-up processes were waiting for this */
	do_smart_update(sma, NULL, 0, 0, &wake_q);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_q(&wake_q);
	return 0;
}
1404
/*
 * Handle GETALL, SETALL, GETVAL, GETPID, GETNCNT and GETZCNT.
 * For GETALL/SETALL with a large array the set must be unlocked while
 * the transfer buffer is allocated/filled; a reference (ipc_rcu_getref)
 * keeps it alive and validity is rechecked after relocking.
 */
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, void __user *p)
{
	struct sem_array *sma;
	struct sem *curr;
	int err, nsems;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort *sem_io = fast_sem_io;
	DEFINE_WAKE_Q(wake_q);

	rcu_read_lock();
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return PTR_ERR(sma);
	}

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
		goto out_rcu_wakeup;

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_rcu_wakeup;

	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = p;
		int i;

		sem_lock(sma, NULL, -1);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}
		if (nsems > SEMMSL_FAST) {
			/*
			 * The on-stack buffer is too small: pin the set,
			 * drop the locks, allocate, then relock/revalidate.
			 */
			if (!ipc_rcu_getref(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
			sem_unlock(sma, -1);
			rcu_read_unlock();
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}

			rcu_read_lock();
			sem_lock_and_putref(sma);
			if (!ipc_valid_object(&sma->sem_perm)) {
				err = -EIDRM;
				goto out_unlock;
			}
		}
		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sems[i].semval;
		sem_unlock(sma, -1);
		rcu_read_unlock();
		err = 0;
		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		/* pin the set while we copy the new values from userspace */
		if (!ipc_rcu_getref(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_rcu_wakeup;
		}
		rcu_read_unlock();

		if (nsems > SEMMSL_FAST) {
			sem_io = kvmalloc_array(nsems, sizeof(ushort),
						GFP_KERNEL);
			if (sem_io == NULL) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				return -ENOMEM;
			}
		}

		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
			ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
			err = -EFAULT;
			goto out_free;
		}

		/* validate everything before touching the semaphores */
		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
				err = -ERANGE;
				goto out_free;
			}
		}
		rcu_read_lock();
		sem_lock_and_putref(sma);
		if (!ipc_valid_object(&sma->sem_perm)) {
			err = -EIDRM;
			goto out_unlock;
		}

		for (i = 0; i < nsems; i++) {
			sma->sems[i].semval = sem_io[i];
			ipc_update_pid(&sma->sems[i].sempid, task_tgid(current));
		}

		/* clear all undo adjustments for this set */
		ipc_assert_locked_object(&sma->sem_perm);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = ktime_get_real_seconds();
		/* maybe some queued-up processes were waiting for this */
		do_smart_update(sma, NULL, 0, 0, &wake_q);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
	}
	err = -EINVAL;
	if (semnum < 0 || semnum >= nsems)
		goto out_rcu_wakeup;

	sem_lock(sma, NULL, -1);
	if (!ipc_valid_object(&sma->sem_perm)) {
		err = -EIDRM;
		goto out_unlock;
	}

	semnum = array_index_nospec(semnum, nsems);
	curr = &sma->sems[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = pid_vnr(curr->sempid);
		goto out_unlock;
	case GETNCNT:
		err = count_semcnt(sma, semnum, 0);
		goto out_unlock;
	case GETZCNT:
		err = count_semcnt(sma, semnum, 1);
		goto out_unlock;
	}

out_unlock:
	sem_unlock(sma, -1);
out_rcu_wakeup:
	rcu_read_unlock();
	wake_up_q(&wake_q);
out_free:
	if (sem_io != fast_sem_io)
		kvfree(sem_io);
	return err;
}
1569
/*
 * Copy a semid_ds/semid64_ds from userspace into @out according to the
 * caller's ABI @version. For IPC_OLD only the fields IPC_SET actually
 * uses (uid, gid, mode) are converted. Returns 0, -EFAULT or -EINVAL.
 */
static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct semid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
1595
1596
1597
1598
1599
1600
/*
 * This function handles some semctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, struct semid64_ds *semid64)
{
	struct sem_array *sma;
	int err;
	struct kern_ipc_perm *ipcp;

	down_write(&sem_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_obtain_check(ns, &sem_ids(ns), semid, cmd,
				   &semid64->sem_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(&sma->sem_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		sem_lock(sma, NULL, -1);
		/* freeary unlocks the ipc object and rcu */
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		sem_lock(sma, NULL, -1);
		err = ipc_update_perm(&semid64->sem_perm, ipcp);
		if (err)
			goto out_unlock0;
		sma->sem_ctime = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	sem_unlock(sma, -1);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&sem_ids(ns).rwsem);
	return err;
}
1650
/*
 * Common semctl(2) dispatcher.  @version selects the user-space ABI
 * (IPC_64 vs. IPC_OLD layout) used when copying semid_ds to/from @arg.
 */
static long ksys_semctl(int semid, int semnum, int cmd, unsigned long arg, int version)
{
	struct ipc_namespace *ns;
	void __user *p = (void __user *)arg;
	struct semid64_ds semid64;
	int err;

	if (semid < 0)
		return -EINVAL;

	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		/* copy the result out in whichever ABI the caller expects */
		if (copy_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL: {
		int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
		/* big-endian 64bit: the int value sits in the upper 32 bits of arg */
		val = arg >> 32;
#else
		/* 32bit or little endian: the low bits hold the value */
		val = arg;
#endif
		return semctl_setval(ns, semid, semnum, val);
	}
	case IPC_SET:
		if (copy_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		/* both commands need the write-held rwsem */
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1704
/* semctl(2): native entry point, always speaks the IPC_64 ABI */
SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
1709
#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/*
 * Legacy semctl entry: the ABI version is encoded in @cmd.
 * ipc_parse_version() takes &cmd, so it presumably extracts the version
 * flag and strips it from the command before dispatch.
 */
long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg)
{
	int version = ipc_parse_version(&cmd);

	return ksys_semctl(semid, semnum, cmd, arg, version);
}

/* old_semctl(2): thin syscall wrapper around the legacy dispatcher */
SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
	return ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif
1723
1724 #ifdef CONFIG_COMPAT
1725
/*
 * Layout of the legacy (IPC_OLD) semid_ds as seen by 32-bit compat
 * tasks.  Only sem_perm, the two 32-bit timestamps, and sem_nsems are
 * filled in by copy_compat_semid_to_user(); the pointer fields exist
 * purely for ABI-layout compatibility and are zeroed on output.
 */
struct compat_semid_ds {
	struct compat_ipc_perm sem_perm;
	old_time32_t sem_otime;		/* last semop time (32-bit, y2038-limited) */
	old_time32_t sem_ctime;		/* last change time (32-bit) */
	compat_uptr_t sem_base;		/* ABI placeholder, not populated */
	compat_uptr_t sem_pending;	/* ABI placeholder, not populated */
	compat_uptr_t sem_pending_last;	/* ABI placeholder, not populated */
	compat_uptr_t undo;		/* ABI placeholder, not populated */
	unsigned short sem_nsems;	/* number of semaphores in the set */
};
1736
1737 static int copy_compat_semid_from_user(struct semid64_ds *out, void __user *buf,
1738 int version)
1739 {
1740 memset(out, 0, sizeof(*out));
1741 if (version == IPC_64) {
1742 struct compat_semid64_ds __user *p = buf;
1743 return get_compat_ipc64_perm(&out->sem_perm, &p->sem_perm);
1744 } else {
1745 struct compat_semid_ds __user *p = buf;
1746 return get_compat_ipc_perm(&out->sem_perm, &p->sem_perm);
1747 }
1748 }
1749
1750 static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
1751 int version)
1752 {
1753 if (version == IPC_64) {
1754 struct compat_semid64_ds v;
1755 memset(&v, 0, sizeof(v));
1756 to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
1757 v.sem_otime = lower_32_bits(in->sem_otime);
1758 v.sem_otime_high = upper_32_bits(in->sem_otime);
1759 v.sem_ctime = lower_32_bits(in->sem_ctime);
1760 v.sem_ctime_high = upper_32_bits(in->sem_ctime);
1761 v.sem_nsems = in->sem_nsems;
1762 return copy_to_user(buf, &v, sizeof(v));
1763 } else {
1764 struct compat_semid_ds v;
1765 memset(&v, 0, sizeof(v));
1766 to_compat_ipc_perm(&v.sem_perm, &in->sem_perm);
1767 v.sem_otime = in->sem_otime;
1768 v.sem_ctime = in->sem_ctime;
1769 v.sem_nsems = in->sem_nsems;
1770 return copy_to_user(buf, &v, sizeof(v));
1771 }
1772 }
1773
/*
 * Compat semctl(2) dispatcher.  Mirrors ksys_semctl() but copies
 * semid_ds through the compat layouts and takes a 32-bit @arg.
 * The IPC_64 flag is masked off @cmd for dispatch, while the full
 * @cmd (flag included) is still passed down to the helpers.
 */
static long compat_ksys_semctl(int semid, int semnum, int cmd, int arg, int version)
{
	void __user *p = compat_ptr(arg);
	struct ipc_namespace *ns;
	struct semid64_ds semid64;
	int err;

	ns = current->nsproxy->ipc_ns;

	if (semid < 0)
		return -EINVAL;

	switch (cmd & (~IPC_64)) {
	case IPC_INFO:
	case SEM_INFO:
		return semctl_info(ns, semid, cmd, p);
	case IPC_STAT:
	case SEM_STAT:
	case SEM_STAT_ANY:
		err = semctl_stat(ns, semid, cmd, &semid64);
		if (err < 0)
			return err;
		/* translate the result into the compat ABI */
		if (copy_compat_semid_to_user(p, &semid64, version))
			err = -EFAULT;
		return err;
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case GETALL:
	case SETALL:
		return semctl_main(ns, semid, semnum, cmd, p);
	case SETVAL:
		/* compat arg is a plain int; no endianness games needed */
		return semctl_setval(ns, semid, semnum, arg);
	case IPC_SET:
		if (copy_compat_semid_from_user(&semid64, p, version))
			return -EFAULT;
		fallthrough;
	case IPC_RMID:
		/* both commands need the write-held rwsem */
		return semctl_down(ns, semid, cmd, &semid64);
	default:
		return -EINVAL;
	}
}
1818
/* compat semctl(2): native compat entry point, always the IPC_64 ABI */
COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_semctl(semid, semnum, cmd, arg, IPC_64);
}
1823
#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
/*
 * Legacy compat semctl entry: the ABI version is encoded in @cmd and
 * extracted (and presumably stripped) by compat_ipc_parse_version().
 */
long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg)
{
	int version = compat_ipc_parse_version(&cmd);

	return compat_ksys_semctl(semid, semnum, cmd, arg, version);
}

/* compat old_semctl(2): thin syscall wrapper */
COMPAT_SYSCALL_DEFINE4(old_semctl, int, semid, int, semnum, int, cmd, int, arg)
{
	return compat_ksys_old_semctl(semid, semnum, cmd, arg);
}
#endif
1837 #endif
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850 static inline int get_undo_list(struct sem_undo_list **undo_listp)
1851 {
1852 struct sem_undo_list *undo_list;
1853
1854 undo_list = current->sysvsem.undo_list;
1855 if (!undo_list) {
1856 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL_ACCOUNT);
1857 if (undo_list == NULL)
1858 return -ENOMEM;
1859 spin_lock_init(&undo_list->lock);
1860 refcount_set(&undo_list->refcnt, 1);
1861 INIT_LIST_HEAD(&undo_list->list_proc);
1862
1863 current->sysvsem.undo_list = undo_list;
1864 }
1865 *undo_listp = undo_list;
1866 return 0;
1867 }
1868
1869 static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1870 {
1871 struct sem_undo *un;
1872
1873 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc,
1874 spin_is_locked(&ulp->lock)) {
1875 if (un->semid == semid)
1876 return un;
1877 }
1878 return NULL;
1879 }
1880
1881 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1882 {
1883 struct sem_undo *un;
1884
1885 assert_spin_locked(&ulp->lock);
1886
1887 un = __lookup_undo(ulp, semid);
1888 if (un) {
1889 list_del_rcu(&un->list_proc);
1890 list_add_rcu(&un->list_proc, &ulp->list_proc);
1891 }
1892 return un;
1893 }
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
/*
 * find_alloc_undo - look up (and if not present create) an undo structure
 * @ns: ipc namespace
 * @semid: semaphore array id
 *
 * The undo structure's size depends on the array's sem_nsems, so the
 * allocation cannot happen under the locks: the code takes a reference
 * on the array, drops all locks for the kvzalloc, then revalidates
 * everything afterwards.
 *
 * On success the function returns with rcu_read_lock() held; on error
 * it returns an ERR_PTR with the RCU read lock already dropped.
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems, error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un != NULL))
		goto out;

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		return ERR_CAST(sma);
	}

	nsems = sma->sem_nsems;
	if (!ipc_rcu_getref(&sma->sem_perm)) {
		/* array is already being torn down */
		rcu_read_unlock();
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	rcu_read_unlock();

	/* step 2: allocate new undo structure (semadj array is appended) */
	new = kvzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems,
		       GFP_KERNEL_ACCOUNT);
	if (!new) {
		ipc_rcu_putref(&sma->sem_perm, sem_rcu_free);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: revalidate: we dropped all locks around the allocation */
	rcu_read_lock();
	sem_lock_and_putref(sma);
	if (!ipc_valid_object(&sma->sem_perm)) {
		/* array was removed while we allocated */
		sem_unlock(sma, -1);
		rcu_read_unlock();
		kvfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: another thread of this process may
	 * have allocated the undo structure in the meantime.
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		spin_unlock(&ulp->lock);
		kvfree(new);
		goto success;
	}
	/* step 5: initialize and insert into both lists */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	ipc_assert_locked_object(&sma->sem_perm);
	list_add(&new->list_id, &sma->list_id);
	un = new;
	spin_unlock(&ulp->lock);
success:
	sem_unlock(sma, -1);
out:
	return un;
}
1984
/*
 * Perform a semop/semtimedop operation set on array @semid.
 *
 * If the operations cannot all complete immediately the task is queued
 * on the array and sleeps (TASK_INTERRUPTIBLE, with an optional
 * absolute-deadline hrtimer built from @timeout) until woken by a
 * completing operation, a signal, array removal, or timeout.
 */
long __do_semtimedop(int semid, struct sembuf *sops,
		unsigned nsops, const struct timespec64 *timeout,
		struct ipc_namespace *ns)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf *sop;
	struct sem_undo *un;
	int max, locknum;
	bool undos = false, alter = false, dupsop = false;
	struct sem_queue queue;
	unsigned long dup = 0;	/* bitmap of altered sem_nums (mod BITS_PER_LONG) */
	ktime_t expires, *exp = NULL;
	bool timed_out = false;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;

	if (timeout) {
		if (!timespec64_valid(timeout))
			return -EINVAL;
		/* convert the relative timeout into an absolute deadline */
		expires = ktime_add_safe(ktime_get(),
				timespec64_to_ktime(*timeout));
		exp = &expires;
	}

	/*
	 * Pre-scan the ops: highest semaphore index touched (max), any
	 * SEM_UNDO requested (undos), any value-altering op (alter), and
	 * whether the same semaphore may be altered twice (dupsop).
	 */
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		unsigned long mask = 1ULL << ((sop->sem_num) % BITS_PER_LONG);

		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = true;
		if (dup & mask) {
			/*
			 * A previous altering op hashed to the same bit.
			 * Because the bitmap folds sem_num modulo
			 * BITS_PER_LONG this is a conservative
			 * over-approximation: false positives merely set
			 * dupsop when no real duplicate exists.
			 */
			dupsop = true;
		}
		if (sop->sem_op != 0) {
			alter = true;
			dup |= mask;
		}
	}

	if (undos) {
		/* On success, find_alloc_undo() returns with rcu_read_lock held */
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out;
		}
	} else {
		un = NULL;
		rcu_read_lock();
	}

	sma = sem_obtain_object_check(ns, semid);
	if (IS_ERR(sma)) {
		rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out;
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems) {
		rcu_read_unlock();
		goto out;
	}

	error = -EACCES;
	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) {
		rcu_read_unlock();
		goto out;
	}

	error = security_sem_semop(&sma->sem_perm, sops, nsops, alter);
	if (error) {
		rcu_read_unlock();
		goto out;
	}

	error = -EIDRM;
	locknum = sem_lock(sma, sops, nsops);

	/*
	 * We only obtained the object — not the lock — under RCU, so the
	 * array may have been removed in the meantime; revalidate now
	 * that the lock is held.
	 */
	if (!ipc_valid_object(&sma->sem_perm))
		goto out_unlock;

	/*
	 * semid of the undo structure is presumably set to -1 when the
	 * array it belonged to was removed (TODO confirm against
	 * freeary) — in that case the operation must fail with EIDRM.
	 */
	if (un && un->semid == -1)
		goto out_unlock;

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid(current);
	queue.alter = alter;
	queue.dupsop = dupsop;

	error = perform_atomic_semop(sma, &queue);
	if (error == 0) {
		/* non-blocking success path */
		DEFINE_WAKE_Q(wake_q);

		/*
		 * If the operation was an alter, other blocked tasks may
		 * now be runnable: collect them into wake_q and wake them
		 * only after dropping the locks.
		 */
		if (alter)
			do_smart_update(sma, sops, nsops, 1, &wake_q);
		else
			set_semotime(sma, sops);

		sem_unlock(sma, locknum);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		goto out;
	}
	if (error < 0) /* hard failure, non-blocking */
		goto out_unlock;

	/*
	 * The operation cannot proceed right now: queue ourselves.
	 * Single-sem operations go on the per-semaphore lists (unless the
	 * array is already in complex mode); multi-sem operations go on
	 * the array-wide lists and bump complex_count.
	 */
	if (nsops == 1) {
		struct sem *curr;
		int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
		curr = &sma->sems[idx];

		if (alter) {
			if (sma->complex_count) {
				list_add_tail(&queue.list,
						&sma->pending_alter);
			} else {
				/* per-semaphore queue while in simple mode */
				list_add_tail(&queue.list,
						&curr->pending_alter);
			}
		} else {
			list_add_tail(&queue.list, &curr->pending_const);
		}
	} else {
		if (!sma->complex_count)
			merge_queues(sma);

		if (alter)
			list_add_tail(&queue.list, &sma->pending_alter);
		else
			list_add_tail(&queue.list, &sma->pending_const);

		sma->complex_count++;
	}

	do {
		/* memory ordering ensured by the lock in sem_lock() */
		WRITE_ONCE(queue.status, -EINTR);
		queue.sleeper = current;

		/* memory ordering is ensured by the lock in sem_lock() */
		__set_current_state(TASK_INTERRUPTIBLE);
		sem_unlock(sma, locknum);
		rcu_read_unlock();

		timed_out = !schedule_hrtimeout_range(exp,
				current->timer_slack_ns, HRTIMER_MODE_ABS);

		/*
		 * Fastpath: we hold no locks here, so queue.status can be
		 * changed concurrently by a waker.  If it is no longer
		 * -EINTR, the operation was completed (or failed) on our
		 * behalf and the result can be returned directly.
		 */
		error = READ_ONCE(queue.status);
		if (error != -EINTR) {
			/* see comment at perform_atomic_semop() re ordering */
			smp_acquire__after_ctrl_dep();
			goto out;
		}

		rcu_read_lock();
		locknum = sem_lock(sma, sops, nsops);

		if (!ipc_valid_object(&sma->sem_perm))
			goto out_unlock;

		/*
		 * Re-read under the lock: a wakeup may have slipped in
		 * between the lockless check above and taking the lock.
		 */
		error = READ_ONCE(queue.status);

		/*
		 * If queue.status != -EINTR we are woken up by another
		 * process. Leave without unlink_queue(), but with the
		 * stale error code.
		 */
		if (error != -EINTR)
			goto out_unlock;

		/* the sleep ended without completion */
		if (timed_out)
			error = -EAGAIN;
	} while (error == -EINTR && !signal_pending(current));

	/* still queued: remove ourselves before returning the error */
	unlink_queue(sma, &queue);

out_unlock:
	sem_unlock(sma, locknum);
	rcu_read_unlock();
out:
	return error;
}
2222
2223 static long do_semtimedop(int semid, struct sembuf __user *tsops,
2224 unsigned nsops, const struct timespec64 *timeout)
2225 {
2226 struct sembuf fast_sops[SEMOPM_FAST];
2227 struct sembuf *sops = fast_sops;
2228 struct ipc_namespace *ns;
2229 int ret;
2230
2231 ns = current->nsproxy->ipc_ns;
2232 if (nsops > ns->sc_semopm)
2233 return -E2BIG;
2234 if (nsops < 1)
2235 return -EINVAL;
2236
2237 if (nsops > SEMOPM_FAST) {
2238 sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
2239 if (sops == NULL)
2240 return -ENOMEM;
2241 }
2242
2243 if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
2244 ret = -EFAULT;
2245 goto out_free;
2246 }
2247
2248 ret = __do_semtimedop(semid, sops, nsops, timeout, ns);
2249
2250 out_free:
2251 if (sops != fast_sops)
2252 kvfree(sops);
2253
2254 return ret;
2255 }
2256
2257 long ksys_semtimedop(int semid, struct sembuf __user *tsops,
2258 unsigned int nsops, const struct __kernel_timespec __user *timeout)
2259 {
2260 if (timeout) {
2261 struct timespec64 ts;
2262 if (get_timespec64(&ts, timeout))
2263 return -EFAULT;
2264 return do_semtimedop(semid, tsops, nsops, &ts);
2265 }
2266 return do_semtimedop(semid, tsops, nsops, NULL);
2267 }
2268
/* semtimedop(2): thin syscall wrapper */
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
{
	return ksys_semtimedop(semid, tsops, nsops, timeout);
}
2274
2275 #ifdef CONFIG_COMPAT_32BIT_TIME
2276 long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
2277 unsigned int nsops,
2278 const struct old_timespec32 __user *timeout)
2279 {
2280 if (timeout) {
2281 struct timespec64 ts;
2282 if (get_old_timespec32(&ts, timeout))
2283 return -EFAULT;
2284 return do_semtimedop(semid, tsems, nsops, &ts);
2285 }
2286 return do_semtimedop(semid, tsems, nsops, NULL);
2287 }
2288
/* semtimedop_time32(2): 32-bit-time syscall wrapper */
SYSCALL_DEFINE4(semtimedop_time32, int, semid, struct sembuf __user *, tsems,
		unsigned int, nsops,
		const struct old_timespec32 __user *, timeout)
{
	return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
2295 #endif
2296
/* semop(2): semtimedop without a timeout */
SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return do_semtimedop(semid, tsops, nsops, NULL);
}
2302
2303
2304
2305
2306
2307 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2308 {
2309 struct sem_undo_list *undo_list;
2310 int error;
2311
2312 if (clone_flags & CLONE_SYSVSEM) {
2313 error = get_undo_list(&undo_list);
2314 if (error)
2315 return error;
2316 refcount_inc(&undo_list->refcnt);
2317 tsk->sysvsem.undo_list = undo_list;
2318 } else
2319 tsk->sysvsem.undo_list = NULL;
2320
2321 return 0;
2322 }
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
/*
 * exit_sem - apply and free the task's semaphore undo list at exit.
 *
 * Drops the task's reference on its undo list; when the last reference
 * goes away, every remaining sem_undo entry is applied to its array
 * (adjustments clamped to [0, SEMVMX]) and freed.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!refcount_dec_and_test(&ulp->refcnt))
		return;	/* other tasks still share the list */

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid, i;
		DEFINE_WAKE_Q(wake_q);

		cond_resched();

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc) {
			/*
			 * The list is empty.  The lock/unlock pair below
			 * presumably synchronizes with a concurrent
			 * freeary() that is still finishing its
			 * list_del_rcu under ulp->lock, so that kfree(ulp)
			 * below cannot race with it — TODO confirm against
			 * freeary.
			 */
			spin_lock(&ulp->lock);
			spin_unlock(&ulp->lock);
			rcu_read_unlock();
			break;
		}
		/* read semid under the lock; it can change to -1 concurrently */
		spin_lock(&ulp->lock);
		semid = un->semid;
		spin_unlock(&ulp->lock);

		/* semid == -1: the array this undo belonged to is gone */
		if (semid == -1) {
			rcu_read_unlock();
			continue;
		}

		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
		/* the array was removed (and possibly its id reused) */
		if (IS_ERR(sma)) {
			rcu_read_unlock();
			continue;
		}

		sem_lock(sma, NULL, -1);
		/* revalidate under the lock: removal may have raced in */
		if (!ipc_valid_object(&sma->sem_perm)) {
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}
		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/*
			 * The undo entry vanished between the unlocked peek
			 * above and the locked lookup; nothing left to do.
			 */
			sem_unlock(sma, -1);
			rcu_read_unlock();
			continue;
		}

		/* remove un from the two linked lists */
		ipc_assert_locked_object(&sma->sem_perm);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* apply the recorded adjustments to the semaphore values */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem *semaphore = &sma->sems[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Clamp the adjusted value into the valid
				 * range [0, SEMVMX]; the undo adjustment
				 * may over/undershoot if other tasks
				 * changed the semaphore in the meantime.
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				ipc_update_pid(&semaphore->sempid, task_tgid(current));
			}
		}
		/* the adjustments may unblock waiters; wake them after unlocking */
		do_smart_update(sma, NULL, 0, 1, &wake_q);
		sem_unlock(sma, -1);
		rcu_read_unlock();
		wake_up_q(&wake_q);

		/* free after a grace period: RCU readers may still see un */
		kvfree_rcu(un, rcu);
	}
	kfree(ulp);
}
2448
2449 #ifdef CONFIG_PROC_FS
/* /proc/sysvipc/sem: print one line per semaphore array */
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
	time64_t sem_otime;

	/*
	 * get_semotime() reads the per-semaphore sem_otime fields without
	 * taking each semaphore's spinlock; complexmode_enter() presumably
	 * switches the array to global-lock mode so those reads are stable
	 * — TODO confirm against the sem_lock/complexmode implementation.
	 */
	complexmode_enter(sma);

	sem_otime = get_semotime(sma);

	seq_printf(s,
		   "%10d %10d  %4o %10u %5u %5u %5u %5u %10llu %10llu\n",
		   sma->sem_perm.key,
		   sma->sem_perm.id,
		   sma->sem_perm.mode,
		   sma->sem_nsems,
		   from_kuid_munged(user_ns, sma->sem_perm.uid),
		   from_kgid_munged(user_ns, sma->sem_perm.gid),
		   from_kuid_munged(user_ns, sma->sem_perm.cuid),
		   from_kgid_munged(user_ns, sma->sem_perm.cgid),
		   sem_otime,
		   sma->sem_ctime);

	complexmode_tryleave(sma);

	return 0;
}
2485 #endif