0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0044
0045 #include <linux/fdtable.h>
0046 #include <linux/file.h>
0047 #include <linux/freezer.h>
0048 #include <linux/fs.h>
0049 #include <linux/list.h>
0050 #include <linux/miscdevice.h>
0051 #include <linux/module.h>
0052 #include <linux/mutex.h>
0053 #include <linux/nsproxy.h>
0054 #include <linux/poll.h>
0055 #include <linux/debugfs.h>
0056 #include <linux/rbtree.h>
0057 #include <linux/sched/signal.h>
0058 #include <linux/sched/mm.h>
0059 #include <linux/seq_file.h>
0060 #include <linux/string.h>
0061 #include <linux/uaccess.h>
0062 #include <linux/pid_namespace.h>
0063 #include <linux/security.h>
0064 #include <linux/spinlock.h>
0065 #include <linux/ratelimit.h>
0066 #include <linux/syscalls.h>
0067 #include <linux/task_work.h>
0068 #include <linux/sizes.h>
0069
0070 #include <uapi/linux/android/binder.h>
0071
0072 #include <linux/cacheflush.h>
0073
0074 #include "binder_internal.h"
0075 #include "binder_trace.h"
0076
0077 static HLIST_HEAD(binder_deferred_list);
0078 static DEFINE_MUTEX(binder_deferred_lock);
0079
0080 static HLIST_HEAD(binder_devices);
0081 static HLIST_HEAD(binder_procs);
0082 static DEFINE_MUTEX(binder_procs_lock);
0083
0084 static HLIST_HEAD(binder_dead_nodes);
0085 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
0086
0087 static struct dentry *binder_debugfs_dir_entry_root;
0088 static struct dentry *binder_debugfs_dir_entry_proc;
0089 static atomic_t binder_last_id;
0090
0091 static int proc_show(struct seq_file *m, void *unused);
0092 DEFINE_SHOW_ATTRIBUTE(proc);
0093
0094 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
0095
0096 enum {
0097 BINDER_DEBUG_USER_ERROR = 1U << 0,
0098 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
0099 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
0100 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
0101 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
0102 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
0103 BINDER_DEBUG_READ_WRITE = 1U << 6,
0104 BINDER_DEBUG_USER_REFS = 1U << 7,
0105 BINDER_DEBUG_THREADS = 1U << 8,
0106 BINDER_DEBUG_TRANSACTION = 1U << 9,
0107 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
0108 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
0109 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
0110 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
0111 BINDER_DEBUG_SPINLOCKS = 1U << 14,
0112 };
0113 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
0114 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
0115 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
0116
0117 char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
0118 module_param_named(devices, binder_devices_param, charp, 0444);
0119
0120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
0121 static int binder_stop_on_user_error;
0122
0123 static int binder_set_stop_on_user_error(const char *val,
0124 const struct kernel_param *kp)
0125 {
0126 int ret;
0127
0128 ret = param_set_int(val, kp);
0129 if (binder_stop_on_user_error < 2)
0130 wake_up(&binder_user_error_wait);
0131 return ret;
0132 }
0133 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
0134 param_get_int, &binder_stop_on_user_error, 0644);
0135
0136 static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
0137 {
0138 struct va_format vaf;
0139 va_list args;
0140
0141 if (binder_debug_mask & mask) {
0142 va_start(args, format);
0143 vaf.va = &args;
0144 vaf.fmt = format;
0145 pr_info_ratelimited("%pV", &vaf);
0146 va_end(args);
0147 }
0148 }
0149
0150 #define binder_txn_error(x...) \
0151 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
0152
0153 static __printf(1, 2) void binder_user_error(const char *format, ...)
0154 {
0155 struct va_format vaf;
0156 va_list args;
0157
0158 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
0159 va_start(args, format);
0160 vaf.va = &args;
0161 vaf.fmt = format;
0162 pr_info_ratelimited("%pV", &vaf);
0163 va_end(args);
0164 }
0165
0166 if (binder_stop_on_user_error)
0167 binder_stop_on_user_error = 2;
0168 }
0169
0170 #define binder_set_extended_error(ee, _id, _command, _param) \
0171 do { \
0172 (ee)->id = _id; \
0173 (ee)->command = _command; \
0174 (ee)->param = _param; \
0175 } while (0)
0176
0177 #define to_flat_binder_object(hdr) \
0178 container_of(hdr, struct flat_binder_object, hdr)
0179
0180 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
0181
0182 #define to_binder_buffer_object(hdr) \
0183 container_of(hdr, struct binder_buffer_object, hdr)
0184
0185 #define to_binder_fd_array_object(hdr) \
0186 container_of(hdr, struct binder_fd_array_object, hdr)
0187
0188 static struct binder_stats binder_stats;
0189
0190 static inline void binder_stats_deleted(enum binder_stat_types type)
0191 {
0192 atomic_inc(&binder_stats.obj_deleted[type]);
0193 }
0194
0195 static inline void binder_stats_created(enum binder_stat_types type)
0196 {
0197 atomic_inc(&binder_stats.obj_created[type]);
0198 }
0199
0200 struct binder_transaction_log_entry {
0201 int debug_id;
0202 int debug_id_done;
0203 int call_type;
0204 int from_proc;
0205 int from_thread;
0206 int target_handle;
0207 int to_proc;
0208 int to_thread;
0209 int to_node;
0210 int data_size;
0211 int offsets_size;
0212 int return_error_line;
0213 uint32_t return_error;
0214 uint32_t return_error_param;
0215 char context_name[BINDERFS_MAX_NAME + 1];
0216 };
0217
0218 struct binder_transaction_log {
0219 atomic_t cur;
0220 bool full;
0221 struct binder_transaction_log_entry entry[32];
0222 };
0223
0224 static struct binder_transaction_log binder_transaction_log;
0225 static struct binder_transaction_log binder_transaction_log_failed;
0226
0227 static struct binder_transaction_log_entry *binder_transaction_log_add(
0228 struct binder_transaction_log *log)
0229 {
0230 struct binder_transaction_log_entry *e;
0231 unsigned int cur = atomic_inc_return(&log->cur);
0232
0233 if (cur >= ARRAY_SIZE(log->entry))
0234 log->full = true;
0235 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
0236 WRITE_ONCE(e->debug_id_done, 0);
0237
0238
0239
0240
0241
0242 smp_wmb();
0243 memset(e, 0, sizeof(*e));
0244 return e;
0245 }
0246
0247 enum binder_deferred_state {
0248 BINDER_DEFERRED_FLUSH = 0x01,
0249 BINDER_DEFERRED_RELEASE = 0x02,
0250 };
0251
0252 enum {
0253 BINDER_LOOPER_STATE_REGISTERED = 0x01,
0254 BINDER_LOOPER_STATE_ENTERED = 0x02,
0255 BINDER_LOOPER_STATE_EXITED = 0x04,
0256 BINDER_LOOPER_STATE_INVALID = 0x08,
0257 BINDER_LOOPER_STATE_WAITING = 0x10,
0258 BINDER_LOOPER_STATE_POLL = 0x20,
0259 };
0260
0261
0262
0263
0264
0265
0266
0267
0268 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
0269 static void
0270 _binder_proc_lock(struct binder_proc *proc, int line)
0271 __acquires(&proc->outer_lock)
0272 {
0273 binder_debug(BINDER_DEBUG_SPINLOCKS,
0274 "%s: line=%d\n", __func__, line);
0275 spin_lock(&proc->outer_lock);
0276 }
0277
0278
0279
0280
0281
0282
0283
0284 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
0285 static void
0286 _binder_proc_unlock(struct binder_proc *proc, int line)
0287 __releases(&proc->outer_lock)
0288 {
0289 binder_debug(BINDER_DEBUG_SPINLOCKS,
0290 "%s: line=%d\n", __func__, line);
0291 spin_unlock(&proc->outer_lock);
0292 }
0293
0294
0295
0296
0297
0298
0299
0300 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
0301 static void
0302 _binder_inner_proc_lock(struct binder_proc *proc, int line)
0303 __acquires(&proc->inner_lock)
0304 {
0305 binder_debug(BINDER_DEBUG_SPINLOCKS,
0306 "%s: line=%d\n", __func__, line);
0307 spin_lock(&proc->inner_lock);
0308 }
0309
0310
0311
0312
0313
0314
0315
0316 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
0317 static void
0318 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
0319 __releases(&proc->inner_lock)
0320 {
0321 binder_debug(BINDER_DEBUG_SPINLOCKS,
0322 "%s: line=%d\n", __func__, line);
0323 spin_unlock(&proc->inner_lock);
0324 }
0325
0326
0327
0328
0329
0330
0331
0332 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
0333 static void
0334 _binder_node_lock(struct binder_node *node, int line)
0335 __acquires(&node->lock)
0336 {
0337 binder_debug(BINDER_DEBUG_SPINLOCKS,
0338 "%s: line=%d\n", __func__, line);
0339 spin_lock(&node->lock);
0340 }
0341
0342
0343
0344
0345
0346
0347
0348 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
0349 static void
0350 _binder_node_unlock(struct binder_node *node, int line)
0351 __releases(&node->lock)
0352 {
0353 binder_debug(BINDER_DEBUG_SPINLOCKS,
0354 "%s: line=%d\n", __func__, line);
0355 spin_unlock(&node->lock);
0356 }
0357
0358
0359
0360
0361
0362
0363
0364
0365 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
0366 static void
0367 _binder_node_inner_lock(struct binder_node *node, int line)
0368 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
0369 {
0370 binder_debug(BINDER_DEBUG_SPINLOCKS,
0371 "%s: line=%d\n", __func__, line);
0372 spin_lock(&node->lock);
0373 if (node->proc)
0374 binder_inner_proc_lock(node->proc);
0375 else
0376
0377 __acquire(&node->proc->inner_lock);
0378 }
0379
0380
0381
0382
0383
0384
0385
0386 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
0387 static void
0388 _binder_node_inner_unlock(struct binder_node *node, int line)
0389 __releases(&node->lock) __releases(&node->proc->inner_lock)
0390 {
0391 struct binder_proc *proc = node->proc;
0392
0393 binder_debug(BINDER_DEBUG_SPINLOCKS,
0394 "%s: line=%d\n", __func__, line);
0395 if (proc)
0396 binder_inner_proc_unlock(proc);
0397 else
0398
0399 __release(&node->proc->inner_lock);
0400 spin_unlock(&node->lock);
0401 }
0402
0403 static bool binder_worklist_empty_ilocked(struct list_head *list)
0404 {
0405 return list_empty(list);
0406 }
0407
0408
0409
0410
0411
0412
0413
0414
0415 static bool binder_worklist_empty(struct binder_proc *proc,
0416 struct list_head *list)
0417 {
0418 bool ret;
0419
0420 binder_inner_proc_lock(proc);
0421 ret = binder_worklist_empty_ilocked(list);
0422 binder_inner_proc_unlock(proc);
0423 return ret;
0424 }
0425
0426
0427
0428
0429
0430
0431
0432
0433
0434
0435
0436 static void
0437 binder_enqueue_work_ilocked(struct binder_work *work,
0438 struct list_head *target_list)
0439 {
0440 BUG_ON(target_list == NULL);
0441 BUG_ON(work->entry.next && !list_empty(&work->entry));
0442 list_add_tail(&work->entry, target_list);
0443 }
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456 static void
0457 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
0458 struct binder_work *work)
0459 {
0460 WARN_ON(!list_empty(&thread->waiting_thread_node));
0461 binder_enqueue_work_ilocked(work, &thread->todo);
0462 }
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474 static void
0475 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
0476 struct binder_work *work)
0477 {
0478 WARN_ON(!list_empty(&thread->waiting_thread_node));
0479 binder_enqueue_work_ilocked(work, &thread->todo);
0480 thread->process_todo = true;
0481 }
0482
0483
0484
0485
0486
0487
0488
0489
0490
0491 static void
0492 binder_enqueue_thread_work(struct binder_thread *thread,
0493 struct binder_work *work)
0494 {
0495 binder_inner_proc_lock(thread->proc);
0496 binder_enqueue_thread_work_ilocked(thread, work);
0497 binder_inner_proc_unlock(thread->proc);
0498 }
0499
0500 static void
0501 binder_dequeue_work_ilocked(struct binder_work *work)
0502 {
0503 list_del_init(&work->entry);
0504 }
0505
0506
0507
0508
0509
0510
0511
0512
0513
0514 static void
0515 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
0516 {
0517 binder_inner_proc_lock(proc);
0518 binder_dequeue_work_ilocked(work);
0519 binder_inner_proc_unlock(proc);
0520 }
0521
0522 static struct binder_work *binder_dequeue_work_head_ilocked(
0523 struct list_head *list)
0524 {
0525 struct binder_work *w;
0526
0527 w = list_first_entry_or_null(list, struct binder_work, entry);
0528 if (w)
0529 list_del_init(&w->entry);
0530 return w;
0531 }
0532
0533 static void
0534 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
0535 static void binder_free_thread(struct binder_thread *thread);
0536 static void binder_free_proc(struct binder_proc *proc);
0537 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
0538
0539 static bool binder_has_work_ilocked(struct binder_thread *thread,
0540 bool do_proc_work)
0541 {
0542 return thread->process_todo ||
0543 thread->looper_need_return ||
0544 (do_proc_work &&
0545 !binder_worklist_empty_ilocked(&thread->proc->todo));
0546 }
0547
0548 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
0549 {
0550 bool has_work;
0551
0552 binder_inner_proc_lock(thread->proc);
0553 has_work = binder_has_work_ilocked(thread, do_proc_work);
0554 binder_inner_proc_unlock(thread->proc);
0555
0556 return has_work;
0557 }
0558
0559 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
0560 {
0561 return !thread->transaction_stack &&
0562 binder_worklist_empty_ilocked(&thread->todo) &&
0563 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
0564 BINDER_LOOPER_STATE_REGISTERED));
0565 }
0566
0567 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
0568 bool sync)
0569 {
0570 struct rb_node *n;
0571 struct binder_thread *thread;
0572
0573 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
0574 thread = rb_entry(n, struct binder_thread, rb_node);
0575 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
0576 binder_available_for_proc_work_ilocked(thread)) {
0577 if (sync)
0578 wake_up_interruptible_sync(&thread->wait);
0579 else
0580 wake_up_interruptible(&thread->wait);
0581 }
0582 }
0583 }
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597 static struct binder_thread *
0598 binder_select_thread_ilocked(struct binder_proc *proc)
0599 {
0600 struct binder_thread *thread;
0601
0602 assert_spin_locked(&proc->inner_lock);
0603 thread = list_first_entry_or_null(&proc->waiting_threads,
0604 struct binder_thread,
0605 waiting_thread_node);
0606
0607 if (thread)
0608 list_del_init(&thread->waiting_thread_node);
0609
0610 return thread;
0611 }
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
0630 struct binder_thread *thread,
0631 bool sync)
0632 {
0633 assert_spin_locked(&proc->inner_lock);
0634
0635 if (thread) {
0636 if (sync)
0637 wake_up_interruptible_sync(&thread->wait);
0638 else
0639 wake_up_interruptible(&thread->wait);
0640 return;
0641 }
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
0655
0656 binder_wakeup_poll_threads_ilocked(proc, sync);
0657 }
0658
0659 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
0660 {
0661 struct binder_thread *thread = binder_select_thread_ilocked(proc);
0662
0663 binder_wakeup_thread_ilocked(proc, thread, false);
0664 }
0665
0666 static void binder_set_nice(long nice)
0667 {
0668 long min_nice;
0669
0670 if (can_nice(current, nice)) {
0671 set_user_nice(current, nice);
0672 return;
0673 }
0674 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
0675 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
0676 "%d: nice value %ld not allowed use %ld instead\n",
0677 current->pid, nice, min_nice);
0678 set_user_nice(current, min_nice);
0679 if (min_nice <= MAX_NICE)
0680 return;
0681 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
0682 }
0683
0684 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
0685 binder_uintptr_t ptr)
0686 {
0687 struct rb_node *n = proc->nodes.rb_node;
0688 struct binder_node *node;
0689
0690 assert_spin_locked(&proc->inner_lock);
0691
0692 while (n) {
0693 node = rb_entry(n, struct binder_node, rb_node);
0694
0695 if (ptr < node->ptr)
0696 n = n->rb_left;
0697 else if (ptr > node->ptr)
0698 n = n->rb_right;
0699 else {
0700
0701
0702
0703
0704
0705 binder_inc_node_tmpref_ilocked(node);
0706 return node;
0707 }
0708 }
0709 return NULL;
0710 }
0711
0712 static struct binder_node *binder_get_node(struct binder_proc *proc,
0713 binder_uintptr_t ptr)
0714 {
0715 struct binder_node *node;
0716
0717 binder_inner_proc_lock(proc);
0718 node = binder_get_node_ilocked(proc, ptr);
0719 binder_inner_proc_unlock(proc);
0720 return node;
0721 }
0722
0723 static struct binder_node *binder_init_node_ilocked(
0724 struct binder_proc *proc,
0725 struct binder_node *new_node,
0726 struct flat_binder_object *fp)
0727 {
0728 struct rb_node **p = &proc->nodes.rb_node;
0729 struct rb_node *parent = NULL;
0730 struct binder_node *node;
0731 binder_uintptr_t ptr = fp ? fp->binder : 0;
0732 binder_uintptr_t cookie = fp ? fp->cookie : 0;
0733 __u32 flags = fp ? fp->flags : 0;
0734
0735 assert_spin_locked(&proc->inner_lock);
0736
0737 while (*p) {
0738
0739 parent = *p;
0740 node = rb_entry(parent, struct binder_node, rb_node);
0741
0742 if (ptr < node->ptr)
0743 p = &(*p)->rb_left;
0744 else if (ptr > node->ptr)
0745 p = &(*p)->rb_right;
0746 else {
0747
0748
0749
0750
0751
0752 binder_inc_node_tmpref_ilocked(node);
0753 return node;
0754 }
0755 }
0756 node = new_node;
0757 binder_stats_created(BINDER_STAT_NODE);
0758 node->tmp_refs++;
0759 rb_link_node(&node->rb_node, parent, p);
0760 rb_insert_color(&node->rb_node, &proc->nodes);
0761 node->debug_id = atomic_inc_return(&binder_last_id);
0762 node->proc = proc;
0763 node->ptr = ptr;
0764 node->cookie = cookie;
0765 node->work.type = BINDER_WORK_NODE;
0766 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
0767 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
0768 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
0769 spin_lock_init(&node->lock);
0770 INIT_LIST_HEAD(&node->work.entry);
0771 INIT_LIST_HEAD(&node->async_todo);
0772 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
0773 "%d:%d node %d u%016llx c%016llx created\n",
0774 proc->pid, current->pid, node->debug_id,
0775 (u64)node->ptr, (u64)node->cookie);
0776
0777 return node;
0778 }
0779
0780 static struct binder_node *binder_new_node(struct binder_proc *proc,
0781 struct flat_binder_object *fp)
0782 {
0783 struct binder_node *node;
0784 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
0785
0786 if (!new_node)
0787 return NULL;
0788 binder_inner_proc_lock(proc);
0789 node = binder_init_node_ilocked(proc, new_node, fp);
0790 binder_inner_proc_unlock(proc);
0791 if (node != new_node)
0792
0793
0794
0795 kfree(new_node);
0796
0797 return node;
0798 }
0799
0800 static void binder_free_node(struct binder_node *node)
0801 {
0802 kfree(node);
0803 binder_stats_deleted(BINDER_STAT_NODE);
0804 }
0805
0806 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
0807 int internal,
0808 struct list_head *target_list)
0809 {
0810 struct binder_proc *proc = node->proc;
0811
0812 assert_spin_locked(&node->lock);
0813 if (proc)
0814 assert_spin_locked(&proc->inner_lock);
0815 if (strong) {
0816 if (internal) {
0817 if (target_list == NULL &&
0818 node->internal_strong_refs == 0 &&
0819 !(node->proc &&
0820 node == node->proc->context->binder_context_mgr_node &&
0821 node->has_strong_ref)) {
0822 pr_err("invalid inc strong node for %d\n",
0823 node->debug_id);
0824 return -EINVAL;
0825 }
0826 node->internal_strong_refs++;
0827 } else
0828 node->local_strong_refs++;
0829 if (!node->has_strong_ref && target_list) {
0830 struct binder_thread *thread = container_of(target_list,
0831 struct binder_thread, todo);
0832 binder_dequeue_work_ilocked(&node->work);
0833 BUG_ON(&thread->todo != target_list);
0834 binder_enqueue_deferred_thread_work_ilocked(thread,
0835 &node->work);
0836 }
0837 } else {
0838 if (!internal)
0839 node->local_weak_refs++;
0840 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
0841 if (target_list == NULL) {
0842 pr_err("invalid inc weak node for %d\n",
0843 node->debug_id);
0844 return -EINVAL;
0845 }
0846
0847
0848
0849 binder_enqueue_work_ilocked(&node->work, target_list);
0850 }
0851 }
0852 return 0;
0853 }
0854
0855 static int binder_inc_node(struct binder_node *node, int strong, int internal,
0856 struct list_head *target_list)
0857 {
0858 int ret;
0859
0860 binder_node_inner_lock(node);
0861 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
0862 binder_node_inner_unlock(node);
0863
0864 return ret;
0865 }
0866
0867 static bool binder_dec_node_nilocked(struct binder_node *node,
0868 int strong, int internal)
0869 {
0870 struct binder_proc *proc = node->proc;
0871
0872 assert_spin_locked(&node->lock);
0873 if (proc)
0874 assert_spin_locked(&proc->inner_lock);
0875 if (strong) {
0876 if (internal)
0877 node->internal_strong_refs--;
0878 else
0879 node->local_strong_refs--;
0880 if (node->local_strong_refs || node->internal_strong_refs)
0881 return false;
0882 } else {
0883 if (!internal)
0884 node->local_weak_refs--;
0885 if (node->local_weak_refs || node->tmp_refs ||
0886 !hlist_empty(&node->refs))
0887 return false;
0888 }
0889
0890 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
0891 if (list_empty(&node->work.entry)) {
0892 binder_enqueue_work_ilocked(&node->work, &proc->todo);
0893 binder_wakeup_proc_ilocked(proc);
0894 }
0895 } else {
0896 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
0897 !node->local_weak_refs && !node->tmp_refs) {
0898 if (proc) {
0899 binder_dequeue_work_ilocked(&node->work);
0900 rb_erase(&node->rb_node, &proc->nodes);
0901 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
0902 "refless node %d deleted\n",
0903 node->debug_id);
0904 } else {
0905 BUG_ON(!list_empty(&node->work.entry));
0906 spin_lock(&binder_dead_nodes_lock);
0907
0908
0909
0910
0911 if (node->tmp_refs) {
0912 spin_unlock(&binder_dead_nodes_lock);
0913 return false;
0914 }
0915 hlist_del(&node->dead_node);
0916 spin_unlock(&binder_dead_nodes_lock);
0917 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
0918 "dead node %d deleted\n",
0919 node->debug_id);
0920 }
0921 return true;
0922 }
0923 }
0924 return false;
0925 }
0926
0927 static void binder_dec_node(struct binder_node *node, int strong, int internal)
0928 {
0929 bool free_node;
0930
0931 binder_node_inner_lock(node);
0932 free_node = binder_dec_node_nilocked(node, strong, internal);
0933 binder_node_inner_unlock(node);
0934 if (free_node)
0935 binder_free_node(node);
0936 }
0937
0938 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
0939 {
0940
0941
0942
0943
0944
0945 node->tmp_refs++;
0946 }
0947
0948
0949
0950
0951
0952
0953
0954
0955
0956
0957
0958
0959
0960
0961 static void binder_inc_node_tmpref(struct binder_node *node)
0962 {
0963 binder_node_lock(node);
0964 if (node->proc)
0965 binder_inner_proc_lock(node->proc);
0966 else
0967 spin_lock(&binder_dead_nodes_lock);
0968 binder_inc_node_tmpref_ilocked(node);
0969 if (node->proc)
0970 binder_inner_proc_unlock(node->proc);
0971 else
0972 spin_unlock(&binder_dead_nodes_lock);
0973 binder_node_unlock(node);
0974 }
0975
0976
0977
0978
0979
0980
0981
0982 static void binder_dec_node_tmpref(struct binder_node *node)
0983 {
0984 bool free_node;
0985
0986 binder_node_inner_lock(node);
0987 if (!node->proc)
0988 spin_lock(&binder_dead_nodes_lock);
0989 else
0990 __acquire(&binder_dead_nodes_lock);
0991 node->tmp_refs--;
0992 BUG_ON(node->tmp_refs < 0);
0993 if (!node->proc)
0994 spin_unlock(&binder_dead_nodes_lock);
0995 else
0996 __release(&binder_dead_nodes_lock);
0997
0998
0999
1000
1001
1002
1003 free_node = binder_dec_node_nilocked(node, 0, 1);
1004 binder_node_inner_unlock(node);
1005 if (free_node)
1006 binder_free_node(node);
1007 }
1008
1009 static void binder_put_node(struct binder_node *node)
1010 {
1011 binder_dec_node_tmpref(node);
1012 }
1013
1014 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1015 u32 desc, bool need_strong_ref)
1016 {
1017 struct rb_node *n = proc->refs_by_desc.rb_node;
1018 struct binder_ref *ref;
1019
1020 while (n) {
1021 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1022
1023 if (desc < ref->data.desc) {
1024 n = n->rb_left;
1025 } else if (desc > ref->data.desc) {
1026 n = n->rb_right;
1027 } else if (need_strong_ref && !ref->data.strong) {
1028 binder_user_error("tried to use weak ref as strong ref\n");
1029 return NULL;
1030 } else {
1031 return ref;
1032 }
1033 }
1034 return NULL;
1035 }
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055 static struct binder_ref *binder_get_ref_for_node_olocked(
1056 struct binder_proc *proc,
1057 struct binder_node *node,
1058 struct binder_ref *new_ref)
1059 {
1060 struct binder_context *context = proc->context;
1061 struct rb_node **p = &proc->refs_by_node.rb_node;
1062 struct rb_node *parent = NULL;
1063 struct binder_ref *ref;
1064 struct rb_node *n;
1065
1066 while (*p) {
1067 parent = *p;
1068 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1069
1070 if (node < ref->node)
1071 p = &(*p)->rb_left;
1072 else if (node > ref->node)
1073 p = &(*p)->rb_right;
1074 else
1075 return ref;
1076 }
1077 if (!new_ref)
1078 return NULL;
1079
1080 binder_stats_created(BINDER_STAT_REF);
1081 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1082 new_ref->proc = proc;
1083 new_ref->node = node;
1084 rb_link_node(&new_ref->rb_node_node, parent, p);
1085 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1086
1087 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1088 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1089 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1090 if (ref->data.desc > new_ref->data.desc)
1091 break;
1092 new_ref->data.desc = ref->data.desc + 1;
1093 }
1094
1095 p = &proc->refs_by_desc.rb_node;
1096 while (*p) {
1097 parent = *p;
1098 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1099
1100 if (new_ref->data.desc < ref->data.desc)
1101 p = &(*p)->rb_left;
1102 else if (new_ref->data.desc > ref->data.desc)
1103 p = &(*p)->rb_right;
1104 else
1105 BUG();
1106 }
1107 rb_link_node(&new_ref->rb_node_desc, parent, p);
1108 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1109
1110 binder_node_lock(node);
1111 hlist_add_head(&new_ref->node_entry, &node->refs);
1112
1113 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1114 "%d new ref %d desc %d for node %d\n",
1115 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1116 node->debug_id);
1117 binder_node_unlock(node);
1118 return new_ref;
1119 }
1120
1121 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1122 {
1123 bool delete_node = false;
1124
1125 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1126 "%d delete ref %d desc %d for node %d\n",
1127 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1128 ref->node->debug_id);
1129
1130 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1131 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1132
1133 binder_node_inner_lock(ref->node);
1134 if (ref->data.strong)
1135 binder_dec_node_nilocked(ref->node, 1, 1);
1136
1137 hlist_del(&ref->node_entry);
1138 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1139 binder_node_inner_unlock(ref->node);
1140
1141
1142
1143 if (!delete_node) {
1144
1145
1146
1147
1148
1149 ref->node = NULL;
1150 }
1151
1152 if (ref->death) {
1153 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1154 "%d delete ref %d desc %d has death notification\n",
1155 ref->proc->pid, ref->data.debug_id,
1156 ref->data.desc);
1157 binder_dequeue_work(ref->proc, &ref->death->work);
1158 binder_stats_deleted(BINDER_STAT_DEATH);
1159 }
1160 binder_stats_deleted(BINDER_STAT_REF);
1161 }
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1174 struct list_head *target_list)
1175 {
1176 int ret;
1177
1178 if (strong) {
1179 if (ref->data.strong == 0) {
1180 ret = binder_inc_node(ref->node, 1, 1, target_list);
1181 if (ret)
1182 return ret;
1183 }
1184 ref->data.strong++;
1185 } else {
1186 if (ref->data.weak == 0) {
1187 ret = binder_inc_node(ref->node, 0, 1, target_list);
1188 if (ret)
1189 return ret;
1190 }
1191 ref->data.weak++;
1192 }
1193 return 0;
1194 }
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1206 {
1207 if (strong) {
1208 if (ref->data.strong == 0) {
1209 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1210 ref->proc->pid, ref->data.debug_id,
1211 ref->data.desc, ref->data.strong,
1212 ref->data.weak);
1213 return false;
1214 }
1215 ref->data.strong--;
1216 if (ref->data.strong == 0)
1217 binder_dec_node(ref->node, strong, 1);
1218 } else {
1219 if (ref->data.weak == 0) {
1220 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1221 ref->proc->pid, ref->data.debug_id,
1222 ref->data.desc, ref->data.strong,
1223 ref->data.weak);
1224 return false;
1225 }
1226 ref->data.weak--;
1227 }
1228 if (ref->data.strong == 0 && ref->data.weak == 0) {
1229 binder_cleanup_ref_olocked(ref);
1230 return true;
1231 }
1232 return false;
1233 }
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246 static struct binder_node *binder_get_node_from_ref(
1247 struct binder_proc *proc,
1248 u32 desc, bool need_strong_ref,
1249 struct binder_ref_data *rdata)
1250 {
1251 struct binder_node *node;
1252 struct binder_ref *ref;
1253
1254 binder_proc_lock(proc);
1255 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1256 if (!ref)
1257 goto err_no_ref;
1258 node = ref->node;
1259
1260
1261
1262
1263 binder_inc_node_tmpref(node);
1264 if (rdata)
1265 *rdata = ref->data;
1266 binder_proc_unlock(proc);
1267
1268 return node;
1269
1270 err_no_ref:
1271 binder_proc_unlock(proc);
1272 return NULL;
1273 }
1274
1275
1276
1277
1278
1279
1280
1281
1282 static void binder_free_ref(struct binder_ref *ref)
1283 {
1284 if (ref->node)
1285 binder_free_node(ref->node);
1286 kfree(ref->death);
1287 kfree(ref);
1288 }
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303 static int binder_update_ref_for_handle(struct binder_proc *proc,
1304 uint32_t desc, bool increment, bool strong,
1305 struct binder_ref_data *rdata)
1306 {
1307 int ret = 0;
1308 struct binder_ref *ref;
1309 bool delete_ref = false;
1310
1311 binder_proc_lock(proc);
1312 ref = binder_get_ref_olocked(proc, desc, strong);
1313 if (!ref) {
1314 ret = -EINVAL;
1315 goto err_no_ref;
1316 }
1317 if (increment)
1318 ret = binder_inc_ref_olocked(ref, strong, NULL);
1319 else
1320 delete_ref = binder_dec_ref_olocked(ref, strong);
1321
1322 if (rdata)
1323 *rdata = ref->data;
1324 binder_proc_unlock(proc);
1325
1326 if (delete_ref)
1327 binder_free_ref(ref);
1328 return ret;
1329
1330 err_no_ref:
1331 binder_proc_unlock(proc);
1332 return ret;
1333 }
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1347 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1348 {
1349 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1350 }
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366 static int binder_inc_ref_for_node(struct binder_proc *proc,
1367 struct binder_node *node,
1368 bool strong,
1369 struct list_head *target_list,
1370 struct binder_ref_data *rdata)
1371 {
1372 struct binder_ref *ref;
1373 struct binder_ref *new_ref = NULL;
1374 int ret = 0;
1375
1376 binder_proc_lock(proc);
1377 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1378 if (!ref) {
1379 binder_proc_unlock(proc);
1380 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1381 if (!new_ref)
1382 return -ENOMEM;
1383 binder_proc_lock(proc);
1384 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1385 }
1386 ret = binder_inc_ref_olocked(ref, strong, target_list);
1387 *rdata = ref->data;
1388 if (ret && ref == new_ref) {
1389
1390
1391
1392
1393
1394
1395
1396 binder_cleanup_ref_olocked(new_ref);
1397 ref = NULL;
1398 }
1399
1400 binder_proc_unlock(proc);
1401 if (new_ref && ref != new_ref)
1402
1403
1404
1405
1406 kfree(new_ref);
1407 return ret;
1408 }
1409
1410 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1411 struct binder_transaction *t)
1412 {
1413 BUG_ON(!target_thread);
1414 assert_spin_locked(&target_thread->proc->inner_lock);
1415 BUG_ON(target_thread->transaction_stack != t);
1416 BUG_ON(target_thread->transaction_stack->from != target_thread);
1417 target_thread->transaction_stack =
1418 target_thread->transaction_stack->from_parent;
1419 t->from = NULL;
1420 }
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1435 {
1436
1437
1438
1439
1440 binder_inner_proc_lock(thread->proc);
1441 atomic_dec(&thread->tmp_ref);
1442 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1443 binder_inner_proc_unlock(thread->proc);
1444 binder_free_thread(thread);
1445 return;
1446 }
1447 binder_inner_proc_unlock(thread->proc);
1448 }
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1463 {
1464 binder_inner_proc_lock(proc);
1465 proc->tmp_ref--;
1466 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1467 !proc->tmp_ref) {
1468 binder_inner_proc_unlock(proc);
1469 binder_free_proc(proc);
1470 return;
1471 }
1472 binder_inner_proc_unlock(proc);
1473 }
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485 static struct binder_thread *binder_get_txn_from(
1486 struct binder_transaction *t)
1487 {
1488 struct binder_thread *from;
1489
1490 spin_lock(&t->lock);
1491 from = t->from;
1492 if (from)
1493 atomic_inc(&from->tmp_ref);
1494 spin_unlock(&t->lock);
1495 return from;
1496 }
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1510 struct binder_transaction *t)
1511 __acquires(&t->from->proc->inner_lock)
1512 {
1513 struct binder_thread *from;
1514
1515 from = binder_get_txn_from(t);
1516 if (!from) {
1517 __acquire(&from->proc->inner_lock);
1518 return NULL;
1519 }
1520 binder_inner_proc_lock(from->proc);
1521 if (t->from) {
1522 BUG_ON(from != t->from);
1523 return from;
1524 }
1525 binder_inner_proc_unlock(from->proc);
1526 __acquire(&from->proc->inner_lock);
1527 binder_thread_dec_tmpref(from);
1528 return NULL;
1529 }
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541 static void binder_free_txn_fixups(struct binder_transaction *t)
1542 {
1543 struct binder_txn_fd_fixup *fixup, *tmp;
1544
1545 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1546 fput(fixup->file);
1547 if (fixup->target_fd >= 0)
1548 put_unused_fd(fixup->target_fd);
1549 list_del(&fixup->fixup_entry);
1550 kfree(fixup);
1551 }
1552 }
1553
1554 static void binder_txn_latency_free(struct binder_transaction *t)
1555 {
1556 int from_proc, from_thread, to_proc, to_thread;
1557
1558 spin_lock(&t->lock);
1559 from_proc = t->from ? t->from->proc->pid : 0;
1560 from_thread = t->from ? t->from->pid : 0;
1561 to_proc = t->to_proc ? t->to_proc->pid : 0;
1562 to_thread = t->to_thread ? t->to_thread->pid : 0;
1563 spin_unlock(&t->lock);
1564
1565 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1566 }
1567
1568 static void binder_free_transaction(struct binder_transaction *t)
1569 {
1570 struct binder_proc *target_proc = t->to_proc;
1571
1572 if (target_proc) {
1573 binder_inner_proc_lock(target_proc);
1574 target_proc->outstanding_txns--;
1575 if (target_proc->outstanding_txns < 0)
1576 pr_warn("%s: Unexpected outstanding_txns %d\n",
1577 __func__, target_proc->outstanding_txns);
1578 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1579 wake_up_interruptible_all(&target_proc->freeze_wait);
1580 if (t->buffer)
1581 t->buffer->transaction = NULL;
1582 binder_inner_proc_unlock(target_proc);
1583 }
1584 if (trace_binder_txn_latency_free_enabled())
1585 binder_txn_latency_free(t);
1586
1587
1588
1589
1590 binder_free_txn_fixups(t);
1591 kfree(t);
1592 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1593 }
1594
1595 static void binder_send_failed_reply(struct binder_transaction *t,
1596 uint32_t error_code)
1597 {
1598 struct binder_thread *target_thread;
1599 struct binder_transaction *next;
1600
1601 BUG_ON(t->flags & TF_ONE_WAY);
1602 while (1) {
1603 target_thread = binder_get_txn_from_and_acq_inner(t);
1604 if (target_thread) {
1605 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1606 "send failed reply for transaction %d to %d:%d\n",
1607 t->debug_id,
1608 target_thread->proc->pid,
1609 target_thread->pid);
1610
1611 binder_pop_transaction_ilocked(target_thread, t);
1612 if (target_thread->reply_error.cmd == BR_OK) {
1613 target_thread->reply_error.cmd = error_code;
1614 binder_enqueue_thread_work_ilocked(
1615 target_thread,
1616 &target_thread->reply_error.work);
1617 wake_up_interruptible(&target_thread->wait);
1618 } else {
1619
1620
1621
1622
1623
1624
1625 pr_warn("Unexpected reply error: %u\n",
1626 target_thread->reply_error.cmd);
1627 }
1628 binder_inner_proc_unlock(target_thread->proc);
1629 binder_thread_dec_tmpref(target_thread);
1630 binder_free_transaction(t);
1631 return;
1632 }
1633 __release(&target_thread->proc->inner_lock);
1634 next = t->from_parent;
1635
1636 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1637 "send failed reply for transaction %d, target dead\n",
1638 t->debug_id);
1639
1640 binder_free_transaction(t);
1641 if (next == NULL) {
1642 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1643 "reply failed, no target thread at root\n");
1644 return;
1645 }
1646 t = next;
1647 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1648 "reply failed, no target thread -- retry %d\n",
1649 t->debug_id);
1650 }
1651 }
1652
1653
1654
1655
1656
1657
1658
1659 static void binder_cleanup_transaction(struct binder_transaction *t,
1660 const char *reason,
1661 uint32_t error_code)
1662 {
1663 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1664 binder_send_failed_reply(t, error_code);
1665 } else {
1666 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1667 "undelivered transaction %d, %s\n",
1668 t->debug_id, reason);
1669 binder_free_transaction(t);
1670 }
1671 }
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689 static size_t binder_get_object(struct binder_proc *proc,
1690 const void __user *u,
1691 struct binder_buffer *buffer,
1692 unsigned long offset,
1693 struct binder_object *object)
1694 {
1695 size_t read_size;
1696 struct binder_object_header *hdr;
1697 size_t object_size = 0;
1698
1699 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1700 if (offset > buffer->data_size || read_size < sizeof(*hdr))
1701 return 0;
1702 if (u) {
1703 if (copy_from_user(object, u + offset, read_size))
1704 return 0;
1705 } else {
1706 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1707 offset, read_size))
1708 return 0;
1709 }
1710
1711
1712 hdr = &object->hdr;
1713 switch (hdr->type) {
1714 case BINDER_TYPE_BINDER:
1715 case BINDER_TYPE_WEAK_BINDER:
1716 case BINDER_TYPE_HANDLE:
1717 case BINDER_TYPE_WEAK_HANDLE:
1718 object_size = sizeof(struct flat_binder_object);
1719 break;
1720 case BINDER_TYPE_FD:
1721 object_size = sizeof(struct binder_fd_object);
1722 break;
1723 case BINDER_TYPE_PTR:
1724 object_size = sizeof(struct binder_buffer_object);
1725 break;
1726 case BINDER_TYPE_FDA:
1727 object_size = sizeof(struct binder_fd_array_object);
1728 break;
1729 default:
1730 return 0;
1731 }
1732 if (offset <= buffer->data_size - object_size &&
1733 buffer->data_size >= object_size)
1734 return object_size;
1735 else
1736 return 0;
1737 }
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761 static struct binder_buffer_object *binder_validate_ptr(
1762 struct binder_proc *proc,
1763 struct binder_buffer *b,
1764 struct binder_object *object,
1765 binder_size_t index,
1766 binder_size_t start_offset,
1767 binder_size_t *object_offsetp,
1768 binder_size_t num_valid)
1769 {
1770 size_t object_size;
1771 binder_size_t object_offset;
1772 unsigned long buffer_offset;
1773
1774 if (index >= num_valid)
1775 return NULL;
1776
1777 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1778 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1779 b, buffer_offset,
1780 sizeof(object_offset)))
1781 return NULL;
1782 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1783 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1784 return NULL;
1785 if (object_offsetp)
1786 *object_offsetp = object_offset;
1787
1788 return &object->bbo;
1789 }
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830 static bool binder_validate_fixup(struct binder_proc *proc,
1831 struct binder_buffer *b,
1832 binder_size_t objects_start_offset,
1833 binder_size_t buffer_obj_offset,
1834 binder_size_t fixup_offset,
1835 binder_size_t last_obj_offset,
1836 binder_size_t last_min_offset)
1837 {
1838 if (!last_obj_offset) {
1839
1840 return false;
1841 }
1842
1843 while (last_obj_offset != buffer_obj_offset) {
1844 unsigned long buffer_offset;
1845 struct binder_object last_object;
1846 struct binder_buffer_object *last_bbo;
1847 size_t object_size = binder_get_object(proc, NULL, b,
1848 last_obj_offset,
1849 &last_object);
1850 if (object_size != sizeof(*last_bbo))
1851 return false;
1852
1853 last_bbo = &last_object.bbo;
1854
1855
1856
1857
1858 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1859 return false;
1860 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1861 buffer_offset = objects_start_offset +
1862 sizeof(binder_size_t) * last_bbo->parent;
1863 if (binder_alloc_copy_from_buffer(&proc->alloc,
1864 &last_obj_offset,
1865 b, buffer_offset,
1866 sizeof(last_obj_offset)))
1867 return false;
1868 }
1869 return (fixup_offset >= last_min_offset);
1870 }
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881 struct binder_task_work_cb {
1882 struct callback_head twork;
1883 struct file *file;
1884 };
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899 static void binder_do_fd_close(struct callback_head *twork)
1900 {
1901 struct binder_task_work_cb *twcb = container_of(twork,
1902 struct binder_task_work_cb, twork);
1903
1904 fput(twcb->file);
1905 kfree(twcb);
1906 }
1907
1908
1909
1910
1911
1912
1913
1914
1915 static void binder_deferred_fd_close(int fd)
1916 {
1917 struct binder_task_work_cb *twcb;
1918
1919 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1920 if (!twcb)
1921 return;
1922 init_task_work(&twcb->twork, binder_do_fd_close);
1923 twcb->file = close_fd_get_file(fd);
1924 if (twcb->file) {
1925
1926 get_file(twcb->file);
1927 filp_close(twcb->file, current->files);
1928 task_work_add(current, &twcb->twork, TWA_RESUME);
1929 } else {
1930 kfree(twcb);
1931 }
1932 }
1933
1934 static void binder_transaction_buffer_release(struct binder_proc *proc,
1935 struct binder_thread *thread,
1936 struct binder_buffer *buffer,
1937 binder_size_t failed_at,
1938 bool is_failure)
1939 {
1940 int debug_id = buffer->debug_id;
1941 binder_size_t off_start_offset, buffer_offset, off_end_offset;
1942
1943 binder_debug(BINDER_DEBUG_TRANSACTION,
1944 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1945 proc->pid, buffer->debug_id,
1946 buffer->data_size, buffer->offsets_size,
1947 (unsigned long long)failed_at);
1948
1949 if (buffer->target_node)
1950 binder_dec_node(buffer->target_node, 1, 0);
1951
1952 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1953 off_end_offset = is_failure && failed_at ? failed_at :
1954 off_start_offset + buffer->offsets_size;
1955 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1956 buffer_offset += sizeof(binder_size_t)) {
1957 struct binder_object_header *hdr;
1958 size_t object_size = 0;
1959 struct binder_object object;
1960 binder_size_t object_offset;
1961
1962 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1963 buffer, buffer_offset,
1964 sizeof(object_offset)))
1965 object_size = binder_get_object(proc, NULL, buffer,
1966 object_offset, &object);
1967 if (object_size == 0) {
1968 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1969 debug_id, (u64)object_offset, buffer->data_size);
1970 continue;
1971 }
1972 hdr = &object.hdr;
1973 switch (hdr->type) {
1974 case BINDER_TYPE_BINDER:
1975 case BINDER_TYPE_WEAK_BINDER: {
1976 struct flat_binder_object *fp;
1977 struct binder_node *node;
1978
1979 fp = to_flat_binder_object(hdr);
1980 node = binder_get_node(proc, fp->binder);
1981 if (node == NULL) {
1982 pr_err("transaction release %d bad node %016llx\n",
1983 debug_id, (u64)fp->binder);
1984 break;
1985 }
1986 binder_debug(BINDER_DEBUG_TRANSACTION,
1987 " node %d u%016llx\n",
1988 node->debug_id, (u64)node->ptr);
1989 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1990 0);
1991 binder_put_node(node);
1992 } break;
1993 case BINDER_TYPE_HANDLE:
1994 case BINDER_TYPE_WEAK_HANDLE: {
1995 struct flat_binder_object *fp;
1996 struct binder_ref_data rdata;
1997 int ret;
1998
1999 fp = to_flat_binder_object(hdr);
2000 ret = binder_dec_ref_for_handle(proc, fp->handle,
2001 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2002
2003 if (ret) {
2004 pr_err("transaction release %d bad handle %d, ret = %d\n",
2005 debug_id, fp->handle, ret);
2006 break;
2007 }
2008 binder_debug(BINDER_DEBUG_TRANSACTION,
2009 " ref %d desc %d\n",
2010 rdata.debug_id, rdata.desc);
2011 } break;
2012
2013 case BINDER_TYPE_FD: {
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023 } break;
2024 case BINDER_TYPE_PTR:
2025
2026
2027
2028
2029 break;
2030 case BINDER_TYPE_FDA: {
2031 struct binder_fd_array_object *fda;
2032 struct binder_buffer_object *parent;
2033 struct binder_object ptr_object;
2034 binder_size_t fda_offset;
2035 size_t fd_index;
2036 binder_size_t fd_buf_size;
2037 binder_size_t num_valid;
2038
2039 if (is_failure) {
2040
2041
2042
2043
2044 continue;
2045 }
2046
2047 num_valid = (buffer_offset - off_start_offset) /
2048 sizeof(binder_size_t);
2049 fda = to_binder_fd_array_object(hdr);
2050 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2051 fda->parent,
2052 off_start_offset,
2053 NULL,
2054 num_valid);
2055 if (!parent) {
2056 pr_err("transaction release %d bad parent offset\n",
2057 debug_id);
2058 continue;
2059 }
2060 fd_buf_size = sizeof(u32) * fda->num_fds;
2061 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2062 pr_err("transaction release %d invalid number of fds (%lld)\n",
2063 debug_id, (u64)fda->num_fds);
2064 continue;
2065 }
2066 if (fd_buf_size > parent->length ||
2067 fda->parent_offset > parent->length - fd_buf_size) {
2068
2069 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2070 debug_id, (u64)fda->num_fds);
2071 continue;
2072 }
2073
2074
2075
2076
2077
2078
2079
2080 fda_offset =
2081 (parent->buffer - (uintptr_t)buffer->user_data) +
2082 fda->parent_offset;
2083 for (fd_index = 0; fd_index < fda->num_fds;
2084 fd_index++) {
2085 u32 fd;
2086 int err;
2087 binder_size_t offset = fda_offset +
2088 fd_index * sizeof(fd);
2089
2090 err = binder_alloc_copy_from_buffer(
2091 &proc->alloc, &fd, buffer,
2092 offset, sizeof(fd));
2093 WARN_ON(err);
2094 if (!err) {
2095 binder_deferred_fd_close(fd);
2096
2097
2098
2099
2100
2101 if (thread)
2102 thread->looper_need_return = true;
2103 }
2104 }
2105 } break;
2106 default:
2107 pr_err("transaction release %d bad object type %x\n",
2108 debug_id, hdr->type);
2109 break;
2110 }
2111 }
2112 }
2113
2114 static int binder_translate_binder(struct flat_binder_object *fp,
2115 struct binder_transaction *t,
2116 struct binder_thread *thread)
2117 {
2118 struct binder_node *node;
2119 struct binder_proc *proc = thread->proc;
2120 struct binder_proc *target_proc = t->to_proc;
2121 struct binder_ref_data rdata;
2122 int ret = 0;
2123
2124 node = binder_get_node(proc, fp->binder);
2125 if (!node) {
2126 node = binder_new_node(proc, fp);
2127 if (!node)
2128 return -ENOMEM;
2129 }
2130 if (fp->cookie != node->cookie) {
2131 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2132 proc->pid, thread->pid, (u64)fp->binder,
2133 node->debug_id, (u64)fp->cookie,
2134 (u64)node->cookie);
2135 ret = -EINVAL;
2136 goto done;
2137 }
2138 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2139 ret = -EPERM;
2140 goto done;
2141 }
2142
2143 ret = binder_inc_ref_for_node(target_proc, node,
2144 fp->hdr.type == BINDER_TYPE_BINDER,
2145 &thread->todo, &rdata);
2146 if (ret)
2147 goto done;
2148
2149 if (fp->hdr.type == BINDER_TYPE_BINDER)
2150 fp->hdr.type = BINDER_TYPE_HANDLE;
2151 else
2152 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2153 fp->binder = 0;
2154 fp->handle = rdata.desc;
2155 fp->cookie = 0;
2156
2157 trace_binder_transaction_node_to_ref(t, node, &rdata);
2158 binder_debug(BINDER_DEBUG_TRANSACTION,
2159 " node %d u%016llx -> ref %d desc %d\n",
2160 node->debug_id, (u64)node->ptr,
2161 rdata.debug_id, rdata.desc);
2162 done:
2163 binder_put_node(node);
2164 return ret;
2165 }
2166
2167 static int binder_translate_handle(struct flat_binder_object *fp,
2168 struct binder_transaction *t,
2169 struct binder_thread *thread)
2170 {
2171 struct binder_proc *proc = thread->proc;
2172 struct binder_proc *target_proc = t->to_proc;
2173 struct binder_node *node;
2174 struct binder_ref_data src_rdata;
2175 int ret = 0;
2176
2177 node = binder_get_node_from_ref(proc, fp->handle,
2178 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2179 if (!node) {
2180 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2181 proc->pid, thread->pid, fp->handle);
2182 return -EINVAL;
2183 }
2184 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2185 ret = -EPERM;
2186 goto done;
2187 }
2188
2189 binder_node_lock(node);
2190 if (node->proc == target_proc) {
2191 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2192 fp->hdr.type = BINDER_TYPE_BINDER;
2193 else
2194 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2195 fp->binder = node->ptr;
2196 fp->cookie = node->cookie;
2197 if (node->proc)
2198 binder_inner_proc_lock(node->proc);
2199 else
2200 __acquire(&node->proc->inner_lock);
2201 binder_inc_node_nilocked(node,
2202 fp->hdr.type == BINDER_TYPE_BINDER,
2203 0, NULL);
2204 if (node->proc)
2205 binder_inner_proc_unlock(node->proc);
2206 else
2207 __release(&node->proc->inner_lock);
2208 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2209 binder_debug(BINDER_DEBUG_TRANSACTION,
2210 " ref %d desc %d -> node %d u%016llx\n",
2211 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2212 (u64)node->ptr);
2213 binder_node_unlock(node);
2214 } else {
2215 struct binder_ref_data dest_rdata;
2216
2217 binder_node_unlock(node);
2218 ret = binder_inc_ref_for_node(target_proc, node,
2219 fp->hdr.type == BINDER_TYPE_HANDLE,
2220 NULL, &dest_rdata);
2221 if (ret)
2222 goto done;
2223
2224 fp->binder = 0;
2225 fp->handle = dest_rdata.desc;
2226 fp->cookie = 0;
2227 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2228 &dest_rdata);
2229 binder_debug(BINDER_DEBUG_TRANSACTION,
2230 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2231 src_rdata.debug_id, src_rdata.desc,
2232 dest_rdata.debug_id, dest_rdata.desc,
2233 node->debug_id);
2234 }
2235 done:
2236 binder_put_node(node);
2237 return ret;
2238 }
2239
2240 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2241 struct binder_transaction *t,
2242 struct binder_thread *thread,
2243 struct binder_transaction *in_reply_to)
2244 {
2245 struct binder_proc *proc = thread->proc;
2246 struct binder_proc *target_proc = t->to_proc;
2247 struct binder_txn_fd_fixup *fixup;
2248 struct file *file;
2249 int ret = 0;
2250 bool target_allows_fd;
2251
2252 if (in_reply_to)
2253 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2254 else
2255 target_allows_fd = t->buffer->target_node->accept_fds;
2256 if (!target_allows_fd) {
2257 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2258 proc->pid, thread->pid,
2259 in_reply_to ? "reply" : "transaction",
2260 fd);
2261 ret = -EPERM;
2262 goto err_fd_not_accepted;
2263 }
2264
2265 file = fget(fd);
2266 if (!file) {
2267 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2268 proc->pid, thread->pid, fd);
2269 ret = -EBADF;
2270 goto err_fget;
2271 }
2272 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2273 if (ret < 0) {
2274 ret = -EPERM;
2275 goto err_security;
2276 }
2277
2278
2279
2280
2281
2282
2283 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2284 if (!fixup) {
2285 ret = -ENOMEM;
2286 goto err_alloc;
2287 }
2288 fixup->file = file;
2289 fixup->offset = fd_offset;
2290 fixup->target_fd = -1;
2291 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2292 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2293
2294 return ret;
2295
2296 err_alloc:
2297 err_security:
2298 fput(file);
2299 err_fget:
2300 err_fd_not_accepted:
2301 return ret;
2302 }
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317 struct binder_ptr_fixup {
2318 binder_size_t offset;
2319 size_t skip_size;
2320 binder_uintptr_t fixup_data;
2321 struct list_head node;
2322 };
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337 struct binder_sg_copy {
2338 binder_size_t offset;
2339 const void __user *sender_uaddr;
2340 size_t length;
2341 struct list_head node;
2342 };
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2360 struct binder_buffer *buffer,
2361 struct list_head *sgc_head,
2362 struct list_head *pf_head)
2363 {
2364 int ret = 0;
2365 struct binder_sg_copy *sgc, *tmpsgc;
2366 struct binder_ptr_fixup *tmppf;
2367 struct binder_ptr_fixup *pf =
2368 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2369 node);
2370
2371 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2372 size_t bytes_copied = 0;
2373
2374 while (bytes_copied < sgc->length) {
2375 size_t copy_size;
2376 size_t bytes_left = sgc->length - bytes_copied;
2377 size_t offset = sgc->offset + bytes_copied;
2378
2379
2380
2381
2382 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2383 : bytes_left;
2384 if (!ret && copy_size)
2385 ret = binder_alloc_copy_user_to_buffer(
2386 alloc, buffer,
2387 offset,
2388 sgc->sender_uaddr + bytes_copied,
2389 copy_size);
2390 bytes_copied += copy_size;
2391 if (copy_size != bytes_left) {
2392 BUG_ON(!pf);
2393
2394 if (pf->skip_size) {
2395
2396
2397
2398
2399
2400
2401 bytes_copied += pf->skip_size;
2402 } else {
2403
2404 if (!ret)
2405 ret = binder_alloc_copy_to_buffer(
2406 alloc, buffer,
2407 pf->offset,
2408 &pf->fixup_data,
2409 sizeof(pf->fixup_data));
2410 bytes_copied += sizeof(pf->fixup_data);
2411 }
2412 list_del(&pf->node);
2413 kfree(pf);
2414 pf = list_first_entry_or_null(pf_head,
2415 struct binder_ptr_fixup, node);
2416 }
2417 }
2418 list_del(&sgc->node);
2419 kfree(sgc);
2420 }
2421 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2422 BUG_ON(pf->skip_size == 0);
2423 list_del(&pf->node);
2424 kfree(pf);
2425 }
2426 BUG_ON(!list_empty(sgc_head));
2427
2428 return ret > 0 ? -EINVAL : ret;
2429 }
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2440 struct list_head *pf_head)
2441 {
2442 struct binder_sg_copy *sgc, *tmpsgc;
2443 struct binder_ptr_fixup *pf, *tmppf;
2444
2445 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2446 list_del(&sgc->node);
2447 kfree(sgc);
2448 }
2449 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2450 list_del(&pf->node);
2451 kfree(pf);
2452 }
2453 }
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2474 const void __user *sender_uaddr, size_t length)
2475 {
2476 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2477
2478 if (!bc)
2479 return -ENOMEM;
2480
2481 bc->offset = offset;
2482 bc->sender_uaddr = sender_uaddr;
2483 bc->length = length;
2484 INIT_LIST_HEAD(&bc->node);
2485
2486
2487
2488
2489
2490 list_add_tail(&bc->node, sgc_head);
2491
2492 return 0;
2493 }
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2517 binder_uintptr_t fixup, size_t skip_size)
2518 {
2519 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2520 struct binder_ptr_fixup *tmppf;
2521
2522 if (!pf)
2523 return -ENOMEM;
2524
2525 pf->offset = offset;
2526 pf->fixup_data = fixup;
2527 pf->skip_size = skip_size;
2528 INIT_LIST_HEAD(&pf->node);
2529
2530
2531
2532
2533 list_for_each_entry_reverse(tmppf, pf_head, node) {
2534 if (tmppf->offset < pf->offset) {
2535 list_add(&pf->node, &tmppf->node);
2536 return 0;
2537 }
2538 }
2539
2540
2541
2542
2543 list_add(&pf->node, pf_head);
2544 return 0;
2545 }
2546
2547 static int binder_translate_fd_array(struct list_head *pf_head,
2548 struct binder_fd_array_object *fda,
2549 const void __user *sender_ubuffer,
2550 struct binder_buffer_object *parent,
2551 struct binder_buffer_object *sender_uparent,
2552 struct binder_transaction *t,
2553 struct binder_thread *thread,
2554 struct binder_transaction *in_reply_to)
2555 {
2556 binder_size_t fdi, fd_buf_size;
2557 binder_size_t fda_offset;
2558 const void __user *sender_ufda_base;
2559 struct binder_proc *proc = thread->proc;
2560 int ret;
2561
2562 if (fda->num_fds == 0)
2563 return 0;
2564
2565 fd_buf_size = sizeof(u32) * fda->num_fds;
2566 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2567 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2568 proc->pid, thread->pid, (u64)fda->num_fds);
2569 return -EINVAL;
2570 }
2571 if (fd_buf_size > parent->length ||
2572 fda->parent_offset > parent->length - fd_buf_size) {
2573
2574 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2575 proc->pid, thread->pid, (u64)fda->num_fds);
2576 return -EINVAL;
2577 }
2578
2579
2580
2581
2582
2583
2584
2585 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2586 fda->parent_offset;
2587 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2588 fda->parent_offset;
2589
2590 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2591 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2592 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2593 proc->pid, thread->pid);
2594 return -EINVAL;
2595 }
2596 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2597 if (ret)
2598 return ret;
2599
2600 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2601 u32 fd;
2602 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2603 binder_size_t sender_uoffset = fdi * sizeof(fd);
2604
2605 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2606 if (!ret)
2607 ret = binder_translate_fd(fd, offset, t, thread,
2608 in_reply_to);
2609 if (ret)
2610 return ret > 0 ? -EINVAL : ret;
2611 }
2612 return 0;
2613 }
2614
2615 static int binder_fixup_parent(struct list_head *pf_head,
2616 struct binder_transaction *t,
2617 struct binder_thread *thread,
2618 struct binder_buffer_object *bp,
2619 binder_size_t off_start_offset,
2620 binder_size_t num_valid,
2621 binder_size_t last_fixup_obj_off,
2622 binder_size_t last_fixup_min_off)
2623 {
2624 struct binder_buffer_object *parent;
2625 struct binder_buffer *b = t->buffer;
2626 struct binder_proc *proc = thread->proc;
2627 struct binder_proc *target_proc = t->to_proc;
2628 struct binder_object object;
2629 binder_size_t buffer_offset;
2630 binder_size_t parent_offset;
2631
2632 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2633 return 0;
2634
2635 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2636 off_start_offset, &parent_offset,
2637 num_valid);
2638 if (!parent) {
2639 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2640 proc->pid, thread->pid);
2641 return -EINVAL;
2642 }
2643
2644 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2645 parent_offset, bp->parent_offset,
2646 last_fixup_obj_off,
2647 last_fixup_min_off)) {
2648 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2649 proc->pid, thread->pid);
2650 return -EINVAL;
2651 }
2652
2653 if (parent->length < sizeof(binder_uintptr_t) ||
2654 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2655
2656 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2657 proc->pid, thread->pid);
2658 return -EINVAL;
2659 }
2660 buffer_offset = bp->parent_offset +
2661 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2662 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2663 }
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673 static bool binder_can_update_transaction(struct binder_transaction *t1,
2674 struct binder_transaction *t2)
2675 {
2676 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2677 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2678 return false;
2679 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2680 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2681 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2682 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2683 return true;
2684 return false;
2685 }
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697 static struct binder_transaction *
2698 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2699 struct list_head *target_list)
2700 {
2701 struct binder_work *w;
2702
2703 list_for_each_entry(w, target_list, entry) {
2704 struct binder_transaction *t_queued;
2705
2706 if (w->type != BINDER_WORK_TRANSACTION)
2707 continue;
2708 t_queued = container_of(w, struct binder_transaction, work);
2709 if (binder_can_update_transaction(t_queued, t))
2710 return t_queued;
2711 }
2712 return NULL;
2713 }
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733 static int binder_proc_transaction(struct binder_transaction *t,
2734 struct binder_proc *proc,
2735 struct binder_thread *thread)
2736 {
2737 struct binder_node *node = t->buffer->target_node;
2738 bool oneway = !!(t->flags & TF_ONE_WAY);
2739 bool pending_async = false;
2740 struct binder_transaction *t_outdated = NULL;
2741
2742 BUG_ON(!node);
2743 binder_node_lock(node);
2744 if (oneway) {
2745 BUG_ON(thread);
2746 if (node->has_async_transaction)
2747 pending_async = true;
2748 else
2749 node->has_async_transaction = true;
2750 }
2751
2752 binder_inner_proc_lock(proc);
2753 if (proc->is_frozen) {
2754 proc->sync_recv |= !oneway;
2755 proc->async_recv |= oneway;
2756 }
2757
2758 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2759 (thread && thread->is_dead)) {
2760 binder_inner_proc_unlock(proc);
2761 binder_node_unlock(node);
2762 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2763 }
2764
2765 if (!thread && !pending_async)
2766 thread = binder_select_thread_ilocked(proc);
2767
2768 if (thread) {
2769 binder_enqueue_thread_work_ilocked(thread, &t->work);
2770 } else if (!pending_async) {
2771 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2772 } else {
2773 if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
2774 t_outdated = binder_find_outdated_transaction_ilocked(t,
2775 &node->async_todo);
2776 if (t_outdated) {
2777 binder_debug(BINDER_DEBUG_TRANSACTION,
2778 "txn %d supersedes %d\n",
2779 t->debug_id, t_outdated->debug_id);
2780 list_del_init(&t_outdated->work.entry);
2781 proc->outstanding_txns--;
2782 }
2783 }
2784 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2785 }
2786
2787 if (!pending_async)
2788 binder_wakeup_thread_ilocked(proc, thread, !oneway );
2789
2790 proc->outstanding_txns++;
2791 binder_inner_proc_unlock(proc);
2792 binder_node_unlock(node);
2793
2794
2795
2796
2797
2798 if (t_outdated) {
2799 struct binder_buffer *buffer = t_outdated->buffer;
2800
2801 t_outdated->buffer = NULL;
2802 buffer->transaction = NULL;
2803 trace_binder_transaction_update_buffer_release(buffer);
2804 binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
2805 binder_alloc_free_buf(&proc->alloc, buffer);
2806 kfree(t_outdated);
2807 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2808 }
2809
2810 return 0;
2811 }
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834 static struct binder_node *binder_get_node_refs_for_txn(
2835 struct binder_node *node,
2836 struct binder_proc **procp,
2837 uint32_t *error)
2838 {
2839 struct binder_node *target_node = NULL;
2840
2841 binder_node_inner_lock(node);
2842 if (node->proc) {
2843 target_node = node;
2844 binder_inc_node_nilocked(node, 1, 0, NULL);
2845 binder_inc_node_tmpref_ilocked(node);
2846 node->proc->tmp_ref++;
2847 *procp = node->proc;
2848 } else
2849 *error = BR_DEAD_REPLY;
2850 binder_node_inner_unlock(node);
2851
2852 return target_node;
2853 }
2854
2855 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2856 uint32_t command, int32_t param)
2857 {
2858 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2859
2860 if (!from) {
2861
2862 __release(&from->proc->inner_lock);
2863 return;
2864 }
2865
2866
2867 if (from->ee.command == BR_OK)
2868 binder_set_extended_error(&from->ee, id, command, param);
2869 binder_inner_proc_unlock(from->proc);
2870 binder_thread_dec_tmpref(from);
2871 }
2872
2873 static void binder_transaction(struct binder_proc *proc,
2874 struct binder_thread *thread,
2875 struct binder_transaction_data *tr, int reply,
2876 binder_size_t extra_buffers_size)
2877 {
2878 int ret;
2879 struct binder_transaction *t;
2880 struct binder_work *w;
2881 struct binder_work *tcomplete;
2882 binder_size_t buffer_offset = 0;
2883 binder_size_t off_start_offset, off_end_offset;
2884 binder_size_t off_min;
2885 binder_size_t sg_buf_offset, sg_buf_end_offset;
2886 binder_size_t user_offset = 0;
2887 struct binder_proc *target_proc = NULL;
2888 struct binder_thread *target_thread = NULL;
2889 struct binder_node *target_node = NULL;
2890 struct binder_transaction *in_reply_to = NULL;
2891 struct binder_transaction_log_entry *e;
2892 uint32_t return_error = 0;
2893 uint32_t return_error_param = 0;
2894 uint32_t return_error_line = 0;
2895 binder_size_t last_fixup_obj_off = 0;
2896 binder_size_t last_fixup_min_off = 0;
2897 struct binder_context *context = proc->context;
2898 int t_debug_id = atomic_inc_return(&binder_last_id);
2899 char *secctx = NULL;
2900 u32 secctx_sz = 0;
2901 struct list_head sgc_head;
2902 struct list_head pf_head;
2903 const void __user *user_buffer = (const void __user *)
2904 (uintptr_t)tr->data.ptr.buffer;
2905 INIT_LIST_HEAD(&sgc_head);
2906 INIT_LIST_HEAD(&pf_head);
2907
2908 e = binder_transaction_log_add(&binder_transaction_log);
2909 e->debug_id = t_debug_id;
2910 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2911 e->from_proc = proc->pid;
2912 e->from_thread = thread->pid;
2913 e->target_handle = tr->target.handle;
2914 e->data_size = tr->data_size;
2915 e->offsets_size = tr->offsets_size;
2916 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2917
2918 binder_inner_proc_lock(proc);
2919 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2920 binder_inner_proc_unlock(proc);
2921
2922 if (reply) {
2923 binder_inner_proc_lock(proc);
2924 in_reply_to = thread->transaction_stack;
2925 if (in_reply_to == NULL) {
2926 binder_inner_proc_unlock(proc);
2927 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2928 proc->pid, thread->pid);
2929 return_error = BR_FAILED_REPLY;
2930 return_error_param = -EPROTO;
2931 return_error_line = __LINE__;
2932 goto err_empty_call_stack;
2933 }
2934 if (in_reply_to->to_thread != thread) {
2935 spin_lock(&in_reply_to->lock);
2936 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2937 proc->pid, thread->pid, in_reply_to->debug_id,
2938 in_reply_to->to_proc ?
2939 in_reply_to->to_proc->pid : 0,
2940 in_reply_to->to_thread ?
2941 in_reply_to->to_thread->pid : 0);
2942 spin_unlock(&in_reply_to->lock);
2943 binder_inner_proc_unlock(proc);
2944 return_error = BR_FAILED_REPLY;
2945 return_error_param = -EPROTO;
2946 return_error_line = __LINE__;
2947 in_reply_to = NULL;
2948 goto err_bad_call_stack;
2949 }
2950 thread->transaction_stack = in_reply_to->to_parent;
2951 binder_inner_proc_unlock(proc);
2952 binder_set_nice(in_reply_to->saved_priority);
2953 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2954 if (target_thread == NULL) {
2955
2956 __release(&target_thread->proc->inner_lock);
2957 binder_txn_error("%d:%d reply target not found\n",
2958 thread->pid, proc->pid);
2959 return_error = BR_DEAD_REPLY;
2960 return_error_line = __LINE__;
2961 goto err_dead_binder;
2962 }
2963 if (target_thread->transaction_stack != in_reply_to) {
2964 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2965 proc->pid, thread->pid,
2966 target_thread->transaction_stack ?
2967 target_thread->transaction_stack->debug_id : 0,
2968 in_reply_to->debug_id);
2969 binder_inner_proc_unlock(target_thread->proc);
2970 return_error = BR_FAILED_REPLY;
2971 return_error_param = -EPROTO;
2972 return_error_line = __LINE__;
2973 in_reply_to = NULL;
2974 target_thread = NULL;
2975 goto err_dead_binder;
2976 }
2977 target_proc = target_thread->proc;
2978 target_proc->tmp_ref++;
2979 binder_inner_proc_unlock(target_thread->proc);
2980 } else {
2981 if (tr->target.handle) {
2982 struct binder_ref *ref;
2983
2984
2985
2986
2987
2988
2989
2990
2991 binder_proc_lock(proc);
2992 ref = binder_get_ref_olocked(proc, tr->target.handle,
2993 true);
2994 if (ref) {
2995 target_node = binder_get_node_refs_for_txn(
2996 ref->node, &target_proc,
2997 &return_error);
2998 } else {
2999 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3000 proc->pid, thread->pid, tr->target.handle);
3001 return_error = BR_FAILED_REPLY;
3002 }
3003 binder_proc_unlock(proc);
3004 } else {
3005 mutex_lock(&context->context_mgr_node_lock);
3006 target_node = context->binder_context_mgr_node;
3007 if (target_node)
3008 target_node = binder_get_node_refs_for_txn(
3009 target_node, &target_proc,
3010 &return_error);
3011 else
3012 return_error = BR_DEAD_REPLY;
3013 mutex_unlock(&context->context_mgr_node_lock);
3014 if (target_node && target_proc->pid == proc->pid) {
3015 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3016 proc->pid, thread->pid);
3017 return_error = BR_FAILED_REPLY;
3018 return_error_param = -EINVAL;
3019 return_error_line = __LINE__;
3020 goto err_invalid_target_handle;
3021 }
3022 }
3023 if (!target_node) {
3024 binder_txn_error("%d:%d cannot find target node\n",
3025 thread->pid, proc->pid);
3026
3027
3028
3029 return_error_param = -EINVAL;
3030 return_error_line = __LINE__;
3031 goto err_dead_binder;
3032 }
3033 e->to_node = target_node->debug_id;
3034 if (WARN_ON(proc == target_proc)) {
3035 binder_txn_error("%d:%d self transactions not allowed\n",
3036 thread->pid, proc->pid);
3037 return_error = BR_FAILED_REPLY;
3038 return_error_param = -EINVAL;
3039 return_error_line = __LINE__;
3040 goto err_invalid_target_handle;
3041 }
3042 if (security_binder_transaction(proc->cred,
3043 target_proc->cred) < 0) {
3044 binder_txn_error("%d:%d transaction credentials failed\n",
3045 thread->pid, proc->pid);
3046 return_error = BR_FAILED_REPLY;
3047 return_error_param = -EPERM;
3048 return_error_line = __LINE__;
3049 goto err_invalid_target_handle;
3050 }
3051 binder_inner_proc_lock(proc);
3052
3053 w = list_first_entry_or_null(&thread->todo,
3054 struct binder_work, entry);
3055 if (!(tr->flags & TF_ONE_WAY) && w &&
3056 w->type == BINDER_WORK_TRANSACTION) {
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3067 proc->pid, thread->pid);
3068 binder_inner_proc_unlock(proc);
3069 return_error = BR_FAILED_REPLY;
3070 return_error_param = -EPROTO;
3071 return_error_line = __LINE__;
3072 goto err_bad_todo_list;
3073 }
3074
3075 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3076 struct binder_transaction *tmp;
3077
3078 tmp = thread->transaction_stack;
3079 if (tmp->to_thread != thread) {
3080 spin_lock(&tmp->lock);
3081 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3082 proc->pid, thread->pid, tmp->debug_id,
3083 tmp->to_proc ? tmp->to_proc->pid : 0,
3084 tmp->to_thread ?
3085 tmp->to_thread->pid : 0);
3086 spin_unlock(&tmp->lock);
3087 binder_inner_proc_unlock(proc);
3088 return_error = BR_FAILED_REPLY;
3089 return_error_param = -EPROTO;
3090 return_error_line = __LINE__;
3091 goto err_bad_call_stack;
3092 }
3093 while (tmp) {
3094 struct binder_thread *from;
3095
3096 spin_lock(&tmp->lock);
3097 from = tmp->from;
3098 if (from && from->proc == target_proc) {
3099 atomic_inc(&from->tmp_ref);
3100 target_thread = from;
3101 spin_unlock(&tmp->lock);
3102 break;
3103 }
3104 spin_unlock(&tmp->lock);
3105 tmp = tmp->from_parent;
3106 }
3107 }
3108 binder_inner_proc_unlock(proc);
3109 }
3110 if (target_thread)
3111 e->to_thread = target_thread->pid;
3112 e->to_proc = target_proc->pid;
3113
3114
3115 t = kzalloc(sizeof(*t), GFP_KERNEL);
3116 if (t == NULL) {
3117 binder_txn_error("%d:%d cannot allocate transaction\n",
3118 thread->pid, proc->pid);
3119 return_error = BR_FAILED_REPLY;
3120 return_error_param = -ENOMEM;
3121 return_error_line = __LINE__;
3122 goto err_alloc_t_failed;
3123 }
3124 INIT_LIST_HEAD(&t->fd_fixups);
3125 binder_stats_created(BINDER_STAT_TRANSACTION);
3126 spin_lock_init(&t->lock);
3127
3128 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3129 if (tcomplete == NULL) {
3130 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3131 thread->pid, proc->pid);
3132 return_error = BR_FAILED_REPLY;
3133 return_error_param = -ENOMEM;
3134 return_error_line = __LINE__;
3135 goto err_alloc_tcomplete_failed;
3136 }
3137 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3138
3139 t->debug_id = t_debug_id;
3140
3141 if (reply)
3142 binder_debug(BINDER_DEBUG_TRANSACTION,
3143 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3144 proc->pid, thread->pid, t->debug_id,
3145 target_proc->pid, target_thread->pid,
3146 (u64)tr->data.ptr.buffer,
3147 (u64)tr->data.ptr.offsets,
3148 (u64)tr->data_size, (u64)tr->offsets_size,
3149 (u64)extra_buffers_size);
3150 else
3151 binder_debug(BINDER_DEBUG_TRANSACTION,
3152 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3153 proc->pid, thread->pid, t->debug_id,
3154 target_proc->pid, target_node->debug_id,
3155 (u64)tr->data.ptr.buffer,
3156 (u64)tr->data.ptr.offsets,
3157 (u64)tr->data_size, (u64)tr->offsets_size,
3158 (u64)extra_buffers_size);
3159
3160 if (!reply && !(tr->flags & TF_ONE_WAY))
3161 t->from = thread;
3162 else
3163 t->from = NULL;
3164 t->sender_euid = task_euid(proc->tsk);
3165 t->to_proc = target_proc;
3166 t->to_thread = target_thread;
3167 t->code = tr->code;
3168 t->flags = tr->flags;
3169 t->priority = task_nice(current);
3170
3171 if (target_node && target_node->txn_security_ctx) {
3172 u32 secid;
3173 size_t added_size;
3174
3175 security_cred_getsecid(proc->cred, &secid);
3176 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3177 if (ret) {
3178 binder_txn_error("%d:%d failed to get security context\n",
3179 thread->pid, proc->pid);
3180 return_error = BR_FAILED_REPLY;
3181 return_error_param = ret;
3182 return_error_line = __LINE__;
3183 goto err_get_secctx_failed;
3184 }
3185 added_size = ALIGN(secctx_sz, sizeof(u64));
3186 extra_buffers_size += added_size;
3187 if (extra_buffers_size < added_size) {
3188 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3189 thread->pid, proc->pid);
3190 return_error = BR_FAILED_REPLY;
3191 return_error_param = -EINVAL;
3192 return_error_line = __LINE__;
3193 goto err_bad_extra_size;
3194 }
3195 }
3196
3197 trace_binder_transaction(reply, t, target_node);
3198
3199 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3200 tr->offsets_size, extra_buffers_size,
3201 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3202 if (IS_ERR(t->buffer)) {
3203 char *s;
3204
3205 ret = PTR_ERR(t->buffer);
3206 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3207 : (ret == -ENOSPC) ? ": no space left"
3208 : (ret == -ENOMEM) ? ": memory allocation failed"
3209 : "";
3210 binder_txn_error("cannot allocate buffer%s", s);
3211
3212 return_error_param = PTR_ERR(t->buffer);
3213 return_error = return_error_param == -ESRCH ?
3214 BR_DEAD_REPLY : BR_FAILED_REPLY;
3215 return_error_line = __LINE__;
3216 t->buffer = NULL;
3217 goto err_binder_alloc_buf_failed;
3218 }
3219 if (secctx) {
3220 int err;
3221 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3222 ALIGN(tr->offsets_size, sizeof(void *)) +
3223 ALIGN(extra_buffers_size, sizeof(void *)) -
3224 ALIGN(secctx_sz, sizeof(u64));
3225
3226 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3227 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3228 t->buffer, buf_offset,
3229 secctx, secctx_sz);
3230 if (err) {
3231 t->security_ctx = 0;
3232 WARN_ON(1);
3233 }
3234 security_release_secctx(secctx, secctx_sz);
3235 secctx = NULL;
3236 }
3237 t->buffer->debug_id = t->debug_id;
3238 t->buffer->transaction = t;
3239 t->buffer->target_node = target_node;
3240 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3241 trace_binder_transaction_alloc_buf(t->buffer);
3242
3243 if (binder_alloc_copy_user_to_buffer(
3244 &target_proc->alloc,
3245 t->buffer,
3246 ALIGN(tr->data_size, sizeof(void *)),
3247 (const void __user *)
3248 (uintptr_t)tr->data.ptr.offsets,
3249 tr->offsets_size)) {
3250 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3251 proc->pid, thread->pid);
3252 return_error = BR_FAILED_REPLY;
3253 return_error_param = -EFAULT;
3254 return_error_line = __LINE__;
3255 goto err_copy_data_failed;
3256 }
3257 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3258 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3259 proc->pid, thread->pid, (u64)tr->offsets_size);
3260 return_error = BR_FAILED_REPLY;
3261 return_error_param = -EINVAL;
3262 return_error_line = __LINE__;
3263 goto err_bad_offset;
3264 }
3265 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3266 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3267 proc->pid, thread->pid,
3268 (u64)extra_buffers_size);
3269 return_error = BR_FAILED_REPLY;
3270 return_error_param = -EINVAL;
3271 return_error_line = __LINE__;
3272 goto err_bad_offset;
3273 }
3274 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3275 buffer_offset = off_start_offset;
3276 off_end_offset = off_start_offset + tr->offsets_size;
3277 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3278 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3279 ALIGN(secctx_sz, sizeof(u64));
3280 off_min = 0;
3281 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3282 buffer_offset += sizeof(binder_size_t)) {
3283 struct binder_object_header *hdr;
3284 size_t object_size;
3285 struct binder_object object;
3286 binder_size_t object_offset;
3287 binder_size_t copy_size;
3288
3289 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3290 &object_offset,
3291 t->buffer,
3292 buffer_offset,
3293 sizeof(object_offset))) {
3294 binder_txn_error("%d:%d copy offset from buffer failed\n",
3295 thread->pid, proc->pid);
3296 return_error = BR_FAILED_REPLY;
3297 return_error_param = -EINVAL;
3298 return_error_line = __LINE__;
3299 goto err_bad_offset;
3300 }
3301
3302
3303
3304
3305
3306 copy_size = object_offset - user_offset;
3307 if (copy_size && (user_offset > object_offset ||
3308 binder_alloc_copy_user_to_buffer(
3309 &target_proc->alloc,
3310 t->buffer, user_offset,
3311 user_buffer + user_offset,
3312 copy_size))) {
3313 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3314 proc->pid, thread->pid);
3315 return_error = BR_FAILED_REPLY;
3316 return_error_param = -EFAULT;
3317 return_error_line = __LINE__;
3318 goto err_copy_data_failed;
3319 }
3320 object_size = binder_get_object(target_proc, user_buffer,
3321 t->buffer, object_offset, &object);
3322 if (object_size == 0 || object_offset < off_min) {
3323 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3324 proc->pid, thread->pid,
3325 (u64)object_offset,
3326 (u64)off_min,
3327 (u64)t->buffer->data_size);
3328 return_error = BR_FAILED_REPLY;
3329 return_error_param = -EINVAL;
3330 return_error_line = __LINE__;
3331 goto err_bad_offset;
3332 }
3333
3334
3335
3336
3337 user_offset = object_offset + object_size;
3338
3339 hdr = &object.hdr;
3340 off_min = object_offset + object_size;
3341 switch (hdr->type) {
3342 case BINDER_TYPE_BINDER:
3343 case BINDER_TYPE_WEAK_BINDER: {
3344 struct flat_binder_object *fp;
3345
3346 fp = to_flat_binder_object(hdr);
3347 ret = binder_translate_binder(fp, t, thread);
3348
3349 if (ret < 0 ||
3350 binder_alloc_copy_to_buffer(&target_proc->alloc,
3351 t->buffer,
3352 object_offset,
3353 fp, sizeof(*fp))) {
3354 binder_txn_error("%d:%d translate binder failed\n",
3355 thread->pid, proc->pid);
3356 return_error = BR_FAILED_REPLY;
3357 return_error_param = ret;
3358 return_error_line = __LINE__;
3359 goto err_translate_failed;
3360 }
3361 } break;
3362 case BINDER_TYPE_HANDLE:
3363 case BINDER_TYPE_WEAK_HANDLE: {
3364 struct flat_binder_object *fp;
3365
3366 fp = to_flat_binder_object(hdr);
3367 ret = binder_translate_handle(fp, t, thread);
3368 if (ret < 0 ||
3369 binder_alloc_copy_to_buffer(&target_proc->alloc,
3370 t->buffer,
3371 object_offset,
3372 fp, sizeof(*fp))) {
3373 binder_txn_error("%d:%d translate handle failed\n",
3374 thread->pid, proc->pid);
3375 return_error = BR_FAILED_REPLY;
3376 return_error_param = ret;
3377 return_error_line = __LINE__;
3378 goto err_translate_failed;
3379 }
3380 } break;
3381
3382 case BINDER_TYPE_FD: {
3383 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3384 binder_size_t fd_offset = object_offset +
3385 (uintptr_t)&fp->fd - (uintptr_t)fp;
3386 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3387 thread, in_reply_to);
3388
3389 fp->pad_binder = 0;
3390 if (ret < 0 ||
3391 binder_alloc_copy_to_buffer(&target_proc->alloc,
3392 t->buffer,
3393 object_offset,
3394 fp, sizeof(*fp))) {
3395 binder_txn_error("%d:%d translate fd failed\n",
3396 thread->pid, proc->pid);
3397 return_error = BR_FAILED_REPLY;
3398 return_error_param = ret;
3399 return_error_line = __LINE__;
3400 goto err_translate_failed;
3401 }
3402 } break;
3403 case BINDER_TYPE_FDA: {
3404 struct binder_object ptr_object;
3405 binder_size_t parent_offset;
3406 struct binder_object user_object;
3407 size_t user_parent_size;
3408 struct binder_fd_array_object *fda =
3409 to_binder_fd_array_object(hdr);
3410 size_t num_valid = (buffer_offset - off_start_offset) /
3411 sizeof(binder_size_t);
3412 struct binder_buffer_object *parent =
3413 binder_validate_ptr(target_proc, t->buffer,
3414 &ptr_object, fda->parent,
3415 off_start_offset,
3416 &parent_offset,
3417 num_valid);
3418 if (!parent) {
3419 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3420 proc->pid, thread->pid);
3421 return_error = BR_FAILED_REPLY;
3422 return_error_param = -EINVAL;
3423 return_error_line = __LINE__;
3424 goto err_bad_parent;
3425 }
3426 if (!binder_validate_fixup(target_proc, t->buffer,
3427 off_start_offset,
3428 parent_offset,
3429 fda->parent_offset,
3430 last_fixup_obj_off,
3431 last_fixup_min_off)) {
3432 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3433 proc->pid, thread->pid);
3434 return_error = BR_FAILED_REPLY;
3435 return_error_param = -EINVAL;
3436 return_error_line = __LINE__;
3437 goto err_bad_parent;
3438 }
3439
3440
3441
3442
3443 user_parent_size =
3444 binder_get_object(proc, user_buffer, t->buffer,
3445 parent_offset, &user_object);
3446 if (user_parent_size != sizeof(user_object.bbo)) {
3447 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3448 proc->pid, thread->pid,
3449 user_parent_size,
3450 sizeof(user_object.bbo));
3451 return_error = BR_FAILED_REPLY;
3452 return_error_param = -EINVAL;
3453 return_error_line = __LINE__;
3454 goto err_bad_parent;
3455 }
3456 ret = binder_translate_fd_array(&pf_head, fda,
3457 user_buffer, parent,
3458 &user_object.bbo, t,
3459 thread, in_reply_to);
3460 if (!ret)
3461 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3462 t->buffer,
3463 object_offset,
3464 fda, sizeof(*fda));
3465 if (ret) {
3466 binder_txn_error("%d:%d translate fd array failed\n",
3467 thread->pid, proc->pid);
3468 return_error = BR_FAILED_REPLY;
3469 return_error_param = ret > 0 ? -EINVAL : ret;
3470 return_error_line = __LINE__;
3471 goto err_translate_failed;
3472 }
3473 last_fixup_obj_off = parent_offset;
3474 last_fixup_min_off =
3475 fda->parent_offset + sizeof(u32) * fda->num_fds;
3476 } break;
3477 case BINDER_TYPE_PTR: {
3478 struct binder_buffer_object *bp =
3479 to_binder_buffer_object(hdr);
3480 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3481 size_t num_valid;
3482
3483 if (bp->length > buf_left) {
3484 binder_user_error("%d:%d got transaction with too large buffer\n",
3485 proc->pid, thread->pid);
3486 return_error = BR_FAILED_REPLY;
3487 return_error_param = -EINVAL;
3488 return_error_line = __LINE__;
3489 goto err_bad_offset;
3490 }
3491 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3492 (const void __user *)(uintptr_t)bp->buffer,
3493 bp->length);
3494 if (ret) {
3495 binder_txn_error("%d:%d deferred copy failed\n",
3496 thread->pid, proc->pid);
3497 return_error = BR_FAILED_REPLY;
3498 return_error_param = ret;
3499 return_error_line = __LINE__;
3500 goto err_translate_failed;
3501 }
3502
3503 bp->buffer = (uintptr_t)
3504 t->buffer->user_data + sg_buf_offset;
3505 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3506
3507 num_valid = (buffer_offset - off_start_offset) /
3508 sizeof(binder_size_t);
3509 ret = binder_fixup_parent(&pf_head, t,
3510 thread, bp,
3511 off_start_offset,
3512 num_valid,
3513 last_fixup_obj_off,
3514 last_fixup_min_off);
3515 if (ret < 0 ||
3516 binder_alloc_copy_to_buffer(&target_proc->alloc,
3517 t->buffer,
3518 object_offset,
3519 bp, sizeof(*bp))) {
3520 binder_txn_error("%d:%d failed to fixup parent\n",
3521 thread->pid, proc->pid);
3522 return_error = BR_FAILED_REPLY;
3523 return_error_param = ret;
3524 return_error_line = __LINE__;
3525 goto err_translate_failed;
3526 }
3527 last_fixup_obj_off = object_offset;
3528 last_fixup_min_off = 0;
3529 } break;
3530 default:
3531 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3532 proc->pid, thread->pid, hdr->type);
3533 return_error = BR_FAILED_REPLY;
3534 return_error_param = -EINVAL;
3535 return_error_line = __LINE__;
3536 goto err_bad_object_type;
3537 }
3538 }
3539
3540 if (binder_alloc_copy_user_to_buffer(
3541 &target_proc->alloc,
3542 t->buffer, user_offset,
3543 user_buffer + user_offset,
3544 tr->data_size - user_offset)) {
3545 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3546 proc->pid, thread->pid);
3547 return_error = BR_FAILED_REPLY;
3548 return_error_param = -EFAULT;
3549 return_error_line = __LINE__;
3550 goto err_copy_data_failed;
3551 }
3552
3553 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3554 &sgc_head, &pf_head);
3555 if (ret) {
3556 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3557 proc->pid, thread->pid);
3558 return_error = BR_FAILED_REPLY;
3559 return_error_param = ret;
3560 return_error_line = __LINE__;
3561 goto err_copy_data_failed;
3562 }
3563 if (t->buffer->oneway_spam_suspect)
3564 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3565 else
3566 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3567 t->work.type = BINDER_WORK_TRANSACTION;
3568
3569 if (reply) {
3570 binder_enqueue_thread_work(thread, tcomplete);
3571 binder_inner_proc_lock(target_proc);
3572 if (target_thread->is_dead) {
3573 return_error = BR_DEAD_REPLY;
3574 binder_inner_proc_unlock(target_proc);
3575 goto err_dead_proc_or_thread;
3576 }
3577 BUG_ON(t->buffer->async_transaction != 0);
3578 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3579 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3580 target_proc->outstanding_txns++;
3581 binder_inner_proc_unlock(target_proc);
3582 wake_up_interruptible_sync(&target_thread->wait);
3583 binder_free_transaction(in_reply_to);
3584 } else if (!(t->flags & TF_ONE_WAY)) {
3585 BUG_ON(t->buffer->async_transaction != 0);
3586 binder_inner_proc_lock(proc);
3587
3588
3589
3590
3591
3592
3593
3594 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3595 t->need_reply = 1;
3596 t->from_parent = thread->transaction_stack;
3597 thread->transaction_stack = t;
3598 binder_inner_proc_unlock(proc);
3599 return_error = binder_proc_transaction(t,
3600 target_proc, target_thread);
3601 if (return_error) {
3602 binder_inner_proc_lock(proc);
3603 binder_pop_transaction_ilocked(thread, t);
3604 binder_inner_proc_unlock(proc);
3605 goto err_dead_proc_or_thread;
3606 }
3607 } else {
3608 BUG_ON(target_node == NULL);
3609 BUG_ON(t->buffer->async_transaction != 1);
3610 binder_enqueue_thread_work(thread, tcomplete);
3611 return_error = binder_proc_transaction(t, target_proc, NULL);
3612 if (return_error)
3613 goto err_dead_proc_or_thread;
3614 }
3615 if (target_thread)
3616 binder_thread_dec_tmpref(target_thread);
3617 binder_proc_dec_tmpref(target_proc);
3618 if (target_node)
3619 binder_dec_node_tmpref(target_node);
3620
3621
3622
3623
3624 smp_wmb();
3625 WRITE_ONCE(e->debug_id_done, t_debug_id);
3626 return;
3627
3628 err_dead_proc_or_thread:
3629 binder_txn_error("%d:%d dead process or thread\n",
3630 thread->pid, proc->pid);
3631 return_error_line = __LINE__;
3632 binder_dequeue_work(proc, tcomplete);
3633 err_translate_failed:
3634 err_bad_object_type:
3635 err_bad_offset:
3636 err_bad_parent:
3637 err_copy_data_failed:
3638 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3639 binder_free_txn_fixups(t);
3640 trace_binder_transaction_failed_buffer_release(t->buffer);
3641 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3642 buffer_offset, true);
3643 if (target_node)
3644 binder_dec_node_tmpref(target_node);
3645 target_node = NULL;
3646 t->buffer->transaction = NULL;
3647 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3648 err_binder_alloc_buf_failed:
3649 err_bad_extra_size:
3650 if (secctx)
3651 security_release_secctx(secctx, secctx_sz);
3652 err_get_secctx_failed:
3653 kfree(tcomplete);
3654 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3655 err_alloc_tcomplete_failed:
3656 if (trace_binder_txn_latency_free_enabled())
3657 binder_txn_latency_free(t);
3658 kfree(t);
3659 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3660 err_alloc_t_failed:
3661 err_bad_todo_list:
3662 err_bad_call_stack:
3663 err_empty_call_stack:
3664 err_dead_binder:
3665 err_invalid_target_handle:
3666 if (target_node) {
3667 binder_dec_node(target_node, 1, 0);
3668 binder_dec_node_tmpref(target_node);
3669 }
3670
3671 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3672 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3673 proc->pid, thread->pid, reply ? "reply" :
3674 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3675 target_proc ? target_proc->pid : 0,
3676 target_thread ? target_thread->pid : 0,
3677 t_debug_id, return_error, return_error_param,
3678 (u64)tr->data_size, (u64)tr->offsets_size,
3679 return_error_line);
3680
3681 if (target_thread)
3682 binder_thread_dec_tmpref(target_thread);
3683 if (target_proc)
3684 binder_proc_dec_tmpref(target_proc);
3685
3686 {
3687 struct binder_transaction_log_entry *fe;
3688
3689 e->return_error = return_error;
3690 e->return_error_param = return_error_param;
3691 e->return_error_line = return_error_line;
3692 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3693 *fe = *e;
3694
3695
3696
3697
3698 smp_wmb();
3699 WRITE_ONCE(e->debug_id_done, t_debug_id);
3700 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3701 }
3702
3703 BUG_ON(thread->return_error.cmd != BR_OK);
3704 if (in_reply_to) {
3705 binder_set_txn_from_error(in_reply_to, t_debug_id,
3706 return_error, return_error_param);
3707 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3708 binder_enqueue_thread_work(thread, &thread->return_error.work);
3709 binder_send_failed_reply(in_reply_to, return_error);
3710 } else {
3711 binder_inner_proc_lock(proc);
3712 binder_set_extended_error(&thread->ee, t_debug_id,
3713 return_error, return_error_param);
3714 binder_inner_proc_unlock(proc);
3715 thread->return_error.cmd = return_error;
3716 binder_enqueue_thread_work(thread, &thread->return_error.work);
3717 }
3718 }
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731 static void
3732 binder_free_buf(struct binder_proc *proc,
3733 struct binder_thread *thread,
3734 struct binder_buffer *buffer, bool is_failure)
3735 {
3736 binder_inner_proc_lock(proc);
3737 if (buffer->transaction) {
3738 buffer->transaction->buffer = NULL;
3739 buffer->transaction = NULL;
3740 }
3741 binder_inner_proc_unlock(proc);
3742 if (buffer->async_transaction && buffer->target_node) {
3743 struct binder_node *buf_node;
3744 struct binder_work *w;
3745
3746 buf_node = buffer->target_node;
3747 binder_node_inner_lock(buf_node);
3748 BUG_ON(!buf_node->has_async_transaction);
3749 BUG_ON(buf_node->proc != proc);
3750 w = binder_dequeue_work_head_ilocked(
3751 &buf_node->async_todo);
3752 if (!w) {
3753 buf_node->has_async_transaction = false;
3754 } else {
3755 binder_enqueue_work_ilocked(
3756 w, &proc->todo);
3757 binder_wakeup_proc_ilocked(proc);
3758 }
3759 binder_node_inner_unlock(buf_node);
3760 }
3761 trace_binder_transaction_buffer_release(buffer);
3762 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3763 binder_alloc_free_buf(&proc->alloc, buffer);
3764 }
3765
3766 static int binder_thread_write(struct binder_proc *proc,
3767 struct binder_thread *thread,
3768 binder_uintptr_t binder_buffer, size_t size,
3769 binder_size_t *consumed)
3770 {
3771 uint32_t cmd;
3772 struct binder_context *context = proc->context;
3773 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3774 void __user *ptr = buffer + *consumed;
3775 void __user *end = buffer + size;
3776
3777 while (ptr < end && thread->return_error.cmd == BR_OK) {
3778 int ret;
3779
3780 if (get_user(cmd, (uint32_t __user *)ptr))
3781 return -EFAULT;
3782 ptr += sizeof(uint32_t);
3783 trace_binder_command(cmd);
3784 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3785 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3786 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3787 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3788 }
3789 switch (cmd) {
3790 case BC_INCREFS:
3791 case BC_ACQUIRE:
3792 case BC_RELEASE:
3793 case BC_DECREFS: {
3794 uint32_t target;
3795 const char *debug_string;
3796 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3797 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3798 struct binder_ref_data rdata;
3799
3800 if (get_user(target, (uint32_t __user *)ptr))
3801 return -EFAULT;
3802
3803 ptr += sizeof(uint32_t);
3804 ret = -1;
3805 if (increment && !target) {
3806 struct binder_node *ctx_mgr_node;
3807
3808 mutex_lock(&context->context_mgr_node_lock);
3809 ctx_mgr_node = context->binder_context_mgr_node;
3810 if (ctx_mgr_node) {
3811 if (ctx_mgr_node->proc == proc) {
3812 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3813 proc->pid, thread->pid);
3814 mutex_unlock(&context->context_mgr_node_lock);
3815 return -EINVAL;
3816 }
3817 ret = binder_inc_ref_for_node(
3818 proc, ctx_mgr_node,
3819 strong, NULL, &rdata);
3820 }
3821 mutex_unlock(&context->context_mgr_node_lock);
3822 }
3823 if (ret)
3824 ret = binder_update_ref_for_handle(
3825 proc, target, increment, strong,
3826 &rdata);
3827 if (!ret && rdata.desc != target) {
3828 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3829 proc->pid, thread->pid,
3830 target, rdata.desc);
3831 }
3832 switch (cmd) {
3833 case BC_INCREFS:
3834 debug_string = "IncRefs";
3835 break;
3836 case BC_ACQUIRE:
3837 debug_string = "Acquire";
3838 break;
3839 case BC_RELEASE:
3840 debug_string = "Release";
3841 break;
3842 case BC_DECREFS:
3843 default:
3844 debug_string = "DecRefs";
3845 break;
3846 }
3847 if (ret) {
3848 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3849 proc->pid, thread->pid, debug_string,
3850 strong, target, ret);
3851 break;
3852 }
3853 binder_debug(BINDER_DEBUG_USER_REFS,
3854 "%d:%d %s ref %d desc %d s %d w %d\n",
3855 proc->pid, thread->pid, debug_string,
3856 rdata.debug_id, rdata.desc, rdata.strong,
3857 rdata.weak);
3858 break;
3859 }
3860 case BC_INCREFS_DONE:
3861 case BC_ACQUIRE_DONE: {
3862 binder_uintptr_t node_ptr;
3863 binder_uintptr_t cookie;
3864 struct binder_node *node;
3865 bool free_node;
3866
3867 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3868 return -EFAULT;
3869 ptr += sizeof(binder_uintptr_t);
3870 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3871 return -EFAULT;
3872 ptr += sizeof(binder_uintptr_t);
3873 node = binder_get_node(proc, node_ptr);
3874 if (node == NULL) {
3875 binder_user_error("%d:%d %s u%016llx no match\n",
3876 proc->pid, thread->pid,
3877 cmd == BC_INCREFS_DONE ?
3878 "BC_INCREFS_DONE" :
3879 "BC_ACQUIRE_DONE",
3880 (u64)node_ptr);
3881 break;
3882 }
3883 if (cookie != node->cookie) {
3884 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3885 proc->pid, thread->pid,
3886 cmd == BC_INCREFS_DONE ?
3887 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3888 (u64)node_ptr, node->debug_id,
3889 (u64)cookie, (u64)node->cookie);
3890 binder_put_node(node);
3891 break;
3892 }
3893 binder_node_inner_lock(node);
3894 if (cmd == BC_ACQUIRE_DONE) {
3895 if (node->pending_strong_ref == 0) {
3896 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3897 proc->pid, thread->pid,
3898 node->debug_id);
3899 binder_node_inner_unlock(node);
3900 binder_put_node(node);
3901 break;
3902 }
3903 node->pending_strong_ref = 0;
3904 } else {
3905 if (node->pending_weak_ref == 0) {
3906 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3907 proc->pid, thread->pid,
3908 node->debug_id);
3909 binder_node_inner_unlock(node);
3910 binder_put_node(node);
3911 break;
3912 }
3913 node->pending_weak_ref = 0;
3914 }
3915 free_node = binder_dec_node_nilocked(node,
3916 cmd == BC_ACQUIRE_DONE, 0);
3917 WARN_ON(free_node);
3918 binder_debug(BINDER_DEBUG_USER_REFS,
3919 "%d:%d %s node %d ls %d lw %d tr %d\n",
3920 proc->pid, thread->pid,
3921 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3922 node->debug_id, node->local_strong_refs,
3923 node->local_weak_refs, node->tmp_refs);
3924 binder_node_inner_unlock(node);
3925 binder_put_node(node);
3926 break;
3927 }
3928 case BC_ATTEMPT_ACQUIRE:
3929 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3930 return -EINVAL;
3931 case BC_ACQUIRE_RESULT:
3932 pr_err("BC_ACQUIRE_RESULT not supported\n");
3933 return -EINVAL;
3934
3935 case BC_FREE_BUFFER: {
3936 binder_uintptr_t data_ptr;
3937 struct binder_buffer *buffer;
3938
3939 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3940 return -EFAULT;
3941 ptr += sizeof(binder_uintptr_t);
3942
3943 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3944 data_ptr);
3945 if (IS_ERR_OR_NULL(buffer)) {
3946 if (PTR_ERR(buffer) == -EPERM) {
3947 binder_user_error(
3948 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3949 proc->pid, thread->pid,
3950 (u64)data_ptr);
3951 } else {
3952 binder_user_error(
3953 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3954 proc->pid, thread->pid,
3955 (u64)data_ptr);
3956 }
3957 break;
3958 }
3959 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3960 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3961 proc->pid, thread->pid, (u64)data_ptr,
3962 buffer->debug_id,
3963 buffer->transaction ? "active" : "finished");
3964 binder_free_buf(proc, thread, buffer, false);
3965 break;
3966 }
3967
3968 case BC_TRANSACTION_SG:
3969 case BC_REPLY_SG: {
3970 struct binder_transaction_data_sg tr;
3971
3972 if (copy_from_user(&tr, ptr, sizeof(tr)))
3973 return -EFAULT;
3974 ptr += sizeof(tr);
3975 binder_transaction(proc, thread, &tr.transaction_data,
3976 cmd == BC_REPLY_SG, tr.buffers_size);
3977 break;
3978 }
3979 case BC_TRANSACTION:
3980 case BC_REPLY: {
3981 struct binder_transaction_data tr;
3982
3983 if (copy_from_user(&tr, ptr, sizeof(tr)))
3984 return -EFAULT;
3985 ptr += sizeof(tr);
3986 binder_transaction(proc, thread, &tr,
3987 cmd == BC_REPLY, 0);
3988 break;
3989 }
3990
3991 case BC_REGISTER_LOOPER:
3992 binder_debug(BINDER_DEBUG_THREADS,
3993 "%d:%d BC_REGISTER_LOOPER\n",
3994 proc->pid, thread->pid);
3995 binder_inner_proc_lock(proc);
3996 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3997 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3998 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3999 proc->pid, thread->pid);
4000 } else if (proc->requested_threads == 0) {
4001 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4002 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4003 proc->pid, thread->pid);
4004 } else {
4005 proc->requested_threads--;
4006 proc->requested_threads_started++;
4007 }
4008 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4009 binder_inner_proc_unlock(proc);
4010 break;
4011 case BC_ENTER_LOOPER:
4012 binder_debug(BINDER_DEBUG_THREADS,
4013 "%d:%d BC_ENTER_LOOPER\n",
4014 proc->pid, thread->pid);
4015 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4016 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4017 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4018 proc->pid, thread->pid);
4019 }
4020 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4021 break;
4022 case BC_EXIT_LOOPER:
4023 binder_debug(BINDER_DEBUG_THREADS,
4024 "%d:%d BC_EXIT_LOOPER\n",
4025 proc->pid, thread->pid);
4026 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4027 break;
4028
4029 case BC_REQUEST_DEATH_NOTIFICATION:
4030 case BC_CLEAR_DEATH_NOTIFICATION: {
4031 uint32_t target;
4032 binder_uintptr_t cookie;
4033 struct binder_ref *ref;
4034 struct binder_ref_death *death = NULL;
4035
4036 if (get_user(target, (uint32_t __user *)ptr))
4037 return -EFAULT;
4038 ptr += sizeof(uint32_t);
4039 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4040 return -EFAULT;
4041 ptr += sizeof(binder_uintptr_t);
4042 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4043
4044
4045
4046
4047 death = kzalloc(sizeof(*death), GFP_KERNEL);
4048 if (death == NULL) {
4049 WARN_ON(thread->return_error.cmd !=
4050 BR_OK);
4051 thread->return_error.cmd = BR_ERROR;
4052 binder_enqueue_thread_work(
4053 thread,
4054 &thread->return_error.work);
4055 binder_debug(
4056 BINDER_DEBUG_FAILED_TRANSACTION,
4057 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4058 proc->pid, thread->pid);
4059 break;
4060 }
4061 }
4062 binder_proc_lock(proc);
4063 ref = binder_get_ref_olocked(proc, target, false);
4064 if (ref == NULL) {
4065 binder_user_error("%d:%d %s invalid ref %d\n",
4066 proc->pid, thread->pid,
4067 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4068 "BC_REQUEST_DEATH_NOTIFICATION" :
4069 "BC_CLEAR_DEATH_NOTIFICATION",
4070 target);
4071 binder_proc_unlock(proc);
4072 kfree(death);
4073 break;
4074 }
4075
4076 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4077 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4078 proc->pid, thread->pid,
4079 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4080 "BC_REQUEST_DEATH_NOTIFICATION" :
4081 "BC_CLEAR_DEATH_NOTIFICATION",
4082 (u64)cookie, ref->data.debug_id,
4083 ref->data.desc, ref->data.strong,
4084 ref->data.weak, ref->node->debug_id);
4085
4086 binder_node_lock(ref->node);
4087 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4088 if (ref->death) {
4089 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4090 proc->pid, thread->pid);
4091 binder_node_unlock(ref->node);
4092 binder_proc_unlock(proc);
4093 kfree(death);
4094 break;
4095 }
4096 binder_stats_created(BINDER_STAT_DEATH);
4097 INIT_LIST_HEAD(&death->work.entry);
4098 death->cookie = cookie;
4099 ref->death = death;
4100 if (ref->node->proc == NULL) {
4101 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4102
4103 binder_inner_proc_lock(proc);
4104 binder_enqueue_work_ilocked(
4105 &ref->death->work, &proc->todo);
4106 binder_wakeup_proc_ilocked(proc);
4107 binder_inner_proc_unlock(proc);
4108 }
4109 } else {
4110 if (ref->death == NULL) {
4111 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4112 proc->pid, thread->pid);
4113 binder_node_unlock(ref->node);
4114 binder_proc_unlock(proc);
4115 break;
4116 }
4117 death = ref->death;
4118 if (death->cookie != cookie) {
4119 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4120 proc->pid, thread->pid,
4121 (u64)death->cookie,
4122 (u64)cookie);
4123 binder_node_unlock(ref->node);
4124 binder_proc_unlock(proc);
4125 break;
4126 }
4127 ref->death = NULL;
4128 binder_inner_proc_lock(proc);
4129 if (list_empty(&death->work.entry)) {
4130 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4131 if (thread->looper &
4132 (BINDER_LOOPER_STATE_REGISTERED |
4133 BINDER_LOOPER_STATE_ENTERED))
4134 binder_enqueue_thread_work_ilocked(
4135 thread,
4136 &death->work);
4137 else {
4138 binder_enqueue_work_ilocked(
4139 &death->work,
4140 &proc->todo);
4141 binder_wakeup_proc_ilocked(
4142 proc);
4143 }
4144 } else {
4145 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4146 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4147 }
4148 binder_inner_proc_unlock(proc);
4149 }
4150 binder_node_unlock(ref->node);
4151 binder_proc_unlock(proc);
4152 } break;
4153 case BC_DEAD_BINDER_DONE: {
4154 struct binder_work *w;
4155 binder_uintptr_t cookie;
4156 struct binder_ref_death *death = NULL;
4157
4158 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4159 return -EFAULT;
4160
4161 ptr += sizeof(cookie);
4162 binder_inner_proc_lock(proc);
4163 list_for_each_entry(w, &proc->delivered_death,
4164 entry) {
4165 struct binder_ref_death *tmp_death =
4166 container_of(w,
4167 struct binder_ref_death,
4168 work);
4169
4170 if (tmp_death->cookie == cookie) {
4171 death = tmp_death;
4172 break;
4173 }
4174 }
4175 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4176 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4177 proc->pid, thread->pid, (u64)cookie,
4178 death);
4179 if (death == NULL) {
4180 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4181 proc->pid, thread->pid, (u64)cookie);
4182 binder_inner_proc_unlock(proc);
4183 break;
4184 }
4185 binder_dequeue_work_ilocked(&death->work);
4186 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4187 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4188 if (thread->looper &
4189 (BINDER_LOOPER_STATE_REGISTERED |
4190 BINDER_LOOPER_STATE_ENTERED))
4191 binder_enqueue_thread_work_ilocked(
4192 thread, &death->work);
4193 else {
4194 binder_enqueue_work_ilocked(
4195 &death->work,
4196 &proc->todo);
4197 binder_wakeup_proc_ilocked(proc);
4198 }
4199 }
4200 binder_inner_proc_unlock(proc);
4201 } break;
4202
4203 default:
4204 pr_err("%d:%d unknown command %u\n",
4205 proc->pid, thread->pid, cmd);
4206 return -EINVAL;
4207 }
4208 *consumed = ptr - buffer;
4209 }
4210 return 0;
4211 }
4212
4213 static void binder_stat_br(struct binder_proc *proc,
4214 struct binder_thread *thread, uint32_t cmd)
4215 {
4216 trace_binder_return(cmd);
4217 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4218 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4219 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4220 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4221 }
4222 }
4223
4224 static int binder_put_node_cmd(struct binder_proc *proc,
4225 struct binder_thread *thread,
4226 void __user **ptrp,
4227 binder_uintptr_t node_ptr,
4228 binder_uintptr_t node_cookie,
4229 int node_debug_id,
4230 uint32_t cmd, const char *cmd_name)
4231 {
4232 void __user *ptr = *ptrp;
4233
4234 if (put_user(cmd, (uint32_t __user *)ptr))
4235 return -EFAULT;
4236 ptr += sizeof(uint32_t);
4237
4238 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4239 return -EFAULT;
4240 ptr += sizeof(binder_uintptr_t);
4241
4242 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4243 return -EFAULT;
4244 ptr += sizeof(binder_uintptr_t);
4245
4246 binder_stat_br(proc, thread, cmd);
4247 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4248 proc->pid, thread->pid, cmd_name, node_debug_id,
4249 (u64)node_ptr, (u64)node_cookie);
4250
4251 *ptrp = ptr;
4252 return 0;
4253 }
4254
4255 static int binder_wait_for_work(struct binder_thread *thread,
4256 bool do_proc_work)
4257 {
4258 DEFINE_WAIT(wait);
4259 struct binder_proc *proc = thread->proc;
4260 int ret = 0;
4261
4262 freezer_do_not_count();
4263 binder_inner_proc_lock(proc);
4264 for (;;) {
4265 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4266 if (binder_has_work_ilocked(thread, do_proc_work))
4267 break;
4268 if (do_proc_work)
4269 list_add(&thread->waiting_thread_node,
4270 &proc->waiting_threads);
4271 binder_inner_proc_unlock(proc);
4272 schedule();
4273 binder_inner_proc_lock(proc);
4274 list_del_init(&thread->waiting_thread_node);
4275 if (signal_pending(current)) {
4276 ret = -EINTR;
4277 break;
4278 }
4279 }
4280 finish_wait(&thread->wait, &wait);
4281 binder_inner_proc_unlock(proc);
4282 freezer_count();
4283
4284 return ret;
4285 }
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300 static int binder_apply_fd_fixups(struct binder_proc *proc,
4301 struct binder_transaction *t)
4302 {
4303 struct binder_txn_fd_fixup *fixup, *tmp;
4304 int ret = 0;
4305
4306 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4307 int fd = get_unused_fd_flags(O_CLOEXEC);
4308
4309 if (fd < 0) {
4310 binder_debug(BINDER_DEBUG_TRANSACTION,
4311 "failed fd fixup txn %d fd %d\n",
4312 t->debug_id, fd);
4313 ret = -ENOMEM;
4314 goto err;
4315 }
4316 binder_debug(BINDER_DEBUG_TRANSACTION,
4317 "fd fixup txn %d fd %d\n",
4318 t->debug_id, fd);
4319 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4320 fixup->target_fd = fd;
4321 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4322 fixup->offset, &fd,
4323 sizeof(u32))) {
4324 ret = -EINVAL;
4325 goto err;
4326 }
4327 }
4328 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4329 fd_install(fixup->target_fd, fixup->file);
4330 list_del(&fixup->fixup_entry);
4331 kfree(fixup);
4332 }
4333
4334 return ret;
4335
4336 err:
4337 binder_free_txn_fixups(t);
4338 return ret;
4339 }
4340
4341 static int binder_thread_read(struct binder_proc *proc,
4342 struct binder_thread *thread,
4343 binder_uintptr_t binder_buffer, size_t size,
4344 binder_size_t *consumed, int non_block)
4345 {
4346 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4347 void __user *ptr = buffer + *consumed;
4348 void __user *end = buffer + size;
4349
4350 int ret = 0;
4351 int wait_for_proc_work;
4352
4353 if (*consumed == 0) {
4354 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4355 return -EFAULT;
4356 ptr += sizeof(uint32_t);
4357 }
4358
4359 retry:
4360 binder_inner_proc_lock(proc);
4361 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4362 binder_inner_proc_unlock(proc);
4363
4364 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4365
4366 trace_binder_wait_for_work(wait_for_proc_work,
4367 !!thread->transaction_stack,
4368 !binder_worklist_empty(proc, &thread->todo));
4369 if (wait_for_proc_work) {
4370 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4371 BINDER_LOOPER_STATE_ENTERED))) {
4372 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4373 proc->pid, thread->pid, thread->looper);
4374 wait_event_interruptible(binder_user_error_wait,
4375 binder_stop_on_user_error < 2);
4376 }
4377 binder_set_nice(proc->default_priority);
4378 }
4379
4380 if (non_block) {
4381 if (!binder_has_work(thread, wait_for_proc_work))
4382 ret = -EAGAIN;
4383 } else {
4384 ret = binder_wait_for_work(thread, wait_for_proc_work);
4385 }
4386
4387 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4388
4389 if (ret)
4390 return ret;
4391
4392 while (1) {
4393 uint32_t cmd;
4394 struct binder_transaction_data_secctx tr;
4395 struct binder_transaction_data *trd = &tr.transaction_data;
4396 struct binder_work *w = NULL;
4397 struct list_head *list = NULL;
4398 struct binder_transaction *t = NULL;
4399 struct binder_thread *t_from;
4400 size_t trsize = sizeof(*trd);
4401
4402 binder_inner_proc_lock(proc);
4403 if (!binder_worklist_empty_ilocked(&thread->todo))
4404 list = &thread->todo;
4405 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4406 wait_for_proc_work)
4407 list = &proc->todo;
4408 else {
4409 binder_inner_proc_unlock(proc);
4410
4411
4412 if (ptr - buffer == 4 && !thread->looper_need_return)
4413 goto retry;
4414 break;
4415 }
4416
4417 if (end - ptr < sizeof(tr) + 4) {
4418 binder_inner_proc_unlock(proc);
4419 break;
4420 }
4421 w = binder_dequeue_work_head_ilocked(list);
4422 if (binder_worklist_empty_ilocked(&thread->todo))
4423 thread->process_todo = false;
4424
4425 switch (w->type) {
4426 case BINDER_WORK_TRANSACTION: {
4427 binder_inner_proc_unlock(proc);
4428 t = container_of(w, struct binder_transaction, work);
4429 } break;
4430 case BINDER_WORK_RETURN_ERROR: {
4431 struct binder_error *e = container_of(
4432 w, struct binder_error, work);
4433
4434 WARN_ON(e->cmd == BR_OK);
4435 binder_inner_proc_unlock(proc);
4436 if (put_user(e->cmd, (uint32_t __user *)ptr))
4437 return -EFAULT;
4438 cmd = e->cmd;
4439 e->cmd = BR_OK;
4440 ptr += sizeof(uint32_t);
4441
4442 binder_stat_br(proc, thread, cmd);
4443 } break;
4444 case BINDER_WORK_TRANSACTION_COMPLETE:
4445 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4446 if (proc->oneway_spam_detection_enabled &&
4447 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4448 cmd = BR_ONEWAY_SPAM_SUSPECT;
4449 else
4450 cmd = BR_TRANSACTION_COMPLETE;
4451 binder_inner_proc_unlock(proc);
4452 kfree(w);
4453 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4454 if (put_user(cmd, (uint32_t __user *)ptr))
4455 return -EFAULT;
4456 ptr += sizeof(uint32_t);
4457
4458 binder_stat_br(proc, thread, cmd);
4459 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4460 "%d:%d BR_TRANSACTION_COMPLETE\n",
4461 proc->pid, thread->pid);
4462 } break;
4463 case BINDER_WORK_NODE: {
4464 struct binder_node *node = container_of(w, struct binder_node, work);
4465 int strong, weak;
4466 binder_uintptr_t node_ptr = node->ptr;
4467 binder_uintptr_t node_cookie = node->cookie;
4468 int node_debug_id = node->debug_id;
4469 int has_weak_ref;
4470 int has_strong_ref;
4471 void __user *orig_ptr = ptr;
4472
4473 BUG_ON(proc != node->proc);
4474 strong = node->internal_strong_refs ||
4475 node->local_strong_refs;
4476 weak = !hlist_empty(&node->refs) ||
4477 node->local_weak_refs ||
4478 node->tmp_refs || strong;
4479 has_strong_ref = node->has_strong_ref;
4480 has_weak_ref = node->has_weak_ref;
4481
4482 if (weak && !has_weak_ref) {
4483 node->has_weak_ref = 1;
4484 node->pending_weak_ref = 1;
4485 node->local_weak_refs++;
4486 }
4487 if (strong && !has_strong_ref) {
4488 node->has_strong_ref = 1;
4489 node->pending_strong_ref = 1;
4490 node->local_strong_refs++;
4491 }
4492 if (!strong && has_strong_ref)
4493 node->has_strong_ref = 0;
4494 if (!weak && has_weak_ref)
4495 node->has_weak_ref = 0;
4496 if (!weak && !strong) {
4497 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4498 "%d:%d node %d u%016llx c%016llx deleted\n",
4499 proc->pid, thread->pid,
4500 node_debug_id,
4501 (u64)node_ptr,
4502 (u64)node_cookie);
4503 rb_erase(&node->rb_node, &proc->nodes);
4504 binder_inner_proc_unlock(proc);
4505 binder_node_lock(node);
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515 binder_node_unlock(node);
4516 binder_free_node(node);
4517 } else
4518 binder_inner_proc_unlock(proc);
4519
4520 if (weak && !has_weak_ref)
4521 ret = binder_put_node_cmd(
4522 proc, thread, &ptr, node_ptr,
4523 node_cookie, node_debug_id,
4524 BR_INCREFS, "BR_INCREFS");
4525 if (!ret && strong && !has_strong_ref)
4526 ret = binder_put_node_cmd(
4527 proc, thread, &ptr, node_ptr,
4528 node_cookie, node_debug_id,
4529 BR_ACQUIRE, "BR_ACQUIRE");
4530 if (!ret && !strong && has_strong_ref)
4531 ret = binder_put_node_cmd(
4532 proc, thread, &ptr, node_ptr,
4533 node_cookie, node_debug_id,
4534 BR_RELEASE, "BR_RELEASE");
4535 if (!ret && !weak && has_weak_ref)
4536 ret = binder_put_node_cmd(
4537 proc, thread, &ptr, node_ptr,
4538 node_cookie, node_debug_id,
4539 BR_DECREFS, "BR_DECREFS");
4540 if (orig_ptr == ptr)
4541 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4542 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4543 proc->pid, thread->pid,
4544 node_debug_id,
4545 (u64)node_ptr,
4546 (u64)node_cookie);
4547 if (ret)
4548 return ret;
4549 } break;
4550 case BINDER_WORK_DEAD_BINDER:
4551 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4552 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4553 struct binder_ref_death *death;
4554 uint32_t cmd;
4555 binder_uintptr_t cookie;
4556
4557 death = container_of(w, struct binder_ref_death, work);
4558 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4559 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4560 else
4561 cmd = BR_DEAD_BINDER;
4562 cookie = death->cookie;
4563
4564 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4565 "%d:%d %s %016llx\n",
4566 proc->pid, thread->pid,
4567 cmd == BR_DEAD_BINDER ?
4568 "BR_DEAD_BINDER" :
4569 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4570 (u64)cookie);
4571 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4572 binder_inner_proc_unlock(proc);
4573 kfree(death);
4574 binder_stats_deleted(BINDER_STAT_DEATH);
4575 } else {
4576 binder_enqueue_work_ilocked(
4577 w, &proc->delivered_death);
4578 binder_inner_proc_unlock(proc);
4579 }
4580 if (put_user(cmd, (uint32_t __user *)ptr))
4581 return -EFAULT;
4582 ptr += sizeof(uint32_t);
4583 if (put_user(cookie,
4584 (binder_uintptr_t __user *)ptr))
4585 return -EFAULT;
4586 ptr += sizeof(binder_uintptr_t);
4587 binder_stat_br(proc, thread, cmd);
4588 if (cmd == BR_DEAD_BINDER)
4589 goto done;
4590 } break;
4591 default:
4592 binder_inner_proc_unlock(proc);
4593 pr_err("%d:%d: bad work type %d\n",
4594 proc->pid, thread->pid, w->type);
4595 break;
4596 }
4597
4598 if (!t)
4599 continue;
4600
4601 BUG_ON(t->buffer == NULL);
4602 if (t->buffer->target_node) {
4603 struct binder_node *target_node = t->buffer->target_node;
4604
4605 trd->target.ptr = target_node->ptr;
4606 trd->cookie = target_node->cookie;
4607 t->saved_priority = task_nice(current);
4608 if (t->priority < target_node->min_priority &&
4609 !(t->flags & TF_ONE_WAY))
4610 binder_set_nice(t->priority);
4611 else if (!(t->flags & TF_ONE_WAY) ||
4612 t->saved_priority > target_node->min_priority)
4613 binder_set_nice(target_node->min_priority);
4614 cmd = BR_TRANSACTION;
4615 } else {
4616 trd->target.ptr = 0;
4617 trd->cookie = 0;
4618 cmd = BR_REPLY;
4619 }
4620 trd->code = t->code;
4621 trd->flags = t->flags;
4622 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4623
4624 t_from = binder_get_txn_from(t);
4625 if (t_from) {
4626 struct task_struct *sender = t_from->proc->tsk;
4627
4628 trd->sender_pid =
4629 task_tgid_nr_ns(sender,
4630 task_active_pid_ns(current));
4631 } else {
4632 trd->sender_pid = 0;
4633 }
4634
4635 ret = binder_apply_fd_fixups(proc, t);
4636 if (ret) {
4637 struct binder_buffer *buffer = t->buffer;
4638 bool oneway = !!(t->flags & TF_ONE_WAY);
4639 int tid = t->debug_id;
4640
4641 if (t_from)
4642 binder_thread_dec_tmpref(t_from);
4643 buffer->transaction = NULL;
4644 binder_cleanup_transaction(t, "fd fixups failed",
4645 BR_FAILED_REPLY);
4646 binder_free_buf(proc, thread, buffer, true);
4647 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4648 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4649 proc->pid, thread->pid,
4650 oneway ? "async " :
4651 (cmd == BR_REPLY ? "reply " : ""),
4652 tid, BR_FAILED_REPLY, ret, __LINE__);
4653 if (cmd == BR_REPLY) {
4654 cmd = BR_FAILED_REPLY;
4655 if (put_user(cmd, (uint32_t __user *)ptr))
4656 return -EFAULT;
4657 ptr += sizeof(uint32_t);
4658 binder_stat_br(proc, thread, cmd);
4659 break;
4660 }
4661 continue;
4662 }
4663 trd->data_size = t->buffer->data_size;
4664 trd->offsets_size = t->buffer->offsets_size;
4665 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4666 trd->data.ptr.offsets = trd->data.ptr.buffer +
4667 ALIGN(t->buffer->data_size,
4668 sizeof(void *));
4669
4670 tr.secctx = t->security_ctx;
4671 if (t->security_ctx) {
4672 cmd = BR_TRANSACTION_SEC_CTX;
4673 trsize = sizeof(tr);
4674 }
4675 if (put_user(cmd, (uint32_t __user *)ptr)) {
4676 if (t_from)
4677 binder_thread_dec_tmpref(t_from);
4678
4679 binder_cleanup_transaction(t, "put_user failed",
4680 BR_FAILED_REPLY);
4681
4682 return -EFAULT;
4683 }
4684 ptr += sizeof(uint32_t);
4685 if (copy_to_user(ptr, &tr, trsize)) {
4686 if (t_from)
4687 binder_thread_dec_tmpref(t_from);
4688
4689 binder_cleanup_transaction(t, "copy_to_user failed",
4690 BR_FAILED_REPLY);
4691
4692 return -EFAULT;
4693 }
4694 ptr += trsize;
4695
4696 trace_binder_transaction_received(t);
4697 binder_stat_br(proc, thread, cmd);
4698 binder_debug(BINDER_DEBUG_TRANSACTION,
4699 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4700 proc->pid, thread->pid,
4701 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4702 (cmd == BR_TRANSACTION_SEC_CTX) ?
4703 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4704 t->debug_id, t_from ? t_from->proc->pid : 0,
4705 t_from ? t_from->pid : 0, cmd,
4706 t->buffer->data_size, t->buffer->offsets_size,
4707 (u64)trd->data.ptr.buffer,
4708 (u64)trd->data.ptr.offsets);
4709
4710 if (t_from)
4711 binder_thread_dec_tmpref(t_from);
4712 t->buffer->allow_user_free = 1;
4713 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4714 binder_inner_proc_lock(thread->proc);
4715 t->to_parent = thread->transaction_stack;
4716 t->to_thread = thread;
4717 thread->transaction_stack = t;
4718 binder_inner_proc_unlock(thread->proc);
4719 } else {
4720 binder_free_transaction(t);
4721 }
4722 break;
4723 }
4724
4725 done:
4726
4727 *consumed = ptr - buffer;
4728 binder_inner_proc_lock(proc);
4729 if (proc->requested_threads == 0 &&
4730 list_empty(&thread->proc->waiting_threads) &&
4731 proc->requested_threads_started < proc->max_threads &&
4732 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4733 BINDER_LOOPER_STATE_ENTERED))
4734 ) {
4735 proc->requested_threads++;
4736 binder_inner_proc_unlock(proc);
4737 binder_debug(BINDER_DEBUG_THREADS,
4738 "%d:%d BR_SPAWN_LOOPER\n",
4739 proc->pid, thread->pid);
4740 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4741 return -EFAULT;
4742 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4743 } else
4744 binder_inner_proc_unlock(proc);
4745 return 0;
4746 }
4747
4748 static void binder_release_work(struct binder_proc *proc,
4749 struct list_head *list)
4750 {
4751 struct binder_work *w;
4752 enum binder_work_type wtype;
4753
4754 while (1) {
4755 binder_inner_proc_lock(proc);
4756 w = binder_dequeue_work_head_ilocked(list);
4757 wtype = w ? w->type : 0;
4758 binder_inner_proc_unlock(proc);
4759 if (!w)
4760 return;
4761
4762 switch (wtype) {
4763 case BINDER_WORK_TRANSACTION: {
4764 struct binder_transaction *t;
4765
4766 t = container_of(w, struct binder_transaction, work);
4767
4768 binder_cleanup_transaction(t, "process died.",
4769 BR_DEAD_REPLY);
4770 } break;
4771 case BINDER_WORK_RETURN_ERROR: {
4772 struct binder_error *e = container_of(
4773 w, struct binder_error, work);
4774
4775 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4776 "undelivered TRANSACTION_ERROR: %u\n",
4777 e->cmd);
4778 } break;
4779 case BINDER_WORK_TRANSACTION_COMPLETE: {
4780 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4781 "undelivered TRANSACTION_COMPLETE\n");
4782 kfree(w);
4783 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4784 } break;
4785 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4786 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4787 struct binder_ref_death *death;
4788
4789 death = container_of(w, struct binder_ref_death, work);
4790 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4791 "undelivered death notification, %016llx\n",
4792 (u64)death->cookie);
4793 kfree(death);
4794 binder_stats_deleted(BINDER_STAT_DEATH);
4795 } break;
4796 case BINDER_WORK_NODE:
4797 break;
4798 default:
4799 pr_err("unexpected work type, %d, not freed\n",
4800 wtype);
4801 break;
4802 }
4803 }
4804
4805 }
4806
4807 static struct binder_thread *binder_get_thread_ilocked(
4808 struct binder_proc *proc, struct binder_thread *new_thread)
4809 {
4810 struct binder_thread *thread = NULL;
4811 struct rb_node *parent = NULL;
4812 struct rb_node **p = &proc->threads.rb_node;
4813
4814 while (*p) {
4815 parent = *p;
4816 thread = rb_entry(parent, struct binder_thread, rb_node);
4817
4818 if (current->pid < thread->pid)
4819 p = &(*p)->rb_left;
4820 else if (current->pid > thread->pid)
4821 p = &(*p)->rb_right;
4822 else
4823 return thread;
4824 }
4825 if (!new_thread)
4826 return NULL;
4827 thread = new_thread;
4828 binder_stats_created(BINDER_STAT_THREAD);
4829 thread->proc = proc;
4830 thread->pid = current->pid;
4831 atomic_set(&thread->tmp_ref, 0);
4832 init_waitqueue_head(&thread->wait);
4833 INIT_LIST_HEAD(&thread->todo);
4834 rb_link_node(&thread->rb_node, parent, p);
4835 rb_insert_color(&thread->rb_node, &proc->threads);
4836 thread->looper_need_return = true;
4837 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4838 thread->return_error.cmd = BR_OK;
4839 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4840 thread->reply_error.cmd = BR_OK;
4841 thread->ee.command = BR_OK;
4842 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4843 return thread;
4844 }
4845
4846 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4847 {
4848 struct binder_thread *thread;
4849 struct binder_thread *new_thread;
4850
4851 binder_inner_proc_lock(proc);
4852 thread = binder_get_thread_ilocked(proc, NULL);
4853 binder_inner_proc_unlock(proc);
4854 if (!thread) {
4855 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4856 if (new_thread == NULL)
4857 return NULL;
4858 binder_inner_proc_lock(proc);
4859 thread = binder_get_thread_ilocked(proc, new_thread);
4860 binder_inner_proc_unlock(proc);
4861 if (thread != new_thread)
4862 kfree(new_thread);
4863 }
4864 return thread;
4865 }
4866
4867 static void binder_free_proc(struct binder_proc *proc)
4868 {
4869 struct binder_device *device;
4870
4871 BUG_ON(!list_empty(&proc->todo));
4872 BUG_ON(!list_empty(&proc->delivered_death));
4873 if (proc->outstanding_txns)
4874 pr_warn("%s: Unexpected outstanding_txns %d\n",
4875 __func__, proc->outstanding_txns);
4876 device = container_of(proc->context, struct binder_device, context);
4877 if (refcount_dec_and_test(&device->ref)) {
4878 kfree(proc->context->name);
4879 kfree(device);
4880 }
4881 binder_alloc_deferred_release(&proc->alloc);
4882 put_task_struct(proc->tsk);
4883 put_cred(proc->cred);
4884 binder_stats_deleted(BINDER_STAT_PROC);
4885 kfree(proc);
4886 }
4887
4888 static void binder_free_thread(struct binder_thread *thread)
4889 {
4890 BUG_ON(!list_empty(&thread->todo));
4891 binder_stats_deleted(BINDER_STAT_THREAD);
4892 binder_proc_dec_tmpref(thread->proc);
4893 kfree(thread);
4894 }
4895
4896 static int binder_thread_release(struct binder_proc *proc,
4897 struct binder_thread *thread)
4898 {
4899 struct binder_transaction *t;
4900 struct binder_transaction *send_reply = NULL;
4901 int active_transactions = 0;
4902 struct binder_transaction *last_t = NULL;
4903
4904 binder_inner_proc_lock(thread->proc);
4905
4906
4907
4908
4909
4910
4911 proc->tmp_ref++;
4912
4913
4914
4915
4916 atomic_inc(&thread->tmp_ref);
4917 rb_erase(&thread->rb_node, &proc->threads);
4918 t = thread->transaction_stack;
4919 if (t) {
4920 spin_lock(&t->lock);
4921 if (t->to_thread == thread)
4922 send_reply = t;
4923 } else {
4924 __acquire(&t->lock);
4925 }
4926 thread->is_dead = true;
4927
4928 while (t) {
4929 last_t = t;
4930 active_transactions++;
4931 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4932 "release %d:%d transaction %d %s, still active\n",
4933 proc->pid, thread->pid,
4934 t->debug_id,
4935 (t->to_thread == thread) ? "in" : "out");
4936
4937 if (t->to_thread == thread) {
4938 thread->proc->outstanding_txns--;
4939 t->to_proc = NULL;
4940 t->to_thread = NULL;
4941 if (t->buffer) {
4942 t->buffer->transaction = NULL;
4943 t->buffer = NULL;
4944 }
4945 t = t->to_parent;
4946 } else if (t->from == thread) {
4947 t->from = NULL;
4948 t = t->from_parent;
4949 } else
4950 BUG();
4951 spin_unlock(&last_t->lock);
4952 if (t)
4953 spin_lock(&t->lock);
4954 else
4955 __acquire(&t->lock);
4956 }
4957
4958 __release(&t->lock);
4959
4960
4961
4962
4963
4964 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4965 wake_up_pollfree(&thread->wait);
4966
4967 binder_inner_proc_unlock(thread->proc);
4968
4969
4970
4971
4972
4973
4974
4975
4976 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4977 synchronize_rcu();
4978
4979 if (send_reply)
4980 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4981 binder_release_work(proc, &thread->todo);
4982 binder_thread_dec_tmpref(thread);
4983 return active_transactions;
4984 }
4985
4986 static __poll_t binder_poll(struct file *filp,
4987 struct poll_table_struct *wait)
4988 {
4989 struct binder_proc *proc = filp->private_data;
4990 struct binder_thread *thread = NULL;
4991 bool wait_for_proc_work;
4992
4993 thread = binder_get_thread(proc);
4994 if (!thread)
4995 return POLLERR;
4996
4997 binder_inner_proc_lock(thread->proc);
4998 thread->looper |= BINDER_LOOPER_STATE_POLL;
4999 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5000
5001 binder_inner_proc_unlock(thread->proc);
5002
5003 poll_wait(filp, &thread->wait, wait);
5004
5005 if (binder_has_work(thread, wait_for_proc_work))
5006 return EPOLLIN;
5007
5008 return 0;
5009 }
5010
5011 static int binder_ioctl_write_read(struct file *filp,
5012 unsigned int cmd, unsigned long arg,
5013 struct binder_thread *thread)
5014 {
5015 int ret = 0;
5016 struct binder_proc *proc = filp->private_data;
5017 unsigned int size = _IOC_SIZE(cmd);
5018 void __user *ubuf = (void __user *)arg;
5019 struct binder_write_read bwr;
5020
5021 if (size != sizeof(struct binder_write_read)) {
5022 ret = -EINVAL;
5023 goto out;
5024 }
5025 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5026 ret = -EFAULT;
5027 goto out;
5028 }
5029 binder_debug(BINDER_DEBUG_READ_WRITE,
5030 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5031 proc->pid, thread->pid,
5032 (u64)bwr.write_size, (u64)bwr.write_buffer,
5033 (u64)bwr.read_size, (u64)bwr.read_buffer);
5034
5035 if (bwr.write_size > 0) {
5036 ret = binder_thread_write(proc, thread,
5037 bwr.write_buffer,
5038 bwr.write_size,
5039 &bwr.write_consumed);
5040 trace_binder_write_done(ret);
5041 if (ret < 0) {
5042 bwr.read_consumed = 0;
5043 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5044 ret = -EFAULT;
5045 goto out;
5046 }
5047 }
5048 if (bwr.read_size > 0) {
5049 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5050 bwr.read_size,
5051 &bwr.read_consumed,
5052 filp->f_flags & O_NONBLOCK);
5053 trace_binder_read_done(ret);
5054 binder_inner_proc_lock(proc);
5055 if (!binder_worklist_empty_ilocked(&proc->todo))
5056 binder_wakeup_proc_ilocked(proc);
5057 binder_inner_proc_unlock(proc);
5058 if (ret < 0) {
5059 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5060 ret = -EFAULT;
5061 goto out;
5062 }
5063 }
5064 binder_debug(BINDER_DEBUG_READ_WRITE,
5065 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5066 proc->pid, thread->pid,
5067 (u64)bwr.write_consumed, (u64)bwr.write_size,
5068 (u64)bwr.read_consumed, (u64)bwr.read_size);
5069 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5070 ret = -EFAULT;
5071 goto out;
5072 }
5073 out:
5074 return ret;
5075 }
5076
5077 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5078 struct flat_binder_object *fbo)
5079 {
5080 int ret = 0;
5081 struct binder_proc *proc = filp->private_data;
5082 struct binder_context *context = proc->context;
5083 struct binder_node *new_node;
5084 kuid_t curr_euid = current_euid();
5085
5086 mutex_lock(&context->context_mgr_node_lock);
5087 if (context->binder_context_mgr_node) {
5088 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5089 ret = -EBUSY;
5090 goto out;
5091 }
5092 ret = security_binder_set_context_mgr(proc->cred);
5093 if (ret < 0)
5094 goto out;
5095 if (uid_valid(context->binder_context_mgr_uid)) {
5096 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5097 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5098 from_kuid(&init_user_ns, curr_euid),
5099 from_kuid(&init_user_ns,
5100 context->binder_context_mgr_uid));
5101 ret = -EPERM;
5102 goto out;
5103 }
5104 } else {
5105 context->binder_context_mgr_uid = curr_euid;
5106 }
5107 new_node = binder_new_node(proc, fbo);
5108 if (!new_node) {
5109 ret = -ENOMEM;
5110 goto out;
5111 }
5112 binder_node_lock(new_node);
5113 new_node->local_weak_refs++;
5114 new_node->local_strong_refs++;
5115 new_node->has_strong_ref = 1;
5116 new_node->has_weak_ref = 1;
5117 context->binder_context_mgr_node = new_node;
5118 binder_node_unlock(new_node);
5119 binder_put_node(new_node);
5120 out:
5121 mutex_unlock(&context->context_mgr_node_lock);
5122 return ret;
5123 }
5124
5125 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5126 struct binder_node_info_for_ref *info)
5127 {
5128 struct binder_node *node;
5129 struct binder_context *context = proc->context;
5130 __u32 handle = info->handle;
5131
5132 if (info->strong_count || info->weak_count || info->reserved1 ||
5133 info->reserved2 || info->reserved3) {
5134 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5135 proc->pid);
5136 return -EINVAL;
5137 }
5138
5139
5140 mutex_lock(&context->context_mgr_node_lock);
5141 if (!context->binder_context_mgr_node ||
5142 context->binder_context_mgr_node->proc != proc) {
5143 mutex_unlock(&context->context_mgr_node_lock);
5144 return -EPERM;
5145 }
5146 mutex_unlock(&context->context_mgr_node_lock);
5147
5148 node = binder_get_node_from_ref(proc, handle, true, NULL);
5149 if (!node)
5150 return -EINVAL;
5151
5152 info->strong_count = node->local_strong_refs +
5153 node->internal_strong_refs;
5154 info->weak_count = node->local_weak_refs;
5155
5156 binder_put_node(node);
5157
5158 return 0;
5159 }
5160
5161 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5162 struct binder_node_debug_info *info)
5163 {
5164 struct rb_node *n;
5165 binder_uintptr_t ptr = info->ptr;
5166
5167 memset(info, 0, sizeof(*info));
5168
5169 binder_inner_proc_lock(proc);
5170 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5171 struct binder_node *node = rb_entry(n, struct binder_node,
5172 rb_node);
5173 if (node->ptr > ptr) {
5174 info->ptr = node->ptr;
5175 info->cookie = node->cookie;
5176 info->has_strong_ref = node->has_strong_ref;
5177 info->has_weak_ref = node->has_weak_ref;
5178 break;
5179 }
5180 }
5181 binder_inner_proc_unlock(proc);
5182
5183 return 0;
5184 }
5185
5186 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5187 {
5188 struct rb_node *n;
5189 struct binder_thread *thread;
5190
5191 if (proc->outstanding_txns > 0)
5192 return true;
5193
5194 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5195 thread = rb_entry(n, struct binder_thread, rb_node);
5196 if (thread->transaction_stack)
5197 return true;
5198 }
5199 return false;
5200 }
5201
5202 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5203 struct binder_proc *target_proc)
5204 {
5205 int ret = 0;
5206
5207 if (!info->enable) {
5208 binder_inner_proc_lock(target_proc);
5209 target_proc->sync_recv = false;
5210 target_proc->async_recv = false;
5211 target_proc->is_frozen = false;
5212 binder_inner_proc_unlock(target_proc);
5213 return 0;
5214 }
5215
5216
5217
5218
5219
5220
5221 binder_inner_proc_lock(target_proc);
5222 target_proc->sync_recv = false;
5223 target_proc->async_recv = false;
5224 target_proc->is_frozen = true;
5225 binder_inner_proc_unlock(target_proc);
5226
5227 if (info->timeout_ms > 0)
5228 ret = wait_event_interruptible_timeout(
5229 target_proc->freeze_wait,
5230 (!target_proc->outstanding_txns),
5231 msecs_to_jiffies(info->timeout_ms));
5232
5233
5234 if (ret >= 0) {
5235 binder_inner_proc_lock(target_proc);
5236 if (binder_txns_pending_ilocked(target_proc))
5237 ret = -EAGAIN;
5238 binder_inner_proc_unlock(target_proc);
5239 }
5240
5241 if (ret < 0) {
5242 binder_inner_proc_lock(target_proc);
5243 target_proc->is_frozen = false;
5244 binder_inner_proc_unlock(target_proc);
5245 }
5246
5247 return ret;
5248 }
5249
5250 static int binder_ioctl_get_freezer_info(
5251 struct binder_frozen_status_info *info)
5252 {
5253 struct binder_proc *target_proc;
5254 bool found = false;
5255 __u32 txns_pending;
5256
5257 info->sync_recv = 0;
5258 info->async_recv = 0;
5259
5260 mutex_lock(&binder_procs_lock);
5261 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5262 if (target_proc->pid == info->pid) {
5263 found = true;
5264 binder_inner_proc_lock(target_proc);
5265 txns_pending = binder_txns_pending_ilocked(target_proc);
5266 info->sync_recv |= target_proc->sync_recv |
5267 (txns_pending << 1);
5268 info->async_recv |= target_proc->async_recv;
5269 binder_inner_proc_unlock(target_proc);
5270 }
5271 }
5272 mutex_unlock(&binder_procs_lock);
5273
5274 if (!found)
5275 return -EINVAL;
5276
5277 return 0;
5278 }
5279
5280 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5281 void __user *ubuf)
5282 {
5283 struct binder_extended_error ee;
5284
5285 binder_inner_proc_lock(thread->proc);
5286 ee = thread->ee;
5287 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5288 binder_inner_proc_unlock(thread->proc);
5289
5290 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5291 return -EFAULT;
5292
5293 return 0;
5294 }
5295
5296 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5297 {
5298 int ret;
5299 struct binder_proc *proc = filp->private_data;
5300 struct binder_thread *thread;
5301 unsigned int size = _IOC_SIZE(cmd);
5302 void __user *ubuf = (void __user *)arg;
5303
5304
5305
5306
5307 binder_selftest_alloc(&proc->alloc);
5308
5309 trace_binder_ioctl(cmd, arg);
5310
5311 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5312 if (ret)
5313 goto err_unlocked;
5314
5315 thread = binder_get_thread(proc);
5316 if (thread == NULL) {
5317 ret = -ENOMEM;
5318 goto err;
5319 }
5320
5321 switch (cmd) {
5322 case BINDER_WRITE_READ:
5323 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5324 if (ret)
5325 goto err;
5326 break;
5327 case BINDER_SET_MAX_THREADS: {
5328 int max_threads;
5329
5330 if (copy_from_user(&max_threads, ubuf,
5331 sizeof(max_threads))) {
5332 ret = -EINVAL;
5333 goto err;
5334 }
5335 binder_inner_proc_lock(proc);
5336 proc->max_threads = max_threads;
5337 binder_inner_proc_unlock(proc);
5338 break;
5339 }
5340 case BINDER_SET_CONTEXT_MGR_EXT: {
5341 struct flat_binder_object fbo;
5342
5343 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5344 ret = -EINVAL;
5345 goto err;
5346 }
5347 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5348 if (ret)
5349 goto err;
5350 break;
5351 }
5352 case BINDER_SET_CONTEXT_MGR:
5353 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5354 if (ret)
5355 goto err;
5356 break;
5357 case BINDER_THREAD_EXIT:
5358 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5359 proc->pid, thread->pid);
5360 binder_thread_release(proc, thread);
5361 thread = NULL;
5362 break;
5363 case BINDER_VERSION: {
5364 struct binder_version __user *ver = ubuf;
5365
5366 if (size != sizeof(struct binder_version)) {
5367 ret = -EINVAL;
5368 goto err;
5369 }
5370 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5371 &ver->protocol_version)) {
5372 ret = -EINVAL;
5373 goto err;
5374 }
5375 break;
5376 }
5377 case BINDER_GET_NODE_INFO_FOR_REF: {
5378 struct binder_node_info_for_ref info;
5379
5380 if (copy_from_user(&info, ubuf, sizeof(info))) {
5381 ret = -EFAULT;
5382 goto err;
5383 }
5384
5385 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5386 if (ret < 0)
5387 goto err;
5388
5389 if (copy_to_user(ubuf, &info, sizeof(info))) {
5390 ret = -EFAULT;
5391 goto err;
5392 }
5393
5394 break;
5395 }
5396 case BINDER_GET_NODE_DEBUG_INFO: {
5397 struct binder_node_debug_info info;
5398
5399 if (copy_from_user(&info, ubuf, sizeof(info))) {
5400 ret = -EFAULT;
5401 goto err;
5402 }
5403
5404 ret = binder_ioctl_get_node_debug_info(proc, &info);
5405 if (ret < 0)
5406 goto err;
5407
5408 if (copy_to_user(ubuf, &info, sizeof(info))) {
5409 ret = -EFAULT;
5410 goto err;
5411 }
5412 break;
5413 }
5414 case BINDER_FREEZE: {
5415 struct binder_freeze_info info;
5416 struct binder_proc **target_procs = NULL, *target_proc;
5417 int target_procs_count = 0, i = 0;
5418
5419 ret = 0;
5420
5421 if (copy_from_user(&info, ubuf, sizeof(info))) {
5422 ret = -EFAULT;
5423 goto err;
5424 }
5425
5426 mutex_lock(&binder_procs_lock);
5427 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5428 if (target_proc->pid == info.pid)
5429 target_procs_count++;
5430 }
5431
5432 if (target_procs_count == 0) {
5433 mutex_unlock(&binder_procs_lock);
5434 ret = -EINVAL;
5435 goto err;
5436 }
5437
5438 target_procs = kcalloc(target_procs_count,
5439 sizeof(struct binder_proc *),
5440 GFP_KERNEL);
5441
5442 if (!target_procs) {
5443 mutex_unlock(&binder_procs_lock);
5444 ret = -ENOMEM;
5445 goto err;
5446 }
5447
5448 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5449 if (target_proc->pid != info.pid)
5450 continue;
5451
5452 binder_inner_proc_lock(target_proc);
5453 target_proc->tmp_ref++;
5454 binder_inner_proc_unlock(target_proc);
5455
5456 target_procs[i++] = target_proc;
5457 }
5458 mutex_unlock(&binder_procs_lock);
5459
5460 for (i = 0; i < target_procs_count; i++) {
5461 if (ret >= 0)
5462 ret = binder_ioctl_freeze(&info,
5463 target_procs[i]);
5464
5465 binder_proc_dec_tmpref(target_procs[i]);
5466 }
5467
5468 kfree(target_procs);
5469
5470 if (ret < 0)
5471 goto err;
5472 break;
5473 }
5474 case BINDER_GET_FROZEN_INFO: {
5475 struct binder_frozen_status_info info;
5476
5477 if (copy_from_user(&info, ubuf, sizeof(info))) {
5478 ret = -EFAULT;
5479 goto err;
5480 }
5481
5482 ret = binder_ioctl_get_freezer_info(&info);
5483 if (ret < 0)
5484 goto err;
5485
5486 if (copy_to_user(ubuf, &info, sizeof(info))) {
5487 ret = -EFAULT;
5488 goto err;
5489 }
5490 break;
5491 }
5492 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5493 uint32_t enable;
5494
5495 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5496 ret = -EFAULT;
5497 goto err;
5498 }
5499 binder_inner_proc_lock(proc);
5500 proc->oneway_spam_detection_enabled = (bool)enable;
5501 binder_inner_proc_unlock(proc);
5502 break;
5503 }
5504 case BINDER_GET_EXTENDED_ERROR:
5505 ret = binder_ioctl_get_extended_error(thread, ubuf);
5506 if (ret < 0)
5507 goto err;
5508 break;
5509 default:
5510 ret = -EINVAL;
5511 goto err;
5512 }
5513 ret = 0;
5514 err:
5515 if (thread)
5516 thread->looper_need_return = false;
5517 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5518 if (ret && ret != -EINTR)
5519 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5520 err_unlocked:
5521 trace_binder_ioctl_done(ret);
5522 return ret;
5523 }
5524
5525 static void binder_vma_open(struct vm_area_struct *vma)
5526 {
5527 struct binder_proc *proc = vma->vm_private_data;
5528
5529 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5530 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5531 proc->pid, vma->vm_start, vma->vm_end,
5532 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5533 (unsigned long)pgprot_val(vma->vm_page_prot));
5534 }
5535
5536 static void binder_vma_close(struct vm_area_struct *vma)
5537 {
5538 struct binder_proc *proc = vma->vm_private_data;
5539
5540 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5541 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5542 proc->pid, vma->vm_start, vma->vm_end,
5543 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5544 (unsigned long)pgprot_val(vma->vm_page_prot));
5545 binder_alloc_vma_close(&proc->alloc);
5546 }
5547
5548 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5549 {
5550 return VM_FAULT_SIGBUS;
5551 }
5552
5553 static const struct vm_operations_struct binder_vm_ops = {
5554 .open = binder_vma_open,
5555 .close = binder_vma_close,
5556 .fault = binder_vm_fault,
5557 };
5558
5559 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5560 {
5561 struct binder_proc *proc = filp->private_data;
5562
5563 if (proc->tsk != current->group_leader)
5564 return -EINVAL;
5565
5566 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5567 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5568 __func__, proc->pid, vma->vm_start, vma->vm_end,
5569 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5570 (unsigned long)pgprot_val(vma->vm_page_prot));
5571
5572 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5573 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5574 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5575 return -EPERM;
5576 }
5577 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5578 vma->vm_flags &= ~VM_MAYWRITE;
5579
5580 vma->vm_ops = &binder_vm_ops;
5581 vma->vm_private_data = proc;
5582
5583 return binder_alloc_mmap_handler(&proc->alloc, vma);
5584 }
5585
5586 static int binder_open(struct inode *nodp, struct file *filp)
5587 {
5588 struct binder_proc *proc, *itr;
5589 struct binder_device *binder_dev;
5590 struct binderfs_info *info;
5591 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5592 bool existing_pid = false;
5593
5594 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5595 current->group_leader->pid, current->pid);
5596
5597 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5598 if (proc == NULL)
5599 return -ENOMEM;
5600 spin_lock_init(&proc->inner_lock);
5601 spin_lock_init(&proc->outer_lock);
5602 get_task_struct(current->group_leader);
5603 proc->tsk = current->group_leader;
5604 proc->cred = get_cred(filp->f_cred);
5605 INIT_LIST_HEAD(&proc->todo);
5606 init_waitqueue_head(&proc->freeze_wait);
5607 proc->default_priority = task_nice(current);
5608
5609 if (is_binderfs_device(nodp)) {
5610 binder_dev = nodp->i_private;
5611 info = nodp->i_sb->s_fs_info;
5612 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5613 } else {
5614 binder_dev = container_of(filp->private_data,
5615 struct binder_device, miscdev);
5616 }
5617 refcount_inc(&binder_dev->ref);
5618 proc->context = &binder_dev->context;
5619 binder_alloc_init(&proc->alloc);
5620
5621 binder_stats_created(BINDER_STAT_PROC);
5622 proc->pid = current->group_leader->pid;
5623 INIT_LIST_HEAD(&proc->delivered_death);
5624 INIT_LIST_HEAD(&proc->waiting_threads);
5625 filp->private_data = proc;
5626
5627 mutex_lock(&binder_procs_lock);
5628 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5629 if (itr->pid == proc->pid) {
5630 existing_pid = true;
5631 break;
5632 }
5633 }
5634 hlist_add_head(&proc->proc_node, &binder_procs);
5635 mutex_unlock(&binder_procs_lock);
5636
5637 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5638 char strbuf[11];
5639
5640 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5641
5642
5643
5644
5645
5646
5647 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5648 binder_debugfs_dir_entry_proc,
5649 (void *)(unsigned long)proc->pid,
5650 &proc_fops);
5651 }
5652
5653 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5654 char strbuf[11];
5655 struct dentry *binderfs_entry;
5656
5657 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5658
5659
5660
5661
5662
5663
5664 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5665 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5666 if (!IS_ERR(binderfs_entry)) {
5667 proc->binderfs_entry = binderfs_entry;
5668 } else {
5669 int error;
5670
5671 error = PTR_ERR(binderfs_entry);
5672 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5673 strbuf, error);
5674 }
5675 }
5676
5677 return 0;
5678 }
5679
5680 static int binder_flush(struct file *filp, fl_owner_t id)
5681 {
5682 struct binder_proc *proc = filp->private_data;
5683
5684 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5685
5686 return 0;
5687 }
5688
5689 static void binder_deferred_flush(struct binder_proc *proc)
5690 {
5691 struct rb_node *n;
5692 int wake_count = 0;
5693
5694 binder_inner_proc_lock(proc);
5695 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5696 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5697
5698 thread->looper_need_return = true;
5699 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5700 wake_up_interruptible(&thread->wait);
5701 wake_count++;
5702 }
5703 }
5704 binder_inner_proc_unlock(proc);
5705
5706 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5707 "binder_flush: %d woke %d threads\n", proc->pid,
5708 wake_count);
5709 }
5710
5711 static int binder_release(struct inode *nodp, struct file *filp)
5712 {
5713 struct binder_proc *proc = filp->private_data;
5714
5715 debugfs_remove(proc->debugfs_entry);
5716
5717 if (proc->binderfs_entry) {
5718 binderfs_remove_file(proc->binderfs_entry);
5719 proc->binderfs_entry = NULL;
5720 }
5721
5722 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5723
5724 return 0;
5725 }
5726
5727 static int binder_node_release(struct binder_node *node, int refs)
5728 {
5729 struct binder_ref *ref;
5730 int death = 0;
5731 struct binder_proc *proc = node->proc;
5732
5733 binder_release_work(proc, &node->async_todo);
5734
5735 binder_node_lock(node);
5736 binder_inner_proc_lock(proc);
5737 binder_dequeue_work_ilocked(&node->work);
5738
5739
5740
5741 BUG_ON(!node->tmp_refs);
5742 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5743 binder_inner_proc_unlock(proc);
5744 binder_node_unlock(node);
5745 binder_free_node(node);
5746
5747 return refs;
5748 }
5749
5750 node->proc = NULL;
5751 node->local_strong_refs = 0;
5752 node->local_weak_refs = 0;
5753 binder_inner_proc_unlock(proc);
5754
5755 spin_lock(&binder_dead_nodes_lock);
5756 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5757 spin_unlock(&binder_dead_nodes_lock);
5758
5759 hlist_for_each_entry(ref, &node->refs, node_entry) {
5760 refs++;
5761
5762
5763
5764
5765
5766
5767 binder_inner_proc_lock(ref->proc);
5768 if (!ref->death) {
5769 binder_inner_proc_unlock(ref->proc);
5770 continue;
5771 }
5772
5773 death++;
5774
5775 BUG_ON(!list_empty(&ref->death->work.entry));
5776 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5777 binder_enqueue_work_ilocked(&ref->death->work,
5778 &ref->proc->todo);
5779 binder_wakeup_proc_ilocked(ref->proc);
5780 binder_inner_proc_unlock(ref->proc);
5781 }
5782
5783 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5784 "node %d now dead, refs %d, death %d\n",
5785 node->debug_id, refs, death);
5786 binder_node_unlock(node);
5787 binder_put_node(node);
5788
5789 return refs;
5790 }
5791
5792 static void binder_deferred_release(struct binder_proc *proc)
5793 {
5794 struct binder_context *context = proc->context;
5795 struct rb_node *n;
5796 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5797
5798 mutex_lock(&binder_procs_lock);
5799 hlist_del(&proc->proc_node);
5800 mutex_unlock(&binder_procs_lock);
5801
5802 mutex_lock(&context->context_mgr_node_lock);
5803 if (context->binder_context_mgr_node &&
5804 context->binder_context_mgr_node->proc == proc) {
5805 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5806 "%s: %d context_mgr_node gone\n",
5807 __func__, proc->pid);
5808 context->binder_context_mgr_node = NULL;
5809 }
5810 mutex_unlock(&context->context_mgr_node_lock);
5811 binder_inner_proc_lock(proc);
5812
5813
5814
5815
5816 proc->tmp_ref++;
5817
5818 proc->is_dead = true;
5819 proc->is_frozen = false;
5820 proc->sync_recv = false;
5821 proc->async_recv = false;
5822 threads = 0;
5823 active_transactions = 0;
5824 while ((n = rb_first(&proc->threads))) {
5825 struct binder_thread *thread;
5826
5827 thread = rb_entry(n, struct binder_thread, rb_node);
5828 binder_inner_proc_unlock(proc);
5829 threads++;
5830 active_transactions += binder_thread_release(proc, thread);
5831 binder_inner_proc_lock(proc);
5832 }
5833
5834 nodes = 0;
5835 incoming_refs = 0;
5836 while ((n = rb_first(&proc->nodes))) {
5837 struct binder_node *node;
5838
5839 node = rb_entry(n, struct binder_node, rb_node);
5840 nodes++;
5841
5842
5843
5844
5845
5846 binder_inc_node_tmpref_ilocked(node);
5847 rb_erase(&node->rb_node, &proc->nodes);
5848 binder_inner_proc_unlock(proc);
5849 incoming_refs = binder_node_release(node, incoming_refs);
5850 binder_inner_proc_lock(proc);
5851 }
5852 binder_inner_proc_unlock(proc);
5853
5854 outgoing_refs = 0;
5855 binder_proc_lock(proc);
5856 while ((n = rb_first(&proc->refs_by_desc))) {
5857 struct binder_ref *ref;
5858
5859 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5860 outgoing_refs++;
5861 binder_cleanup_ref_olocked(ref);
5862 binder_proc_unlock(proc);
5863 binder_free_ref(ref);
5864 binder_proc_lock(proc);
5865 }
5866 binder_proc_unlock(proc);
5867
5868 binder_release_work(proc, &proc->todo);
5869 binder_release_work(proc, &proc->delivered_death);
5870
5871 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5872 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5873 __func__, proc->pid, threads, nodes, incoming_refs,
5874 outgoing_refs, active_transactions);
5875
5876 binder_proc_dec_tmpref(proc);
5877 }
5878
5879 static void binder_deferred_func(struct work_struct *work)
5880 {
5881 struct binder_proc *proc;
5882
5883 int defer;
5884
5885 do {
5886 mutex_lock(&binder_deferred_lock);
5887 if (!hlist_empty(&binder_deferred_list)) {
5888 proc = hlist_entry(binder_deferred_list.first,
5889 struct binder_proc, deferred_work_node);
5890 hlist_del_init(&proc->deferred_work_node);
5891 defer = proc->deferred_work;
5892 proc->deferred_work = 0;
5893 } else {
5894 proc = NULL;
5895 defer = 0;
5896 }
5897 mutex_unlock(&binder_deferred_lock);
5898
5899 if (defer & BINDER_DEFERRED_FLUSH)
5900 binder_deferred_flush(proc);
5901
5902 if (defer & BINDER_DEFERRED_RELEASE)
5903 binder_deferred_release(proc);
5904 } while (proc);
5905 }
5906 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5907
5908 static void
5909 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5910 {
5911 mutex_lock(&binder_deferred_lock);
5912 proc->deferred_work |= defer;
5913 if (hlist_unhashed(&proc->deferred_work_node)) {
5914 hlist_add_head(&proc->deferred_work_node,
5915 &binder_deferred_list);
5916 schedule_work(&binder_deferred_work);
5917 }
5918 mutex_unlock(&binder_deferred_lock);
5919 }
5920
5921 static void print_binder_transaction_ilocked(struct seq_file *m,
5922 struct binder_proc *proc,
5923 const char *prefix,
5924 struct binder_transaction *t)
5925 {
5926 struct binder_proc *to_proc;
5927 struct binder_buffer *buffer = t->buffer;
5928
5929 spin_lock(&t->lock);
5930 to_proc = t->to_proc;
5931 seq_printf(m,
5932 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5933 prefix, t->debug_id, t,
5934 t->from ? t->from->proc->pid : 0,
5935 t->from ? t->from->pid : 0,
5936 to_proc ? to_proc->pid : 0,
5937 t->to_thread ? t->to_thread->pid : 0,
5938 t->code, t->flags, t->priority, t->need_reply);
5939 spin_unlock(&t->lock);
5940
5941 if (proc != to_proc) {
5942
5943
5944
5945
5946 seq_puts(m, "\n");
5947 return;
5948 }
5949
5950 if (buffer == NULL) {
5951 seq_puts(m, " buffer free\n");
5952 return;
5953 }
5954 if (buffer->target_node)
5955 seq_printf(m, " node %d", buffer->target_node->debug_id);
5956 seq_printf(m, " size %zd:%zd data %pK\n",
5957 buffer->data_size, buffer->offsets_size,
5958 buffer->user_data);
5959 }
5960
5961 static void print_binder_work_ilocked(struct seq_file *m,
5962 struct binder_proc *proc,
5963 const char *prefix,
5964 const char *transaction_prefix,
5965 struct binder_work *w)
5966 {
5967 struct binder_node *node;
5968 struct binder_transaction *t;
5969
5970 switch (w->type) {
5971 case BINDER_WORK_TRANSACTION:
5972 t = container_of(w, struct binder_transaction, work);
5973 print_binder_transaction_ilocked(
5974 m, proc, transaction_prefix, t);
5975 break;
5976 case BINDER_WORK_RETURN_ERROR: {
5977 struct binder_error *e = container_of(
5978 w, struct binder_error, work);
5979
5980 seq_printf(m, "%stransaction error: %u\n",
5981 prefix, e->cmd);
5982 } break;
5983 case BINDER_WORK_TRANSACTION_COMPLETE:
5984 seq_printf(m, "%stransaction complete\n", prefix);
5985 break;
5986 case BINDER_WORK_NODE:
5987 node = container_of(w, struct binder_node, work);
5988 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5989 prefix, node->debug_id,
5990 (u64)node->ptr, (u64)node->cookie);
5991 break;
5992 case BINDER_WORK_DEAD_BINDER:
5993 seq_printf(m, "%shas dead binder\n", prefix);
5994 break;
5995 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5996 seq_printf(m, "%shas cleared dead binder\n", prefix);
5997 break;
5998 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5999 seq_printf(m, "%shas cleared death notification\n", prefix);
6000 break;
6001 default:
6002 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6003 break;
6004 }
6005 }
6006
6007 static void print_binder_thread_ilocked(struct seq_file *m,
6008 struct binder_thread *thread,
6009 int print_always)
6010 {
6011 struct binder_transaction *t;
6012 struct binder_work *w;
6013 size_t start_pos = m->count;
6014 size_t header_pos;
6015
6016 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6017 thread->pid, thread->looper,
6018 thread->looper_need_return,
6019 atomic_read(&thread->tmp_ref));
6020 header_pos = m->count;
6021 t = thread->transaction_stack;
6022 while (t) {
6023 if (t->from == thread) {
6024 print_binder_transaction_ilocked(m, thread->proc,
6025 " outgoing transaction", t);
6026 t = t->from_parent;
6027 } else if (t->to_thread == thread) {
6028 print_binder_transaction_ilocked(m, thread->proc,
6029 " incoming transaction", t);
6030 t = t->to_parent;
6031 } else {
6032 print_binder_transaction_ilocked(m, thread->proc,
6033 " bad transaction", t);
6034 t = NULL;
6035 }
6036 }
6037 list_for_each_entry(w, &thread->todo, entry) {
6038 print_binder_work_ilocked(m, thread->proc, " ",
6039 " pending transaction", w);
6040 }
6041 if (!print_always && m->count == header_pos)
6042 m->count = start_pos;
6043 }
6044
6045 static void print_binder_node_nilocked(struct seq_file *m,
6046 struct binder_node *node)
6047 {
6048 struct binder_ref *ref;
6049 struct binder_work *w;
6050 int count;
6051
6052 count = 0;
6053 hlist_for_each_entry(ref, &node->refs, node_entry)
6054 count++;
6055
6056 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6057 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6058 node->has_strong_ref, node->has_weak_ref,
6059 node->local_strong_refs, node->local_weak_refs,
6060 node->internal_strong_refs, count, node->tmp_refs);
6061 if (count) {
6062 seq_puts(m, " proc");
6063 hlist_for_each_entry(ref, &node->refs, node_entry)
6064 seq_printf(m, " %d", ref->proc->pid);
6065 }
6066 seq_puts(m, "\n");
6067 if (node->proc) {
6068 list_for_each_entry(w, &node->async_todo, entry)
6069 print_binder_work_ilocked(m, node->proc, " ",
6070 " pending async transaction", w);
6071 }
6072 }
6073
6074 static void print_binder_ref_olocked(struct seq_file *m,
6075 struct binder_ref *ref)
6076 {
6077 binder_node_lock(ref->node);
6078 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6079 ref->data.debug_id, ref->data.desc,
6080 ref->node->proc ? "" : "dead ",
6081 ref->node->debug_id, ref->data.strong,
6082 ref->data.weak, ref->death);
6083 binder_node_unlock(ref->node);
6084 }
6085
6086 static void print_binder_proc(struct seq_file *m,
6087 struct binder_proc *proc, int print_all)
6088 {
6089 struct binder_work *w;
6090 struct rb_node *n;
6091 size_t start_pos = m->count;
6092 size_t header_pos;
6093 struct binder_node *last_node = NULL;
6094
6095 seq_printf(m, "proc %d\n", proc->pid);
6096 seq_printf(m, "context %s\n", proc->context->name);
6097 header_pos = m->count;
6098
6099 binder_inner_proc_lock(proc);
6100 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6101 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6102 rb_node), print_all);
6103
6104 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6105 struct binder_node *node = rb_entry(n, struct binder_node,
6106 rb_node);
6107 if (!print_all && !node->has_async_transaction)
6108 continue;
6109
6110
6111
6112
6113
6114
6115 binder_inc_node_tmpref_ilocked(node);
6116
6117 binder_inner_proc_unlock(proc);
6118 if (last_node)
6119 binder_put_node(last_node);
6120 binder_node_inner_lock(node);
6121 print_binder_node_nilocked(m, node);
6122 binder_node_inner_unlock(node);
6123 last_node = node;
6124 binder_inner_proc_lock(proc);
6125 }
6126 binder_inner_proc_unlock(proc);
6127 if (last_node)
6128 binder_put_node(last_node);
6129
6130 if (print_all) {
6131 binder_proc_lock(proc);
6132 for (n = rb_first(&proc->refs_by_desc);
6133 n != NULL;
6134 n = rb_next(n))
6135 print_binder_ref_olocked(m, rb_entry(n,
6136 struct binder_ref,
6137 rb_node_desc));
6138 binder_proc_unlock(proc);
6139 }
6140 binder_alloc_print_allocated(m, &proc->alloc);
6141 binder_inner_proc_lock(proc);
6142 list_for_each_entry(w, &proc->todo, entry)
6143 print_binder_work_ilocked(m, proc, " ",
6144 " pending transaction", w);
6145 list_for_each_entry(w, &proc->delivered_death, entry) {
6146 seq_puts(m, " has delivered dead binder\n");
6147 break;
6148 }
6149 binder_inner_proc_unlock(proc);
6150 if (!print_all && m->count == header_pos)
6151 m->count = start_pos;
6152 }
6153
6154 static const char * const binder_return_strings[] = {
6155 "BR_ERROR",
6156 "BR_OK",
6157 "BR_TRANSACTION",
6158 "BR_REPLY",
6159 "BR_ACQUIRE_RESULT",
6160 "BR_DEAD_REPLY",
6161 "BR_TRANSACTION_COMPLETE",
6162 "BR_INCREFS",
6163 "BR_ACQUIRE",
6164 "BR_RELEASE",
6165 "BR_DECREFS",
6166 "BR_ATTEMPT_ACQUIRE",
6167 "BR_NOOP",
6168 "BR_SPAWN_LOOPER",
6169 "BR_FINISHED",
6170 "BR_DEAD_BINDER",
6171 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6172 "BR_FAILED_REPLY",
6173 "BR_FROZEN_REPLY",
6174 "BR_ONEWAY_SPAM_SUSPECT",
6175 };
6176
6177 static const char * const binder_command_strings[] = {
6178 "BC_TRANSACTION",
6179 "BC_REPLY",
6180 "BC_ACQUIRE_RESULT",
6181 "BC_FREE_BUFFER",
6182 "BC_INCREFS",
6183 "BC_ACQUIRE",
6184 "BC_RELEASE",
6185 "BC_DECREFS",
6186 "BC_INCREFS_DONE",
6187 "BC_ACQUIRE_DONE",
6188 "BC_ATTEMPT_ACQUIRE",
6189 "BC_REGISTER_LOOPER",
6190 "BC_ENTER_LOOPER",
6191 "BC_EXIT_LOOPER",
6192 "BC_REQUEST_DEATH_NOTIFICATION",
6193 "BC_CLEAR_DEATH_NOTIFICATION",
6194 "BC_DEAD_BINDER_DONE",
6195 "BC_TRANSACTION_SG",
6196 "BC_REPLY_SG",
6197 };
6198
6199 static const char * const binder_objstat_strings[] = {
6200 "proc",
6201 "thread",
6202 "node",
6203 "ref",
6204 "death",
6205 "transaction",
6206 "transaction_complete"
6207 };
6208
6209 static void print_binder_stats(struct seq_file *m, const char *prefix,
6210 struct binder_stats *stats)
6211 {
6212 int i;
6213
6214 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6215 ARRAY_SIZE(binder_command_strings));
6216 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6217 int temp = atomic_read(&stats->bc[i]);
6218
6219 if (temp)
6220 seq_printf(m, "%s%s: %d\n", prefix,
6221 binder_command_strings[i], temp);
6222 }
6223
6224 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6225 ARRAY_SIZE(binder_return_strings));
6226 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6227 int temp = atomic_read(&stats->br[i]);
6228
6229 if (temp)
6230 seq_printf(m, "%s%s: %d\n", prefix,
6231 binder_return_strings[i], temp);
6232 }
6233
6234 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6235 ARRAY_SIZE(binder_objstat_strings));
6236 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6237 ARRAY_SIZE(stats->obj_deleted));
6238 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6239 int created = atomic_read(&stats->obj_created[i]);
6240 int deleted = atomic_read(&stats->obj_deleted[i]);
6241
6242 if (created || deleted)
6243 seq_printf(m, "%s%s: active %d total %d\n",
6244 prefix,
6245 binder_objstat_strings[i],
6246 created - deleted,
6247 created);
6248 }
6249 }
6250
6251 static void print_binder_proc_stats(struct seq_file *m,
6252 struct binder_proc *proc)
6253 {
6254 struct binder_work *w;
6255 struct binder_thread *thread;
6256 struct rb_node *n;
6257 int count, strong, weak, ready_threads;
6258 size_t free_async_space =
6259 binder_alloc_get_free_async_space(&proc->alloc);
6260
6261 seq_printf(m, "proc %d\n", proc->pid);
6262 seq_printf(m, "context %s\n", proc->context->name);
6263 count = 0;
6264 ready_threads = 0;
6265 binder_inner_proc_lock(proc);
6266 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6267 count++;
6268
6269 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6270 ready_threads++;
6271
6272 seq_printf(m, " threads: %d\n", count);
6273 seq_printf(m, " requested threads: %d+%d/%d\n"
6274 " ready threads %d\n"
6275 " free async space %zd\n", proc->requested_threads,
6276 proc->requested_threads_started, proc->max_threads,
6277 ready_threads,
6278 free_async_space);
6279 count = 0;
6280 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6281 count++;
6282 binder_inner_proc_unlock(proc);
6283 seq_printf(m, " nodes: %d\n", count);
6284 count = 0;
6285 strong = 0;
6286 weak = 0;
6287 binder_proc_lock(proc);
6288 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6289 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6290 rb_node_desc);
6291 count++;
6292 strong += ref->data.strong;
6293 weak += ref->data.weak;
6294 }
6295 binder_proc_unlock(proc);
6296 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6297
6298 count = binder_alloc_get_allocated_count(&proc->alloc);
6299 seq_printf(m, " buffers: %d\n", count);
6300
6301 binder_alloc_print_pages(m, &proc->alloc);
6302
6303 count = 0;
6304 binder_inner_proc_lock(proc);
6305 list_for_each_entry(w, &proc->todo, entry) {
6306 if (w->type == BINDER_WORK_TRANSACTION)
6307 count++;
6308 }
6309 binder_inner_proc_unlock(proc);
6310 seq_printf(m, " pending transactions: %d\n", count);
6311
6312 print_binder_stats(m, " ", &proc->stats);
6313 }
6314
6315 static int state_show(struct seq_file *m, void *unused)
6316 {
6317 struct binder_proc *proc;
6318 struct binder_node *node;
6319 struct binder_node *last_node = NULL;
6320
6321 seq_puts(m, "binder state:\n");
6322
6323 spin_lock(&binder_dead_nodes_lock);
6324 if (!hlist_empty(&binder_dead_nodes))
6325 seq_puts(m, "dead nodes:\n");
6326 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6327
6328
6329
6330
6331
6332 node->tmp_refs++;
6333 spin_unlock(&binder_dead_nodes_lock);
6334 if (last_node)
6335 binder_put_node(last_node);
6336 binder_node_lock(node);
6337 print_binder_node_nilocked(m, node);
6338 binder_node_unlock(node);
6339 last_node = node;
6340 spin_lock(&binder_dead_nodes_lock);
6341 }
6342 spin_unlock(&binder_dead_nodes_lock);
6343 if (last_node)
6344 binder_put_node(last_node);
6345
6346 mutex_lock(&binder_procs_lock);
6347 hlist_for_each_entry(proc, &binder_procs, proc_node)
6348 print_binder_proc(m, proc, 1);
6349 mutex_unlock(&binder_procs_lock);
6350
6351 return 0;
6352 }
6353
6354 static int stats_show(struct seq_file *m, void *unused)
6355 {
6356 struct binder_proc *proc;
6357
6358 seq_puts(m, "binder stats:\n");
6359
6360 print_binder_stats(m, "", &binder_stats);
6361
6362 mutex_lock(&binder_procs_lock);
6363 hlist_for_each_entry(proc, &binder_procs, proc_node)
6364 print_binder_proc_stats(m, proc);
6365 mutex_unlock(&binder_procs_lock);
6366
6367 return 0;
6368 }
6369
6370 static int transactions_show(struct seq_file *m, void *unused)
6371 {
6372 struct binder_proc *proc;
6373
6374 seq_puts(m, "binder transactions:\n");
6375 mutex_lock(&binder_procs_lock);
6376 hlist_for_each_entry(proc, &binder_procs, proc_node)
6377 print_binder_proc(m, proc, 0);
6378 mutex_unlock(&binder_procs_lock);
6379
6380 return 0;
6381 }
6382
6383 static int proc_show(struct seq_file *m, void *unused)
6384 {
6385 struct binder_proc *itr;
6386 int pid = (unsigned long)m->private;
6387
6388 mutex_lock(&binder_procs_lock);
6389 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6390 if (itr->pid == pid) {
6391 seq_puts(m, "binder proc state:\n");
6392 print_binder_proc(m, itr, 1);
6393 }
6394 }
6395 mutex_unlock(&binder_procs_lock);
6396
6397 return 0;
6398 }
6399
6400 static void print_binder_transaction_log_entry(struct seq_file *m,
6401 struct binder_transaction_log_entry *e)
6402 {
6403 int debug_id = READ_ONCE(e->debug_id_done);
6404
6405
6406
6407
6408 smp_rmb();
6409 seq_printf(m,
6410 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6411 e->debug_id, (e->call_type == 2) ? "reply" :
6412 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6413 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6414 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6415 e->return_error, e->return_error_param,
6416 e->return_error_line);
6417
6418
6419
6420
6421 smp_rmb();
6422 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6423 "\n" : " (incomplete)\n");
6424 }
6425
6426 static int transaction_log_show(struct seq_file *m, void *unused)
6427 {
6428 struct binder_transaction_log *log = m->private;
6429 unsigned int log_cur = atomic_read(&log->cur);
6430 unsigned int count;
6431 unsigned int cur;
6432 int i;
6433
6434 count = log_cur + 1;
6435 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6436 0 : count % ARRAY_SIZE(log->entry);
6437 if (count > ARRAY_SIZE(log->entry) || log->full)
6438 count = ARRAY_SIZE(log->entry);
6439 for (i = 0; i < count; i++) {
6440 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6441
6442 print_binder_transaction_log_entry(m, &log->entry[index]);
6443 }
6444 return 0;
6445 }
6446
6447 const struct file_operations binder_fops = {
6448 .owner = THIS_MODULE,
6449 .poll = binder_poll,
6450 .unlocked_ioctl = binder_ioctl,
6451 .compat_ioctl = compat_ptr_ioctl,
6452 .mmap = binder_mmap,
6453 .open = binder_open,
6454 .flush = binder_flush,
6455 .release = binder_release,
6456 };
6457
6458 DEFINE_SHOW_ATTRIBUTE(state);
6459 DEFINE_SHOW_ATTRIBUTE(stats);
6460 DEFINE_SHOW_ATTRIBUTE(transactions);
6461 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6462
6463 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6464 {
6465 .name = "state",
6466 .mode = 0444,
6467 .fops = &state_fops,
6468 .data = NULL,
6469 },
6470 {
6471 .name = "stats",
6472 .mode = 0444,
6473 .fops = &stats_fops,
6474 .data = NULL,
6475 },
6476 {
6477 .name = "transactions",
6478 .mode = 0444,
6479 .fops = &transactions_fops,
6480 .data = NULL,
6481 },
6482 {
6483 .name = "transaction_log",
6484 .mode = 0444,
6485 .fops = &transaction_log_fops,
6486 .data = &binder_transaction_log,
6487 },
6488 {
6489 .name = "failed_transaction_log",
6490 .mode = 0444,
6491 .fops = &transaction_log_fops,
6492 .data = &binder_transaction_log_failed,
6493 },
6494 {}
6495 };
6496
6497 static int __init init_binder_device(const char *name)
6498 {
6499 int ret;
6500 struct binder_device *binder_device;
6501
6502 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6503 if (!binder_device)
6504 return -ENOMEM;
6505
6506 binder_device->miscdev.fops = &binder_fops;
6507 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6508 binder_device->miscdev.name = name;
6509
6510 refcount_set(&binder_device->ref, 1);
6511 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6512 binder_device->context.name = name;
6513 mutex_init(&binder_device->context.context_mgr_node_lock);
6514
6515 ret = misc_register(&binder_device->miscdev);
6516 if (ret < 0) {
6517 kfree(binder_device);
6518 return ret;
6519 }
6520
6521 hlist_add_head(&binder_device->hlist, &binder_devices);
6522
6523 return ret;
6524 }
6525
6526 static int __init binder_init(void)
6527 {
6528 int ret;
6529 char *device_name, *device_tmp;
6530 struct binder_device *device;
6531 struct hlist_node *tmp;
6532 char *device_names = NULL;
6533
6534 ret = binder_alloc_shrinker_init();
6535 if (ret)
6536 return ret;
6537
6538 atomic_set(&binder_transaction_log.cur, ~0U);
6539 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6540
6541 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6542 if (binder_debugfs_dir_entry_root) {
6543 const struct binder_debugfs_entry *db_entry;
6544
6545 binder_for_each_debugfs_entry(db_entry)
6546 debugfs_create_file(db_entry->name,
6547 db_entry->mode,
6548 binder_debugfs_dir_entry_root,
6549 db_entry->data,
6550 db_entry->fops);
6551
6552 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6553 binder_debugfs_dir_entry_root);
6554 }
6555
6556 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6557 strcmp(binder_devices_param, "") != 0) {
6558
6559
6560
6561
6562 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6563 if (!device_names) {
6564 ret = -ENOMEM;
6565 goto err_alloc_device_names_failed;
6566 }
6567
6568 device_tmp = device_names;
6569 while ((device_name = strsep(&device_tmp, ","))) {
6570 ret = init_binder_device(device_name);
6571 if (ret)
6572 goto err_init_binder_device_failed;
6573 }
6574 }
6575
6576 ret = init_binderfs();
6577 if (ret)
6578 goto err_init_binder_device_failed;
6579
6580 return ret;
6581
6582 err_init_binder_device_failed:
6583 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6584 misc_deregister(&device->miscdev);
6585 hlist_del(&device->hlist);
6586 kfree(device);
6587 }
6588
6589 kfree(device_names);
6590
6591 err_alloc_device_names_failed:
6592 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6593
6594 return ret;
6595 }
6596
6597 device_initcall(binder_init);
6598
6599 #define CREATE_TRACE_POINTS
6600 #include "binder_trace.h"
6601
6602 MODULE_LICENSE("GPL v2");