/*
 * transition.c - Kernel Live Patching transition functions
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128

#define SIGNALS_TIMEOUT 15

/* The patch being applied or reverted; NULL when no transition is in progress. */
struct klp_patch *klp_transition_patch;

/* Patch state all tasks are transitioning to: KLP_PATCHED or KLP_UNPATCHED. */
static int klp_target_state = KLP_UNDEFINED;

/* Retry attempts so far; pending tasks are signaled every SIGNALS_TIMEOUT tries. */
static unsigned int klp_signals_cnt;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force of
 * synchronize_rcu(). This requires synchronizing tasks even in userspace
 * and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * Functions may be patched even where RCU is not watching (e.g. before
 * user_exit()), so we cannot rely on the RCU infrastructure to do the
 * synchronization.
 *
 * Instead, hard force the sched synchronization: scheduling the empty
 * klp_sync() work on every CPU guarantees that each CPU has passed through
 * the scheduler before this function returns.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) {
		klp_unpatch_replaced_patches(klp_transition_patch);
		klp_discard_nops(klp_transition_patch);
	}

	if (klp_target_state == KLP_UNPATCHED) {
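		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */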
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patch state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is
 * inactive.  Otherwise it could be racing with klp_ftrace_handler().
 */
void klp_update_patch_state(struct task_struct *task)
{
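	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */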
	preempt_disable_notrace();
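	/*
	 * The test_and_clear_tsk_thread_flag() below also acts as a read
	 * barrier: it orders the TIF_PATCH_PENDING read against the
	 * klp_target_state read.  The corresponding write barrier is the
	 * smp_wmb() in klp_init_transition().
	 */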
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
				unsigned int nr_entries)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < nr_entries; i++) {
		address = entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
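			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */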
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
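			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */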
			ops = klp_find_ops(func->old_func);

			if (list_is_singular(&ops->func_stack)) {
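				/* original function */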
				func_addr = (unsigned long)func->old_func;
				func_size = func->old_size;
			} else {
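				/* previously patched function */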
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch
 * state by looking for any to-be-patched or to-be-unpatched functions on its
 * stack.
 */
static int klp_check_stack(struct task_struct *task, const char **oldname)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct klp_object *obj;
	struct klp_func *func;
	int ret, nr_entries;

	ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
	if (ret < 0)
		return -EINVAL;
	nr_entries = ret;

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;
		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, entries, nr_entries);
			if (ret) {
				*oldname = func->old_name;
				return -EADDRINUSE;
			}
		}
	}

	return 0;
}

static int klp_check_and_switch_task(struct task_struct *task, void *arg)
{
	int ret;

	if (task_curr(task) && task != current)
		return -EBUSY;

	ret = klp_check_stack(task, arg);
	if (ret)
		return ret;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;
	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function,
 * or if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	const char *old_name;
	int ret;

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	ret = task_call_func(task, klp_check_and_switch_task, &old_name);
	switch (ret) {
	case 0:
		break;

	case -EBUSY:
		pr_debug("%s: %s:%d is running\n",
			 __func__, task->comm, task->pid);
		break;
	case -EINVAL:
		pr_debug("%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		break;
	case -EADDRINUSE:
		pr_debug("%s: %s:%d is sleeping on function %s\n",
			 __func__, task->comm, task->pid, old_name);
		break;

	default:
		pr_debug("%s: Unknown error code (%d) when trying to switch %s:%d\n",
			 __func__, ret, task->comm, task->pid);
		break;
	}

	return !ret;
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.
 */
static void klp_send_signals(void)
{
	struct task_struct *g, *task;

	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
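			/*
			 * Wake up a kthread which is still sleeping
			 * interruptibly and so has not been switched yet.
			 */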
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
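			/*
			 * Send a fake signal to a userspace task which has
			 * not been switched yet.
			 */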
			set_notify_signal(task);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	struct klp_patch *patch;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	cpus_read_lock();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task)) {
				complete = false;
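				/* Make idle task go through the main loop. */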
				wake_up_if_idle(cpu);
			}
		} else if (task->patch_state != klp_target_state) {
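			/* offline idle tasks can be switched immediately */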
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	cpus_read_unlock();

	if (!complete) {
		if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
			klp_send_signals();
		klp_signals_cnt++;
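		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */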
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	patch = klp_transition_patch;
	klp_complete_transition();
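	/*
	 * It would make more sense to free the unused patches in
	 * klp_complete_transition(), but it is also called from
	 * klp_cancel_transition().
	 */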
	if (!patch->enabled)
		klp_free_patch_async(patch);
	else if (patch->replace)
		klp_free_replaced_patches_async(patch);
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They are
	 * handled in klp_try_complete_transition(): online idle tasks get
	 * their stacks checked, offline ones are switched immediately.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}

	klp_signals_cnt = 0;
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler()
	 * doesn't see a func in transition with a task->patch_state of
	 * KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;
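	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */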
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator who forced the transition has to be aware of
 * this.
 */
void klp_force_transition(void)
{
	struct klp_patch *patch;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));

	/* Set forced flag for patches being removed. */
	if (klp_target_state == KLP_UNPATCHED)
		klp_transition_patch->forced = true;
	else if (klp_transition_patch->replace) {
		klp_for_each_patch(patch) {
			if (patch != klp_transition_patch)
				patch->forced = true;
		}
	}
}