// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

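/*
 * Find the klp_ops struct for a patched function.  Each klp_ops tracks one
 * original function, and the most recent patch of that function sits at the
 * head of ops->func_stack, so comparing the first entry's old_func is enough.
 */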
struct klp_ops *klp_find_ops(void *old_func)
{
    struct klp_ops *ops;
    struct klp_func *func;

    list_for_each_entry(ops, &klp_ops, node) {
        func = list_first_entry(&ops->func_stack, struct klp_func,
                                stack_node);
        if (func->old_func == old_func)
            return ops;
    }

    return NULL;
}

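/*
 * The ftrace handler hooked onto every patched function.  It picks the newest
 * klp_func on ops->func_stack (taking an in-progress transition and the
 * current task's patch state into account) and redirects execution to it by
 * rewriting the instruction pointer in fregs.  Marked notrace so the handler
 * itself is never traced.
 */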
static void notrace klp_ftrace_handler(unsigned long ip,
                                       unsigned long parent_ip,
                                       struct ftrace_ops *fops,
                                       struct ftrace_regs *fregs)
{
    struct klp_ops *ops;
    struct klp_func *func;
    int patch_state;
    int bit;

    ops = container_of(fops, struct klp_ops, fops);

    /*
     * The ftrace_test_recursion_trylock() will disable preemption,
     * which is required for the variant of synchronize_rcu() that is
     * used to allow patching functions where RCU is not watching.
     * See klp_synchronize_transition() for more details.
     */
    bit = ftrace_test_recursion_trylock(ip, parent_ip);
    if (WARN_ON_ONCE(bit < 0))
        return;

    func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                  stack_node);

    /*
     * func should never be NULL because preemption should be disabled here
     * and unregister_ftrace_function() does the equivalent of a
     * synchronize_rcu() before the func_stack removal.
     */
    if (WARN_ON_ONCE(!func))
        goto unlock;

    /*
     * In the enable path, enforce the order of the ops->func_stack and
     * func->transition reads.  The corresponding write barrier is in
     * __klp_enable_patch().
     *
     * (Note that this barrier technically isn't needed in the disable
     * path.  In the rare case where klp_update_patch_state() runs before
     * this handler, its TIF_PATCH_PENDING read and this func->transition
     * read need to be ordered.  But klp_update_patch_state() already
     * enforces that.)
     */
    smp_rmb();

    if (unlikely(func->transition)) {

        /*
         * Enforce the order of the func->transition and
         * current->patch_state reads.  Otherwise we could read an
         * out-of-date task state and pick the wrong function.  The
         * corresponding write barrier is in klp_init_transition().
         */
        smp_rmb();

        patch_state = current->patch_state;

        WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

        if (patch_state == KLP_UNPATCHED) {
            /*
             * Use the previously patched version of the function.
             * If no previous patches exist, continue with the
             * original function.
             */
            func = list_entry_rcu(func->stack_node.next,
                                  struct klp_func, stack_node);

            if (&func->stack_node == &ops->func_stack)
                goto unlock;
        }
    }

    /*
     * NOPs are used to replace existing patches with original code.
     * Do nothing! Setting pc would cause an infinite loop.
     */
    if (func->nop)
        goto unlock;

    ftrace_instruction_pointer_set(fregs, (unsigned long)func->new_func);

unlock:
    ftrace_test_recursion_unlock(bit);
}

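/*
 * Remove one patched function from its ops->func_stack.  If it was the only
 * entry, the ftrace handler and filter for the original function are torn
 * down and the klp_ops struct is freed.
 */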
static void klp_unpatch_func(struct klp_func *func)
{
    struct klp_ops *ops;

    if (WARN_ON(!func->patched))
        return;
    if (WARN_ON(!func->old_func))
        return;

    ops = klp_find_ops(func->old_func);
    if (WARN_ON(!ops))
        return;

    if (list_is_singular(&ops->func_stack)) {
        unsigned long ftrace_loc;

        ftrace_loc = ftrace_location((unsigned long)func->old_func);
        if (WARN_ON(!ftrace_loc))
            return;

        WARN_ON(unregister_ftrace_function(&ops->fops));
        WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

        list_del_rcu(&func->stack_node);
        list_del(&ops->node);
        kfree(ops);
    } else {
        list_del_rcu(&func->stack_node);
    }

    func->patched = false;
}

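/*
 * Redirect an original function to its patched replacement.  The first patch
 * of a given function allocates a klp_ops struct and registers the ftrace
 * handler on the function's ftrace location; later patches of the same
 * function only need to be pushed onto the existing func_stack.
 */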
static int klp_patch_func(struct klp_func *func)
{
    struct klp_ops *ops;
    int ret;

    if (WARN_ON(!func->old_func))
        return -EINVAL;

    if (WARN_ON(func->patched))
        return -EINVAL;

    ops = klp_find_ops(func->old_func);
    if (!ops) {
        unsigned long ftrace_loc;

        ftrace_loc = ftrace_location((unsigned long)func->old_func);
        if (!ftrace_loc) {
            pr_err("failed to find location for function '%s'\n",
                func->old_name);
            return -EINVAL;
        }

        ops = kzalloc(sizeof(*ops), GFP_KERNEL);
        if (!ops)
            return -ENOMEM;

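        /*
         * The ops is dynamically allocated (DYNAMIC), must be allowed to
         * change the instruction pointer (IPMODIFY, plus SAVE_REGS on
         * architectures without DYNAMIC_FTRACE_WITH_ARGS) and must not be
         * disabled via the ftrace_enabled sysctl (PERMANENT).
         */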
        ops->fops.func = klp_ftrace_handler;
        ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
                  FTRACE_OPS_FL_SAVE_REGS |
#endif
                  FTRACE_OPS_FL_IPMODIFY |
                  FTRACE_OPS_FL_PERMANENT;

        list_add(&ops->node, &klp_ops);

        INIT_LIST_HEAD(&ops->func_stack);
        list_add_rcu(&func->stack_node, &ops->func_stack);

        ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
        if (ret) {
            pr_err("failed to set ftrace filter for function '%s' (%d)\n",
                   func->old_name, ret);
            goto err;
        }

        ret = register_ftrace_function(&ops->fops);
        if (ret) {
            pr_err("failed to register ftrace handler for function '%s' (%d)\n",
                   func->old_name, ret);
            ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
            goto err;
        }

    } else {
        list_add_rcu(&func->stack_node, &ops->func_stack);
    }

    func->patched = true;

    return 0;

err:
    list_del_rcu(&func->stack_node);
    list_del(&ops->node);
    kfree(ops);
    return ret;
}

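/*
 * Unpatch every patched function in an object.  With nops_only set, only the
 * nop entries (which restore the original code) are unpatched, and a
 * non-dynamic object keeps its patched flag.
 */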
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
    struct klp_func *func;

    klp_for_each_func(obj, func) {
        if (nops_only && !func->nop)
            continue;

        if (func->patched)
            klp_unpatch_func(func);
    }

    if (obj->dynamic || !nops_only)
        obj->patched = false;
}


void klp_unpatch_object(struct klp_object *obj)
{
    __klp_unpatch_object(obj, false);
}

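/*
 * Patch every function in an object.  On the first failure, everything
 * patched so far in this object is rolled back and the error is returned.
 */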
int klp_patch_object(struct klp_object *obj)
{
    struct klp_func *func;
    int ret;

    if (WARN_ON(obj->patched))
        return -EINVAL;

    klp_for_each_func(obj, func) {
        ret = klp_patch_func(func);
        if (ret) {
            klp_unpatch_object(obj);
            return ret;
        }
    }
    obj->patched = true;

    return 0;
}

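/* Unpatch every currently patched object of a patch. */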
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
    struct klp_object *obj;

    klp_for_each_object(patch, obj)
        if (obj->patched)
            __klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
    __klp_unpatch_objects(patch, false);
}

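/* Remove only the nop entries from all patched objects of a patch. */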
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
    __klp_unpatch_objects(patch, true);
}