/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
    mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
    mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
    const struct jump_entry *jea = a;
    const struct jump_entry *jeb = b;

    if (jea->key < jeb->key)
        return -1;

    if (jea->key > jeb->key)
        return 1;

    return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
    unsigned long size;

    size = (((unsigned long)stop - (unsigned long)start)
                    / sizeof(struct jump_entry));
    sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it is
 * OK to have it be a function here. The same goes for 'static_key_enable()'
 * and 'static_key_disable()', which require bug.h. This should allow
 * jump_label.h to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
    /*
     * -1 means the first static_key_slow_inc() is in progress.
     *  static_key_enabled() must return true, so return 1 here.
     */
    int n = atomic_read(&key->enabled);

    return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_enable(struct static_key *key)
{
    int count = static_key_count(key);

    WARN_ON_ONCE(count < 0 || count > 1);

    if (!count)
        static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
    int count = static_key_count(key);

    WARN_ON_ONCE(count < 0 || count > 1);

    if (count)
        static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);
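
/*
 * Usage sketch (illustrative only, not part of this file): enable/disable
 * give boolean, idempotent semantics on top of the reference-counted
 * slow_inc/slow_dec API below. A hypothetical key 'my_feature' might be
 * flipped from a sysctl handler like so:
 *
 *	static DEFINE_STATIC_KEY_FALSE(my_feature);
 *
 *	if (user_wants_feature)
 *		static_branch_enable(&my_feature);
 *	else
 *		static_branch_disable(&my_feature);
 *
 * Repeated enables (or disables) are no-ops; a key that has been
 * incremented more than once via the counted API trips the
 * WARN_ON_ONCE() above.
 */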

void static_key_slow_inc(struct static_key *key)
{
    int v, v1;

    STATIC_KEY_CHECK_USE();

    /*
     * Careful if we get concurrent static_key_slow_inc() calls;
     * later calls must wait for the first one to _finish_ the
     * jump_label_update() process.  At the same time, however,
     * the jump_label_update() call below wants to see
     * static_key_enabled(&key) for jumps to be updated properly.
     *
     * So give a special meaning to negative key->enabled: it sends
     * static_key_slow_inc() down the slow path, and it is non-zero
     * so it counts as "enabled" in jump_label_update().  Note that
     * atomic_inc_unless_negative() checks >= 0, so roll our own.
     */
    for (v = atomic_read(&key->enabled); v > 0; v = v1) {
        v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
        if (likely(v1 == v))
            return;
    }

    jump_label_lock();
    if (atomic_read(&key->enabled) == 0) {
        atomic_set(&key->enabled, -1);
        jump_label_update(key);
        atomic_set(&key->enabled, 1);
    } else {
        atomic_inc(&key->enabled);
    }
    jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
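
/*
 * Usage sketch (illustrative only): the counted API nests, so independent
 * users can stack their enables. With a hypothetical key 'net_debug':
 *
 *	static struct static_key net_debug = STATIC_KEY_INIT_FALSE;
 *
 *	static_key_slow_inc(&net_debug);	(0->1: patch sites to JMP)
 *	static_key_slow_inc(&net_debug);	(1->2: count only)
 *	static_key_slow_dec(&net_debug);	(2->1: count only)
 *	static_key_slow_dec(&net_debug);	(1->0: patch back to NOP)
 *
 * Only the 0->1 and 1->0 transitions take jump_label_lock() and rewrite
 * code; intermediate increments ride the cmpxchg fast path above, and
 * intermediate decrements ride atomic_dec_and_mutex_lock().
 */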

static void __static_key_slow_dec(struct static_key *key,
        unsigned long rate_limit, struct delayed_work *work)
{
    /*
     * The negative count check is valid even when a negative
     * key->enabled is in use by static_key_slow_inc(); a
     * __static_key_slow_dec() before the first static_key_slow_inc()
     * returns is unbalanced, because all other static_key_slow_inc()
     * instances block while the update is in progress.
     */
    if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
        WARN(atomic_read(&key->enabled) < 0,
             "jump label: negative count!\n");
        return;
    }

    if (rate_limit) {
        atomic_inc(&key->enabled);
        schedule_delayed_work(work, rate_limit);
    } else {
        jump_label_update(key);
    }
    jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
    struct static_key_deferred *key =
        container_of(work, struct static_key_deferred, work.work);
    __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
    STATIC_KEY_CHECK_USE();
    __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
    STATIC_KEY_CHECK_USE();
    __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
    STATIC_KEY_CHECK_USE();
    flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
        unsigned long rl)
{
    STATIC_KEY_CHECK_USE();
    key->timeout = rl;
    INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
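
/*
 * Usage sketch (illustrative only): a deferred key batches the expensive
 * disable path so rapid enable/disable cycles do not thrash text patching.
 * Modeled loosely on how perf uses this API; 'my_events' is hypothetical:
 *
 *	static struct static_key_deferred my_events;
 *
 *	jump_label_rate_limit(&my_events, HZ);		(defer decs by ~1s)
 *	static_key_slow_inc(&my_events.key);		(enable immediately)
 *	static_key_slow_dec_deferred(&my_events);	(disable via worker)
 *
 * static_key_deferred_flush() waits out any pending deferred disable,
 * e.g. before the key's owner is torn down.
 */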

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
    /*
     * Does the patch site [entry->code, entry->code + JUMP_LABEL_NOP_SIZE)
     * overlap the text range [start, end]?
     */
    if (entry->code <= (unsigned long)end &&
        entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
        return 1;

    return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
        struct jump_entry *iter_stop, void *start, void *end)
{
    struct jump_entry *iter;

    iter = iter_start;
    while (iter < iter_stop) {
        if (addr_conflict(iter, start, end))
            return 1;
        iter++;
    }

    return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                        enum jump_label_type type)
{
    arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
    return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
    return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
    return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
    return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
    struct static_key *key = jump_entry_key(entry);
    bool enabled = static_key_enabled(key);
    bool branch = jump_entry_branch(entry);

    /* See the comment in linux/jump_label.h */
    return enabled ^ branch;
}
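
/*
 * Note (informal summary of the scheme above; the authoritative table
 * lives in linux/jump_label.h): both pointers are tagged in their low
 * bits. key->entries carries the key's initial true/false type under
 * JUMP_TYPE_MASK, and bit 0 of entry->key records whether the site was
 * compiled with the branch inline ("likely") or out of line ("unlikely").
 * The XOR then reads:
 *
 *	enabled ^ branch	resulting code at the site
 *	0 ^ 0 = 0		NOP (fall through)
 *	0 ^ 1 = 1		JMP
 *	1 ^ 0 = 1		JMP
 *	1 ^ 1 = 0		NOP (fall through)
 *
 * i.e. the site is a NOP when the key's state matches the branch bit and
 * a JMP when they differ (JUMP_LABEL_NOP == 0, JUMP_LABEL_JMP == 1).
 */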

static void __jump_label_update(struct static_key *key,
                struct jump_entry *entry,
                struct jump_entry *stop)
{
    for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
        /*
         * entry->code set to 0 invalidates module init text sections;
         * kernel_text_address() verifies we are not in core kernel
         * init code. See jump_label_invalidate_module_init().
         */
        if (entry->code && kernel_text_address(entry->code))
            arch_jump_label_transform(entry, jump_label_type(entry));
    }
}

void __init jump_label_init(void)
{
    struct jump_entry *iter_start = __start___jump_table;
    struct jump_entry *iter_stop = __stop___jump_table;
    struct static_key *key = NULL;
    struct jump_entry *iter;

    /*
     * Since we are initializing the static_key.enabled field with
     * the 'raw' int values (to avoid pulling in atomic.h) in
     * jump_label.h, let's make sure that is safe. There are only two
     * cases to check since we initialize to 0 or 1.
     */
    BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
    BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

    if (static_key_initialized)
        return;

    jump_label_lock();
    jump_label_sort_entries(iter_start, iter_stop);

    for (iter = iter_start; iter < iter_stop; iter++) {
        struct static_key *iterk;

        /* rewrite NOPs */
        if (jump_label_type(iter) == JUMP_LABEL_NOP)
            arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

        iterk = jump_entry_key(iter);
        if (iterk == key)
            continue;

        key = iterk;
        /*
         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
         */
        *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
        key->next = NULL;
#endif
    }
    static_key_initialized = true;
    jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
    struct static_key *key = jump_entry_key(entry);
    bool type = static_key_type(key);
    bool branch = jump_entry_branch(entry);

    /* See the comment in linux/jump_label.h */
    return type ^ branch;
}

struct static_key_mod {
    struct static_key_mod *next;
    struct jump_entry *entries;
    struct module *mod;
};
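
/*
 * Sketch of the resulting layout (informal; see jump_label_add_module()
 * below): a key defined in one object can have users in several modules,
 * so each using module hangs a static_key_mod off the key:
 *
 *	key->entries -> entries in the key's own object (core or module)
 *	key->next    -> static_key_mod { mod A, A's entries } ->
 *	                static_key_mod { mod B, B's entries } -> NULL
 *
 * jump_label_update() walks key->entries directly and walks this chain
 * via __jump_label_mod_update(); jump_label_del_module() unlinks and
 * frees a node when its module goes away.
 */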

static int __jump_label_mod_text_reserved(void *start, void *end)
{
    struct module *mod;

    preempt_disable();
    mod = __module_text_address((unsigned long)start);
    WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
    preempt_enable();

    if (!mod)
        return 0;

    return __jump_label_text_reserved(mod->jump_entries,
                mod->jump_entries + mod->num_jump_entries,
                start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
    struct static_key_mod *mod;

    for (mod = key->next; mod; mod = mod->next) {
        struct module *m = mod->mod;

        __jump_label_update(key, mod->entries,
                    m->jump_entries + m->num_jump_entries);
    }
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
    struct jump_entry *iter_start = mod->jump_entries;
    struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
    struct jump_entry *iter;

    /* if the module doesn't have jump label entries, just return */
    if (iter_start == iter_stop)
        return;

    for (iter = iter_start; iter < iter_stop; iter++) {
        /* Only write NOPs for arch_branch_static(). */
        if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
            arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
    }
}

static int jump_label_add_module(struct module *mod)
{
    struct jump_entry *iter_start = mod->jump_entries;
    struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
    struct jump_entry *iter;
    struct static_key *key = NULL;
    struct static_key_mod *jlm;

    /* if the module doesn't have jump label entries, just return */
    if (iter_start == iter_stop)
        return 0;

    jump_label_sort_entries(iter_start, iter_stop);

    for (iter = iter_start; iter < iter_stop; iter++) {
        struct static_key *iterk;

        iterk = jump_entry_key(iter);
        if (iterk == key)
            continue;

        key = iterk;
        if (within_module(iter->key, mod)) {
            /*
             * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
             */
            *((unsigned long *)&key->entries) += (unsigned long)iter;
            key->next = NULL;
            continue;
        }
        jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
        if (!jlm)
            return -ENOMEM;
        jlm->mod = mod;
        jlm->entries = iter;
        jlm->next = key->next;
        key->next = jlm;

        /* Only update if we've changed from our initial state */
        if (jump_label_type(iter) != jump_label_init_type(iter))
            __jump_label_update(key, iter, iter_stop);
    }

    return 0;
}

static void jump_label_del_module(struct module *mod)
{
    struct jump_entry *iter_start = mod->jump_entries;
    struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
    struct jump_entry *iter;
    struct static_key *key = NULL;
    struct static_key_mod *jlm, **prev;

    for (iter = iter_start; iter < iter_stop; iter++) {
        if (jump_entry_key(iter) == key)
            continue;

        key = jump_entry_key(iter);

        if (within_module(iter->key, mod))
            continue;

        prev = &key->next;
        jlm = key->next;

        while (jlm && jlm->mod != mod) {
            prev = &jlm->next;
            jlm = jlm->next;
        }

        if (jlm) {
            *prev = jlm->next;
            kfree(jlm);
        }
    }
}

static void jump_label_invalidate_module_init(struct module *mod)
{
    struct jump_entry *iter_start = mod->jump_entries;
    struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
    struct jump_entry *iter;

    for (iter = iter_start; iter < iter_stop; iter++) {
        if (within_module_init(iter->code, mod))
            iter->code = 0;
    }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
             void *data)
{
    struct module *mod = data;
    int ret = 0;

    switch (val) {
    case MODULE_STATE_COMING:
        jump_label_lock();
        ret = jump_label_add_module(mod);
        if (ret)
            jump_label_del_module(mod);
        jump_label_unlock();
        break;
    case MODULE_STATE_GOING:
        jump_label_lock();
        jump_label_del_module(mod);
        jump_label_unlock();
        break;
    case MODULE_STATE_LIVE:
        jump_label_lock();
        jump_label_invalidate_module_init(mod);
        jump_label_unlock();
        break;
    }

    return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
    .notifier_call = jump_label_module_notify,
    .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
    return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text address range between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
    int ret = __jump_label_text_reserved(__start___jump_table,
            __stop___jump_table, start, end);

    if (ret)
        return ret;

#ifdef CONFIG_MODULES
    ret = __jump_label_mod_text_reserved(start, end);
#endif
    return ret;
}
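
/*
 * Usage sketch (illustrative only): a text patcher such as kprobes checks
 * for a collision with a jump label site before planting a breakpoint,
 * holding jump_label_mutex across the check and the arming:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(p->addr, p->addr)) {
 *		ret = -EINVAL;
 *		goto out;
 *	}
 *	... arm the probe ...
 *	out:
 *		jump_label_unlock();
 */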

static void jump_label_update(struct static_key *key)
{
    struct jump_entry *stop = __stop___jump_table;
    struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
    struct module *mod;

    __jump_label_mod_update(key);

    preempt_disable();
    mod = __module_address((unsigned long)key);
    if (mod)
        stop = mod->jump_entries + mod->num_jump_entries;
    preempt_enable();
#endif
    /* if there are no users, entry can be NULL */
    if (entry)
        __jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
    int i;

    for (i = 0; i < 2; i++) {
        WARN_ON(static_key_enabled(&sk_true.key) != true);
        WARN_ON(static_key_enabled(&sk_false.key) != false);

        WARN_ON(!static_branch_likely(&sk_true));
        WARN_ON(!static_branch_unlikely(&sk_true));
        WARN_ON(static_branch_likely(&sk_false));
        WARN_ON(static_branch_unlikely(&sk_false));

        static_branch_disable(&sk_true);
        static_branch_enable(&sk_false);

        WARN_ON(static_key_enabled(&sk_true.key) == true);
        WARN_ON(static_key_enabled(&sk_false.key) == false);

        WARN_ON(static_branch_likely(&sk_true));
        WARN_ON(static_branch_unlikely(&sk_true));
        WARN_ON(!static_branch_likely(&sk_false));
        WARN_ON(!static_branch_unlikely(&sk_false));

        static_branch_enable(&sk_true);
        static_branch_disable(&sk_false);
    }

    return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */