/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add use RCU list operations.)
 */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);

#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
    struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

    return (unsigned long)layout->base;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
    struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

    return (unsigned long)layout->size;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
    return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
    unsigned long val = (unsigned long)key;
    unsigned long start, end;

    start = __mod_tree_val(n);
    if (val < start)
        return -1;

    end = start + __mod_tree_size(n);
    if (val >= end)
        return 1;

    return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
    .less = mod_tree_less,
    .comp = mod_tree_comp,
};

static struct mod_tree_root {
    struct latch_tree_root root;
    unsigned long addr_min;
    unsigned long addr_max;
} mod_tree __cacheline_aligned = {
    .addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
    latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
    latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications (insert, remove_init and remove) are serialized by
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
    mod->core_layout.mtn.mod = mod;
    mod->init_layout.mtn.mod = mod;

    __mod_tree_insert(&mod->core_layout.mtn);
    if (mod->init_layout.size)
        __mod_tree_insert(&mod->init_layout.mtn);
}

static void mod_tree_remove_init(struct module *mod)
{
    if (mod->init_layout.size)
        __mod_tree_remove(&mod->init_layout.mtn);
}

static void mod_tree_remove(struct module *mod)
{
    __mod_tree_remove(&mod->core_layout.mtn);
    mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
    struct latch_tree_node *ltn;

    ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
    if (!ltn)
        return NULL;

    return container_of(ltn, struct mod_tree_node, node)->mod;
}
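
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * __module_address() consults mod_find() under RCU-sched, roughly:
 *
 *      preempt_disable();
 *      if (addr >= module_addr_min && addr <= module_addr_max)
 *          mod = mod_find(addr);
 *      preempt_enable();
 */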

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
    struct module *mod;

    list_for_each_entry_rcu(mod, &modules, list) {
        if (within_module(addr, mod))
            return mod;
    }

    return NULL;
}

#endif /* MODULES_TREE_LOOKUP */

/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
    unsigned long min = (unsigned long)base;
    unsigned long max = min + size;

    if (min < module_addr_min)
        module_addr_min = min;
    if (max > module_addr_max)
        module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
    __mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
    if (mod->init_layout.size)
        __mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
}

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex(void)
{
    lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
    if (unlikely(!debug_locks))
        return;

    WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
        !lockdep_is_held(&module_mutex));
#endif
}

static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
    return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
    return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
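
/*
 * Editor's sketch of a hypothetical client: subsystems that care about
 * module state transitions hang a notifier_block on this chain and get
 * called with the MODULE_STATE_* value as the action:
 *
 *      static int my_module_notify(struct notifier_block *nb,
 *                                  unsigned long action, void *data)
 *      {
 *          struct module *mod = data;
 *
 *          if (action == MODULE_STATE_COMING)
 *              pr_info("%s is coming\n", mod->name);
 *          return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *          .notifier_call = my_module_notify,
 *      };
 *
 *      register_module_notifier(&my_nb);
 */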

struct load_info {
    Elf_Ehdr *hdr;
    unsigned long len;
    Elf_Shdr *sechdrs;
    char *secstrings, *strtab;
    unsigned long symoffs, stroffs;
    struct _ddebug *debug;
    unsigned int num_debug;
    bool sig_ok;
#ifdef CONFIG_KALLSYMS
    unsigned long mod_kallsyms_init_off;
#endif
    struct {
        unsigned int sym, str, mod, vers, info, pcpu;
    } index;
};

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
    BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
    if (mod && mod->state == MODULE_STATE_COMING)
        return -EBUSY;
    if (try_module_get(mod))
        return 0;
    else
        return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
                    enum lockdep_ok lockdep_ok)
{
    add_taint(flag, lockdep_ok);
    set_bit(flag, &mod->taints);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __noreturn __module_put_and_exit(struct module *mod, long code)
{
    module_put(mod);
    do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
    unsigned int i;

    for (i = 1; i < info->hdr->e_shnum; i++) {
        Elf_Shdr *shdr = &info->sechdrs[i];
        /* Alloc bit cleared means "ignore it." */
        if ((shdr->sh_flags & SHF_ALLOC)
            && strcmp(info->secstrings + shdr->sh_name, name) == 0)
            return i;
    }
    return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
    /* Section 0 has sh_addr 0. */
    return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
              const char *name,
              size_t object_size,
              unsigned int *num)
{
    unsigned int sec = find_sec(info, name);

    /* Section 0 has sh_addr 0 and sh_size 0. */
    *num = info->sechdrs[sec].sh_size / object_size;
    return (void *)info->sechdrs[sec].sh_addr;
}
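
/*
 * Usage sketch (editor's illustration): the loader pulls typed arrays
 * out of named ELF sections with section_objs(); the module parameter
 * table is a typical case:
 *
 *      mod->kp = section_objs(info, "__param",
 *                             sizeof(*mod->kp), &mod->num_kp);
 */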

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
extern const s32 __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const s32 __start___kcrctab_unused[];
extern const s32 __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
                   unsigned int arrsize,
                   struct module *owner,
                   bool (*fn)(const struct symsearch *syms,
                          struct module *owner,
                          void *data),
                   void *data)
{
    unsigned int j;

    for (j = 0; j < arrsize; j++) {
        if (fn(&arr[j], owner, data))
            return true;
    }

    return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                    struct module *owner,
                    void *data),
             void *data)
{
    struct module *mod;
    static const struct symsearch arr[] = {
        { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
          NOT_GPL_ONLY, false },
        { __start___ksymtab_gpl, __stop___ksymtab_gpl,
          __start___kcrctab_gpl,
          GPL_ONLY, false },
        { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
          __start___kcrctab_gpl_future,
          WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
        { __start___ksymtab_unused, __stop___ksymtab_unused,
          __start___kcrctab_unused,
          NOT_GPL_ONLY, true },
        { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
          __start___kcrctab_unused_gpl,
          GPL_ONLY, true },
#endif
    };

    module_assert_mutex_or_preempt();

    if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
        return true;

    list_for_each_entry_rcu(mod, &modules, list) {
        struct symsearch arr[] = {
            { mod->syms, mod->syms + mod->num_syms, mod->crcs,
              NOT_GPL_ONLY, false },
            { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
              mod->gpl_crcs,
              GPL_ONLY, false },
            { mod->gpl_future_syms,
              mod->gpl_future_syms + mod->num_gpl_future_syms,
              mod->gpl_future_crcs,
              WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
            { mod->unused_syms,
              mod->unused_syms + mod->num_unused_syms,
              mod->unused_crcs,
              NOT_GPL_ONLY, true },
            { mod->unused_gpl_syms,
              mod->unused_gpl_syms + mod->num_unused_gpl_syms,
              mod->unused_gpl_crcs,
              GPL_ONLY, true },
#endif
        };

        if (mod->state == MODULE_STATE_UNFORMED)
            continue;

        if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
            return true;
    }
    return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);

struct find_symbol_arg {
    /* Input */
    const char *name;
    bool gplok;
    bool warn;

    /* Output */
    struct module *owner;
    const s32 *crc;
    const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
                 struct module *owner,
                 unsigned int symnum, void *data)
{
    struct find_symbol_arg *fsa = data;

    if (!fsa->gplok) {
        if (syms->licence == GPL_ONLY)
            return false;
        if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
            pr_warn("Symbol %s is being used by a non-GPL module, "
                "which will not be allowed in the future\n",
                fsa->name);
        }
    }

#ifdef CONFIG_UNUSED_SYMBOLS
    if (syms->unused && fsa->warn) {
        pr_warn("Symbol %s is marked as UNUSED, however this module is "
            "using it.\n", fsa->name);
        pr_warn("This symbol will go away in the future.\n");
        pr_warn("Please evaluate if this is the right api to use and "
            "if it really is, submit a report to the linux kernel "
            "mailing list together with submitting your code for "
            "inclusion.\n");
    }
#endif

    fsa->owner = owner;
    fsa->crc = symversion(syms->crcs, symnum);
    fsa->sym = &syms->start[symnum];
    return true;
}

static int cmp_name(const void *va, const void *vb)
{
    const char *a;
    const struct kernel_symbol *b;
    a = va; b = vb;
    return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
                   struct module *owner,
                   void *data)
{
    struct find_symbol_arg *fsa = data;
    struct kernel_symbol *sym;

    sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
            sizeof(struct kernel_symbol), cmp_name);

    if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
        return true;

    return false;
}
/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
                    struct module **owner,
                    const s32 **crc,
                    bool gplok,
                    bool warn)
{
    struct find_symbol_arg fsa;

    fsa.name = name;
    fsa.gplok = gplok;
    fsa.warn = warn;

    if (each_symbol_section(find_symbol_in_section, &fsa)) {
        if (owner)
            *owner = fsa.owner;
        if (crc)
            *crc = fsa.crc;
        return fsa.sym;
    }

    pr_debug("Failed to find symbol %s\n", name);
    return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
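
/*
 * Editor's note: callers must keep the module list stable while the
 * returned symbol is used; __symbol_put() below is the typical shape:
 *
 *      preempt_disable();
 *      sym = find_symbol(name, &owner, NULL, true, false);
 *      if (sym)
 *          ... use sym and owner ...
 *      preempt_enable();
 */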

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
                      bool even_unformed)
{
    struct module *mod;

    module_assert_mutex_or_preempt();

    list_for_each_entry(mod, &modules, list) {
        if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
            continue;
        if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
            return mod;
    }
    return NULL;
}

struct module *find_module(const char *name)
{
    module_assert_mutex();
    return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
    return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
    Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
    unsigned long align = pcpusec->sh_addralign;

    if (!pcpusec->sh_size)
        return 0;

    if (align > PAGE_SIZE) {
        pr_warn("%s: per-cpu alignment %li > %li\n",
            mod->name, align, PAGE_SIZE);
        align = PAGE_SIZE;
    }

    mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
    if (!mod->percpu) {
        pr_warn("%s: Could not allocate %lu bytes percpu data\n",
            mod->name, (unsigned long)pcpusec->sh_size);
        return -ENOMEM;
    }
    mod->percpu_size = pcpusec->sh_size;
    return 0;
}

static void percpu_modfree(struct module *mod)
{
    free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
    return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
               const void *from, unsigned long size)
{
    int cpu;

    for_each_possible_cpu(cpu)
        memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
    struct module *mod;
    unsigned int cpu;

    preempt_disable();

    list_for_each_entry_rcu(mod, &modules, list) {
        if (mod->state == MODULE_STATE_UNFORMED)
            continue;
        if (!mod->percpu_size)
            continue;
        for_each_possible_cpu(cpu) {
            void *start = per_cpu_ptr(mod->percpu, cpu);

            if ((void *)addr >= start &&
                (void *)addr < start + mod->percpu_size) {
                preempt_enable();
                return true;
            }
        }
    }

    preempt_enable();
    return false;
}
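
/*
 * Editor's note: the machinery above backs ordinary static per-cpu
 * data in modules (a sketch, not code from this file):
 *
 *      static DEFINE_PER_CPU(unsigned long, my_counter);
 *      ...
 *      this_cpu_inc(my_counter);
 *
 * The variable is emitted into the module's ".data..percpu" section,
 * which percpu_modalloc() maps into a reserved per-cpu chunk.
 */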

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
    return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
    /* UP modules shouldn't have this section: ENOMEM isn't quite right */
    if (info->sechdrs[info->index.pcpu].sh_size != 0)
        return -ENOMEM;
    return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
    return 0;
}
static inline void percpu_modcopy(struct module *mod,
                  const void *from, unsigned long size)
{
    /* pcpusec should be 0, and size of that section should be 0. */
    BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
    return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field) \
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
    mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
            struct module_kobject *mk, char *buffer)      \
{                                                                     \
    return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
    return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
    kfree(mod->field);                                            \
    mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
    .attr = { .name = __stringify(field), .mode = 0444 },         \
    .show = show_modinfo_##field,                                 \
    .setup = setup_modinfo_##field,                               \
    .test = modinfo_##field##_exists,                             \
    .free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
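
/*
 * Editor's note: each MODINFO_ATTR(field) expansion exposes the
 * corresponding modinfo string as a read-only sysfs file, e.g.
 * /sys/module/<name>/version and /sys/module/<name>/srcversion; the
 * ->test hook hides the file when the module lacks that tag.
 */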

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE 1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
    /*
     * Initialize reference counter to MODULE_REF_BASE.
     * refcnt == 0 means module is going.
     */
    atomic_set(&mod->refcnt, MODULE_REF_BASE);

    INIT_LIST_HEAD(&mod->source_list);
    INIT_LIST_HEAD(&mod->target_list);

    /* Hold reference count during initialization. */
    atomic_inc(&mod->refcnt);

    return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
    struct module_use *use;

    list_for_each_entry(use, &b->source_list, source_list) {
        if (use->source == a) {
            pr_debug("%s uses %s!\n", a->name, b->name);
            return 1;
        }
    }
    pr_debug("%s does not use %s!\n", a->name, b->name);
    return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
    struct module_use *use;

    pr_debug("Allocating new usage for %s.\n", a->name);
    use = kmalloc(sizeof(*use), GFP_ATOMIC);
    if (!use) {
        pr_warn("%s: out of memory loading\n", a->name);
        return -ENOMEM;
    }

    use->source = a;
    use->target = b;
    list_add(&use->source_list, &b->source_list);
    list_add(&use->target_list, &a->target_list);
    return 0;
}
/* Module a uses b: caller must hold module_mutex */
int ref_module(struct module *a, struct module *b)
{
    int err;

    if (b == NULL || already_uses(a, b))
        return 0;

    /* If module isn't available, we fail. */
    err = strong_try_module_get(b);
    if (err)
        return err;

    err = add_module_usage(a, b);
    if (err) {
        module_put(b);
        return err;
    }
    return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
    struct module_use *use, *tmp;

    mutex_lock(&module_mutex);
    list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
        struct module *i = use->target;
        pr_debug("%s unusing %s\n", mod->name, i->name);
        module_put(i);
        list_del(&use->source_list);
        list_del(&use->target_list);
        kfree(use);
    }
    mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
    int ret = (flags & O_TRUNC);
    if (ret)
        add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
    return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
    return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
    int ret;

    /* Try to decrement refcnt which we set at loading */
    ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
    BUG_ON(ret < 0);
    if (ret)
        /*
         * Someone may drop their reference right now; restore the
         * base count unless refcnt has already hit zero.
         */
        ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

    return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
    /* If it's not unused, quit unless we're forcing. */
    if (try_release_module_ref(mod) != 0) {
        *forced = try_force_unload(flags);
        if (!(*forced))
            return -EWOULDBLOCK;
    }

    /* Mark it as dying. */
    mod->state = MODULE_STATE_GOING;

    return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:    the module we're checking
 *
 * Returns:
 *  -1 if the module is in the process of unloading
 *  otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
    return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
        unsigned int, flags)
{
    struct module *mod;
    char name[MODULE_NAME_LEN];
    int ret, forced = 0;

    if (!capable(CAP_SYS_MODULE) || modules_disabled)
        return -EPERM;

    if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
        return -EFAULT;
    name[MODULE_NAME_LEN-1] = '\0';

    if (mutex_lock_interruptible(&module_mutex) != 0)
        return -EINTR;

    mod = find_module(name);
    if (!mod) {
        ret = -ENOENT;
        goto out;
    }

    if (!list_empty(&mod->source_list)) {
        /* Other modules depend on us: get rid of them first. */
        ret = -EWOULDBLOCK;
        goto out;
    }

    /* Doing init or already dying? */
    if (mod->state != MODULE_STATE_LIVE) {
        /* FIXME: if (force), slam module count damn the torpedoes */
        pr_debug("%s already dying\n", mod->name);
        ret = -EBUSY;
        goto out;
    }

    /* If it has an init func, it must have an exit func to unload */
    if (mod->init && !mod->exit) {
        forced = try_force_unload(flags);
        if (!forced) {
            /* This module can't be removed */
            ret = -EBUSY;
            goto out;
        }
    }

    /* Stop the machine so refcounts can't move and disable module. */
    ret = try_stop_module(mod, flags, &forced);
    if (ret != 0)
        goto out;

    mutex_unlock(&module_mutex);
    /* Final destruction now no one is using it. */
    if (mod->exit != NULL)
        mod->exit();
    blocking_notifier_call_chain(&module_notify_list,
                     MODULE_STATE_GOING, mod);
    klp_module_going(mod);
    ftrace_release_mod(mod);

    async_synchronize_full();

    /* Store the name of the last unloaded module for diagnostic purposes */
    strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

    free_module(mod);
    return 0;
out:
    mutex_unlock(&module_mutex);
    return ret;
}
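
/*
 * Editor's sketch of the userspace side (not part of this file):
 * rmmod reaches this syscall with something equivalent to
 *
 *      syscall(__NR_delete_module, "mymodule", O_NONBLOCK);
 *
 * where O_TRUNC in flags requests a forced unload, honoured only when
 * CONFIG_MODULE_FORCE_UNLOAD is set (see try_force_unload() above).
 */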

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
    struct module_use *use;
    int printed_something = 0;

    seq_printf(m, " %i ", module_refcount(mod));

    /*
     * Always include a trailing , so userspace can differentiate
     * between this and the old multi-field proc format.
     */
    list_for_each_entry(use, &mod->source_list, source_list) {
        printed_something = 1;
        seq_printf(m, "%s,", use->source->name);
    }

    if (mod->init != NULL && mod->exit == NULL) {
        printed_something = 1;
        seq_puts(m, "[permanent],");
    }

    if (!printed_something)
        seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
    struct module *owner;

    preempt_disable();
    if (!find_symbol(symbol, &owner, NULL, true, false))
        BUG();
    module_put(owner);
    preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
    struct module *modaddr;
    unsigned long a = (unsigned long)dereference_function_descriptor(addr);

    if (core_kernel_text(a))
        return;

    /*
     * Even though we hold a reference on the module, we still need to
     * disable preemption in order to safely traverse the data structure.
     */
    preempt_disable();
    modaddr = __module_text_address(a);
    BUG_ON(!modaddr);
    module_put(modaddr);
    preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
               struct module_kobject *mk, char *buffer)
{
    return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
    __ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
    if (module) {
        preempt_disable();
        atomic_inc(&module->refcnt);
        trace_module_get(module, _RET_IP_);
        preempt_enable();
    }
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
    bool ret = true;

    if (module) {
        preempt_disable();
        /* Note: here, we can fail to get a reference */
        if (likely(module_is_live(module) &&
               atomic_inc_not_zero(&module->refcnt) != 0))
            trace_module_get(module, _RET_IP_);
        else
            ret = false;

        preempt_enable();
    }
    return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
    int ret;

    if (module) {
        preempt_disable();
        ret = atomic_dec_if_positive(&module->refcnt);
        WARN_ON(ret < 0);   /* Failed to put refcount */
        trace_module_put(module, _RET_IP_);
        preempt_enable();
    }
}
EXPORT_SYMBOL(module_put);
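
/*
 * Editor's sketch of the usual client pattern ("ops" and its owner
 * field are placeholders): pin the providing module around a call into
 * it, then drop the reference:
 *
 *      if (!try_module_get(ops->owner))
 *          return -ENODEV;
 *      ops->some_callback();
 *      module_put(ops->owner);
 */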

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
    /* We don't know the usage count, or what modules are using it. */
    seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
    return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
    return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
    size_t l = 0;
    int i;

    for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
        if (taint_flags[i].module && test_bit(i, &mod->taints))
            buf[l++] = taint_flags[i].c_true;
    }

    return l;
}
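
/*
 * Editor's note: the letters emitted here are the per-module subset of
 * the kernel taint flags, for example 'P' (proprietary), 'O'
 * (out-of-tree), 'E' (unsigned) or 'C' (staging); they show up in
 * /proc/modules and in /sys/module/<name>/taint.
 */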

static ssize_t show_initstate(struct module_attribute *mattr,
                  struct module_kobject *mk, char *buffer)
{
    const char *state = "unknown";

    switch (mk->mod->state) {
    case MODULE_STATE_LIVE:
        state = "live";
        break;
    case MODULE_STATE_COMING:
        state = "coming";
        break;
    case MODULE_STATE_GOING:
        state = "going";
        break;
    default:
        BUG();
    }
    return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
    __ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
                struct module_kobject *mk,
                const char *buffer, size_t count)
{
    enum kobject_action action;

    if (kobject_action_type(buffer, count, &action) == 0)
        kobject_uevent(&mk->kobj, action);
    return count;
}

struct module_attribute module_uevent =
    __ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
                 struct module_kobject *mk, char *buffer)
{
    return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
}

static struct module_attribute modinfo_coresize =
    __ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
                 struct module_kobject *mk, char *buffer)
{
    return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
}

static struct module_attribute modinfo_initsize =
    __ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
              struct module_kobject *mk, char *buffer)
{
    size_t l;

    l = module_flags_taint(mk->mod, buffer);
    buffer[l++] = '\n';
    return l;
}

static struct module_attribute modinfo_taint =
    __ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
    &module_uevent,
    &modinfo_version,
    &modinfo_srcversion,
    &modinfo_initstate,
    &modinfo_coresize,
    &modinfo_initsize,
    &modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
    &modinfo_refcnt,
#endif
    NULL,
};
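
/*
 * Editor's note: this table populates the per-module sysfs directory,
 * e.g. /sys/module/<name>/{initstate,coresize,initsize,taint} plus
 * refcnt when CONFIG_MODULE_UNLOAD is set; uevent is write-only.
 */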

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
    if (!test_taint(TAINT_FORCED_MODULE))
        pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
    add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
    return 0;
#else
    return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS

static u32 resolve_rel_crc(const s32 *crc)
{
    return *(u32 *)((void *)crc + *crc);
}

static int check_version(Elf_Shdr *sechdrs,
             unsigned int versindex,
             const char *symname,
             struct module *mod,
             const s32 *crc)
{
    unsigned int i, num_versions;
    struct modversion_info *versions;

    /* Exporting module didn't supply crcs?  OK, we're already tainted. */
    if (!crc)
        return 1;

    /* No versions at all?  modprobe --force does this. */
    if (versindex == 0)
        return try_to_force_load(mod, symname) == 0;

    versions = (void *) sechdrs[versindex].sh_addr;
    num_versions = sechdrs[versindex].sh_size
        / sizeof(struct modversion_info);

    for (i = 0; i < num_versions; i++) {
        u32 crcval;

        if (strcmp(versions[i].name, symname) != 0)
            continue;

        if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
            crcval = resolve_rel_crc(crc);
        else
            crcval = *crc;
        if (versions[i].crc == crcval)
            return 1;
        pr_debug("Found checksum %X vs module %lX\n",
             crcval, versions[i].crc);
        goto bad_version;
    }

    /* Broken toolchain. Warn once, then let it go... */
    pr_warn_once("%s: no symbol version for %s\n", mod->name, symname);
    return 1;

bad_version:
    pr_warn("%s: disagrees about version of symbol %s\n",
           mod->name, symname);
    return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                      unsigned int versindex,
                      struct module *mod)
{
    const s32 *crc;

    /*
     * Since this should be found in kernel (which can't be removed), no
     * locking is necessary -- use preempt_disable() to placate lockdep.
     */
    preempt_disable();
    if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
             &crc, true, false)) {
        preempt_enable();
        BUG();
    }
    preempt_enable();
    return check_version(sechdrs, versindex,
                 VMLINUX_SYMBOL_STR(module_layout), mod, crc);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
                 bool has_crcs)
{
    if (has_crcs) {
        amagic += strcspn(amagic, " ");
        bmagic += strcspn(bmagic, " ");
    }
    return strcmp(amagic, bmagic) == 0;
}
#else
static inline int check_version(Elf_Shdr *sechdrs,
                unsigned int versindex,
                const char *symname,
                struct module *mod,
                const s32 *crc)
{
    return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                      unsigned int versindex,
                      struct module *mod)
{
    return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
                 bool has_crcs)
{
    return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
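
/*
 * Editor's illustration (the version string is made up): a vermagic
 * looks like "4.11.0 SMP mod_unload modversions ".  With CRCs present,
 * same_magic() skips past the leading release token on both sides, so
 * only the feature flags after the first space have to match.
 */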
1360 
1361 /* Resolve a symbol for this module.  I.e. if we find one, record usage. */
1362 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1363                           const struct load_info *info,
1364                           const char *name,
1365                           char ownername[])
1366 {
1367     struct module *owner;
1368     const struct kernel_symbol *sym;
1369     const s32 *crc;
1370     int err;
1371 
1372     /*
1373      * The module_mutex should not be a heavily contended lock;
1374      * if we get the occasional sleep here, we'll go an extra iteration
1375      * in the wait_event_interruptible(), which is harmless.
1376      */
1377     sched_annotate_sleep();
1378     mutex_lock(&module_mutex);
1379     sym = find_symbol(name, &owner, &crc,
1380               !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1381     if (!sym)
1382         goto unlock;
1383 
1384     if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
1385         sym = ERR_PTR(-EINVAL);
1386         goto getname;
1387     }
1388 
1389     err = ref_module(mod, owner);
1390     if (err) {
1391         sym = ERR_PTR(err);
1392         goto getname;
1393     }
1394 
1395 getname:
1396     /* We must make copy under the lock if we failed to get ref. */
1397     strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1398 unlock:
1399     mutex_unlock(&module_mutex);
1400     return sym;
1401 }
1402 
1403 static const struct kernel_symbol *
1404 resolve_symbol_wait(struct module *mod,
1405             const struct load_info *info,
1406             const char *name)
1407 {
1408     const struct kernel_symbol *ksym;
1409     char owner[MODULE_NAME_LEN];
1410 
1411     if (wait_event_interruptible_timeout(module_wq,
1412             !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1413             || PTR_ERR(ksym) != -EBUSY,
1414                          30 * HZ) <= 0) {
1415         pr_warn("%s: gave up waiting for init of module %s.\n",
1416             mod->name, owner);
1417     }
1418     return ksym;
1419 }
1420 
1421 /*
1422  * /sys/module/foo/sections stuff
1423  * J. Corbet <corbet@lwn.net>
1424  */
1425 #ifdef CONFIG_SYSFS
1426 
1427 #ifdef CONFIG_KALLSYMS
1428 static inline bool sect_empty(const Elf_Shdr *sect)
1429 {
1430     return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1431 }
1432 
1433 struct module_sect_attr {
1434     struct module_attribute mattr;
1435     char *name;
1436     unsigned long address;
1437 };
1438 
1439 struct module_sect_attrs {
1440     struct attribute_group grp;
1441     unsigned int nsections;
1442     struct module_sect_attr attrs[0];
1443 };
1444 
1445 static ssize_t module_sect_show(struct module_attribute *mattr,
1446                 struct module_kobject *mk, char *buf)
1447 {
1448     struct module_sect_attr *sattr =
1449         container_of(mattr, struct module_sect_attr, mattr);
1450     return sprintf(buf, "0x%pK\n", (void *)sattr->address);
1451 }
1452 
1453 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1454 {
1455     unsigned int section;
1456 
1457     for (section = 0; section < sect_attrs->nsections; section++)
1458         kfree(sect_attrs->attrs[section].name);
1459     kfree(sect_attrs);
1460 }
1461 
1462 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1463 {
1464     unsigned int nloaded = 0, i, size[2];
1465     struct module_sect_attrs *sect_attrs;
1466     struct module_sect_attr *sattr;
1467     struct attribute **gattr;
1468 
1469     /* Count loaded sections and allocate structures */
1470     for (i = 0; i < info->hdr->e_shnum; i++)
1471         if (!sect_empty(&info->sechdrs[i]))
1472             nloaded++;
1473     size[0] = ALIGN(sizeof(*sect_attrs)
1474             + nloaded * sizeof(sect_attrs->attrs[0]),
1475             sizeof(sect_attrs->grp.attrs[0]));
1476     size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1477     sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1478     if (sect_attrs == NULL)
1479         return;
1480 
1481     /* Setup section attributes. */
1482     sect_attrs->grp.name = "sections";
1483     sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1484 
1485     sect_attrs->nsections = 0;
1486     sattr = &sect_attrs->attrs[0];
1487     gattr = &sect_attrs->grp.attrs[0];
1488     for (i = 0; i < info->hdr->e_shnum; i++) {
1489         Elf_Shdr *sec = &info->sechdrs[i];
1490         if (sect_empty(sec))
1491             continue;
1492         sattr->address = sec->sh_addr;
1493         sattr->name = kstrdup(info->secstrings + sec->sh_name,
1494                     GFP_KERNEL);
1495         if (sattr->name == NULL)
1496             goto out;
1497         sect_attrs->nsections++;
1498         sysfs_attr_init(&sattr->mattr.attr);
1499         sattr->mattr.show = module_sect_show;
1500         sattr->mattr.store = NULL;
1501         sattr->mattr.attr.name = sattr->name;
1502         sattr->mattr.attr.mode = S_IRUGO;
1503         *(gattr++) = &(sattr++)->mattr.attr;
1504     }
1505     *gattr = NULL;
1506 
1507     if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1508         goto out;
1509 
1510     mod->sect_attrs = sect_attrs;
1511     return;
1512   out:
1513     free_sect_attrs(sect_attrs);
1514 }
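
/*
 * Editor's note: the group built above shows up as
 * /sys/module/<name>/sections/<section-name>, each file printing the
 * section's load address through the %pK format, so the value may be
 * hidden from unprivileged readers depending on kptr_restrict.
 */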
1515 
1516 static void remove_sect_attrs(struct module *mod)
1517 {
1518     if (mod->sect_attrs) {
1519         sysfs_remove_group(&mod->mkobj.kobj,
1520                    &mod->sect_attrs->grp);
1521         /* We are positive that no one is using any sect attrs
1522          * at this point.  Deallocate immediately. */
1523         free_sect_attrs(mod->sect_attrs);
1524         mod->sect_attrs = NULL;
1525     }
1526 }
1527 
1528 /*
1529  * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1530  */
1531 
1532 struct module_notes_attrs {
1533     struct kobject *dir;
1534     unsigned int notes;
1535     struct bin_attribute attrs[0];
1536 };
1537 
1538 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1539                  struct bin_attribute *bin_attr,
1540                  char *buf, loff_t pos, size_t count)
1541 {
1542     /*
1543      * The caller checked the pos and count against our size.
1544      */
1545     memcpy(buf, bin_attr->private + pos, count);
1546     return count;
1547 }
1548 
1549 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1550                  unsigned int i)
1551 {
1552     if (notes_attrs->dir) {
1553         while (i-- > 0)
1554             sysfs_remove_bin_file(notes_attrs->dir,
1555                           &notes_attrs->attrs[i]);
1556         kobject_put(notes_attrs->dir);
1557     }
1558     kfree(notes_attrs);
1559 }
1560 
1561 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1562 {
1563     unsigned int notes, loaded, i;
1564     struct module_notes_attrs *notes_attrs;
1565     struct bin_attribute *nattr;
1566 
1567     /* failed to create section attributes, so can't create notes */
1568     if (!mod->sect_attrs)
1569         return;
1570 
1571     /* Count notes sections and allocate structures.  */
1572     notes = 0;
1573     for (i = 0; i < info->hdr->e_shnum; i++)
1574         if (!sect_empty(&info->sechdrs[i]) &&
1575             (info->sechdrs[i].sh_type == SHT_NOTE))
1576             ++notes;
1577 
1578     if (notes == 0)
1579         return;
1580 
1581     notes_attrs = kzalloc(sizeof(*notes_attrs)
1582                   + notes * sizeof(notes_attrs->attrs[0]),
1583                   GFP_KERNEL);
1584     if (notes_attrs == NULL)
1585         return;
1586 
1587     notes_attrs->notes = notes;
1588     nattr = &notes_attrs->attrs[0];
1589     for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1590         if (sect_empty(&info->sechdrs[i]))
1591             continue;
1592         if (info->sechdrs[i].sh_type == SHT_NOTE) {
1593             sysfs_bin_attr_init(nattr);
1594             nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1595             nattr->attr.mode = S_IRUGO;
1596             nattr->size = info->sechdrs[i].sh_size;
1597             nattr->private = (void *) info->sechdrs[i].sh_addr;
1598             nattr->read = module_notes_read;
1599             ++nattr;
1600         }
1601         ++loaded;
1602     }
1603 
1604     notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1605     if (!notes_attrs->dir)
1606         goto out;
1607 
1608     for (i = 0; i < notes; ++i)
1609         if (sysfs_create_bin_file(notes_attrs->dir,
1610                       &notes_attrs->attrs[i]))
1611             goto out;
1612 
1613     mod->notes_attrs = notes_attrs;
1614     return;
1615 
1616   out:
1617     free_notes_attrs(notes_attrs, i);
1618 }
1619 
1620 static void remove_notes_attrs(struct module *mod)
1621 {
1622     if (mod->notes_attrs)
1623         free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1624 }
1625 
1626 #else
1627 
1628 static inline void add_sect_attrs(struct module *mod,
1629                   const struct load_info *info)
1630 {
1631 }
1632 
1633 static inline void remove_sect_attrs(struct module *mod)
1634 {
1635 }
1636 
1637 static inline void add_notes_attrs(struct module *mod,
1638                    const struct load_info *info)
1639 {
1640 }
1641 
1642 static inline void remove_notes_attrs(struct module *mod)
1643 {
1644 }
1645 #endif /* CONFIG_KALLSYMS */
1646 
1647 static void add_usage_links(struct module *mod)
1648 {
1649 #ifdef CONFIG_MODULE_UNLOAD
1650     struct module_use *use;
1651     int nowarn;
1652 
1653     mutex_lock(&module_mutex);
1654     list_for_each_entry(use, &mod->target_list, target_list) {
1655         nowarn = sysfs_create_link(use->target->holders_dir,
1656                        &mod->mkobj.kobj, mod->name);
1657     }
1658     mutex_unlock(&module_mutex);
1659 #endif
1660 }
1661 
1662 static void del_usage_links(struct module *mod)
1663 {
1664 #ifdef CONFIG_MODULE_UNLOAD
1665     struct module_use *use;
1666 
1667     mutex_lock(&module_mutex);
1668     list_for_each_entry(use, &mod->target_list, target_list)
1669         sysfs_remove_link(use->target->holders_dir, mod->name);
1670     mutex_unlock(&module_mutex);
1671 #endif
1672 }
1673 
1674 static int module_add_modinfo_attrs(struct module *mod)
1675 {
1676     struct module_attribute *attr;
1677     struct module_attribute *temp_attr;
1678     int error = 0;
1679     int i;
1680 
1681     mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1682                     (ARRAY_SIZE(modinfo_attrs) + 1)),
1683                     GFP_KERNEL);
1684     if (!mod->modinfo_attrs)
1685         return -ENOMEM;
1686 
1687     temp_attr = mod->modinfo_attrs;
1688     for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1689         if (!attr->test || attr->test(mod)) {
1690             memcpy(temp_attr, attr, sizeof(*temp_attr));
1691             sysfs_attr_init(&temp_attr->attr);
1692             error = sysfs_create_file(&mod->mkobj.kobj,
1693                     &temp_attr->attr);
1694             ++temp_attr;
1695         }
1696     }
1697     return error;
1698 }
1699 
1700 static void module_remove_modinfo_attrs(struct module *mod)
1701 {
1702     struct module_attribute *attr;
1703     int i;
1704 
1705     for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1706         /* pick a field to test for end of list */
1707         if (!attr->attr.name)
1708             break;
1709         sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1710         if (attr->free)
1711             attr->free(mod);
1712     }
1713     kfree(mod->modinfo_attrs);
1714 }
1715 
1716 static void mod_kobject_put(struct module *mod)
1717 {
1718     DECLARE_COMPLETION_ONSTACK(c);
1719     mod->mkobj.kobj_completion = &c;
1720     kobject_put(&mod->mkobj.kobj);
1721     wait_for_completion(&c);
1722 }
1723 
1724 static int mod_sysfs_init(struct module *mod)
1725 {
1726     int err;
1727     struct kobject *kobj;
1728 
1729     if (!module_sysfs_initialized) {
1730         pr_err("%s: module sysfs not initialized\n", mod->name);
1731         err = -EINVAL;
1732         goto out;
1733     }
1734 
1735     kobj = kset_find_obj(module_kset, mod->name);
1736     if (kobj) {
1737         pr_err("%s: module is already loaded\n", mod->name);
1738         kobject_put(kobj);
1739         err = -EINVAL;
1740         goto out;
1741     }
1742 
1743     mod->mkobj.mod = mod;
1744 
1745     memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1746     mod->mkobj.kobj.kset = module_kset;
1747     err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1748                    "%s", mod->name);
1749     if (err)
1750         mod_kobject_put(mod);
1751 
1752     /* delay uevent until full sysfs population */
1753 out:
1754     return err;
1755 }
1756 
1757 static int mod_sysfs_setup(struct module *mod,
1758                const struct load_info *info,
1759                struct kernel_param *kparam,
1760                unsigned int num_params)
1761 {
1762     int err;
1763 
1764     err = mod_sysfs_init(mod);
1765     if (err)
1766         goto out;
1767 
1768     mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1769     if (!mod->holders_dir) {
1770         err = -ENOMEM;
1771         goto out_unreg;
1772     }
1773 
1774     err = module_param_sysfs_setup(mod, kparam, num_params);
1775     if (err)
1776         goto out_unreg_holders;
1777 
1778     err = module_add_modinfo_attrs(mod);
1779     if (err)
1780         goto out_unreg_param;
1781 
1782     add_usage_links(mod);
1783     add_sect_attrs(mod, info);
1784     add_notes_attrs(mod, info);
1785 
1786     kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1787     return 0;
1788 
1789 out_unreg_param:
1790     module_param_sysfs_remove(mod);
1791 out_unreg_holders:
1792     kobject_put(mod->holders_dir);
1793 out_unreg:
1794     mod_kobject_put(mod);
1795 out:
1796     return err;
1797 }
1798 
1799 static void mod_sysfs_fini(struct module *mod)
1800 {
1801     remove_notes_attrs(mod);
1802     remove_sect_attrs(mod);
1803     mod_kobject_put(mod);
1804 }
1805 
1806 static void init_param_lock(struct module *mod)
1807 {
1808     mutex_init(&mod->param_lock);
1809 }
1810 #else /* !CONFIG_SYSFS */
1811 
1812 static int mod_sysfs_setup(struct module *mod,
1813                const struct load_info *info,
1814                struct kernel_param *kparam,
1815                unsigned int num_params)
1816 {
1817     return 0;
1818 }
1819 
1820 static void mod_sysfs_fini(struct module *mod)
1821 {
1822 }
1823 
1824 static void module_remove_modinfo_attrs(struct module *mod)
1825 {
1826 }
1827 
1828 static void del_usage_links(struct module *mod)
1829 {
1830 }
1831 
1832 static void init_param_lock(struct module *mod)
1833 {
1834 }
1835 #endif /* CONFIG_SYSFS */
1836 
1837 static void mod_sysfs_teardown(struct module *mod)
1838 {
1839     del_usage_links(mod);
1840     module_remove_modinfo_attrs(mod);
1841     module_param_sysfs_remove(mod);
1842     kobject_put(mod->mkobj.drivers_dir);
1843     kobject_put(mod->holders_dir);
1844     mod_sysfs_fini(mod);
1845 }
1846 
1847 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
1848 /*
1849  * LKM RO/NX protection: protect module's text/ro-data
1850  * from modification and any data from execution.
1851  *
1852  * General layout of module is:
1853  *          [text] [read-only-data] [ro-after-init] [writable data]
1854  * text_size -----^                ^               ^               ^
1855  * ro_size ------------------------|               |               |
1856  * ro_after_init_size -----------------------------|               |
1857  * size -----------------------------------------------------------|
1858  *
1859  * These values are always page-aligned (as is base)
1860  */
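
/*
 * Worked example (illustrative numbers): with PAGE_SIZE == 4096 and
 * text_size = 0x3000, ro_size = 0x5000, ro_after_init_size = 0x6000,
 * size = 0x8000, the frob_*() helpers below touch:
 *
 *	text:           base + 0x0000, 3 pages
 *	rodata:         base + 0x3000, 2 pages
 *	ro-after-init:  base + 0x5000, 1 page
 *	writable data:  base + 0x6000, 2 pages
 *
 * i.e. each helper converts (end - start) >> PAGE_SHIFT pages starting
 * at the previous boundary; the real values come from layout_sections().
 */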
1861 static void frob_text(const struct module_layout *layout,
1862               int (*set_memory)(unsigned long start, int num_pages))
1863 {
1864     BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1865     BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1866     set_memory((unsigned long)layout->base,
1867            layout->text_size >> PAGE_SHIFT);
1868 }
1869 
1870 static void frob_rodata(const struct module_layout *layout,
1871             int (*set_memory)(unsigned long start, int num_pages))
1872 {
1873     BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1874     BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1875     BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1876     set_memory((unsigned long)layout->base + layout->text_size,
1877            (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
1878 }
1879 
1880 static void frob_ro_after_init(const struct module_layout *layout,
1881                 int (*set_memory)(unsigned long start, int num_pages))
1882 {
1883     BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1884     BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1885     BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1886     set_memory((unsigned long)layout->base + layout->ro_size,
1887            (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
1888 }
1889 
1890 static void frob_writable_data(const struct module_layout *layout,
1891                    int (*set_memory)(unsigned long start, int num_pages))
1892 {
1893     BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1894     BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1895     BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
1896     set_memory((unsigned long)layout->base + layout->ro_after_init_size,
1897            (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
1898 }
1899 
1900 /* livepatching wants to disable read-only so it can frob the module. */
1901 void module_disable_ro(const struct module *mod)
1902 {
1903     if (!rodata_enabled)
1904         return;
1905 
1906     frob_text(&mod->core_layout, set_memory_rw);
1907     frob_rodata(&mod->core_layout, set_memory_rw);
1908     frob_ro_after_init(&mod->core_layout, set_memory_rw);
1909     frob_text(&mod->init_layout, set_memory_rw);
1910     frob_rodata(&mod->init_layout, set_memory_rw);
1911 }
1912 
1913 void module_enable_ro(const struct module *mod, bool after_init)
1914 {
1915     if (!rodata_enabled)
1916         return;
1917 
1918     frob_text(&mod->core_layout, set_memory_ro);
1919     frob_rodata(&mod->core_layout, set_memory_ro);
1920     frob_text(&mod->init_layout, set_memory_ro);
1921     frob_rodata(&mod->init_layout, set_memory_ro);
1922 
1923     if (after_init)
1924         frob_ro_after_init(&mod->core_layout, set_memory_ro);
1925 }
1926 
1927 static void module_enable_nx(const struct module *mod)
1928 {
1929     frob_rodata(&mod->core_layout, set_memory_nx);
1930     frob_ro_after_init(&mod->core_layout, set_memory_nx);
1931     frob_writable_data(&mod->core_layout, set_memory_nx);
1932     frob_rodata(&mod->init_layout, set_memory_nx);
1933     frob_writable_data(&mod->init_layout, set_memory_nx);
1934 }
1935 
1936 static void module_disable_nx(const struct module *mod)
1937 {
1938     frob_rodata(&mod->core_layout, set_memory_x);
1939     frob_ro_after_init(&mod->core_layout, set_memory_x);
1940     frob_writable_data(&mod->core_layout, set_memory_x);
1941     frob_rodata(&mod->init_layout, set_memory_x);
1942     frob_writable_data(&mod->init_layout, set_memory_x);
1943 }
1944 
1945 /* Iterate through all modules and set each module's text as RW */
1946 void set_all_modules_text_rw(void)
1947 {
1948     struct module *mod;
1949 
1950     if (!rodata_enabled)
1951         return;
1952 
1953     mutex_lock(&module_mutex);
1954     list_for_each_entry_rcu(mod, &modules, list) {
1955         if (mod->state == MODULE_STATE_UNFORMED)
1956             continue;
1957 
1958         frob_text(&mod->core_layout, set_memory_rw);
1959         frob_text(&mod->init_layout, set_memory_rw);
1960     }
1961     mutex_unlock(&module_mutex);
1962 }
1963 
1964 /* Iterate through all modules and set each module's text as RO */
1965 void set_all_modules_text_ro(void)
1966 {
1967     struct module *mod;
1968 
1969     if (!rodata_enabled)
1970         return;
1971 
1972     mutex_lock(&module_mutex);
1973     list_for_each_entry_rcu(mod, &modules, list) {
1974         /*
1975          * Ignore going modules since it's possible that RO
1976          * protection has already been disabled; otherwise we'd
1977          * run into protection faults at module deallocation.
1978          */
1979         if (mod->state == MODULE_STATE_UNFORMED ||
1980             mod->state == MODULE_STATE_GOING)
1981             continue;
1982 
1983         frob_text(&mod->core_layout, set_memory_ro);
1984         frob_text(&mod->init_layout, set_memory_ro);
1985     }
1986     mutex_unlock(&module_mutex);
1987 }
1988 
1989 static void disable_ro_nx(const struct module_layout *layout)
1990 {
1991     if (rodata_enabled) {
1992         frob_text(layout, set_memory_rw);
1993         frob_rodata(layout, set_memory_rw);
1994         frob_ro_after_init(layout, set_memory_rw);
1995     }
1996     frob_rodata(layout, set_memory_x);
1997     frob_ro_after_init(layout, set_memory_x);
1998     frob_writable_data(layout, set_memory_x);
1999 }
2000 
2001 #else
2002 static void disable_ro_nx(const struct module_layout *layout) { }
2003 static void module_enable_nx(const struct module *mod) { }
2004 static void module_disable_nx(const struct module *mod) { }
2005 #endif
2006 
2007 #ifdef CONFIG_LIVEPATCH
2008 /*
2009  * Persist Elf information about a module. Copy the Elf header,
2010  * section header table, section string table, and symtab section
2011  * index from info to mod->klp_info.
2012  */
2013 static int copy_module_elf(struct module *mod, struct load_info *info)
2014 {
2015     unsigned int size, symndx;
2016     int ret;
2017 
2018     size = sizeof(*mod->klp_info);
2019     mod->klp_info = kmalloc(size, GFP_KERNEL);
2020     if (mod->klp_info == NULL)
2021         return -ENOMEM;
2022 
2023     /* Elf header */
2024     size = sizeof(mod->klp_info->hdr);
2025     memcpy(&mod->klp_info->hdr, info->hdr, size);
2026 
2027     /* Elf section header table */
2028     size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
2029     mod->klp_info->sechdrs = kmalloc(size, GFP_KERNEL);
2030     if (mod->klp_info->sechdrs == NULL) {
2031         ret = -ENOMEM;
2032         goto free_info;
2033     }
2034     memcpy(mod->klp_info->sechdrs, info->sechdrs, size);
2035 
2036     /* Elf section name string table */
2037     size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
2038     mod->klp_info->secstrings = kmalloc(size, GFP_KERNEL);
2039     if (mod->klp_info->secstrings == NULL) {
2040         ret = -ENOMEM;
2041         goto free_sechdrs;
2042     }
2043     memcpy(mod->klp_info->secstrings, info->secstrings, size);
2044 
2045     /* Elf symbol section index */
2046     symndx = info->index.sym;
2047     mod->klp_info->symndx = symndx;
2048 
2049     /*
2050      * For livepatch modules, core_kallsyms.symtab is a complete
2051      * copy of the original symbol table. Adjust sh_addr to point
2052      * to core_kallsyms.symtab since the copy of the symtab in module
2053      * init memory is freed at the end of do_init_module().
2054      */
2055     mod->klp_info->sechdrs[symndx].sh_addr =
2056         (unsigned long) mod->core_kallsyms.symtab;
2057 
2058     return 0;
2059 
2060 free_sechdrs:
2061     kfree(mod->klp_info->sechdrs);
2062 free_info:
2063     kfree(mod->klp_info);
2064     return ret;
2065 }
2066 
2067 static void free_module_elf(struct module *mod)
2068 {
2069     kfree(mod->klp_info->sechdrs);
2070     kfree(mod->klp_info->secstrings);
2071     kfree(mod->klp_info);
2072 }
2073 #else /* !CONFIG_LIVEPATCH */
2074 static int copy_module_elf(struct module *mod, struct load_info *info)
2075 {
2076     return 0;
2077 }
2078 
2079 static void free_module_elf(struct module *mod)
2080 {
2081 }
2082 #endif /* CONFIG_LIVEPATCH */
2083 
2084 void __weak module_memfree(void *module_region)
2085 {
2086     vfree(module_region);
2087 }
2088 
2089 void __weak module_arch_cleanup(struct module *mod)
2090 {
2091 }
2092 
2093 void __weak module_arch_freeing_init(struct module *mod)
2094 {
2095 }
2096 
2097 /* Free a module, remove from lists, etc. */
2098 static void free_module(struct module *mod)
2099 {
2100     trace_module_free(mod);
2101 
2102     mod_sysfs_teardown(mod);
2103 
2104     /* We leave it in the list to prevent duplicate loads, but make
2105      * sure that no one uses it while it's being deconstructed. */
2106     mutex_lock(&module_mutex);
2107     mod->state = MODULE_STATE_UNFORMED;
2108     mutex_unlock(&module_mutex);
2109 
2110     /* Remove dynamic debug info */
2111     ddebug_remove_module(mod->name);
2112 
2113     /* Arch-specific cleanup. */
2114     module_arch_cleanup(mod);
2115 
2116     /* Module unload stuff */
2117     module_unload_free(mod);
2118 
2119     /* Free any allocated parameters. */
2120     destroy_params(mod->kp, mod->num_kp);
2121 
2122     if (is_livepatch_module(mod))
2123         free_module_elf(mod);
2124 
2125     /* Now we can delete it from the lists */
2126     mutex_lock(&module_mutex);
2127     /* Unlink carefully: kallsyms could be walking list. */
2128     list_del_rcu(&mod->list);
2129     mod_tree_remove(mod);
2130     /* Remove this module from bug list, this uses list_del_rcu */
2131     module_bug_cleanup(mod);
2132     /* Wait for an RCU-sched grace period before releasing mod->list and buglist. */
2133     synchronize_sched();
2134     mutex_unlock(&module_mutex);
2135 
2136     /* This may be empty, but that's OK */
2137     disable_ro_nx(&mod->init_layout);
2138     module_arch_freeing_init(mod);
2139     module_memfree(mod->init_layout.base);
2140     kfree(mod->args);
2141     percpu_modfree(mod);
2142 
2143     /* Free lock-classes; relies on the preceding sync_rcu(). */
2144     lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2145 
2146     /* Finally, free the core (containing the module structure) */
2147     disable_ro_nx(&mod->core_layout);
2148     module_memfree(mod->core_layout.base);
2149 
2150 #ifdef CONFIG_MPU
2151     update_protections(current->mm);
2152 #endif
2153 }
2154 
2155 void *__symbol_get(const char *symbol)
2156 {
2157     struct module *owner;
2158     const struct kernel_symbol *sym;
2159 
2160     preempt_disable();
2161     sym = find_symbol(symbol, &owner, NULL, true, true);
2162     if (sym && strong_try_module_get(owner))
2163         sym = NULL;
2164     preempt_enable();
2165 
2166     return sym ? (void *)sym->value : NULL;
2167 }
2168 EXPORT_SYMBOL_GPL(__symbol_get);
2169 
2170 /*
2171  * Ensure that an exported symbol [global namespace] does not already exist
2172  * in the kernel or in some other module's exported symbol table.
2173  *
2174  * You must hold the module_mutex.
2175  */
2176 static int verify_export_symbols(struct module *mod)
2177 {
2178     unsigned int i;
2179     struct module *owner;
2180     const struct kernel_symbol *s;
2181     struct {
2182         const struct kernel_symbol *sym;
2183         unsigned int num;
2184     } arr[] = {
2185         { mod->syms, mod->num_syms },
2186         { mod->gpl_syms, mod->num_gpl_syms },
2187         { mod->gpl_future_syms, mod->num_gpl_future_syms },
2188 #ifdef CONFIG_UNUSED_SYMBOLS
2189         { mod->unused_syms, mod->num_unused_syms },
2190         { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2191 #endif
2192     };
2193 
2194     for (i = 0; i < ARRAY_SIZE(arr); i++) {
2195         for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2196             if (find_symbol(s->name, &owner, NULL, true, false)) {
2197                 pr_err("%s: exports duplicate symbol %s (owned by %s)\n",
2198                        mod->name, s->name, module_name(owner));
2200                 return -ENOEXEC;
2201             }
2202         }
2203     }
2204     return 0;
2205 }
2206 
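/*
 * Example failure mode (hypothetical modules, for illustration): if
 * mod_a.ko contains
 *
 *	int shared_helper(void)
 *	{
 *		return 0;
 *	}
 *	EXPORT_SYMBOL(shared_helper);
 *
 * and mod_b.ko exports its own shared_helper() the same way, loading
 * mod_b after mod_a fails with -ENOEXEC and logs roughly:
 *
 *	mod_b: exports duplicate symbol shared_helper (owned by mod_a)
 */
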
2207 /* Change all symbols so that st_value encodes the pointer directly. */
2208 static int simplify_symbols(struct module *mod, const struct load_info *info)
2209 {
2210     Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2211     Elf_Sym *sym = (void *)symsec->sh_addr;
2212     unsigned long secbase;
2213     unsigned int i;
2214     int ret = 0;
2215     const struct kernel_symbol *ksym;
2216 
2217     for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2218         const char *name = info->strtab + sym[i].st_name;
2219 
2220         switch (sym[i].st_shndx) {
2221         case SHN_COMMON:
2222             /* Ignore common symbols */
2223             if (!strncmp(name, "__gnu_lto", 9))
2224                 break;
2225 
2226             /* We compiled with -fno-common.  Common symbols are
2227                not supposed to appear here.  */
2228             pr_debug("Common symbol: %s\n", name);
2229             pr_warn("%s: please compile with -fno-common\n",
2230                    mod->name);
2231             ret = -ENOEXEC;
2232             break;
2233 
2234         case SHN_ABS:
2235             /* Don't need to do anything */
2236             pr_debug("Absolute symbol: 0x%08lx\n",
2237                    (long)sym[i].st_value);
2238             break;
2239 
2240         case SHN_LIVEPATCH:
2241             /* Livepatch symbols are resolved by livepatch */
2242             break;
2243 
2244         case SHN_UNDEF:
2245             ksym = resolve_symbol_wait(mod, info, name);
2246             /* Ok if resolved.  */
2247             if (ksym && !IS_ERR(ksym)) {
2248                 sym[i].st_value = ksym->value;
2249                 break;
2250             }
2251 
2252             /* Ok if weak.  */
2253             if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2254                 break;
2255 
2256             pr_warn("%s: Unknown symbol %s (err %li)\n",
2257                 mod->name, name, PTR_ERR(ksym));
2258             ret = PTR_ERR(ksym) ?: -ENOENT;
2259             break;
2260 
2261         default:
2262             /* Divert to percpu allocation if a percpu var. */
2263             if (sym[i].st_shndx == info->index.pcpu)
2264                 secbase = (unsigned long)mod_percpu(mod);
2265             else
2266                 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2267             sym[i].st_value += secbase;
2268             break;
2269         }
2270     }
2271 
2272     return ret;
2273 }
2274 
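/*
 * After simplify_symbols() every usable symbol's st_value holds an
 * absolute address.  Illustrative example: a symbol defined at offset
 * 0x40 into section 3 becomes
 *
 *	sym.st_value = 0x40 + info->sechdrs[3].sh_addr;
 *
 * while an SHN_UNDEF reference to, say, printk is patched to the
 * address recorded in the kernel's __ksymtab entry for printk.
 */
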
2275 static int apply_relocations(struct module *mod, const struct load_info *info)
2276 {
2277     unsigned int i;
2278     int err = 0;
2279 
2280     /* Now do relocations. */
2281     for (i = 1; i < info->hdr->e_shnum; i++) {
2282         unsigned int infosec = info->sechdrs[i].sh_info;
2283 
2284         /* Not a valid relocation section? */
2285         if (infosec >= info->hdr->e_shnum)
2286             continue;
2287 
2288         /* Don't bother with non-allocated sections */
2289         if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2290             continue;
2291 
2292         /* Livepatch relocation sections are applied by livepatch */
2293         if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2294             continue;
2295 
2296         if (info->sechdrs[i].sh_type == SHT_REL)
2297             err = apply_relocate(info->sechdrs, info->strtab,
2298                          info->index.sym, i, mod);
2299         else if (info->sechdrs[i].sh_type == SHT_RELA)
2300             err = apply_relocate_add(info->sechdrs, info->strtab,
2301                          info->index.sym, i, mod);
2302         if (err < 0)
2303             break;
2304     }
2305     return err;
2306 }
2307 
2308 /* Additional bytes needed by arch in front of individual sections */
2309 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2310                          unsigned int section)
2311 {
2312     /* default implementation just returns zero */
2313     return 0;
2314 }
2315 
2316 /* Update size with this section: return offset. */
2317 static long get_offset(struct module *mod, unsigned int *size,
2318                Elf_Shdr *sechdr, unsigned int section)
2319 {
2320     long ret;
2321 
2322     *size += arch_mod_section_prepend(mod, section);
2323     ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2324     *size = ret + sechdr->sh_size;
2325     return ret;
2326 }
2327 
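/*
 * Worked example (illustrative numbers): with *size == 0x1234, no
 * arch prepend and sh_addralign == 16, get_offset() returns
 * ALIGN(0x1234, 16) == 0x1240 and bumps *size to 0x1240 + sh_size.
 * A section with sh_addralign == 0 is treated as 1-byte aligned via
 * the "?: 1" fallback.
 */
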
2328 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2329    might -- code, read-only data, read-write data, small data.  Tally
2330    sizes, and place the offsets into sh_entsize fields: high bit means it
2331    belongs in init. */
2332 static void layout_sections(struct module *mod, struct load_info *info)
2333 {
2334     static unsigned long const masks[][2] = {
2335         /* NOTE: all executable code must be the first section
2336          * in this array; otherwise modify the text_size
2337          * finder in the two loops below */
2338         { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2339         { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2340         { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
2341         { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2342         { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2343     };
2344     unsigned int m, i;
2345 
2346     for (i = 0; i < info->hdr->e_shnum; i++)
2347         info->sechdrs[i].sh_entsize = ~0UL;
2348 
2349     pr_debug("Core section allocation order:\n");
2350     for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2351         for (i = 0; i < info->hdr->e_shnum; ++i) {
2352             Elf_Shdr *s = &info->sechdrs[i];
2353             const char *sname = info->secstrings + s->sh_name;
2354 
2355             if ((s->sh_flags & masks[m][0]) != masks[m][0]
2356                 || (s->sh_flags & masks[m][1])
2357                 || s->sh_entsize != ~0UL
2358                 || strstarts(sname, ".init"))
2359                 continue;
2360             s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2361             pr_debug("\t%s\n", sname);
2362         }
2363         switch (m) {
2364         case 0: /* executable */
2365             mod->core_layout.size = debug_align(mod->core_layout.size);
2366             mod->core_layout.text_size = mod->core_layout.size;
2367             break;
2368         case 1: /* RO: text and ro-data */
2369             mod->core_layout.size = debug_align(mod->core_layout.size);
2370             mod->core_layout.ro_size = mod->core_layout.size;
2371             break;
2372         case 2: /* RO after init */
2373             mod->core_layout.size = debug_align(mod->core_layout.size);
2374             mod->core_layout.ro_after_init_size = mod->core_layout.size;
2375             break;
2376         case 4: /* whole core */
2377             mod->core_layout.size = debug_align(mod->core_layout.size);
2378             break;
2379         }
2380     }
2381 
2382     pr_debug("Init section allocation order:\n");
2383     for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2384         for (i = 0; i < info->hdr->e_shnum; ++i) {
2385             Elf_Shdr *s = &info->sechdrs[i];
2386             const char *sname = info->secstrings + s->sh_name;
2387 
2388             if ((s->sh_flags & masks[m][0]) != masks[m][0]
2389                 || (s->sh_flags & masks[m][1])
2390                 || s->sh_entsize != ~0UL
2391                 || !strstarts(sname, ".init"))
2392                 continue;
2393             s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2394                      | INIT_OFFSET_MASK);
2395             pr_debug("\t%s\n", sname);
2396         }
2397         switch (m) {
2398         case 0: /* executable */
2399             mod->init_layout.size = debug_align(mod->init_layout.size);
2400             mod->init_layout.text_size = mod->init_layout.size;
2401             break;
2402         case 1: /* RO: text and ro-data */
2403             mod->init_layout.size = debug_align(mod->init_layout.size);
2404             mod->init_layout.ro_size = mod->init_layout.size;
2405             break;
2406         case 2:
2407             /*
2408              * RO after init doesn't apply to init_layout (only
2409              * core_layout), so it just takes the value of ro_size.
2410              */
2411             mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
2412             break;
2413         case 4: /* whole init */
2414             mod->init_layout.size = debug_align(mod->init_layout.size);
2415             break;
2416         }
2417     }
2418 }
2419 
2420 static void set_license(struct module *mod, const char *license)
2421 {
2422     if (!license)
2423         license = "unspecified";
2424 
2425     if (!license_is_gpl_compatible(license)) {
2426         if (!test_taint(TAINT_PROPRIETARY_MODULE))
2427             pr_warn("%s: module license '%s' taints kernel.\n",
2428                 mod->name, license);
2429         add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2430                  LOCKDEP_NOW_UNRELIABLE);
2431     }
2432 }
2433 
2434 /* Parse tag=value strings from .modinfo section */
2435 static char *next_string(char *string, unsigned long *secsize)
2436 {
2437     /* Skip non-zero chars */
2438     while (string[0]) {
2439         string++;
2440         if ((*secsize)-- <= 1)
2441             return NULL;
2442     }
2443 
2444     /* Skip any zero padding. */
2445     while (!string[0]) {
2446         string++;
2447         if ((*secsize)-- <= 1)
2448             return NULL;
2449     }
2450     return string;
2451 }
2452 
2453 static char *get_modinfo(struct load_info *info, const char *tag)
2454 {
2455     char *p;
2456     unsigned int taglen = strlen(tag);
2457     Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2458     unsigned long size = infosec->sh_size;
2459 
2460     for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2461         if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2462             return p + taglen + 1;
2463     }
2464     return NULL;
2465 }
2466 
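/*
 * The .modinfo section is a run of NUL-terminated "tag=value"
 * strings, as emitted by MODULE_INFO() and modpost, e.g.
 * (illustrative contents):
 *
 *	license=GPL\0author=Jane Doe\0depends=\0vermagic=...\0
 *
 * so get_modinfo(info, "license") returns a pointer to "GPL".
 * next_string() steps over the current string plus any NUL padding
 * and returns NULL once the section is exhausted.
 */
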
2467 static void setup_modinfo(struct module *mod, struct load_info *info)
2468 {
2469     struct module_attribute *attr;
2470     int i;
2471 
2472     for (i = 0; (attr = modinfo_attrs[i]); i++) {
2473         if (attr->setup)
2474             attr->setup(mod, get_modinfo(info, attr->attr.name));
2475     }
2476 }
2477 
2478 static void free_modinfo(struct module *mod)
2479 {
2480     struct module_attribute *attr;
2481     int i;
2482 
2483     for (i = 0; (attr = modinfo_attrs[i]); i++) {
2484         if (attr->free)
2485             attr->free(mod);
2486     }
2487 }
2488 
2489 #ifdef CONFIG_KALLSYMS
2490 
2491 /* lookup symbol in given range of kernel_symbols */
2492 static const struct kernel_symbol *lookup_symbol(const char *name,
2493     const struct kernel_symbol *start,
2494     const struct kernel_symbol *stop)
2495 {
2496     return bsearch(name, start, stop - start,
2497             sizeof(struct kernel_symbol), cmp_name);
2498 }
2499 
2500 static int is_exported(const char *name, unsigned long value,
2501                const struct module *mod)
2502 {
2503     const struct kernel_symbol *ks;
2504     if (!mod)
2505         ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2506     else
2507         ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2508     return ks != NULL && ks->value == value;
2509 }
2510 
2511 /* As per nm */
2512 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2513 {
2514     const Elf_Shdr *sechdrs = info->sechdrs;
2515 
2516     if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2517         if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2518             return 'v';
2519         else
2520             return 'w';
2521     }
2522     if (sym->st_shndx == SHN_UNDEF)
2523         return 'U';
2524     if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
2525         return 'a';
2526     if (sym->st_shndx >= SHN_LORESERVE)
2527         return '?';
2528     if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2529         return 't';
2530     if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2531         && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2532         if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2533             return 'r';
2534         else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2535             return 'g';
2536         else
2537             return 'd';
2538     }
2539     if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2540         if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2541             return 's';
2542         else
2543             return 'b';
2544     }
2545     if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2546               ".debug")) {
2547         return 'n';
2548     }
2549     return '?';
2550 }
2551 
2552 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2553             unsigned int shnum, unsigned int pcpundx)
2554 {
2555     const Elf_Shdr *sec;
2556 
2557     if (src->st_shndx == SHN_UNDEF
2558         || src->st_shndx >= shnum
2559         || !src->st_name)
2560         return false;
2561 
2562 #ifdef CONFIG_KALLSYMS_ALL
2563     if (src->st_shndx == pcpundx)
2564         return true;
2565 #endif
2566 
2567     sec = sechdrs + src->st_shndx;
2568     if (!(sec->sh_flags & SHF_ALLOC)
2569 #ifndef CONFIG_KALLSYMS_ALL
2570         || !(sec->sh_flags & SHF_EXECINSTR)
2571 #endif
2572         || (sec->sh_entsize & INIT_OFFSET_MASK))
2573         return false;
2574 
2575     return true;
2576 }
2577 
2578 /*
2579  * We only allocate and copy the strings needed by the parts of symtab
2580  * we keep.  This is simple, but has the effect of making multiple
2581  * copies of duplicates.  We could be more sophisticated, see
2582  * linux-kernel thread starting with
2583  * <73defb5e4bca04a6431392cc341112b1@localhost>.
2584  */
2585 static void layout_symtab(struct module *mod, struct load_info *info)
2586 {
2587     Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2588     Elf_Shdr *strsect = info->sechdrs + info->index.str;
2589     const Elf_Sym *src;
2590     unsigned int i, nsrc, ndst, strtab_size = 0;
2591 
2592     /* Put symbol section at end of init part of module. */
2593     symsect->sh_flags |= SHF_ALLOC;
2594     symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2595                      info->index.sym) | INIT_OFFSET_MASK;
2596     pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2597 
2598     src = (void *)info->hdr + symsect->sh_offset;
2599     nsrc = symsect->sh_size / sizeof(*src);
2600 
2601     /* Compute total space required for the core symbols' strtab. */
2602     for (ndst = i = 0; i < nsrc; i++) {
2603         if (i == 0 || is_livepatch_module(mod) ||
2604             is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2605                    info->index.pcpu)) {
2606             strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2607             ndst++;
2608         }
2609     }
2610 
2611     /* Append room for core symbols at end of core part. */
2612     info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2613     info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2614     mod->core_layout.size += strtab_size;
2615     mod->core_layout.size = debug_align(mod->core_layout.size);
2616 
2617     /* Put string table section at end of init part of module. */
2618     strsect->sh_flags |= SHF_ALLOC;
2619     strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2620                      info->index.str) | INIT_OFFSET_MASK;
2621     pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2622 
2623     /* We'll tack temporary mod_kallsyms on the end. */
2624     mod->init_layout.size = ALIGN(mod->init_layout.size,
2625                       __alignof__(struct mod_kallsyms));
2626     info->mod_kallsyms_init_off = mod->init_layout.size;
2627     mod->init_layout.size += sizeof(struct mod_kallsyms);
2628     mod->init_layout.size = debug_align(mod->init_layout.size);
2629 }
2630 
2631 /*
2632  * We use the full symtab and strtab which layout_symtab arranged to
2633  * be appended to the init section.  Later we switch to the cut-down
2634  * core-only ones.
2635  */
2636 static void add_kallsyms(struct module *mod, const struct load_info *info)
2637 {
2638     unsigned int i, ndst;
2639     const Elf_Sym *src;
2640     Elf_Sym *dst;
2641     char *s;
2642     Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2643 
2644     /* Set up to point into init section. */
2645     mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2646 
2647     mod->kallsyms->symtab = (void *)symsec->sh_addr;
2648     mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2649     /* Make sure we get permanent strtab: don't use info->strtab. */
2650     mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2651 
2652     /* Set types up while we still have access to sections. */
2653     for (i = 0; i < mod->kallsyms->num_symtab; i++)
2654         mod->kallsyms->symtab[i].st_info
2655             = elf_type(&mod->kallsyms->symtab[i], info);
2656 
2657     /* Now populate the cut down core kallsyms for after init. */
2658     mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2659     mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2660     src = mod->kallsyms->symtab;
2661     for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2662         if (i == 0 || is_livepatch_module(mod) ||
2663             is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2664                    info->index.pcpu)) {
2665             dst[ndst] = src[i];
2666             dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2667             s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2668                      KSYM_NAME_LEN) + 1;
2669         }
2670     }
2671     mod->core_kallsyms.num_symtab = ndst;
2672 }
2673 #else
2674 static inline void layout_symtab(struct module *mod, struct load_info *info)
2675 {
2676 }
2677 
2678 static void add_kallsyms(struct module *mod, const struct load_info *info)
2679 {
2680 }
2681 #endif /* CONFIG_KALLSYMS */
2682 
2683 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2684 {
2685     if (!debug)
2686         return;
2687 #ifdef CONFIG_DYNAMIC_DEBUG
2688     if (ddebug_add_module(debug, num, debug->modname))
2689         pr_err("dynamic debug error adding module: %s\n",
2690             debug->modname);
2691 #endif
2692 }
2693 
2694 static void dynamic_debug_remove(struct _ddebug *debug)
2695 {
2696     if (debug)
2697         ddebug_remove_module(debug->modname);
2698 }
2699 
2700 void * __weak module_alloc(unsigned long size)
2701 {
2702     return vmalloc_exec(size);
2703 }
2704 
2705 #ifdef CONFIG_DEBUG_KMEMLEAK
2706 static void kmemleak_load_module(const struct module *mod,
2707                  const struct load_info *info)
2708 {
2709     unsigned int i;
2710 
2711     /* only scan the sections containing data */
2712     kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2713 
2714     for (i = 1; i < info->hdr->e_shnum; i++) {
2715         /* Scan all allocated, writable sections that are not executable */
2716         if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2717             !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2718             (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2719             continue;
2720 
2721         kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2722                    info->sechdrs[i].sh_size, GFP_KERNEL);
2723     }
2724 }
2725 #else
2726 static inline void kmemleak_load_module(const struct module *mod,
2727                     const struct load_info *info)
2728 {
2729 }
2730 #endif
2731 
2732 #ifdef CONFIG_MODULE_SIG
2733 static int module_sig_check(struct load_info *info, int flags)
2734 {
2735     int err = -ENOKEY;
2736     const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2737     const void *mod = info->hdr;
2738 
2739     /*
2740      * Require flags == 0, as a module with version information
2741      * removed is no longer the module that was signed
2742      */
2743     if (flags == 0 &&
2744         info->len > markerlen &&
2745         memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2746         /* We truncate the module to discard the signature */
2747         info->len -= markerlen;
2748         err = mod_verify_sig(mod, &info->len);
2749     }
2750 
2751     if (!err) {
2752         info->sig_ok = true;
2753         return 0;
2754     }
2755 
2756     /* Not having a signature is only an error if we're strict. */
2757     if (err == -ENOKEY && !sig_enforce)
2758         err = 0;
2759 
2760     return err;
2761 }
2762 #else /* !CONFIG_MODULE_SIG */
2763 static int module_sig_check(struct load_info *info, int flags)
2764 {
2765     return 0;
2766 }
2767 #endif /* !CONFIG_MODULE_SIG */
2768 
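/*
 * On-disk layout of a signed module (rough sketch):
 *
 *	[ ELF image ][ signature ][ struct module_signature ][ MODULE_SIG_STRING ]
 *
 * where MODULE_SIG_STRING is the literal "~Module signature
 * appended~\n".  module_sig_check() strips the trailing marker and
 * hands the rest to mod_verify_sig(), which parses the trailer
 * backwards to locate the signature proper.
 */
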
2769 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2770 static int elf_header_check(struct load_info *info)
2771 {
2772     if (info->len < sizeof(*(info->hdr)))
2773         return -ENOEXEC;
2774 
2775     if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2776         || info->hdr->e_type != ET_REL
2777         || !elf_check_arch(info->hdr)
2778         || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2779         return -ENOEXEC;
2780 
2781     if (info->hdr->e_shoff >= info->len
2782         || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2783         info->len - info->hdr->e_shoff))
2784         return -ENOEXEC;
2785 
2786     return 0;
2787 }
2788 
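/*
 * Modules are relocatable objects, which is why e_type is checked
 * against ET_REL rather than ET_EXEC/ET_DYN; on a healthy .ko,
 * "readelf -h" reports roughly:
 *
 *	Type:      REL (Relocatable file)
 *	Machine:   <one that satisfies elf_check_arch()>
 *
 * The e_shoff/e_shnum arithmetic guards against a section header
 * table that would run past the end of the copied image.
 */
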
2789 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2790 
2791 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2792 {
2793     do {
2794         unsigned long n = min(len, COPY_CHUNK_SIZE);
2795 
2796         if (copy_from_user(dst, usrc, n) != 0)
2797             return -EFAULT;
2798         cond_resched();
2799         dst += n;
2800         usrc += n;
2801         len -= n;
2802     } while (len);
2803     return 0;
2804 }
2805 
2806 #ifdef CONFIG_LIVEPATCH
2807 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2808 {
2809     if (get_modinfo(info, "livepatch")) {
2810         mod->klp = true;
2811         add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2812     }
2813 
2814     return 0;
2815 }
2816 #else /* !CONFIG_LIVEPATCH */
2817 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2818 {
2819     if (get_modinfo(info, "livepatch")) {
2820         pr_err("%s: module is marked as a livepatch module, but livepatch support is disabled\n",
2821                mod->name);
2822         return -ENOEXEC;
2823     }
2824 
2825     return 0;
2826 }
2827 #endif /* CONFIG_LIVEPATCH */
2828 
2829 /* Sets info->hdr and info->len. */
2830 static int copy_module_from_user(const void __user *umod, unsigned long len,
2831                   struct load_info *info)
2832 {
2833     int err;
2834 
2835     info->len = len;
2836     if (info->len < sizeof(*(info->hdr)))
2837         return -ENOEXEC;
2838 
2839     err = security_kernel_read_file(NULL, READING_MODULE);
2840     if (err)
2841         return err;
2842 
2843     /* Suck in entire file: we'll want most of it. */
2844     info->hdr = __vmalloc(info->len,
2845             GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN, PAGE_KERNEL);
2846     if (!info->hdr)
2847         return -ENOMEM;
2848 
2849     if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2850         vfree(info->hdr);
2851         return -EFAULT;
2852     }
2853 
2854     return 0;
2855 }
2856 
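/*
 * Userspace reaches copy_module_from_user() via init_module(2).  A
 * minimal caller (illustrative sketch, error handling elided):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int load(const char *path)
 *	{
 *		struct stat st;
 *		int fd = open(path, O_RDONLY);
 *		void *img;
 *
 *		fstat(fd, &st);
 *		img = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *		return syscall(SYS_init_module, img, st.st_size, "");
 *	}
 *
 * finit_module(2) avoids the userspace mapping entirely by passing a
 * file descriptor for the kernel to read.
 */
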
2857 static void free_copy(struct load_info *info)
2858 {
2859     vfree(info->hdr);
2860 }
2861 
2862 static int rewrite_section_headers(struct load_info *info, int flags)
2863 {
2864     unsigned int i;
2865 
2866     /* This should always be true, but let's be sure. */
2867     info->sechdrs[0].sh_addr = 0;
2868 
2869     for (i = 1; i < info->hdr->e_shnum; i++) {
2870         Elf_Shdr *shdr = &info->sechdrs[i];
2871         if (shdr->sh_type != SHT_NOBITS
2872             && info->len < shdr->sh_offset + shdr->sh_size) {
2873             pr_err("Module len %lu truncated\n", info->len);
2874             return -ENOEXEC;
2875         }
2876 
2877         /* Set each section's sh_addr to its address in the
2878            temporary image. */
2879         shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2880 
2881 #ifndef CONFIG_MODULE_UNLOAD
2882         /* Don't load .exit sections */
2883         if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2884             shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2885 #endif
2886     }
2887 
2888     /* Track but don't keep modinfo and version sections. */
2889     if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2890         info->index.vers = 0; /* Pretend no __versions section! */
2891     else
2892         info->index.vers = find_sec(info, "__versions");
2893     info->index.info = find_sec(info, ".modinfo");
2894     info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2895     info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2896     return 0;
2897 }
2898 
2899 /*
2900  * Set up our basic convenience variables (pointers to section headers,
2901  * search for module section index etc), and do some basic section
2902  * verification.
2903  *
2904  * Return the temporary module pointer (we'll replace it with the final
2905  * one when we move the module sections around).
2906  */
2907 static struct module *setup_load_info(struct load_info *info, int flags)
2908 {
2909     unsigned int i;
2910     int err;
2911     struct module *mod;
2912 
2913     /* Set up the convenience variables */
2914     info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2915     info->secstrings = (void *)info->hdr
2916         + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2917 
2918     err = rewrite_section_headers(info, flags);
2919     if (err)
2920         return ERR_PTR(err);
2921 
2922     /* Find internal symbols and strings. */
2923     for (i = 1; i < info->hdr->e_shnum; i++) {
2924         if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2925             info->index.sym = i;
2926             info->index.str = info->sechdrs[i].sh_link;
2927             info->strtab = (char *)info->hdr
2928                 + info->sechdrs[info->index.str].sh_offset;
2929             break;
2930         }
2931     }
2932 
2933     info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2934     if (!info->index.mod) {
2935         pr_warn("No module found in object\n");
2936         return ERR_PTR(-ENOEXEC);
2937     }
2938     /* This is temporary: point mod into copy of data. */
2939     mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2940 
2941     if (info->index.sym == 0) {
2942         pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
2943         return ERR_PTR(-ENOEXEC);
2944     }
2945 
2946     info->index.pcpu = find_pcpusec(info);
2947 
2948     /* Check module struct version now, before we try to use module. */
2949     if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2950         return ERR_PTR(-ENOEXEC);
2951 
2952     return mod;
2953 }
2954 
2955 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2956 {
2957     const char *modmagic = get_modinfo(info, "vermagic");
2958     int err;
2959 
2960     if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2961         modmagic = NULL;
2962 
2963     /* This is allowed: modprobe --force will invalidate it. */
2964     if (!modmagic) {
2965         err = try_to_force_load(mod, "bad vermagic");
2966         if (err)
2967             return err;
2968     } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2969         pr_err("%s: version magic '%s' should be '%s'\n",
2970                mod->name, modmagic, vermagic);
2971         return -ENOEXEC;
2972     }
2973 
2974     if (!get_modinfo(info, "intree")) {
2975         if (!test_taint(TAINT_OOT_MODULE))
2976             pr_warn("%s: loading out-of-tree module taints kernel.\n",
2977                 mod->name);
2978         add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2979     }
2980 
2981     if (get_modinfo(info, "staging")) {
2982         add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2983         pr_warn("%s: module is from the staging directory, the quality is unknown, you have been warned.\n",
2984             mod->name);
2985     }
2986 
2987     err = check_modinfo_livepatch(mod, info);
2988     if (err)
2989         return err;
2990 
2991     /* Set up license info based on the info section */
2992     set_license(mod, get_modinfo(info, "license"));
2993 
2994     return 0;
2995 }
2996 
2997 static int find_module_sections(struct module *mod, struct load_info *info)
2998 {
2999     mod->kp = section_objs(info, "__param",
3000                    sizeof(*mod->kp), &mod->num_kp);
3001     mod->syms = section_objs(info, "__ksymtab",
3002                  sizeof(*mod->syms), &mod->num_syms);
3003     mod->crcs = section_addr(info, "__kcrctab");
3004     mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
3005                      sizeof(*mod->gpl_syms),
3006                      &mod->num_gpl_syms);
3007     mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
3008     mod->gpl_future_syms = section_objs(info,
3009                         "__ksymtab_gpl_future",
3010                         sizeof(*mod->gpl_future_syms),
3011                         &mod->num_gpl_future_syms);
3012     mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
3013 
3014 #ifdef CONFIG_UNUSED_SYMBOLS
3015     mod->unused_syms = section_objs(info, "__ksymtab_unused",
3016                     sizeof(*mod->unused_syms),
3017                     &mod->num_unused_syms);
3018     mod->unused_crcs = section_addr(info, "__kcrctab_unused");
3019     mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
3020                         sizeof(*mod->unused_gpl_syms),
3021                         &mod->num_unused_gpl_syms);
3022     mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
3023 #endif
3024 #ifdef CONFIG_CONSTRUCTORS
3025     mod->ctors = section_objs(info, ".ctors",
3026                   sizeof(*mod->ctors), &mod->num_ctors);
3027     if (!mod->ctors)
3028         mod->ctors = section_objs(info, ".init_array",
3029                 sizeof(*mod->ctors), &mod->num_ctors);
3030     else if (find_sec(info, ".init_array")) {
3031         /*
3032          * This shouldn't happen with the same compiler and binutils
3033          * building all parts of the module.
3034          */
3035         pr_warn("%s: has both .ctors and .init_array.\n",
3036                mod->name);
3037         return -EINVAL;
3038     }
3039 #endif
3040 
3041 #ifdef CONFIG_TRACEPOINTS
3042     mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
3043                          sizeof(*mod->tracepoints_ptrs),
3044                          &mod->num_tracepoints);
3045 #endif
3046 #ifdef HAVE_JUMP_LABEL
3047     mod->jump_entries = section_objs(info, "__jump_table",
3048                     sizeof(*mod->jump_entries),
3049                     &mod->num_jump_entries);
3050 #endif
3051 #ifdef CONFIG_EVENT_TRACING
3052     mod->trace_events = section_objs(info, "_ftrace_events",
3053                      sizeof(*mod->trace_events),
3054                      &mod->num_trace_events);
3055     mod->trace_enums = section_objs(info, "_ftrace_enum_map",
3056                     sizeof(*mod->trace_enums),
3057                     &mod->num_trace_enums);
3058 #endif
3059 #ifdef CONFIG_TRACING
3060     mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
3061                      sizeof(*mod->trace_bprintk_fmt_start),
3062                      &mod->num_trace_bprintk_fmt);
3063 #endif
3064 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
3065     /* sechdrs[0].sh_size is always zero */
3066     mod->ftrace_callsites = section_objs(info, "__mcount_loc",
3067                          sizeof(*mod->ftrace_callsites),
3068                          &mod->num_ftrace_callsites);
3069 #endif
3070 
3071     mod->extable = section_objs(info, "__ex_table",
3072                     sizeof(*mod->extable), &mod->num_exentries);
3073 
3074     if (section_addr(info, "__obsparm"))
3075         pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
3076 
3077     info->debug = section_objs(info, "__verbose",
3078                    sizeof(*info->debug), &info->num_debug);
3079 
3080     return 0;
3081 }
3082 
3083 static int move_module(struct module *mod, struct load_info *info)
3084 {
3085     int i;
3086     void *ptr;
3087 
3088     /* Do the allocs. */
3089     ptr = module_alloc(mod->core_layout.size);
3090     /*
3091      * The pointer to this block is stored in the module structure
3092      * which is inside the block. Just mark it as not being a
3093      * leak.
3094      */
3095     kmemleak_not_leak(ptr);
3096     if (!ptr)
3097         return -ENOMEM;
3098 
3099     memset(ptr, 0, mod->core_layout.size);
3100     mod->core_layout.base = ptr;
3101 
3102     if (mod->init_layout.size) {
3103         ptr = module_alloc(mod->init_layout.size);
3104         /*
3105          * The pointer to this block is stored in the module structure
3106          * which is inside the block. This block doesn't need to be
3107          * scanned as it contains data and code that will be freed
3108          * after the module is initialized.
3109          */
3110         kmemleak_ignore(ptr);
3111         if (!ptr) {
3112             module_memfree(mod->core_layout.base);
3113             return -ENOMEM;
3114         }
3115         memset(ptr, 0, mod->init_layout.size);
3116         mod->init_layout.base = ptr;
3117     } else
3118         mod->init_layout.base = NULL;
3119 
3120     /* Transfer each section which specifies SHF_ALLOC */
3121     pr_debug("final section addresses:\n");
3122     for (i = 0; i < info->hdr->e_shnum; i++) {
3123         void *dest;
3124         Elf_Shdr *shdr = &info->sechdrs[i];
3125 
3126         if (!(shdr->sh_flags & SHF_ALLOC))
3127             continue;
3128 
3129         if (shdr->sh_entsize & INIT_OFFSET_MASK)
3130             dest = mod->init_layout.base
3131                 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3132         else
3133             dest = mod->core_layout.base + shdr->sh_entsize;
3134 
3135         if (shdr->sh_type != SHT_NOBITS)
3136             memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3137         /* Update sh_addr to point to copy in image. */
3138         shdr->sh_addr = (unsigned long)dest;
3139         pr_debug("\t0x%lx %s\n",
3140              (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
3141     }
3142 
3143     return 0;
3144 }
3145 
3146 static int check_module_license_and_versions(struct module *mod)
3147 {
3148     int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
3149 
3150     /*
3151      * ndiswrapper is under GPL by itself, but loads proprietary modules.
3152      * Don't use add_taint_module(), as it would prevent ndiswrapper from
3153      * using GPL-only symbols it needs.
3154      */
3155     if (strcmp(mod->name, "ndiswrapper") == 0)
3156         add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3157 
3158     /* driverloader was caught wrongly pretending to be under GPL */
3159     if (strcmp(mod->name, "driverloader") == 0)
3160         add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3161                  LOCKDEP_NOW_UNRELIABLE);
3162 
3163     /* lve claims to be GPL but upstream won't provide source */
3164     if (strcmp(mod->name, "lve") == 0)
3165         add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3166                  LOCKDEP_NOW_UNRELIABLE);
3167 
3168     if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
3169         pr_warn("%s: module license taints kernel.\n", mod->name);
3170 
3171 #ifdef CONFIG_MODVERSIONS
3172     if ((mod->num_syms && !mod->crcs)
3173         || (mod->num_gpl_syms && !mod->gpl_crcs)
3174         || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3175 #ifdef CONFIG_UNUSED_SYMBOLS
3176         || (mod->num_unused_syms && !mod->unused_crcs)
3177         || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3178 #endif
3179         ) {
3180         return try_to_force_load(mod,
3181                      "no versions for exported symbols");
3182     }
3183 #endif
3184     return 0;
3185 }
3186 
3187 static void flush_module_icache(const struct module *mod)
3188 {
3189     mm_segment_t old_fs;
3190 
3191     /* flush the icache in correct context */
3192     old_fs = get_fs();
3193     set_fs(KERNEL_DS);
3194 
3195     /*
3196      * Flush the instruction cache, since we've played with text.
3197      * Do it before processing module parameters, so the module
3198      * can provide parameter accessor functions of its own.
3199      */
3200     if (mod->init_layout.base)
3201         flush_icache_range((unsigned long)mod->init_layout.base,
3202                    (unsigned long)mod->init_layout.base
3203                    + mod->init_layout.size);
3204     flush_icache_range((unsigned long)mod->core_layout.base,
3205                (unsigned long)mod->core_layout.base + mod->core_layout.size);
3206 
3207     set_fs(old_fs);
3208 }
3209 
3210 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3211                      Elf_Shdr *sechdrs,
3212                      char *secstrings,
3213                      struct module *mod)
3214 {
3215     return 0;
3216 }
3217 
3218 /* module_blacklist is a comma-separated list of module names */
3219 static char *module_blacklist;
3220 static bool blacklisted(char *module_name)
3221 {
3222     const char *p;
3223     size_t len;
3224 
3225     if (!module_blacklist)
3226         return false;
3227 
3228     for (p = module_blacklist; *p; p += len) {
3229         len = strcspn(p, ",");
3230         if (strlen(module_name) == len && !memcmp(module_name, p, len))
3231             return true;
3232         if (p[len] == ',')
3233             len++;
3234     }
3235     return false;
3236 }
3237 core_param(module_blacklist, module_blacklist, charp, 0400);
3238 
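/*
 * Example: booting with module_blacklist=usbcore,8250 makes
 * blacklisted("8250") true but blacklisted("8250_pci") false; a match
 * must cover an entire comma-separated token, never a prefix.
 * Blacklisted modules are rejected with -EPERM in
 * layout_and_allocate() below.
 */
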
3239 static struct module *layout_and_allocate(struct load_info *info, int flags)
3240 {
3241     /* Module within temporary copy. */
3242     struct module *mod;
3243     unsigned int ndx;
3244     int err;
3245 
3246     mod = setup_load_info(info, flags);
3247     if (IS_ERR(mod))
3248         return mod;
3249 
3250     if (blacklisted(mod->name))
3251         return ERR_PTR(-EPERM);
3252 
3253     err = check_modinfo(mod, info, flags);
3254     if (err)
3255         return ERR_PTR(err);
3256 
3257     /* Allow arches to frob section contents and sizes.  */
3258     err = module_frob_arch_sections(info->hdr, info->sechdrs,
3259                     info->secstrings, mod);
3260     if (err < 0)
3261         return ERR_PTR(err);
3262 
3263     /* We will do a special allocation for per-cpu sections later. */
3264     info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
3265 
3266     /*
3267      * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
3268      * layout_sections() can put it in the right place.
3269      * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
3270      */
3271     ndx = find_sec(info, ".data..ro_after_init");
3272     if (ndx)
3273         info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3274 
3275     /* Determine total sizes, and put offsets in sh_entsize.  For now
3276        this is done generically; there don't appear to be any
3277        special cases for the architectures. */
3278     layout_sections(mod, info);
3279     layout_symtab(mod, info);
3280 
3281     /* Allocate and move to the final place */
3282     err = move_module(mod, info);
3283     if (err)
3284         return ERR_PTR(err);
3285 
3286     /* Module has been copied to its final place now: return it. */
3287     mod = (void *)info->sechdrs[info->index.mod].sh_addr;
3288     kmemleak_load_module(mod, info);
3289     return mod;
3290 }
3291 
3292 /* mod is no longer valid after this! */
3293 static void module_deallocate(struct module *mod, struct load_info *info)
3294 {
3295     percpu_modfree(mod);
3296     module_arch_freeing_init(mod);
3297     module_memfree(mod->init_layout.base);
3298     module_memfree(mod->core_layout.base);
3299 }
3300 
3301 int __weak module_finalize(const Elf_Ehdr *hdr,
3302                const Elf_Shdr *sechdrs,
3303                struct module *me)
3304 {
3305     return 0;
3306 }
3307 
3308 static int post_relocation(struct module *mod, const struct load_info *info)
3309 {
3310     /* Sort exception table now relocations are done. */
3311     sort_extable(mod->extable, mod->extable + mod->num_exentries);
3312 
3313     /* Copy relocated percpu area over. */
3314     percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3315                info->sechdrs[info->index.pcpu].sh_size);
3316 
3317     /* Setup kallsyms-specific fields. */
3318     add_kallsyms(mod, info);
3319 
3320     /* Arch-specific module finalizing. */
3321     return module_finalize(info->hdr, info->sechdrs, mod);
3322 }
3323 
3324 /* Is the module with this name done loading?  Called with no locks held. */
3325 static bool finished_loading(const char *name)
3326 {
3327     struct module *mod;
3328     bool ret;
3329 
3330     /*
3331      * The module_mutex should not be a heavily contended lock;
3332      * if we get the occasional sleep here, we'll go an extra iteration
3333      * in the wait_event_interruptible(), which is harmless.
3334      */
3335     sched_annotate_sleep();
3336     mutex_lock(&module_mutex);
3337     mod = find_module_all(name, strlen(name), true);
3338     ret = !mod || mod->state == MODULE_STATE_LIVE
3339         || mod->state == MODULE_STATE_GOING;
3340     mutex_unlock(&module_mutex);
3341 
3342     return ret;
3343 }
3344 
3345 /* Call module constructors. */
3346 static void do_mod_ctors(struct module *mod)
3347 {
3348 #ifdef CONFIG_CONSTRUCTORS
3349     unsigned long i;
3350 
3351     for (i = 0; i < mod->num_ctors; i++)
3352         mod->ctors[i]();
3353 #endif
3354 }
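/*
 * Illustrative sketch (not part of this file): with CONFIG_CONSTRUCTORS=y,
 * a GCC constructor inside a module, e.g.
 *
 *	static void __attribute__((constructor)) my_ctor(void)
 *	{
 *		pr_info("runs from do_mod_ctors(), before mod->init()\n");
 *	}
 *
 * is collected into mod->ctors[] during section parsing and invoked by
 * the loop above.  The function name and message are hypothetical.
 */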
3355 
3356 /* For freeing module_init on success, in case kallsyms is still traversing it */
3357 struct mod_initfree {
3358     struct rcu_head rcu;
3359     void *module_init;
3360 };
3361 
3362 static void do_free_init(struct rcu_head *head)
3363 {
3364     struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3365     module_memfree(m->module_init);
3366     kfree(m);
3367 }
3368 
3369 /*
3370  * This is where the real work happens.
3371  *
3372  * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3373  * helper command 'lx-symbols'.
3374  */
3375 static noinline int do_init_module(struct module *mod)
3376 {
3377     int ret = 0;
3378     struct mod_initfree *freeinit;
3379 
3380     freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3381     if (!freeinit) {
3382         ret = -ENOMEM;
3383         goto fail;
3384     }
3385     freeinit->module_init = mod->init_layout.base;
3386 
3387     /*
3388      * We want to find out whether @mod uses async during init.  Clear
3389      * PF_USED_ASYNC.  async_schedule*() will set it.
3390      */
3391     current->flags &= ~PF_USED_ASYNC;
3392 
3393     do_mod_ctors(mod);
3394     /* Start the module */
3395     if (mod->init != NULL)
3396         ret = do_one_initcall(mod->init);
3397     if (ret < 0) {
3398         goto fail_free_freeinit;
3399     }
3400     if (ret > 0) {
3401         pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3402             "follow 0/-E convention\n"
3403             "%s: loading module anyway...\n",
3404             __func__, mod->name, ret, __func__);
3405         dump_stack();
3406     }
3407 
3408     /* Now it's a first class citizen! */
3409     mod->state = MODULE_STATE_LIVE;
3410     blocking_notifier_call_chain(&module_notify_list,
3411                      MODULE_STATE_LIVE, mod);
3412 
3413     /*
3414      * We need to finish all async code before the module init sequence
3415      * is done.  This has potential to deadlock.  For example, a newly
3416      * detected block device can trigger request_module() of the
3417      * default iosched from async probing task.  Once userland helper
3418      * reaches here, async_synchronize_full() will wait on the async
3419      * task waiting on request_module() and deadlock.
3420      *
3421      * This deadlock is avoided by performing async_synchronize_full()
3422      * iff module init queued any async jobs.  This isn't a full
3423      * solution as it will deadlock the same if module loading from
3424      * async jobs nests more than once; however, due to the various
3425      * constraints, this hack seems to be the best option for now.
3426      * Please refer to the following thread for details.
3427      *
3428      * http://thread.gmane.org/gmane.linux.kernel/1420814
3429      */
3430     if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
3431         async_synchronize_full();
3432 
3433     mutex_lock(&module_mutex);
3434     /* Drop initial reference. */
3435     module_put(mod);
3436     trim_init_extable(mod);
3437 #ifdef CONFIG_KALLSYMS
3438     /* Switch to core kallsyms now init is done: kallsyms may be walking! */
3439     rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3440 #endif
3441     module_enable_ro(mod, true);
3442     mod_tree_remove_init(mod);
3443     disable_ro_nx(&mod->init_layout);
3444     module_arch_freeing_init(mod);
3445     mod->init_layout.base = NULL;
3446     mod->init_layout.size = 0;
3447     mod->init_layout.ro_size = 0;
3448     mod->init_layout.ro_after_init_size = 0;
3449     mod->init_layout.text_size = 0;
3450     /*
3451      * We want to free module_init, but be aware that kallsyms may be
3452      * walking this with preempt disabled.  In all the failure paths, we
3453      * call synchronize_sched(), but we don't want to slow down the success
3454      * path, so use actual RCU here.
3455      */
3456     call_rcu_sched(&freeinit->rcu, do_free_init);
3457     mutex_unlock(&module_mutex);
3458     wake_up_all(&module_wq);
3459 
3460     return 0;
3461 
3462 fail_free_freeinit:
3463     kfree(freeinit);
3464 fail:
3465     /* Try to protect us from buggy refcounters. */
3466     mod->state = MODULE_STATE_GOING;
3467     synchronize_sched();
3468     module_put(mod);
3469     blocking_notifier_call_chain(&module_notify_list,
3470                      MODULE_STATE_GOING, mod);
3471     klp_module_going(mod);
3472     ftrace_release_mod(mod);
3473     free_module(mod);
3474     wake_up_all(&module_wq);
3475     return ret;
3476 }
3477 
3478 static int may_init_module(void)
3479 {
3480     if (!capable(CAP_SYS_MODULE) || modules_disabled)
3481         return -EPERM;
3482 
3483     return 0;
3484 }
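/*
 * For example, writing 1 to /proc/sys/kernel/modules_disabled (or lacking
 * CAP_SYS_MODULE) makes both init_module(2) and finit_module(2) below
 * fail with -EPERM.
 */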
3485 
3486 /*
3487  * We try to place it in the list now to make sure it's unique before
3488  * we dedicate too many resources; in particular, this avoids exhausting
3489  * temporary percpu memory.
3490  */
3491 static int add_unformed_module(struct module *mod)
3492 {
3493     int err;
3494     struct module *old;
3495 
3496     mod->state = MODULE_STATE_UNFORMED;
3497 
3498 again:
3499     mutex_lock(&module_mutex);
3500     old = find_module_all(mod->name, strlen(mod->name), true);
3501     if (old != NULL) {
3502         if (old->state == MODULE_STATE_COMING
3503             || old->state == MODULE_STATE_UNFORMED) {
3504             /* Wait in case it fails to load. */
3505             mutex_unlock(&module_mutex);
3506             err = wait_event_interruptible(module_wq,
3507                            finished_loading(mod->name));
3508             if (err)
3509                 goto out_unlocked;
3510             goto again;
3511         }
3512         err = -EEXIST;
3513         goto out;
3514     }
3515     mod_update_bounds(mod);
3516     list_add_rcu(&mod->list, &modules);
3517     mod_tree_insert(mod);
3518     err = 0;
3519 
3520 out:
3521     mutex_unlock(&module_mutex);
3522 out_unlocked:
3523     return err;
3524 }
3525 
3526 static int complete_formation(struct module *mod, struct load_info *info)
3527 {
3528     int err;
3529 
3530     mutex_lock(&module_mutex);
3531 
3532     /* Find duplicate symbols (must be called under lock). */
3533     err = verify_export_symbols(mod);
3534     if (err < 0)
3535         goto out;
3536 
3537     /* This relies on module_mutex for list integrity. */
3538     module_bug_finalize(info->hdr, info->sechdrs, mod);
3539 
3540     module_enable_ro(mod, false);
3541     module_enable_nx(mod);
3542 
3543     /* Mark state as coming so strong_try_module_get() ignores us,
3544      * but kallsyms etc. can see us. */
3545     mod->state = MODULE_STATE_COMING;
3546     mutex_unlock(&module_mutex);
3547 
3548     return 0;
3549 
3550 out:
3551     mutex_unlock(&module_mutex);
3552     return err;
3553 }
3554 
3555 static int prepare_coming_module(struct module *mod)
3556 {
3557     int err;
3558 
3559     ftrace_module_enable(mod);
3560     err = klp_module_coming(mod);
3561     if (err)
3562         return err;
3563 
3564     blocking_notifier_call_chain(&module_notify_list,
3565                      MODULE_STATE_COMING, mod);
3566     return 0;
3567 }
3568 
3569 static int unknown_module_param_cb(char *param, char *val, const char *modname,
3570                    void *arg)
3571 {
3572     struct module *mod = arg;
3573     int ret;
3574 
3575     if (strcmp(param, "async_probe") == 0) {
3576         mod->async_probe_requested = true;
3577         return 0;
3578     }
3579 
3580     /* Check for magic 'dyndbg' arg */
3581     ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3582     if (ret != 0)
3583         pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3584     return 0;
3585 }
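/*
 * Example (hypothetical module name): "modprobe foo async_probe" sets
 * async_probe_requested via the callback above, and "modprobe foo dyndbg=+p"
 * is consumed by ddebug_dyndbg_module_param_cb(); any other unknown
 * parameter is merely warned about and then ignored.
 */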
3586 
3587 /* Allocate and load the module: note that the size of section 0 is always
3588    zero, and we rely on this for optional sections. */
3589 static int load_module(struct load_info *info, const char __user *uargs,
3590                int flags)
3591 {
3592     struct module *mod;
3593     long err;
3594     char *after_dashes;
3595 
3596     err = module_sig_check(info, flags);
3597     if (err)
3598         goto free_copy;
3599 
3600     err = elf_header_check(info);
3601     if (err)
3602         goto free_copy;
3603 
3604     /* Figure out module layout, and allocate all the memory. */
3605     mod = layout_and_allocate(info, flags);
3606     if (IS_ERR(mod)) {
3607         err = PTR_ERR(mod);
3608         goto free_copy;
3609     }
3610 
3611     /* Reserve our place in the list. */
3612     err = add_unformed_module(mod);
3613     if (err)
3614         goto free_module;
3615 
3616 #ifdef CONFIG_MODULE_SIG
3617     mod->sig_ok = info->sig_ok;
3618     if (!mod->sig_ok) {
3619         pr_notice_once("%s: module verification failed: signature "
3620                    "and/or required key missing - tainting "
3621                    "kernel\n", mod->name);
3622         add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3623     }
3624 #endif
3625 
3626     /* To avoid stressing the percpu allocator, do this once we're unique. */
3627     err = percpu_modalloc(mod, info);
3628     if (err)
3629         goto unlink_mod;
3630 
3631     /* Now module is in final location, initialize linked lists, etc. */
3632     err = module_unload_init(mod);
3633     if (err)
3634         goto unlink_mod;
3635 
3636     init_param_lock(mod);
3637 
3638     /* Now that everything is in its final location, we can
3639      * find the optional sections. */
3640     err = find_module_sections(mod, info);
3641     if (err)
3642         goto free_unload;
3643 
3644     err = check_module_license_and_versions(mod);
3645     if (err)
3646         goto free_unload;
3647 
3648     /* Set up MODINFO_ATTR fields */
3649     setup_modinfo(mod, info);
3650 
3651     /* Fix up syms, so that st_value is a pointer to location. */
3652     err = simplify_symbols(mod, info);
3653     if (err < 0)
3654         goto free_modinfo;
3655 
3656     err = apply_relocations(mod, info);
3657     if (err < 0)
3658         goto free_modinfo;
3659 
3660     err = post_relocation(mod, info);
3661     if (err < 0)
3662         goto free_modinfo;
3663 
3664     flush_module_icache(mod);
3665 
3666     /* Now copy in args */
3667     mod->args = strndup_user(uargs, ~0UL >> 1);
3668     if (IS_ERR(mod->args)) {
3669         err = PTR_ERR(mod->args);
3670         goto free_arch_cleanup;
3671     }
3672 
3673     dynamic_debug_setup(info->debug, info->num_debug);
3674 
3675     /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3676     ftrace_module_init(mod);
3677 
3678     /* Finally it's fully formed, ready to start executing. */
3679     err = complete_formation(mod, info);
3680     if (err)
3681         goto ddebug_cleanup;
3682 
3683     err = prepare_coming_module(mod);
3684     if (err)
3685         goto bug_cleanup;
3686 
3687     /* Module is ready to execute: parsing args may do that. */
3688     after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3689                   -32768, 32767, mod,
3690                   unknown_module_param_cb);
3691     if (IS_ERR(after_dashes)) {
3692         err = PTR_ERR(after_dashes);
3693         goto coming_cleanup;
3694     } else if (after_dashes) {
3695         pr_warn("%s: parameters '%s' after `--' ignored\n",
3696                mod->name, after_dashes);
3697     }
3698 
3699     /* Link into sysfs. */
3700     err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3701     if (err < 0)
3702         goto coming_cleanup;
3703 
3704     if (is_livepatch_module(mod)) {
3705         err = copy_module_elf(mod, info);
3706         if (err < 0)
3707             goto sysfs_cleanup;
3708     }
3709 
3710     /* Get rid of temporary copy. */
3711     free_copy(info);
3712 
3713     /* Done! */
3714     trace_module_load(mod);
3715 
3716     return do_init_module(mod);
3717 
3718  sysfs_cleanup:
3719     mod_sysfs_teardown(mod);
3720  coming_cleanup:
3721     mod->state = MODULE_STATE_GOING;
3722     blocking_notifier_call_chain(&module_notify_list,
3723                      MODULE_STATE_GOING, mod);
3724     klp_module_going(mod);
3725  bug_cleanup:
3726     /* module_bug_cleanup needs module_mutex protection */
3727     mutex_lock(&module_mutex);
3728     module_bug_cleanup(mod);
3729     mutex_unlock(&module_mutex);
3730 
3731     /* we can't deallocate the module until we clear memory protection */
3732     module_disable_ro(mod);
3733     module_disable_nx(mod);
3734 
3735  ddebug_cleanup:
3736     dynamic_debug_remove(info->debug);
3737     synchronize_sched();
3738     kfree(mod->args);
3739  free_arch_cleanup:
3740     module_arch_cleanup(mod);
3741  free_modinfo:
3742     free_modinfo(mod);
3743  free_unload:
3744     module_unload_free(mod);
3745  unlink_mod:
3746     mutex_lock(&module_mutex);
3747     /* Unlink carefully: kallsyms could be walking list. */
3748     list_del_rcu(&mod->list);
3749     mod_tree_remove(mod);
3750     wake_up_all(&module_wq);
3751     /* Wait for RCU-sched synchronizing before releasing mod->list. */
3752     synchronize_sched();
3753     mutex_unlock(&module_mutex);
3754  free_module:
3755     /*
3756      * Ftrace needs to clean up what it initialized.
3757      * This does nothing if ftrace_module_init() wasn't called,
3758      * but it must be called outside of module_mutex.
3759      */
3760     ftrace_release_mod(mod);
3761     /* Free lock-classes; relies on the preceding sync_rcu() */
3762     lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
3763 
3764     module_deallocate(mod, info);
3765  free_copy:
3766     free_copy(info);
3767     return err;
3768 }
3769 
3770 SYSCALL_DEFINE3(init_module, void __user *, umod,
3771         unsigned long, len, const char __user *, uargs)
3772 {
3773     int err;
3774     struct load_info info = { };
3775 
3776     err = may_init_module();
3777     if (err)
3778         return err;
3779 
3780     pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3781            umod, len, uargs);
3782 
3783     err = copy_module_from_user(umod, len, &info);
3784     if (err)
3785         return err;
3786 
3787     return load_module(&info, uargs, 0);
3788 }
3789 
3790 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3791 {
3792     struct load_info info = { };
3793     loff_t size;
3794     void *hdr;
3795     int err;
3796 
3797     err = may_init_module();
3798     if (err)
3799         return err;
3800 
3801     pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3802 
3803     if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3804               |MODULE_INIT_IGNORE_VERMAGIC))
3805         return -EINVAL;
3806 
3807     err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
3808                        READING_MODULE);
3809     if (err)
3810         return err;
3811     info.hdr = hdr;
3812     info.len = size;
3813 
3814     return load_module(&info, uargs, flags);
3815 }
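/*
 * Illustrative userspace call (assumed path and parameters, not part of
 * this file):
 *
 *	int fd = open("/lib/modules/4.x/kernel/fs/foo.ko", O_RDONLY);
 *	syscall(__NR_finit_module, fd, "param=1", 0);
 *
 * This is essentially what modern modprobe/insmod do; the kernel reads the
 * whole file via kernel_read_file_from_fd() and then runs load_module().
 */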
3816 
3817 static inline int within(unsigned long addr, void *start, unsigned long size)
3818 {
3819     return ((void *)addr >= start && (void *)addr < start + size);
3820 }
3821 
3822 #ifdef CONFIG_KALLSYMS
3823 /*
3824  * This ignores the intensely annoying "mapping symbols" found in ARM
3825  * ($a, $t, $d) and AArch64 ($x) ELF files, plus ".L" local labels.
3826  */
3827 static inline int is_arm_mapping_symbol(const char *str)
3828 {
3829     if (str[0] == '.' && str[1] == 'L')
3830         return true;
3831     return str[0] == '$' && strchr("axtd", str[1])
3832            && (str[2] == '\0' || str[2] == '.');
3833 }
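/*
 * For example, symbols named "$a", "$d.1", "$x" or ".LBB4_2" (illustrative
 * names) are rejected here and therefore never reported by get_ksymbol().
 */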
3834 
3835 static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
3836 {
3837     return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
3838 }
3839 
3840 static const char *get_ksymbol(struct module *mod,
3841                    unsigned long addr,
3842                    unsigned long *size,
3843                    unsigned long *offset)
3844 {
3845     unsigned int i, best = 0;
3846     unsigned long nextval;
3847     struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3848 
3849     /* At worst, the next value is at the end of the module */
3850     if (within_module_init(addr, mod))
3851         nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
3852     else
3853         nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
3854 
3855     /* Scan for closest preceding symbol, and next symbol. (ELF
3856        starts real symbols at 1). */
3857     for (i = 1; i < kallsyms->num_symtab; i++) {
3858         if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
3859             continue;
3860 
3861         /* We ignore unnamed symbols: they're uninformative
3862          * and inserted at a whim. */
3863         if (*symname(kallsyms, i) == '\0'
3864             || is_arm_mapping_symbol(symname(kallsyms, i)))
3865             continue;
3866 
3867         if (kallsyms->symtab[i].st_value <= addr
3868             && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
3869             best = i;
3870         if (kallsyms->symtab[i].st_value > addr
3871             && kallsyms->symtab[i].st_value < nextval)
3872             nextval = kallsyms->symtab[i].st_value;
3873     }
3874 
3875     if (!best)
3876         return NULL;
3877 
3878     if (size)
3879         *size = nextval - kallsyms->symtab[best].st_value;
3880     if (offset)
3881         *offset = addr - kallsyms->symtab[best].st_value;
3882     return symname(kallsyms, best);
3883 }
3884 
3885 /* For kallsyms to ask for address resolution.  NULL means not found.  We
3886  * avoid locking (it could deadlock during an oops); just disable preemption. */
3887 const char *module_address_lookup(unsigned long addr,
3888                 unsigned long *size,
3889                 unsigned long *offset,
3890                 char **modname,
3891                 char *namebuf)
3892 {
3893     const char *ret = NULL;
3894     struct module *mod;
3895 
3896     preempt_disable();
3897     mod = __module_address(addr);
3898     if (mod) {
3899         if (modname)
3900             *modname = mod->name;
3901         ret = get_ksymbol(mod, addr, size, offset);
3902     }
3903     /* Make a copy in here where it's safe */
3904     if (ret) {
3905         strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3906         ret = namebuf;
3907     }
3908     preempt_enable();
3909 
3910     return ret;
3911 }
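/*
 * This is what lets an oops trace or a printk("%pS", addr) render a module
 * address as, say, "foo_open+0x10/0x80 [foo]" (hypothetical symbol and
 * module names) instead of a bare hexadecimal value.
 */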
3912 
3913 int lookup_module_symbol_name(unsigned long addr, char *symname)
3914 {
3915     struct module *mod;
3916 
3917     preempt_disable();
3918     list_for_each_entry_rcu(mod, &modules, list) {
3919         if (mod->state == MODULE_STATE_UNFORMED)
3920             continue;
3921         if (within_module(addr, mod)) {
3922             const char *sym;
3923 
3924             sym = get_ksymbol(mod, addr, NULL, NULL);
3925             if (!sym)
3926                 goto out;
3927             strlcpy(symname, sym, KSYM_NAME_LEN);
3928             preempt_enable();
3929             return 0;
3930         }
3931     }
3932 out:
3933     preempt_enable();
3934     return -ERANGE;
3935 }
3936 
3937 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3938             unsigned long *offset, char *modname, char *name)
3939 {
3940     struct module *mod;
3941 
3942     preempt_disable();
3943     list_for_each_entry_rcu(mod, &modules, list) {
3944         if (mod->state == MODULE_STATE_UNFORMED)
3945             continue;
3946         if (within_module(addr, mod)) {
3947             const char *sym;
3948 
3949             sym = get_ksymbol(mod, addr, size, offset);
3950             if (!sym)
3951                 goto out;
3952             if (modname)
3953                 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3954             if (name)
3955                 strlcpy(name, sym, KSYM_NAME_LEN);
3956             preempt_enable();
3957             return 0;
3958         }
3959     }
3960 out:
3961     preempt_enable();
3962     return -ERANGE;
3963 }
3964 
3965 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3966             char *name, char *module_name, int *exported)
3967 {
3968     struct module *mod;
3969 
3970     preempt_disable();
3971     list_for_each_entry_rcu(mod, &modules, list) {
3972         struct mod_kallsyms *kallsyms;
3973 
3974         if (mod->state == MODULE_STATE_UNFORMED)
3975             continue;
3976         kallsyms = rcu_dereference_sched(mod->kallsyms);
3977         if (symnum < kallsyms->num_symtab) {
3978             *value = kallsyms->symtab[symnum].st_value;
3979             *type = kallsyms->symtab[symnum].st_info;
3980             strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
3981             strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3982             *exported = is_exported(name, *value, mod);
3983             preempt_enable();
3984             return 0;
3985         }
3986         symnum -= kallsyms->num_symtab;
3987     }
3988     preempt_enable();
3989     return -ERANGE;
3990 }
3991 
3992 static unsigned long mod_find_symname(struct module *mod, const char *name)
3993 {
3994     unsigned int i;
3995     struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3996 
3997     for (i = 0; i < kallsyms->num_symtab; i++)
3998         if (strcmp(name, symname(kallsyms, i)) == 0 &&
3999             kallsyms->symtab[i].st_info != 'U')
4000             return kallsyms->symtab[i].st_value;
4001     return 0;
4002 }
4003 
4004 /* Look for this name: can be of form module:name. */
4005 unsigned long module_kallsyms_lookup_name(const char *name)
4006 {
4007     struct module *mod;
4008     char *colon;
4009     unsigned long ret = 0;
4010 
4011     /* Don't lock: we're in enough trouble already. */
4012     preempt_disable();
4013     if ((colon = strchr(name, ':')) != NULL) {
4014         if ((mod = find_module_all(name, colon - name, false)) != NULL)
4015             ret = mod_find_symname(mod, colon+1);
4016     } else {
4017         list_for_each_entry_rcu(mod, &modules, list) {
4018             if (mod->state == MODULE_STATE_UNFORMED)
4019                 continue;
4020             if ((ret = mod_find_symname(mod, name)) != 0)
4021                 break;
4022         }
4023     }
4024     preempt_enable();
4025     return ret;
4026 }
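/*
 * Illustrative lookups (hypothetical names): "e1000:e1000_open" searches
 * only the e1000 module via find_module_all(), while a bare "foo_func"
 * walks every module until mod_find_symname() returns non-zero.
 */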
4027 
4028 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
4029                          struct module *, unsigned long),
4030                    void *data)
4031 {
4032     struct module *mod;
4033     unsigned int i;
4034     int ret;
4035 
4036     module_assert_mutex();
4037 
4038     list_for_each_entry(mod, &modules, list) {
4039         /* We hold module_mutex: no need for rcu_dereference_sched */
4040         struct mod_kallsyms *kallsyms = mod->kallsyms;
4041 
4042         if (mod->state == MODULE_STATE_UNFORMED)
4043             continue;
4044         for (i = 0; i < kallsyms->num_symtab; i++) {
4045             ret = fn(data, symname(kallsyms, i),
4046                  mod, kallsyms->symtab[i].st_value);
4047             if (ret != 0)
4048                 return ret;
4049         }
4050     }
4051     return 0;
4052 }
4053 #endif /* CONFIG_KALLSYMS */
4054 
4055 /* Maximum number of characters written by module_flags() */
4056 #define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
4057 
4058 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
4059 static char *module_flags(struct module *mod, char *buf)
4060 {
4061     int bx = 0;
4062 
4063     BUG_ON(mod->state == MODULE_STATE_UNFORMED);
4064     if (mod->taints ||
4065         mod->state == MODULE_STATE_GOING ||
4066         mod->state == MODULE_STATE_COMING) {
4067         buf[bx++] = '(';
4068         bx += module_flags_taint(mod, buf + bx);
4069         /* Show a - for module-is-being-unloaded */
4070         if (mod->state == MODULE_STATE_GOING)
4071             buf[bx++] = '-';
4072         /* Show a + for module-is-being-loaded */
4073         if (mod->state == MODULE_STATE_COMING)
4074             buf[bx++] = '+';
4075         buf[bx++] = ')';
4076     }
4077     buf[bx] = '\0';
4078 
4079     return buf;
4080 }
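/*
 * For example, a proprietary out-of-tree module that is currently being
 * unloaded would be shown as "(PO-)": taint characters first, then '-'
 * for going / '+' for coming.  (Illustrative taint combination.)
 */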
4081 
4082 #ifdef CONFIG_PROC_FS
4083 /* Called by the /proc file system to return a list of modules. */
4084 static void *m_start(struct seq_file *m, loff_t *pos)
4085 {
4086     mutex_lock(&module_mutex);
4087     return seq_list_start(&modules, *pos);
4088 }
4089 
4090 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
4091 {
4092     return seq_list_next(p, &modules, pos);
4093 }
4094 
4095 static void m_stop(struct seq_file *m, void *p)
4096 {
4097     mutex_unlock(&module_mutex);
4098 }
4099 
4100 static int m_show(struct seq_file *m, void *p)
4101 {
4102     struct module *mod = list_entry(p, struct module, list);
4103     char buf[MODULE_FLAGS_BUF_SIZE];
4104 
4105     /* We always ignore unformed modules. */
4106     if (mod->state == MODULE_STATE_UNFORMED)
4107         return 0;
4108 
4109     seq_printf(m, "%s %u",
4110            mod->name, mod->init_layout.size + mod->core_layout.size);
4111     print_unload_info(m, mod);
4112 
4113     /* Informative for users. */
4114     seq_printf(m, " %s",
4115            mod->state == MODULE_STATE_GOING ? "Unloading" :
4116            mod->state == MODULE_STATE_COMING ? "Loading" :
4117            "Live");
4118     /* Used by oprofile and other similar tools. */
4119     seq_printf(m, " 0x%pK", mod->core_layout.base);
4120 
4121     /* Taints info */
4122     if (mod->taints)
4123         seq_printf(m, " %s", module_flags(mod, buf));
4124 
4125     seq_puts(m, "\n");
4126     return 0;
4127 }
4128 
4129 /* Format: modulename size refcount deps address
4130 
4131    Where refcount is a number or -, and deps is a comma-separated list
4132    of depends or -.
4133 */
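/*
 * An illustrative /proc/modules line (made-up values):
 *
 *	foo 16384 2 bar,baz, Live 0xffffffffc0123000
 */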
4134 static const struct seq_operations modules_op = {
4135     .start  = m_start,
4136     .next   = m_next,
4137     .stop   = m_stop,
4138     .show   = m_show
4139 };
4140 
4141 static int modules_open(struct inode *inode, struct file *file)
4142 {
4143     return seq_open(file, &modules_op);
4144 }
4145 
4146 static const struct file_operations proc_modules_operations = {
4147     .open       = modules_open,
4148     .read       = seq_read,
4149     .llseek     = seq_lseek,
4150     .release    = seq_release,
4151 };
4152 
4153 static int __init proc_modules_init(void)
4154 {
4155     proc_create("modules", 0, NULL, &proc_modules_operations);
4156     return 0;
4157 }
4158 module_init(proc_modules_init);
4159 #endif
4160 
4161 /* Given an address, look for it in the module exception tables. */
4162 const struct exception_table_entry *search_module_extables(unsigned long addr)
4163 {
4164     const struct exception_table_entry *e = NULL;
4165     struct module *mod;
4166 
4167     preempt_disable();
4168     list_for_each_entry_rcu(mod, &modules, list) {
4169         if (mod->state == MODULE_STATE_UNFORMED)
4170             continue;
4171         if (mod->num_exentries == 0)
4172             continue;
4173 
4174         e = search_extable(mod->extable,
4175                    mod->extable + mod->num_exentries - 1,
4176                    addr);
4177         if (e)
4178             break;
4179     }
4180     preempt_enable();
4181 
4182     /* If we found one, we are currently running inside it, so we
4183        cannot unload the module, hence no refcount is needed. */
4184     return e;
4185 }
4186 
4187 /*
4188  * is_module_address - is this address inside a module?
4189  * @addr: the address to check.
4190  *
4191  * See is_module_text_address() if you simply want to see if the address
4192  * is code (not data).
4193  */
4194 bool is_module_address(unsigned long addr)
4195 {
4196     bool ret;
4197 
4198     preempt_disable();
4199     ret = __module_address(addr) != NULL;
4200     preempt_enable();
4201 
4202     return ret;
4203 }
4204 
4205 /*
4206  * __module_address - get the module which contains an address.
4207  * @addr: the address.
4208  *
4209  * Must be called with preempt disabled or module mutex held so that
4210  * module doesn't get freed during this.
4211  */
4212 struct module *__module_address(unsigned long addr)
4213 {
4214     struct module *mod;
4215 
4216     if (addr < module_addr_min || addr > module_addr_max)
4217         return NULL;
4218 
4219     module_assert_mutex_or_preempt();
4220 
4221     mod = mod_find(addr);
4222     if (mod) {
4223         BUG_ON(!within_module(addr, mod));
4224         if (mod->state == MODULE_STATE_UNFORMED)
4225             mod = NULL;
4226     }
4227     return mod;
4228 }
4229 EXPORT_SYMBOL_GPL(__module_address);
4230 
4231 /*
4232  * is_module_text_address - is this address inside module code?
4233  * @addr: the address to check.
4234  *
4235  * See is_module_address() if you simply want to see if the address is
4236  * anywhere in a module.  See kernel_text_address() for testing if an
4237  * address corresponds to kernel or module code.
4238  */
4239 bool is_module_text_address(unsigned long addr)
4240 {
4241     bool ret;
4242 
4243     preempt_disable();
4244     ret = __module_text_address(addr) != NULL;
4245     preempt_enable();
4246 
4247     return ret;
4248 }
4249 
4250 /*
4251  * __module_text_address - get the module whose code contains an address.
4252  * @addr: the address.
4253  *
4254  * Must be called with preempt disabled or module mutex held so that
4255  * module doesn't get freed during this.
4256  */
4257 struct module *__module_text_address(unsigned long addr)
4258 {
4259     struct module *mod = __module_address(addr);
4260     if (mod) {
4261         /* Make sure it's within the text section. */
4262         if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
4263             && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
4264             mod = NULL;
4265     }
4266     return mod;
4267 }
4268 EXPORT_SYMBOL_GPL(__module_text_address);
4269 
4270 /* Don't grab lock, we're oopsing. */
4271 void print_modules(void)
4272 {
4273     struct module *mod;
4274     char buf[MODULE_FLAGS_BUF_SIZE];
4275 
4276     printk(KERN_DEFAULT "Modules linked in:");
4277     /* Most callers should already have preempt disabled, but make sure */
4278     preempt_disable();
4279     list_for_each_entry_rcu(mod, &modules, list) {
4280         if (mod->state == MODULE_STATE_UNFORMED)
4281             continue;
4282         pr_cont(" %s%s", mod->name, module_flags(mod, buf));
4283     }
4284     preempt_enable();
4285     if (last_unloaded_module[0])
4286         pr_cont(" [last unloaded: %s]", last_unloaded_module);
4287     pr_cont("\n");
4288 }
4289 
4290 #ifdef CONFIG_MODVERSIONS
4291 /* Generate the signature for all relevant module structures here.
4292  * If these change, we don't want to try to parse the module. */
4293 void module_layout(struct module *mod,
4294            struct modversion_info *ver,
4295            struct kernel_param *kp,
4296            struct kernel_symbol *ks,
4297            struct tracepoint * const *tp)
4298 {
4299 }
4300 EXPORT_SYMBOL(module_layout);
4301 #endif