// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
*/
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);

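/*
 * Hedged sketch (illustration only, not part of this file): a typical
 * text-patching user takes text_mutex around the modification, shown
 * here with the x86 text_poke() helper as an example:
 *
 *        mutex_lock(&text_mutex);
 *        text_poke(addr, new_insn, len);
 *        mutex_unlock(&text_mutex);
 *
 * In-kernel users such as kprobes follow this pattern.
 */
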
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build-time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
    if (main_extable_sort_needed &&
        &__stop___ex_table > &__start___ex_table) {
        pr_notice("Sorting __ex_table...\n");
        sort_extable(__start___ex_table, __stop___ex_table);
    }
}

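/*
 * Hedged illustration (x86 shown as an example, not part of this
 * file): entries land in __ex_table via inline-asm annotations around
 * instructions that may fault, e.g.
 *
 *        asm volatile("1: movl (%[uaddr]), %[val]\n"
 *                     "2:\n"
 *                     _ASM_EXTABLE_UA(1b, 2b)
 *                     : [val] "=r" (val)
 *                     : [uaddr] "r" (uaddr));
 *
 * The table must be sorted by faulting address so search_extable()
 * can binary-search it; scripts/sorttable normally sorts it at build
 * time and clears main_extable_sort_needed, leaving sort_main_extable()
 * as the run-time fallback.
 */
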
/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
    return search_extable(__start___ex_table,
                          __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
    const struct exception_table_entry *e;

    e = search_kernel_exception_table(addr);
    if (!e)
        e = search_module_extables(addr);
    if (!e)
        e = search_bpf_extables(addr);
    return e;
}

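/*
 * Hedged sketch of the consumer side (roughly what an architecture's
 * fixup_exception() does; extable_fixup() is illustrative, the exact
 * accessor is arch-specific): on a kernel-mode fault, the handler
 * asks whether the faulting instruction has a registered fixup and,
 * if so, branches there instead of oopsing.
 *
 *        const struct exception_table_entry *e;
 *
 *        e = search_exception_tables(instruction_pointer(regs));
 *        if (e) {
 *                instruction_pointer_set(regs, extable_fixup(e));
 *                return 1;  // fault handled
 *        }
 *        return 0;          // genuine kernel bug, oops
 */
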
int notrace core_kernel_text(unsigned long addr)
{
    if (is_kernel_text(addr))
        return 1;

    if (system_state < SYSTEM_FREEING_INITMEM &&
        is_kernel_inittext(addr))
        return 1;
    return 0;
}

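/*
 * Hedged illustration of the SYSTEM_FREEING_INITMEM check above
 * (some_init_fn stands for any __init function; the name is made up):
 * once initmem is freed, .init.text pages are handed back to the page
 * allocator, so an address that used to be init text may hold
 * arbitrary data afterwards:
 *
 *        core_kernel_text((unsigned long)some_init_fn);  // 1 during boot
 *        // ...free_initmem() runs, system_state moves on...
 *        core_kernel_text((unsigned long)some_init_fn);  // now 0
 */
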
int __kernel_text_address(unsigned long addr)
{
    if (kernel_text_address(addr))
        return 1;
    /*
     * There might be init symbols in saved stacktraces.
     * Give those symbols a chance to be printed in
     * backtraces (such as lockdep traces).
     *
     * Since we are after the module-symbols check, there's
     * no danger of address overlap:
     */
    if (is_kernel_inittext(addr))
        return 1;
    return 0;
}

int kernel_text_address(unsigned long addr)
{
    bool no_rcu;
    int ret = 1;

    if (core_kernel_text(addr))
        return 1;

    /*
     * If a stack dump happens while RCU is not watching, then
     * RCU needs to be notified that it has to start watching
     * again. This can happen either via tracing that triggers
     * a stack trace, or via a WARN() issued while coming back
     * from idle, or while a CPU is being onlined or offlined.
     *
     * is_module_text_address() as well as the kprobe slots,
     * is_bpf_text_address() and is_bpf_image_address() require
     * RCU to be watching.
     */
    no_rcu = !rcu_is_watching();

    /* Treat this like an NMI as it can happen anywhere */
    if (no_rcu)
        ct_nmi_enter();

    if (is_module_text_address(addr))
        goto out;
    if (is_ftrace_trampoline(addr))
        goto out;
    if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
        goto out;
    if (is_bpf_text_address(addr))
        goto out;
    ret = 0;
out:
    if (no_rcu)
        ct_nmi_exit();

    return ret;
}

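/*
 * Hedged sketch of a typical caller (modelled on how simple stack
 * unwinders scan a stack; stack/stack_end are illustrative): only
 * words that look like executable kernel text are treated as
 * plausible return addresses.
 *
 *        unsigned long *sp = stack;
 *
 *        while (sp < stack_end) {
 *                unsigned long word = *sp++;
 *
 *                if (__kernel_text_address(word))
 *                        printk("%pS\n", (void *)word);  // plausible frame
 *        }
 */
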
/*
 * On some architectures (PPC64, IA64, PARISC) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
void *dereference_function_descriptor(void *ptr)
{
    func_desc_t *desc = ptr;
    void *p;

    if (!get_kernel_nofault(p, (void *)&desc->addr))
        ptr = p;
    return ptr;
}
EXPORT_SYMBOL_GPL(dereference_function_descriptor);

void *dereference_kernel_function_descriptor(void *ptr)
{
    if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
        return ptr;

    return dereference_function_descriptor(ptr);
}
#endif

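/*
 * Hedged illustration (layout shown for PPC64 ELFv1 as an example):
 * on descriptor ABIs a function pointer refers to an entry in the
 * .opd section rather than to code, roughly:
 *
 *        typedef struct {
 *                unsigned long addr;  // actual entry point in .text
 *                unsigned long toc;   // TOC base for the callee
 *                unsigned long env;   // environment pointer (unused by C)
 *        } func_desc_t;
 *
 * Hence dereference_function_descriptor() loads ->addr before the
 * pointer can be compared against kernel text boundaries.
 */
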
int func_ptr_is_kernel_text(void *ptr)
{
    unsigned long addr;

    addr = (unsigned long) dereference_function_descriptor(ptr);
    if (core_kernel_text(addr))
        return 1;
    return is_module_text_address(addr);
}
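
/*
 * Hedged usage sketch (some_ops->handler is made up): this is the
 * helper to use when validating a possibly-descriptor function
 * pointer, e.g. before trusting a registered callback:
 *
 *        if (!func_ptr_is_kernel_text(some_ops->handler))
 *                return -EINVAL;  // not in kernel or module text
 */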