#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/sections.h>

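/* Serializes kernel text patching, in particular use of the text-poke fixmap. */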
static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

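/*
 * Kernel image text is mapped read-only, so it cannot be written through
 * its normal mapping. Map the page containing @addr through a temporary
 * writable fixmap slot instead: image text is resolved via its physical
 * address, module text via vmalloc_to_page(). Without
 * CONFIG_STRICT_MODULE_RWX, module text is writable in place and @addr is
 * returned unchanged.
 */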
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

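/* Tear down the temporary mapping established by patch_map(). */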
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are
 * always little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

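/*
 * Write a single instruction through the FIX_TEXT_POKE0 alias, with
 * patch_lock held (IRQs off) to serialize use of the fixmap slot.
 */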
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

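/*
 * Patch a single instruction without synchronizing other CPUs. The write
 * is followed by cache maintenance to the Point of Unification, but the
 * caller must guarantee that CPUs concurrently executing this code can
 * safely observe either the old or the new instruction.
 */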
int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		caches_clean_inval_pou((uintptr_t)tp,
				       (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

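/* Work description handed to every CPU by aarch64_insn_patch_text(). */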
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

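/*
 * stop_machine() callback: the last CPU to arrive becomes the master and
 * applies all of the patches while the other CPUs spin; once the master
 * releases them with a final increment, the waiters execute an ISB so
 * their pipelines pick up the new instructions.
 */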
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The last CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

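/*
 * Patch @cnt instructions under stop_machine(), ensuring no other CPU is
 * executing the affected text while it is being rewritten.
 */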
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
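
/*
 * Hypothetical usage sketch: replace one instruction with a NOP while
 * every other CPU is held in the stop_machine() rendezvous above,
 * assuming aarch64_insn_gen_nop() from <asm/insn.h>:
 *
 *	void *addr = ...;	(address of the instruction to patch)
 *	u32 nop = aarch64_insn_gen_nop();
 *	int err = aarch64_insn_patch_text(&addr, &nop, 1);
 */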