Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * jump label x86 support
0004  *
0005  * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
0006  *
0007  */
0008 #include <linux/jump_label.h>
0009 #include <linux/memory.h>
0010 #include <linux/uaccess.h>
0011 #include <linux/module.h>
0012 #include <linux/list.h>
0013 #include <linux/jhash.h>
0014 #include <linux/cpu.h>
0015 #include <asm/kprobes.h>
0016 #include <asm/alternative.h>
0017 #include <asm/text-patching.h>
0018 #include <asm/insn.h>
0019 
0020 int arch_jump_entry_size(struct jump_entry *entry)
0021 {
0022     struct insn insn = {};
0023 
0024     insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
0025     BUG_ON(insn.length != 2 && insn.length != 5);
0026 
0027     return insn.length;
0028 }
0029 
/*
 * A fully-formed patch for one jump-label site: the replacement
 * instruction bytes and how many of them to write.
 */
struct jump_label_patch {
	const void *code;	/* bytes to poke into the text */
	int size;		/* instruction length (JMP8 or JMP32 size) */
};
0034 
0035 static struct jump_label_patch
0036 __jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
0037 {
0038     const void *expect, *code, *nop;
0039     const void *addr, *dest;
0040     int size;
0041 
0042     addr = (void *)jump_entry_code(entry);
0043     dest = (void *)jump_entry_target(entry);
0044 
0045     size = arch_jump_entry_size(entry);
0046     switch (size) {
0047     case JMP8_INSN_SIZE:
0048         code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
0049         nop = x86_nops[size];
0050         break;
0051 
0052     case JMP32_INSN_SIZE:
0053         code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
0054         nop = x86_nops[size];
0055         break;
0056 
0057     default: BUG();
0058     }
0059 
0060     if (type == JUMP_LABEL_JMP)
0061         expect = nop;
0062     else
0063         expect = code;
0064 
0065     if (memcmp(addr, expect, size)) {
0066         /*
0067          * The location is not an op that we were expecting.
0068          * Something went wrong. Crash the box, as something could be
0069          * corrupting the kernel.
0070          */
0071         pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
0072                 addr, addr, addr, expect, size, type);
0073         BUG();
0074     }
0075 
0076     if (type == JUMP_LABEL_NOP)
0077         code = nop;
0078 
0079     return (struct jump_label_patch){.code = code, .size = size};
0080 }
0081 
0082 static __always_inline void
0083 __jump_label_transform(struct jump_entry *entry,
0084                enum jump_label_type type,
0085                int init)
0086 {
0087     const struct jump_label_patch jlp = __jump_label_patch(entry, type);
0088 
0089     /*
0090      * As long as only a single processor is running and the code is still
0091      * not marked as RO, text_poke_early() can be used; Checking that
0092      * system_state is SYSTEM_BOOTING guarantees it. It will be set to
0093      * SYSTEM_SCHEDULING before other cores are awaken and before the
0094      * code is write-protected.
0095      *
0096      * At the time the change is being done, just ignore whether we
0097      * are doing nop -> jump or jump -> nop transition, and assume
0098      * always nop being the 'currently valid' instruction
0099      */
0100     if (init || system_state == SYSTEM_BOOTING) {
0101         text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
0102         return;
0103     }
0104 
0105     text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
0106 }
0107 
/*
 * Locked wrapper around __jump_label_transform(): all kernel text
 * patching must be serialized by text_mutex.
 */
static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}
0116 
/*
 * Arch hook for the generic jump-label code: immediately patch a single
 * entry (non-init path, init == 0).
 */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}
0122 
0123 bool arch_jump_label_transform_queue(struct jump_entry *entry,
0124                      enum jump_label_type type)
0125 {
0126     struct jump_label_patch jlp;
0127 
0128     if (system_state == SYSTEM_BOOTING) {
0129         /*
0130          * Fallback to the non-batching mode.
0131          */
0132         arch_jump_label_transform(entry, type);
0133         return true;
0134     }
0135 
0136     mutex_lock(&text_mutex);
0137     jlp = __jump_label_patch(entry, type);
0138     text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
0139     mutex_unlock(&text_mutex);
0140     return true;
0141 }
0142 
/*
 * Flush all patches queued by arch_jump_label_transform_queue(),
 * serialized against other text patchers by text_mutex.
 */
void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}