/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CODE_PATCHING_H
#define _ASM_POWERPC_CODE_PATCHING_H

/*
 * Copyright 2008, Michael Ellerman, IBM Corporation.
 */

#include <asm/types.h>
#include <asm/ppc-opcode.h>
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
#include <asm/inst.h>

/* Flags for create_branch:
 * "b"   == create_branch(addr, target, 0);
 * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
 * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
 * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
 */
#define BRANCH_SET_LINK 0x1
#define BRANCH_ABSOLUTE 0x2

DECLARE_STATIC_KEY_FALSE(init_mem_is_free);

/*
 * The powerpc branch instruction is:
 *
 *  0         6                 30   31
 *  +---------+----------------+---+---+
 *  | opcode  |     LI         |AA |LK |
 *  +---------+----------------+---+---+
 *  where AA = 0 and LK = 0 for a plain relative branch.
 *
 * LI is a signed 24-bit integer. The real branch offset is computed
 * by: imm32 = SignExtend(LI:'0b00', 32);
 *
 * So the maximum forward branch is:
 *   (0x007fffff << 2) = 0x01fffffc
 * and the maximum backward branch is:
 *   (0xff800000 << 2) = 0xfe000000 = -0x02000000
 */
static inline bool is_offset_in_branch_range(long offset)
{
    return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

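/*
 * Conditional (B-form) branches encode their displacement in a signed
 * 16-bit, word-aligned field (a 14-bit BD value with two implied zero
 * bits), so the reachable range is only about +/- 32 KB.
 */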
static inline bool is_offset_in_cond_branch_range(long offset)
{
    return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}
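
/*
 * Worked examples (illustrative, derived from the checks above):
 * is_offset_in_branch_range(0x01fffffc) and (-0x02000000) are true,
 * while 0x02000000 is one word out of range; the largest offset
 * is_offset_in_cond_branch_range() accepts is 0x7ffc. Offsets that
 * are not a multiple of 4 are always rejected, since instructions
 * are word aligned.
 */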

static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
                unsigned long target, int flags)
{
    long offset;

    *instr = ppc_inst(0);
    offset = target;
    if (!(flags & BRANCH_ABSOLUTE))
        offset = offset - (unsigned long)addr;

    /* Check we can represent the target in the instruction format */
    if (!is_offset_in_branch_range(offset))
        return 1;

    /* Mask out the flags and target, so they don't step on each other. */
    *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));

    return 0;
}
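
/*
 * Usage sketch (illustrative; not taken from a particular caller):
 *
 *	ppc_inst_t instr;
 *
 *	if (!create_branch(&instr, addr, target, BRANCH_SET_LINK))
 *		patch_instruction(addr, instr);
 *
 * builds and patches in a relative "bl" to target, provided the offset
 * fits in the I-form range checked above. For example, with
 * target == (unsigned long)addr + 0x100 the encoded word is
 * 0x48000000 | BRANCH_SET_LINK | 0x100 == 0x48000101.
 */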

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
               unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
int patch_instruction(u32 *addr, ppc_inst_t instr);
int raw_patch_instruction(u32 *addr, ppc_inst_t instr);

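/*
 * A patch site stores the location to patch as a self-relative s32
 * offset: adding that offset to the address of the site itself gives
 * the absolute address of the instruction to modify.
 */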
static inline unsigned long patch_site_addr(s32 *site)
{
    return (unsigned long)site + *site;
}

static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
    return patch_instruction((u32 *)patch_site_addr(site), instr);
}

static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
    return patch_branch((u32 *)patch_site_addr(site), target, flags);
}

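/*
 * Read-modify-write helpers: clear the @clr bits and set the @set bits
 * in the instruction at @addr (or at the patch site), then patch the
 * result back.
 */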
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
                     unsigned int set)
{
    return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}

static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
    return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}

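/*
 * Primary opcode 18 is the I-form unconditional branch (b/ba/bl/bla);
 * primary opcode 16 is the B-form conditional branch (bc and friends).
 */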
static inline unsigned int branch_opcode(ppc_inst_t instr)
{
    return ppc_inst_primary_opcode(instr) & 0x3F;
}

static inline int instr_is_branch_iform(ppc_inst_t instr)
{
    return branch_opcode(instr) == 18;
}

static inline int instr_is_branch_bform(ppc_inst_t instr)
{
    return branch_opcode(instr) == 16;
}

int instr_is_relative_branch(ppc_inst_t instr);
int instr_is_relative_link_branch(ppc_inst_t instr);
unsigned long branch_target(const u32 *instr);
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
bool is_conditional_branch(ppc_inst_t instr);

#define OP_RT_RA_MASK   0xffff0000UL
#define LIS_R2          (PPC_RAW_LIS(_R2, 0))
#define ADDIS_R2_R12    (PPC_RAW_ADDIS(_R2, _R12, 0))
#define ADDI_R2_R2      (PPC_RAW_ADDI(_R2, _R2, 0))

static inline unsigned long ppc_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
    u32 *insn = func;

    /*
     * A PPC64 ABIv2 function may have a local and a global entry
     * point. We need to use the local entry point when patching
     * functions, so identify and step over the global entry point
     * sequence.
     *
     * The global entry point sequence is always of the form:
     *
     * addis r2,r12,XXXX
     * addi  r2,r2,XXXX
     *
     * A linker optimisation may convert the addis to lis:
     *
     * lis   r2,XXXX
     * addi  r2,r2,XXXX
     */
    if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
         ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
        ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
        return (unsigned long)(insn + 2);
    else
        return (unsigned long)func;
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
    /*
     * On PPC64 ABIv1 the function pointer actually points to the
     * function's descriptor. The first entry in the descriptor is the
     * address of the function text.
     */
    return ((struct func_desc *)func)->addr;
#else
    return (unsigned long)func;
#endif
}
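
/*
 * Example (illustrative): on ELFv2, a function that starts with the
 * global entry sequence above has its local entry point two
 * instructions (8 bytes) in, so ppc_function_entry() returns func + 8
 * for it; otherwise func is returned unchanged.
 */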

static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
    /* On PPC64 ABIv2 the global entry point is at the function's address */
    return (unsigned long)func;
#else
    /* In all other cases there is no difference from ppc_function_entry() */
    return ppc_function_entry(func);
#endif
}

/*
 * Wrapper around kallsyms_lookup_name() to return the function entry address:
 * - For ABIv1, we look up the dot variant.
 * - For ABIv2, we return the local entry point.
 */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
    unsigned long addr;
#ifdef CONFIG_PPC64_ELF_ABI_V1
    /* check for dot variant */
    char dot_name[1 + KSYM_NAME_LEN];
    bool dot_appended = false;

    if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
        return 0;

    if (name[0] != '.') {
        dot_name[0] = '.';
        dot_name[1] = '\0';
        strlcat(dot_name, name, sizeof(dot_name));
        dot_appended = true;
    } else {
        dot_name[0] = '\0';
        strlcat(dot_name, name, sizeof(dot_name));
    }
    addr = kallsyms_lookup_name(dot_name);
    if (!addr && dot_appended)
        /* Let's try the original non-dot symbol lookup */
        addr = kallsyms_lookup_name(name);
#elif defined(CONFIG_PPC64_ELF_ABI_V2)
    addr = kallsyms_lookup_name(name);
    if (addr)
        addr = ppc_function_entry((void *)addr);
#else
    addr = kallsyms_lookup_name(name);
#endif
    return addr;
}
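
/*
 * Example (illustrative; symbol name chosen arbitrarily): on ABIv1,
 * ppc_kallsyms_lookup_name("schedule") first tries the ".schedule"
 * text symbol and falls back to "schedule"; on ABIv2 it looks up
 * "schedule" and then steps to the local entry point via
 * ppc_function_entry().
 */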

/*
 * Some instruction encodings commonly used by dynamic ftrace
 * and function live patching.
 */

/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
#ifdef CONFIG_PPC64_ELF_ABI_V2
#define R2_STACK_OFFSET         24
#else
#define R2_STACK_OFFSET         40
#endif

#define PPC_INST_LD_TOC     PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)

/* usually preceded by a mflr r0 */
#define PPC_INST_STD_LR     PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
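
/*
 * Context (illustrative, typical code sequences only): PPC_INST_LD_TOC
 * matches the "ld r2,R2_STACK_OFFSET(r1)" that reloads the TOC pointer
 * after a cross-module call, and PPC_INST_STD_LR matches the LR save in
 * a -pg call site such as:
 *
 *	mflr	r0
 *	std	r0,PPC_LR_STKOFF(r1)
 *	bl	_mcount
 */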

#endif /* _ASM_POWERPC_CODE_PATCHING_H */