// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
    u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
    gfp_t gfp_mask = GFP_KERNEL;
    void *p;

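    /*
     * module_alloc_base is the (possibly KASLR-randomized) base of the
     * module region. The first attempt below is confined to a
     * MODULES_VSIZE window where direct branches to the kernel are
     * expected to be in range; when module PLTs are available, a wider
     * 2 GiB fallback window is tried afterwards, since PLT veneers can
     * fix up any out-of-range branches.
     */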
    /*
     * Silence warnings from the initial allocation: when module PLTs
     * are enabled, a failure here is recoverable via the fallback
     * allocation below.
     */
    if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
        gfp_mask |= __GFP_NOWARN;

    if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
        IS_ENABLED(CONFIG_KASAN_SW_TAGS))
        /* don't exceed the static module region - see below */
        module_alloc_end = MODULES_END;

    p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
                NUMA_NO_NODE, __builtin_return_address(0));

    if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
        (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
         (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
          !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
        /*
         * KASAN without KASAN_VMALLOC can only deal with module
         * allocations being served from the reserved module region,
         * since the remainder of the vmalloc region is already
         * backed by zero shadow pages, and punching holes into it
         * is non-trivial. Since the module region is not randomized
         * when KASAN is enabled without KASAN_VMALLOC, it is even
         * less likely that the module region gets exhausted, so we
         * can simply omit this fallback in that case.
         */
        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                module_alloc_base + SZ_2G, GFP_KERNEL,
                PAGE_KERNEL, 0, NUMA_NO_NODE,
                __builtin_return_address(0));

    if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
        vfree(p);
        return NULL;
    }

    /* Memory is intended to be executable, reset the pointer tag. */
    return kasan_reset_tag(p);
}

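/*
 * Relocation operations, matching the expressions in the AArch64 ELF
 * psABI: ABS computes S + A, PREL computes S + A - P, and PAGE computes
 * Page(S + A) - Page(P), where Page(x) clears the bottom 12 bits of x.
 */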
enum aarch64_reloc_op {
    RELOC_OP_NONE,
    RELOC_OP_ABS,
    RELOC_OP_PREL,
    RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
    switch (reloc_op) {
    case RELOC_OP_ABS:
        return val;
    case RELOC_OP_PREL:
        return val - (u64)place;
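    /*
     * Worked example for RELOC_OP_PAGE: with place == 0xffff800008001234
     * and val == 0xffff800008005678, the result is
     * 0xffff800008005000 - 0xffff800008001000 == 0x4000, the byte
     * distance between the two 4 KiB pages; ADRP relocations then shift
     * this right by 12 in reloc_insn_imm().
     */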
    case RELOC_OP_PAGE:
        return (val & ~0xfff) - ((u64)place & ~0xfff);
    case RELOC_OP_NONE:
        return 0;
    }

    pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
    return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
    s64 sval = do_reloc(op, place, val);

    /*
     * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
     * relative and absolute relocations as having a range of [-2^15, 2^16)
     * or [-2^31, 2^32), respectively. However, in order to be able to
     * detect overflows reliably, we have to choose whether we interpret
     * such quantities as signed or as unsigned, and stick with it.
     * The way we organize our address space requires a signed
     * interpretation of 32-bit relative references, so let's use that
     * for all R_AARCH64_PRELxx relocations. This means our upper
     * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
     */
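    /*
     * For example, a 32-bit place-relative offset of exactly 2^31 lies
     * inside the psABI's documented [-2^31, 2^32) range, but is rejected
     * below because it is not representable as an s32.
     */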

    switch (len) {
    case 16:
        *(s16 *)place = sval;
        switch (op) {
        case RELOC_OP_ABS:
            if (sval < 0 || sval > U16_MAX)
                return -ERANGE;
            break;
        case RELOC_OP_PREL:
            if (sval < S16_MIN || sval > S16_MAX)
                return -ERANGE;
            break;
        default:
            pr_err("Invalid 16-bit data relocation (%d)\n", op);
            return 0;
        }
        break;
    case 32:
        *(s32 *)place = sval;
        switch (op) {
        case RELOC_OP_ABS:
            if (sval < 0 || sval > U32_MAX)
                return -ERANGE;
            break;
        case RELOC_OP_PREL:
            if (sval < S32_MIN || sval > S32_MAX)
                return -ERANGE;
            break;
        default:
            pr_err("Invalid 32-bit data relocation (%d)\n", op);
            return 0;
        }
        break;
    case 64:
        *(s64 *)place = sval;
        break;
    default:
        pr_err("Invalid length (%d) for data relocation\n", len);
        return 0;
    }
    return 0;
}

enum aarch64_insn_movw_imm_type {
    AARCH64_INSN_IMM_MOVNZ,
    AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
               int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
    u64 imm;
    s64 sval;
    u32 insn = le32_to_cpu(*place);

    sval = do_reloc(op, place, val);
    imm = sval >> lsb;

    if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
        /*
         * For signed MOVW relocations, we have to manipulate the
         * instruction encoding depending on whether or not the
         * immediate is less than zero.
         */
        insn &= ~(3 << 29);
        if (sval >= 0) {
            /* >=0: Set the instruction to MOVZ (opcode 10b). */
            insn |= 2 << 29;
        } else {
            /*
             * <0: Set the instruction to MOVN (opcode 00b).
             *     Since we've masked the opcode already, we
             *     don't need to do anything other than
             *     inverting the new immediate field.
             */
            imm = ~imm;
        }
    }
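    /*
     * Worked example: resolving a signed G0 relocation against
     * sval == -3 gives imm == 0xfffffffffffffffd; inverting yields
     * imm == 0x2, and the resulting MOVN materialises ~0x2 == -3 as
     * required.
     */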

    /* Update the instruction with the new encoding. */
    insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
    *place = cpu_to_le32(insn);

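    /*
     * As in reloc_data(), the instruction is patched before the range
     * check: apply_relocate_add() decides via overflow_check whether
     * an -ERANGE result is fatal, so the _NC (no overflow check)
     * relocations can ignore it.
     */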
    if (imm > U16_MAX)
        return -ERANGE;

    return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
              int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
    u64 imm, imm_mask;
    s64 sval;
    u32 insn = le32_to_cpu(*place);

    /* Calculate the relocation value. */
    sval = do_reloc(op, place, val);
    sval >>= lsb;

    /* Extract the value bits and shift them to bit 0. */
    imm_mask = (BIT(lsb + len) - 1) >> lsb;
    imm = sval & imm_mask;

    /* Update the instruction's immediate field. */
    insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
    *place = cpu_to_le32(insn);

    /*
     * Extract the upper value bits (including the sign bit) and
     * shift them to bit 0.
     */
    sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

    /*
     * Overflow has occurred if the upper bits are not all equal to
     * the sign bit of the value.
     */
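    /*
     * For example, with len == 19 any value in [-2^18, 2^18) folds to
     * 0 or -1 above, so the check below flags exactly the values that
     * do not fit in a signed 19-bit immediate.
     */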
    if ((u64)(sval + 1) >= 2)
        return -ERANGE;

    return 0;
}

static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
               __le32 *place, u64 val)
{
    u32 insn;

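    /*
     * On parts affected by Cortex-A53 erratum 843419, an ADRP in the
     * last two instruction slots of a 4 KiB page (offset 0xff8 or
     * 0xffc) can produce a corrupt result; is_forbidden_offset_for_adrp()
     * flags those slots so that an ADR or a veneer is used instead.
     */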
    if (!is_forbidden_offset_for_adrp(place))
        return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
                      AARCH64_INSN_IMM_ADR);

    /* patch ADRP to ADR if it is in range */
    if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
                AARCH64_INSN_IMM_ADR)) {
        insn = le32_to_cpu(*place);
        insn &= ~BIT(31);
    } else {
        /* out of range for ADR -> emit a veneer */
        val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
        if (!val)
            return -ENOEXEC;
        insn = aarch64_insn_gen_branch_imm((u64)place, val,
                           AARCH64_INSN_BRANCH_NOLINK);
    }

    *place = cpu_to_le32(insn);
    return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
               const char *strtab,
               unsigned int symindex,
               unsigned int relsec,
               struct module *me)
{
    unsigned int i;
    int ovf;
    bool overflow_check;
    Elf64_Sym *sym;
    void *loc;
    u64 val;
    Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

    for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
        /* loc corresponds to P in the AArch64 ELF document. */
        loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
            + rel[i].r_offset;

        /* sym is the ELF symbol we're referring to. */
        sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
            + ELF64_R_SYM(rel[i].r_info);

        /* val corresponds to (S + A) in the AArch64 ELF document. */
        val = sym->st_value + rel[i].r_addend;

        /* Check for overflow by default. */
        overflow_check = true;

        /* Perform the static relocation. */
        switch (ELF64_R_TYPE(rel[i].r_info)) {
        /* Null relocations. */
        case R_ARM_NONE:
        case R_AARCH64_NONE:
            ovf = 0;
            break;

        /* Data relocations. */
        case R_AARCH64_ABS64:
            overflow_check = false;
            ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
            break;
        case R_AARCH64_ABS32:
            ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
            break;
        case R_AARCH64_ABS16:
            ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
            break;
        case R_AARCH64_PREL64:
            overflow_check = false;
            ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
            break;
        case R_AARCH64_PREL32:
            ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
            break;
        case R_AARCH64_PREL16:
            ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
            break;

        /* MOVW instruction relocations. */
        case R_AARCH64_MOVW_UABS_G0_NC:
            overflow_check = false;
            fallthrough;
        case R_AARCH64_MOVW_UABS_G0:
            ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                          AARCH64_INSN_IMM_MOVKZ);
            break;
        case R_AARCH64_MOVW_UABS_G1_NC:
            overflow_check = false;
            fallthrough;
        case R_AARCH64_MOVW_UABS_G1:
            ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                          AARCH64_INSN_IMM_MOVKZ);
            break;
        case R_AARCH64_MOVW_UABS_G2_NC:
            overflow_check = false;
            fallthrough;
        case R_AARCH64_MOVW_UABS_G2:
            ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                          AARCH64_INSN_IMM_MOVKZ);
            break;
        case R_AARCH64_MOVW_UABS_G3:
            /* We're using the top bits so we can't overflow. */
            overflow_check = false;
            ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
                          AARCH64_INSN_IMM_MOVKZ);
            break;
        case R_AARCH64_MOVW_SABS_G0:
            ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                          AARCH64_INSN_IMM_MOVNZ);
            break;
        case R_AARCH64_MOVW_SABS_G1:
            ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                          AARCH64_INSN_IMM_MOVNZ);
            break;
        case R_AARCH64_MOVW_SABS_G2:
            ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                          AARCH64_INSN_IMM_MOVNZ);
            break;
        case R_AARCH64_MOVW_PREL_G0_NC:
            overflow_check = false;
            ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                          AARCH64_INSN_IMM_MOVKZ);
            break;
        case R_AARCH64_MOVW_PREL_G0:
            ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                          AARCH64_INSN_IMM_MOVNZ);
            break;
        case R_AARCH64_MOVW_PREL_G1_NC:
            overflow_check = false;
            ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                          AARCH64_INSN_IMM_MOVKZ);
            break;
        case R_AARCH64_MOVW_PREL_G1:
            ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                          AARCH64_INSN_IMM_MOVNZ);
            break;
        case R_AARCH64_MOVW_PREL_G2_NC:
            overflow_check = false;
            ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                          AARCH64_INSN_IMM_MOVKZ);
            break;
        case R_AARCH64_MOVW_PREL_G2:
            ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                          AARCH64_INSN_IMM_MOVNZ);
            break;
        case R_AARCH64_MOVW_PREL_G3:
            /* We're using the top bits so we can't overflow. */
            overflow_check = false;
            ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
                          AARCH64_INSN_IMM_MOVNZ);
            break;

        /* Immediate instruction relocations. */
        case R_AARCH64_LD_PREL_LO19:
            ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                         AARCH64_INSN_IMM_19);
            break;
        case R_AARCH64_ADR_PREL_LO21:
            ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                         AARCH64_INSN_IMM_ADR);
            break;
        case R_AARCH64_ADR_PREL_PG_HI21_NC:
            overflow_check = false;
            fallthrough;
        case R_AARCH64_ADR_PREL_PG_HI21:
            ovf = reloc_insn_adrp(me, sechdrs, loc, val);
            if (ovf && ovf != -ERANGE)
                return ovf;
            break;
        case R_AARCH64_ADD_ABS_LO12_NC:
        case R_AARCH64_LDST8_ABS_LO12_NC:
            overflow_check = false;
            ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
                         AARCH64_INSN_IMM_12);
            break;
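        /*
         * The remaining LDSTxx_ABS_LO12 relocations encode a 12-bit,
         * size-scaled unsigned offset: each doubling of the access
         * size drops one more low bit (which must be zero for an
         * aligned access), hence lsb grows as len shrinks below.
         */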
        case R_AARCH64_LDST16_ABS_LO12_NC:
            overflow_check = false;
            ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
                         AARCH64_INSN_IMM_12);
            break;
        case R_AARCH64_LDST32_ABS_LO12_NC:
            overflow_check = false;
            ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
                         AARCH64_INSN_IMM_12);
            break;
        case R_AARCH64_LDST64_ABS_LO12_NC:
            overflow_check = false;
            ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
                         AARCH64_INSN_IMM_12);
            break;
        case R_AARCH64_LDST128_ABS_LO12_NC:
            overflow_check = false;
            ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
                         AARCH64_INSN_IMM_12);
            break;
        case R_AARCH64_TSTBR14:
            ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
                         AARCH64_INSN_IMM_14);
            break;
        case R_AARCH64_CONDBR19:
            ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                         AARCH64_INSN_IMM_19);
            break;
        case R_AARCH64_JUMP26:
        case R_AARCH64_CALL26:
            ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                         AARCH64_INSN_IMM_26);

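            /*
             * B and BL encode a 26-bit immediate scaled by 4, giving
             * a reach of +/-128 MiB. If the target is further away
             * and module PLTs are enabled, emit a PLT veneer and
             * retarget the branch at it.
             */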
            if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
                ovf == -ERANGE) {
                val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
                if (!val)
                    return -ENOEXEC;
                ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
                             26, AARCH64_INSN_IMM_26);
            }
            break;

        default:
            pr_err("module %s: unsupported RELA relocation: %llu\n",
                   me->name, ELF64_R_TYPE(rel[i].r_info));
            return -ENOEXEC;
        }

        if (overflow_check && ovf == -ERANGE)
            goto overflow;

    }

    return 0;

overflow:
    pr_err("module %s: overflow in relocation type %d val %Lx\n",
           me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
    return -ENOEXEC;
}

static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    const char *name)
{
    const Elf_Shdr *s, *se;
    const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

    for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
        if (strcmp(name, secstrs + s->sh_name) == 0)
            return s;
    }

    return NULL;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
    *plt = get_plt_entry(addr, plt);
}

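/*
 * With module PLTs and dynamic ftrace enabled, each module carries a
 * ".text.ftrace_trampoline" section; the PLT entries initialised here
 * give ftrace branch targets that are within range of the call sites it
 * patches inside the module.
 */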
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
                  const Elf_Shdr *sechdrs,
                  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
    const Elf_Shdr *s;
    struct plt_entry *plts;

    s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
    if (!s)
        return -ENOEXEC;

    plts = (void *)s->sh_addr;

    __init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

    if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
        __init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);

    mod->arch.ftrace_trampolines = plts;
#endif
    return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
            const Elf_Shdr *sechdrs,
            struct module *me)
{
    const Elf_Shdr *s;
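    /*
     * ".altinstructions" records instruction sequences to be patched
     * according to the CPU capabilities detected at boot; apply them
     * now that the module sits at its final address.
     */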
    s = find_section(hdr, sechdrs, ".altinstructions");
    if (s)
        apply_alternatives_module((void *)s->sh_addr, s->sh_size);

    return module_init_ftrace_plt(hdr, sechdrs, me);
}