// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/module.c
 *
 *  Copyright (C) 2002 Russell King.
 *  Modified for nommu by Hyok S. Choi
 *
 * Module allocation method suggested by Andi Kleen.
 */
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
#include <asm/opcodes.h>

#ifdef CONFIG_XIP_KERNEL
/*
 * The XIP kernel text is mapped in the module area for modules and
 * some other stuff to work without any indirect relocations.
 * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
 */
#undef MODULES_VADDR
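/*
 * Rounds up to the next PMD boundary (assumption: _exiprom marks the end
 * of the XIP kernel ROM image, so the module area starts just above it).
 */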
#define MODULES_VADDR   (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
#endif

#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
    gfp_t gfp_mask = GFP_KERNEL;
    void *p;

    /* Silence the initial allocation */
    if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
        gfp_mask |= __GFP_NOWARN;

    p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
                gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                __builtin_return_address(0));
    if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
        return p;
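    /*
     * With CONFIG_ARM_MODULE_PLTS, fall back to the full vmalloc area;
     * branches that end up out of range are routed through PLT entries.
     */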
    return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                __builtin_return_address(0));
}
#endif

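/*
 * The ARM unwind sections that describe init code are treated as init
 * sections too, so they live in (and are freed with) the module's init
 * region.
 */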
bool module_init_section(const char *name)
{
    return strstarts(name, ".init") ||
        strstarts(name, ".ARM.extab.init") ||
        strstarts(name, ".ARM.exidx.init");
}

bool module_exit_section(const char *name)
{
    return strstarts(name, ".exit") ||
        strstarts(name, ".ARM.extab.exit") ||
        strstarts(name, ".ARM.exidx.exit");
}

#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
/*
 * This implements the partitioning algorithm for group relocations as
 * documented in the ARM AArch32 ELF psABI (IHI 0044).
 *
 * A single PC-relative symbol reference is divided in up to 3 add or subtract
 * operations, where the final one could be incorporated into a load/store
 * instruction with immediate offset. E.g.,
 *
 *   ADD    Rd, PC, #...        or  ADD Rd, PC, #...
 *   ADD    Rd, Rd, #...            ADD Rd, Rd, #...
 *   LDR    Rd, [Rd, #...]          ADD Rd, Rd, #...
 *
 * The latter has a guaranteed range of only 16 MiB (3x8 == 24 bits), so it is
 * of limited use in the kernel. However, the ADD/ADD/LDR combo has a range of
 * -/+ 256 MiB, (2x8 + 12 == 28 bits), which means it has sufficient range for
 * any in-kernel symbol reference (unless module PLTs are being used).
 *
 * The main advantage of this approach over the typical pattern using a literal
 * load is that literal loads may miss in the D-cache, and generally lead to
 * lower cache efficiency for variables that are referenced often from many
 * different places in the code.
 */
static u32 get_group_rem(u32 group, u32 *offset)
{
    u32 val = *offset;
    u32 shift;
    do {
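        /*
         * Leading zero count of the remainder, rounded down to an even
         * number (ARM immediates use even rotate amounts); 32 once the
         * remainder is exhausted.
         */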
        shift = val ? (31 - __fls(val)) & ~1 : 32;
        *offset = val;
        if (!val)
            break;
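        /* Strip the most significant 8-bit chunk and move on to the next group. */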
        val &= 0xffffff >> shift;
    } while (group--);
    return shift;
}
#endif

int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
           unsigned int relindex, struct module *module)
{
    Elf32_Shdr *symsec = sechdrs + symindex;
    Elf32_Shdr *relsec = sechdrs + relindex;
    Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
    Elf32_Rel *rel = (void *)relsec->sh_addr;
    unsigned int i;

    for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
        unsigned long loc;
        Elf32_Sym *sym;
        const char *symname;
#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
        u32 shift, group = 1;
#endif
        s32 offset;
        u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
        u32 upper, lower, sign, j1, j2;
#endif

        offset = ELF32_R_SYM(rel->r_info);
        if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
            pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
                module->name, relindex, i);
            return -ENOEXEC;
        }

        sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
        symname = strtab + sym->st_name;

        if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
            pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
                   module->name, relindex, i, symname,
                   rel->r_offset, dstsec->sh_size);
            return -ENOEXEC;
        }

        loc = dstsec->sh_addr + rel->r_offset;

        switch (ELF32_R_TYPE(rel->r_info)) {
        case R_ARM_NONE:
            /* ignore */
            break;

        case R_ARM_ABS32:
        case R_ARM_TARGET1:
            *(u32 *)loc += sym->st_value;
            break;

        case R_ARM_PC24:
        case R_ARM_CALL:
        case R_ARM_JUMP24:
            if (sym->st_value & 3) {
                pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (ARM -> Thumb)\n",
                       module->name, relindex, i, symname);
                return -ENOEXEC;
            }

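            /*
             * Decode the signed 24-bit word offset held in the low bits
             * of the branch instruction and convert it to a byte offset.
             */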
            offset = __mem_to_opcode_arm(*(u32 *)loc);
            offset = (offset & 0x00ffffff) << 2;
            if (offset & 0x02000000)
                offset -= 0x04000000;

            offset += sym->st_value - loc;

            /*
             * Route through a PLT entry if 'offset' exceeds the
             * supported range. Note that 'offset + loc + 8'
             * contains the absolute jump target, i.e.,
             * @sym + addend, corrected for the +8 PC bias.
             */
            if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
                (offset <= (s32)0xfe000000 ||
                 offset >= (s32)0x02000000))
                offset = get_module_plt(module, loc,
                            offset + loc + 8)
                     - loc - 8;

            if (offset <= (s32)0xfe000000 ||
                offset >= (s32)0x02000000) {
                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
                       module->name, relindex, i, symname,
                       ELF32_R_TYPE(rel->r_info), loc,
                       sym->st_value);
                return -ENOEXEC;
            }

            offset >>= 2;
            offset &= 0x00ffffff;

            *(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
            *(u32 *)loc |= __opcode_to_mem_arm(offset);
            break;

        case R_ARM_V4BX:
            /* Preserve Rm and the condition code. Alter
             * other bits to re-code instruction as
             * MOV PC,Rm.
             */
            *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
            *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
            break;

        case R_ARM_PREL31:
            offset = (*(s32 *)loc << 1) >> 1; /* sign extend */
            offset += sym->st_value - loc;
            if (offset >= 0x40000000 || offset < -0x40000000) {
                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
                       module->name, relindex, i, symname,
                       ELF32_R_TYPE(rel->r_info), loc,
                       sym->st_value);
                return -ENOEXEC;
            }
            *(u32 *)loc &= 0x80000000;
            *(u32 *)loc |= offset & 0x7fffffff;
            break;

        case R_ARM_REL32:
            *(u32 *)loc += sym->st_value - loc;
            break;

        case R_ARM_MOVW_ABS_NC:
        case R_ARM_MOVT_ABS:
        case R_ARM_MOVW_PREL_NC:
        case R_ARM_MOVT_PREL:
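            /*
             * MOVW/MOVT encoding in ARM state: imm16 = imm4:imm12, with
             * imm4 in insn[19:16] and imm12 in insn[11:0].
             */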
            offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
            offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
            offset = (offset ^ 0x8000) - 0x8000;

            offset += sym->st_value;
            if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL ||
                ELF32_R_TYPE(rel->r_info) == R_ARM_MOVW_PREL_NC)
                offset -= loc;
            if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS ||
                ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_PREL)
                offset >>= 16;

            tmp &= 0xfff0f000;
            tmp |= ((offset & 0xf000) << 4) |
                (offset & 0x0fff);

            *(u32 *)loc = __opcode_to_mem_arm(tmp);
            break;

#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
        case R_ARM_ALU_PC_G0_NC:
            group = 0;
            fallthrough;
        case R_ARM_ALU_PC_G1_NC:
            tmp = __mem_to_opcode_arm(*(u32 *)loc);
            offset = ror32(tmp & 0xff, (tmp & 0xf00) >> 7);
            if (tmp & BIT(22))
                offset = -offset;
            offset += sym->st_value - loc;
            if (offset < 0) {
                offset = -offset;
                tmp = (tmp & ~BIT(23)) | BIT(22); // SUB opcode
            } else {
                tmp = (tmp & ~BIT(22)) | BIT(23); // ADD opcode
            }

            shift = get_group_rem(group, &offset);
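            /*
             * Re-encode the group as an 8-bit immediate rotated right by
             * (shift + 8) bits; the rotate field in bits [11:8] holds half
             * that amount. If shift >= 24, the remainder already fits in
             * 8 bits without rotation.
             */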
            if (shift < 24) {
                offset >>= 24 - shift;
                offset |= (shift + 8) << 7;
            }
            *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
            break;

        case R_ARM_LDR_PC_G2:
            tmp = __mem_to_opcode_arm(*(u32 *)loc);
            offset = tmp & 0xfff;
            if (~tmp & BIT(23))     // U bit cleared?
                offset = -offset;
            offset += sym->st_value - loc;
            if (offset < 0) {
                offset = -offset;
                tmp &= ~BIT(23);    // clear U bit
            } else {
                tmp |= BIT(23);     // set U bit
            }
            get_group_rem(2, &offset);

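            /*
             * With the first two groups stripped (they are covered by the
             * preceding ADDs), the remainder must fit in the 12-bit
             * load/store offset.
             */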
            if (offset > 0xfff) {
                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
                       module->name, relindex, i, symname,
                       ELF32_R_TYPE(rel->r_info), loc,
                       sym->st_value);
                return -ENOEXEC;
            }
            *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
            break;
#endif
#ifdef CONFIG_THUMB2_KERNEL
        case R_ARM_THM_CALL:
        case R_ARM_THM_JUMP24:
            /*
             * For function symbols, only Thumb addresses are
             * allowed (no interworking).
             *
             * For non-function symbols, the destination
             * has no specific ARM/Thumb disposition, so
             * the branch is resolved under the assumption
             * that interworking is not required.
             */
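            /* Bit 0 of st_value set means a Thumb entry point, clear means ARM. */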
            if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC &&
                !(sym->st_value & 1)) {
                pr_err("%s: section %u reloc %u sym '%s': unsupported interworking call (Thumb -> ARM)\n",
                       module->name, relindex, i, symname);
                return -ENOEXEC;
            }

            upper = __mem_to_opcode_thumb16(*(u16 *)loc);
            lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));

            /*
             * 25 bit signed address range (Thumb-2 BL and B.W
             * instructions):
             *   S:I1:I2:imm10:imm11:0
             * where:
             *   S     = upper[10]   = offset[24]
             *   I1    = ~(J1 ^ S)   = offset[23]
             *   I2    = ~(J2 ^ S)   = offset[22]
             *   imm10 = upper[9:0]  = offset[21:12]
             *   imm11 = lower[10:0] = offset[11:1]
             *   J1    = lower[13]
             *   J2    = lower[11]
             */
            sign = (upper >> 10) & 1;
            j1 = (lower >> 13) & 1;
            j2 = (lower >> 11) & 1;
            offset = (sign << 24) | ((~(j1 ^ sign) & 1) << 23) |
                ((~(j2 ^ sign) & 1) << 22) |
                ((upper & 0x03ff) << 12) |
                ((lower & 0x07ff) << 1);
            if (offset & 0x01000000)
                offset -= 0x02000000;
            offset += sym->st_value - loc;

            /*
             * Route through a PLT entry if 'offset' exceeds the
             * supported range.
             */
            if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
                (offset <= (s32)0xff000000 ||
                 offset >= (s32)0x01000000))
                offset = get_module_plt(module, loc,
                            offset + loc + 4)
                     - loc - 4;

            if (offset <= (s32)0xff000000 ||
                offset >= (s32)0x01000000) {
                pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
                       module->name, relindex, i, symname,
                       ELF32_R_TYPE(rel->r_info), loc,
                       sym->st_value);
                return -ENOEXEC;
            }

            sign = (offset >> 24) & 1;
            j1 = sign ^ (~(offset >> 23) & 1);
            j2 = sign ^ (~(offset >> 22) & 1);
            upper = (u16)((upper & 0xf800) | (sign << 10) |
                        ((offset >> 12) & 0x03ff));
            lower = (u16)((lower & 0xd000) |
                      (j1 << 13) | (j2 << 11) |
                      ((offset >> 1) & 0x07ff));

            *(u16 *)loc = __opcode_to_mem_thumb16(upper);
            *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
            break;

        case R_ARM_THM_MOVW_ABS_NC:
        case R_ARM_THM_MOVT_ABS:
        case R_ARM_THM_MOVW_PREL_NC:
        case R_ARM_THM_MOVT_PREL:
            upper = __mem_to_opcode_thumb16(*(u16 *)loc);
            lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));

            /*
             * MOVT/MOVW instructions encoding in Thumb-2:
             *
             * i    = upper[10]
             * imm4 = upper[3:0]
             * imm3 = lower[14:12]
             * imm8 = lower[7:0]
             *
             * imm16 = imm4:i:imm3:imm8
             */
            offset = ((upper & 0x000f) << 12) |
                ((upper & 0x0400) << 1) |
                ((lower & 0x7000) >> 4) | (lower & 0x00ff);
            offset = (offset ^ 0x8000) - 0x8000;
            offset += sym->st_value;

            if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL ||
                ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVW_PREL_NC)
                offset -= loc;
            if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS ||
                ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_PREL)
                offset >>= 16;

            upper = (u16)((upper & 0xfbf0) |
                      ((offset & 0xf000) >> 12) |
                      ((offset & 0x0800) >> 1));
            lower = (u16)((lower & 0x8f00) |
                      ((offset & 0x0700) << 4) |
                      (offset & 0x00ff));
            *(u16 *)loc = __opcode_to_mem_thumb16(upper);
            *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
            break;
#endif

        default:
            pr_err("%s: unknown relocation: %u\n",
                   module->name, ELF32_R_TYPE(rel->r_info));
            return -ENOEXEC;
        }
    }
    return 0;
}

struct mod_unwind_map {
    const Elf_Shdr *unw_sec;
    const Elf_Shdr *txt_sec;
};

static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
    const Elf_Shdr *sechdrs, const char *name)
{
    const Elf_Shdr *s, *se;
    const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

    for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
        if (strcmp(name, secstrs + s->sh_name) == 0)
            return s;

    return NULL;
}

extern void fixup_pv_table(const void *, unsigned long);
extern void fixup_smp(const void *, unsigned long);

int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
            struct module *mod)
{
    const Elf_Shdr *s = NULL;
#ifdef CONFIG_ARM_UNWIND
    const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
    const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
    struct list_head *unwind_list = &mod->arch.unwind_list;

    INIT_LIST_HEAD(unwind_list);
    mod->arch.init_table = NULL;

    for (s = sechdrs; s < sechdrs_end; s++) {
        const char *secname = secstrs + s->sh_name;
        const char *txtname;
        const Elf_Shdr *txt_sec;

        if (!(s->sh_flags & SHF_ALLOC) ||
            s->sh_type != ELF_SECTION_UNWIND)
            continue;

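        /*
         * ".ARM.exidx<name>" holds the unwind index for the text section
         * named "<name>"; a bare ".ARM.exidx" covers ".text".
         */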
        if (!strcmp(".ARM.exidx", secname))
            txtname = ".text";
        else
            txtname = secname + strlen(".ARM.exidx");
        txt_sec = find_mod_section(hdr, sechdrs, txtname);

        if (txt_sec) {
            struct unwind_table *table =
                unwind_table_add(s->sh_addr,
                        s->sh_size,
                        txt_sec->sh_addr,
                        txt_sec->sh_size);

            list_add(&table->mod_list, unwind_list);

            /* save init table for module_arch_freeing_init */
            if (strcmp(".ARM.exidx.init.text", secname) == 0)
                mod->arch.init_table = table;
        }
    }
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
    s = find_mod_section(hdr, sechdrs, ".pv_table");
    if (s)
        fixup_pv_table((void *)s->sh_addr, s->sh_size);
#endif
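    /*
     * Modules built with SMP alternatives carry a .alt.smp.init section;
     * on a UP system it is either patched (CONFIG_SMP_ON_UP) or the load
     * is rejected.
     */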
    s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
    if (s && !is_smp())
#ifdef CONFIG_SMP_ON_UP
        fixup_smp((void *)s->sh_addr, s->sh_size);
#else
        return -EINVAL;
#endif
    return 0;
}

void
module_arch_cleanup(struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
    struct unwind_table *tmp;
    struct unwind_table *n;

    list_for_each_entry_safe(tmp, n,
            &mod->arch.unwind_list, mod_list) {
        list_del(&tmp->mod_list);
        unwind_table_del(tmp);
    }
    mod->arch.init_table = NULL;
#endif
}

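/*
 * Called when the module's init region is freed: drop only the unwind
 * table saved for the init text, leaving the rest for
 * module_arch_cleanup().
 */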
void __weak module_arch_freeing_init(struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
    struct unwind_table *init = mod->arch.init_table;

    if (init) {
        mod->arch.init_table = NULL;
        list_del(&init->mod_list);
        unwind_table_del(init);
    }
#endif
}