0001
0002
0003
0004
0005
0006 #include <linux/elf.h>
0007 #include <linux/ftrace.h>
0008 #include <linux/kernel.h>
0009 #include <linux/module.h>
0010 #include <linux/sort.h>
0011
0012 static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
0013 enum aarch64_insn_register reg)
0014 {
0015 u32 adrp, add;
0016
0017 adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
0018 add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
0019 AARCH64_INSN_VARIANT_64BIT,
0020 AARCH64_INSN_ADSB_ADD);
0021
0022 return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
0023 }
0024
0025 struct plt_entry get_plt_entry(u64 dst, void *pc)
0026 {
0027 struct plt_entry plt;
0028 static u32 br;
0029
0030 if (!br)
0031 br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
0032 AARCH64_INSN_BRANCH_NOLINK);
0033
0034 plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
0035 plt.br = cpu_to_le32(br);
0036
0037 return plt;
0038 }
0039
/*
 * Return true if PLT entries @a and @b branch to the same target address.
 * Because the ADRP instruction is PC-relative, two entries at different
 * locations may encode different ADRP opcodes yet still resolve to the
 * same target page.
 */
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * The ADD opcode carries the low 12 bits of the target (see
	 * __get_adrp_add_pair()) and the BR opcode is position independent,
	 * so if either differs the entries cannot be equivalent.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	/* 4 KB page base of each entry's own location */
	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * Identical ADRP opcodes imply the same target page only when both
	 * entries reside in the same page, since ADRP is PC-relative.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

	/*
	 * Otherwise compare the absolute target pages: each entry's own page
	 * base plus the page offset encoded in its ADRP instruction.
	 */
	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}
0066
0067 static bool in_init(const struct module *mod, void *loc)
0068 {
0069 return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
0070 }
0071
/*
 * Emit (or reuse) a PLT entry that branches to @sym's value plus the
 * relocation addend, on behalf of the branch instruction at @loc.
 * Returns the address of the PLT entry to branch to, or 0 if the PLT
 * section is unexpectedly full.
 */
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	/* branches in the init region are served from .init.plt */
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	/*
	 * Skip one slot if the new entry's ADRP would land at an offset
	 * disallowed by the ADRP workaround (is_forbidden_offset_for_adrp());
	 * count_plts() reserved slack for such skips.
	 */
	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, a duplicate can only be the last entry we
	 * allocated (if one exists).
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	/* i - j accounts for both the new entry and any skipped slot */
	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}
0102
#ifdef CONFIG_ARM64_ERRATUM_843419
/*
 * Emit a veneer that performs the ADRP + ADD to @val on behalf of the ADRP
 * instruction at @loc, which sits at an offset affected by erratum 843419.
 * The veneer branches back to the instruction following the original ADRP.
 * Returns the veneer's address, or 0 if the PLT section is full.
 */
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	/*
	 * The veneer's own ADRP must not land on a forbidden offset either;
	 * if it would, burn this slot and take the next one.
	 */
	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i = pltsec->plt_num_entries++;

	/* get the destination register of the ADRP instruction at @loc */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	/* unconditional branch back to the instruction after the ADRP */
	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
	plt[i].br = cpu_to_le32(br);

	return (u64)&plt[i];
}
#endif
0133
/* three-way compare: -1 if (a) < (b), 1 if (a) > (b), 0 if equal */
#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
0135
/*
 * Order relocations by type, then symbol index, then addend, so that
 * relocations which can share a PLT entry end up adjacent after sorting.
 */
static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int ret;

	/* primary key: relocation type */
	ret = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (ret)
		return ret;

	/* secondary key: symbol index */
	ret = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (ret)
		return ret;

	/* tertiary key: addend */
	return cmp_3way(x->r_addend, y->r_addend);
}
0149
/*
 * With the relocations sorted by cmp_rela(), a relocation that could share
 * a PLT entry with an earlier one is always adjacent to it, so comparing
 * against the immediately preceding entry suffices.
 */
static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	if (num <= 0)
		return false;

	return cmp_rela(&rela[num], &rela[num - 1]) == 0;
}
0159
/*
 * Count the PLT entries the relocations in @rela may require when applied
 * to the section with index @dstidx. For the erratum 843419 workaround,
 * this may also raise @dstsec's alignment instead of reserving a veneer,
 * and adds slack entries so that forbidden PLT slots can be skipped later.
 */
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			/*
			 * Without CONFIG_RANDOMIZE_BASE, no branch-range
			 * veneers are assumed necessary, so never count a
			 * PLT entry for direct branches.
			 */
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * Branches to symbols defined in the section being
			 * relocated never need a PLT entry, so skip them.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * The candidate branch relocations were sorted by
			 * cmp_rela(), so consecutive duplicates can share a
			 * single PLT entry. Only zero-addend relocations
			 * take part in this deduplication; relocations with
			 * a non-zero addend are always counted.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			/* only relevant when the 843419 workaround is active */
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Find the smallest section alignment that pins the
			 * lowest clear bit in bits [11:3] of this ADRP's
			 * offset, so that the instruction's address modulo
			 * 4 KB can never reach the erratum's forbidden
			 * offsets at the very end of a page (the range
			 * checked by is_forbidden_offset_for_adrp()).
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * If bits [11:3] of the offset are all set, no
			 * alignment up to 4 KB can help: reserve a veneer
			 * entry instead of raising the alignment.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_843419))
		/*
		 * Add slack so module_emit_plt_entry() can skip PLT slots
		 * whose ADRP would itself land on a forbidden offset: at
		 * most one extra entry per 4 KB page worth of PLT entries.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));

	return ret;
}
0255
/*
 * Return true if @rela is a direct-branch relocation (JUMP26/CALL26) that
 * may need a PLT entry, i.e. one whose symbol lives outside the section
 * being relocated (@dstidx). Branches within the destination section are
 * always in range and never need one.
 */
static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
				  Elf64_Word dstidx)
{
	Elf64_Sym *sym = syms + ELF64_R_SYM(rela->r_info);

	if (sym->st_shndx == dstidx)
		return false;

	switch (ELF64_R_TYPE(rela->r_info)) {
	case R_AARCH64_JUMP26:
	case R_AARCH64_CALL26:
		return true;
	default:
		return false;
	}
}
0268
0269
/*
 * Partition @rela in place so that the branch relocations that may need a
 * PLT entry (see branch_rela_needs_plt()) are moved to the front of the
 * array. Returns the size of that front partition; only that prefix needs
 * sorting so duplicate_rel() can deduplicate PLT entries.
 */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	/*
	 * Without CONFIG_RANDOMIZE_BASE no branch PLTs are counted (see
	 * count_plts()), so nothing needs to be partitioned or sorted.
	 */
	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return 0;

	/* two-pointer partition: grow PLT prefix at i, shrink tail at j */
	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			/* move the PLT candidate at j into the prefix */
			swap(rela[i], rela[j]);
		else
			j--;
	}

	/*
	 * NOTE(review): when i == j the element at that index is never
	 * classified and stays outside the sorted prefix; this appears to
	 * cost at most one missed deduplication (a spare PLT slot), not a
	 * correctness issue — confirm against count_plts()'s accounting.
	 */
	return i;
}
0289
/*
 * Scan the module's relocation sections at load time, count the PLT entries
 * that will be needed, and size the .plt / .init.plt (and optional ftrace
 * trampoline) sections accordingly before the module is laid out in memory.
 * Returns 0 on success or -ENOEXEC if required sections are missing.
 */
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *pltsec, *tramp = NULL;
	int i;

	/*
	 * Locate the .plt and .init.plt section indices, the optional
	 * ftrace trampoline section, and the symbol table.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	/* section index 0 (SHN_UNDEF) doubles as "not found" here */
	if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		/* relocation records are read from the file image via sh_offset */
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-executable sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/*
		 * Move the branch relocations that may need PLTs to the
		 * front and sort them by type, symbol and addend, so that
		 * count_plts() can deduplicate via duplicate_rel().
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		/* relocations against .init* sections are served by .init.plt */
		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	/*
	 * Turn the PLT sections into allocated, executable NOBITS sections
	 * of the computed size. The +1 entry of headroom beyond the count
	 * is kept as is — its precise purpose is not visible in this file.
	 */
	pltsec = sechdrs + mod->arch.core.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	pltsec = sechdrs + mod->arch.init.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

	/* reserve a fixed number of PLT slots for the ftrace trampolines */
	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
	}

	return 0;
}