// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming
 *  Support Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
0013 #include <linux/module.h>
0014 #include <linux/kernel.h>
0015 #include <linux/fs.h>
0016 #include <linux/log2.h>
0017 #include <linux/mm.h>
0018 #include <linux/mman.h>
0019 #include <linux/errno.h>
0020 #include <linux/signal.h>
0021 #include <linux/binfmts.h>
0022 #include <linux/string.h>
0023 #include <linux/file.h>
0024 #include <linux/slab.h>
0025 #include <linux/personality.h>
0026 #include <linux/elfcore.h>
0027 #include <linux/init.h>
0028 #include <linux/highuid.h>
0029 #include <linux/compiler.h>
0030 #include <linux/highmem.h>
0031 #include <linux/hugetlb.h>
0032 #include <linux/pagemap.h>
0033 #include <linux/vmalloc.h>
0034 #include <linux/security.h>
0035 #include <linux/random.h>
0036 #include <linux/elf.h>
0037 #include <linux/elf-randomize.h>
0038 #include <linux/utsname.h>
0039 #include <linux/coredump.h>
0040 #include <linux/sched.h>
0041 #include <linux/sched/coredump.h>
0042 #include <linux/sched/task_stack.h>
0043 #include <linux/sched/cputime.h>
0044 #include <linux/sizes.h>
0045 #include <linux/types.h>
0046 #include <linux/cred.h>
0047 #include <linux/dax.h>
0048 #include <linux/uaccess.h>
0049 #include <asm/param.h>
0050 #include <asm/page.h>
0051
0052 #ifndef ELF_COMPAT
0053 #define ELF_COMPAT 0
0054 #endif
0055
0056 #ifndef user_long_t
0057 #define user_long_t long
0058 #endif
0059 #ifndef user_siginfo_t
0060 #define user_siginfo_t siginfo_t
0061 #endif
0062
0063
0064 #ifndef elf_check_fdpic
0065 #define elf_check_fdpic(ex) false
0066 #endif
0067
0068 static int load_elf_binary(struct linux_binprm *bprm);
0069
0070 #ifdef CONFIG_USELIB
0071 static int load_elf_library(struct file *);
0072 #else
0073 #define load_elf_library NULL
0074 #endif
0075
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
0080 #ifdef CONFIG_ELF_CORE
0081 static int elf_core_dump(struct coredump_params *cprm);
0082 #else
0083 #define elf_core_dump NULL
0084 #endif
0085
0086 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
0087 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
0088 #else
0089 #define ELF_MIN_ALIGN PAGE_SIZE
0090 #endif
0091
0092 #ifndef ELF_CORE_EFLAGS
0093 #define ELF_CORE_EFLAGS 0
0094 #endif
0095
0096 #define ELF_PAGESTART(_v) ((_v) & ~(int)(ELF_MIN_ALIGN-1))
0097 #define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
0098 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
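/*
 * For example, assuming ELF_MIN_ALIGN == 4096 (the common case):
 * ELF_PAGESTART(0x12345) == 0x12000, ELF_PAGEOFFSET(0x12345) == 0x345 and
 * ELF_PAGEALIGN(0x12345) == 0x13000.  Segments are mapped starting at
 * ELF_PAGESTART(p_vaddr), preserving the in-page offset of the data.
 */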
0099
0100 static struct linux_binfmt elf_format = {
0101 .module = THIS_MODULE,
0102 .load_binary = load_elf_binary,
0103 .load_shlib = load_elf_library,
0104 #ifdef CONFIG_COREDUMP
0105 .core_dump = elf_core_dump,
0106 .min_coredump = ELF_EXEC_PAGESIZE,
0107 #endif
0108 };
0109
0110 #define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
0111
0112 static int set_brk(unsigned long start, unsigned long end, int prot)
0113 {
0114 start = ELF_PAGEALIGN(start);
0115 end = ELF_PAGEALIGN(end);
0116 if (end > start) {
/*
 * Map the last of the bss segment.
 * If the header is requesting these pages to be
 * executable, honour that (ppc32 needs this).
 */
0122 int error = vm_brk_flags(start, end - start,
0123 prot & PROT_EXEC ? VM_EXEC : 0);
0124 if (error)
0125 return error;
0126 }
0127 current->mm->start_brk = current->mm->brk = end;
0128 return 0;
0129 }
0130
/*
 * We need to explicitly zero any fractional pages
 * after the data section (i.e. bss).  This would
 * contain the junk from the file that should not
 * be in memory.
 */
0136 static int padzero(unsigned long elf_bss)
0137 {
0138 unsigned long nbyte;
0139
0140 nbyte = ELF_PAGEOFFSET(elf_bss);
0141 if (nbyte) {
0142 nbyte = ELF_MIN_ALIGN - nbyte;
0143 if (clear_user((void __user *) elf_bss, nbyte))
0144 return -EFAULT;
0145 }
0146 return 0;
0147 }
0148
0149
0150 #ifdef CONFIG_STACK_GROWSUP
0151 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
0152 #define STACK_ROUND(sp, items) \
0153 ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
0154 #define STACK_ALLOC(sp, len) ({ \
0155 elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
0156 old_sp; })
0157 #else
0158 #define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
0159 #define STACK_ROUND(sp, items) \
0160 (((unsigned long) (sp - items)) &~ 15UL)
0161 #define STACK_ALLOC(sp, len) (sp -= len)
0162 #endif
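/*
 * The STACK_* helpers above hide the stack growth direction: on
 * CONFIG_STACK_GROWSUP architectures (e.g. parisc) an allocation moves the
 * stack pointer up, everywhere else it moves it down.  STACK_ROUND() keeps
 * the resulting stack pointer 16-byte aligned in both cases.
 */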
0163
0164 #ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
0170 #define ELF_BASE_PLATFORM NULL
0171 #endif
0172
0173 static int
0174 create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
0175 unsigned long interp_load_addr,
0176 unsigned long e_entry, unsigned long phdr_addr)
0177 {
0178 struct mm_struct *mm = current->mm;
0179 unsigned long p = bprm->p;
0180 int argc = bprm->argc;
0181 int envc = bprm->envc;
0182 elf_addr_t __user *sp;
0183 elf_addr_t __user *u_platform;
0184 elf_addr_t __user *u_base_platform;
0185 elf_addr_t __user *u_rand_bytes;
0186 const char *k_platform = ELF_PLATFORM;
0187 const char *k_base_platform = ELF_BASE_PLATFORM;
0188 unsigned char k_rand_bytes[16];
0189 int items;
0190 elf_addr_t *elf_info;
0191 elf_addr_t flags = 0;
0192 int ei_index;
0193 const struct cred *cred = current_cred();
0194 struct vm_area_struct *vma;
0195
0196
0197
0198
0199
0200
0201
0202 p = arch_align_stack(p);
0203
/*
 * If this architecture has a platform capability string, copy it
 * to userspace.  In some cases (Sparc), this info is impossible
 * for userspace to get any other way, in others (i386) it is
 * merely difficult.
 */
0210 u_platform = NULL;
0211 if (k_platform) {
0212 size_t len = strlen(k_platform) + 1;
0213
0214 u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
0215 if (copy_to_user(u_platform, k_platform, len))
0216 return -EFAULT;
0217 }
0218
0219
0220
0221
0222
0223 u_base_platform = NULL;
0224 if (k_base_platform) {
0225 size_t len = strlen(k_base_platform) + 1;
0226
0227 u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
0228 if (copy_to_user(u_base_platform, k_base_platform, len))
0229 return -EFAULT;
0230 }
0231
0232
0233
0234
0235 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
0236 u_rand_bytes = (elf_addr_t __user *)
0237 STACK_ALLOC(p, sizeof(k_rand_bytes));
0238 if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
0239 return -EFAULT;
0240
/* Create the ELF interpreter info */
elf_info = (elf_addr_t *)mm->saved_auxv;
/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
0244 #define NEW_AUX_ENT(id, val) \
0245 do { \
0246 *elf_info++ = id; \
0247 *elf_info++ = val; \
0248 } while (0)
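/*
 * Each NEW_AUX_ENT() below appends one (id, value) pair to the auxiliary
 * vector staged in mm->saved_auxv; the vector is copied onto the new
 * process stack near the end of this function.  Userspace normally reads
 * these entries with getauxval(3), e.g. getauxval(AT_PAGESZ), and glibc
 * seeds its stack-protector canary from the AT_RANDOM bytes.
 */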
0249
0250 #ifdef ARCH_DLINFO
/*
 * ARCH_DLINFO must come first so PPC can do its special alignment of
 * AUXV.
 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
 * ARCH_DLINFO changes
 */
0257 ARCH_DLINFO;
0258 #endif
0259 NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
0260 NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
0261 NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
0262 NEW_AUX_ENT(AT_PHDR, phdr_addr);
0263 NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
0264 NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
0265 NEW_AUX_ENT(AT_BASE, interp_load_addr);
0266 if (bprm->interp_flags & BINPRM_FLAGS_PRESERVE_ARGV0)
0267 flags |= AT_FLAGS_PRESERVE_ARGV0;
0268 NEW_AUX_ENT(AT_FLAGS, flags);
0269 NEW_AUX_ENT(AT_ENTRY, e_entry);
0270 NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
0271 NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
0272 NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
0273 NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
0274 NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
0275 NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
0276 #ifdef ELF_HWCAP2
0277 NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
0278 #endif
0279 NEW_AUX_ENT(AT_EXECFN, bprm->exec);
0280 if (k_platform) {
0281 NEW_AUX_ENT(AT_PLATFORM,
0282 (elf_addr_t)(unsigned long)u_platform);
0283 }
0284 if (k_base_platform) {
0285 NEW_AUX_ENT(AT_BASE_PLATFORM,
0286 (elf_addr_t)(unsigned long)u_base_platform);
0287 }
0288 if (bprm->have_execfd) {
0289 NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
0290 }
0291 #undef NEW_AUX_ENT

/* AT_NULL is zero; clear the rest too */
memset(elf_info, 0, (char *)mm->saved_auxv +
sizeof(mm->saved_auxv) - (char *)elf_info);

/* And advance past the AT_NULL entry.  */
elf_info += 2;
0298
0299 ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
0300 sp = STACK_ADD(p, ei_index);
0301
0302 items = (argc + 1) + (envc + 1) + 1;
0303 bprm->p = STACK_ROUND(sp, items);
0304
0305
0306 #ifdef CONFIG_STACK_GROWSUP
0307 sp = (elf_addr_t __user *)bprm->p - items - ei_index;
0308 bprm->exec = (unsigned long)sp;
0309 #else
0310 sp = (elf_addr_t __user *)bprm->p;
0311 #endif
0312
/*
 * Grow the stack manually; some architectures have a limit on how
 * far ahead a user-space access may be in order to grow the stack.
 */
0318 if (mmap_read_lock_killable(mm))
0319 return -EINTR;
0320 vma = find_extend_vma(mm, bprm->p);
0321 mmap_read_unlock(mm);
0322 if (!vma)
0323 return -EFAULT;
0324
0325
0326 if (put_user(argc, sp++))
0327 return -EFAULT;
0328
0329
0330 p = mm->arg_end = mm->arg_start;
0331 while (argc-- > 0) {
0332 size_t len;
0333 if (put_user((elf_addr_t)p, sp++))
0334 return -EFAULT;
0335 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
0336 if (!len || len > MAX_ARG_STRLEN)
0337 return -EINVAL;
0338 p += len;
0339 }
0340 if (put_user(0, sp++))
0341 return -EFAULT;
0342 mm->arg_end = p;
0343
0344
0345 mm->env_end = mm->env_start = p;
0346 while (envc-- > 0) {
0347 size_t len;
0348 if (put_user((elf_addr_t)p, sp++))
0349 return -EFAULT;
0350 len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
0351 if (!len || len > MAX_ARG_STRLEN)
0352 return -EINVAL;
0353 p += len;
0354 }
0355 if (put_user(0, sp++))
0356 return -EFAULT;
0357 mm->env_end = p;
0358
0359
0360 if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
0361 return -EFAULT;
0362 return 0;
0363 }
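/*
 * After create_elf_tables() the new stack looks roughly like this, from the
 * final userspace stack pointer upwards:
 *
 *	argc
 *	argv[0] ... argv[argc - 1], NULL
 *	envp[0] ... envp[envc - 1], NULL
 *	auxv pairs (id, value), terminated by AT_NULL
 *	... random bytes, platform strings, argument/environment strings ...
 */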
0364
0365 static unsigned long elf_map(struct file *filep, unsigned long addr,
0366 const struct elf_phdr *eppnt, int prot, int type,
0367 unsigned long total_size)
0368 {
0369 unsigned long map_addr;
0370 unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
0371 unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
0372 addr = ELF_PAGESTART(addr);
0373 size = ELF_PAGEALIGN(size);

/* mmap() will return -EINVAL if given a zero size, but a
 * segment with zero filesize is perfectly valid */
0377 if (!size)
0378 return addr;
0379

/*
 * total_size is the size of the ELF (interpreter) image.
 * The _first_ mmap needs to know the full size, otherwise
 * randomization might put this image into an overlapping
 * position with the ELF binary image. (since size < total_size)
 * So we first map the 'big' image - and unmap the remainder at
 * the end. (which unmap is needed for ELF files with holes.)
 */
0388 if (total_size) {
0389 total_size = ELF_PAGEALIGN(total_size);
0390 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
0391 if (!BAD_ADDR(map_addr))
0392 vm_munmap(map_addr+size, total_size-size);
0393 } else
0394 map_addr = vm_mmap(filep, addr, size, prot, type, off);
0395
0396 if ((type & MAP_FIXED_NOREPLACE) &&
0397 PTR_ERR((void *)map_addr) == -EEXIST)
0398 pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
0399 task_pid_nr(current), current->comm, (void *)addr);
0400
0401 return(map_addr);
0402 }
0403
0404 static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr)
0405 {
0406 elf_addr_t min_addr = -1;
0407 elf_addr_t max_addr = 0;
0408 bool pt_load = false;
0409 int i;
0410
0411 for (i = 0; i < nr; i++) {
0412 if (phdr[i].p_type == PT_LOAD) {
0413 min_addr = min(min_addr, ELF_PAGESTART(phdr[i].p_vaddr));
0414 max_addr = max(max_addr, phdr[i].p_vaddr + phdr[i].p_memsz);
0415 pt_load = true;
0416 }
0417 }
0418 return pt_load ? (max_addr - min_addr) : 0;
0419 }
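/*
 * For example, PT_LOAD segments at p_vaddr 0x0 (p_memsz 0x1000) and
 * 0x200000 (p_memsz 0x500) give a total mapping size of 0x200500: the span
 * from the page containing the lowest segment to the end of the highest
 * one.  elf_map() uses this to reserve the whole range with one mapping.
 */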
0420
0421 static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
0422 {
0423 ssize_t rv;
0424
0425 rv = kernel_read(file, buf, len, &pos);
0426 if (unlikely(rv != len)) {
0427 return (rv < 0) ? rv : -EIO;
0428 }
0429 return 0;
0430 }
0431
0432 static unsigned long maximum_alignment(struct elf_phdr *cmds, int nr)
0433 {
0434 unsigned long alignment = 0;
0435 int i;
0436
0437 for (i = 0; i < nr; i++) {
0438 if (cmds[i].p_type == PT_LOAD) {
0439 unsigned long p_align = cmds[i].p_align;
0440
0441
0442 if (!is_power_of_2(p_align))
0443 continue;
0444 alignment = max(alignment, p_align);
0445 }
0446 }
0447
0448
0449 return ELF_PAGEALIGN(alignment);
0450 }
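/*
 * For example, PT_LOAD alignments of 0x1000 and 0x200000 yield 0x200000;
 * load_elf_binary() masks the randomized load_bias down to that alignment
 * so that huge-page-aligned PIEs stay correctly aligned under ASLR.
 */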
0451
/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
0461 static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
0462 struct file *elf_file)
0463 {
0464 struct elf_phdr *elf_phdata = NULL;
0465 int retval, err = -1;
0466 unsigned int size;
0467
0468
0469
0470
0471
0472 if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
0473 goto out;
0474
0475
0476
0477 size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
0478 if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
0479 goto out;
0480
0481 elf_phdata = kmalloc(size, GFP_KERNEL);
0482 if (!elf_phdata)
0483 goto out;
0484
0485
0486 retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
0487 if (retval < 0) {
0488 err = retval;
0489 goto out;
0490 }
0491
0492
0493 err = 0;
0494 out:
0495 if (err) {
0496 kfree(elf_phdata);
0497 elf_phdata = NULL;
0498 }
0499 return elf_phdata;
0500 }
0501
0502 #ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
0515 struct arch_elf_state {
0516 };
0517
0518 #define INIT_ARCH_ELF_STATE {}
0519
/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
0538 static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
0539 struct elf_phdr *phdr,
0540 struct file *elf, bool is_interp,
0541 struct arch_elf_state *state)
0542 {
0543
0544 return 0;
0545 }
0546
/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called
 * after all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
0562 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
0563 struct elfhdr *interp_ehdr,
0564 struct arch_elf_state *state)
0565 {
0566
0567 return 0;
0568 }
0569
0570 #endif
0571
0572 static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
0573 bool has_interp, bool is_interp)
0574 {
0575 int prot = 0;
0576
0577 if (p_flags & PF_R)
0578 prot |= PROT_READ;
0579 if (p_flags & PF_W)
0580 prot |= PROT_WRITE;
0581 if (p_flags & PF_X)
0582 prot |= PROT_EXEC;
0583
0584 return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
0585 }
0586
/*
 * Map the ELF interpreter (the dynamic linker named by PT_INTERP) into the
 * current address space and return the address it was loaded at; the caller
 * adds the interpreter's e_entry to that base to get the entry point.
 * On failure a negative error value, cast to unsigned long, is returned.
 */
0592 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
0593 struct file *interpreter,
0594 unsigned long no_base, struct elf_phdr *interp_elf_phdata,
0595 struct arch_elf_state *arch_state)
0596 {
0597 struct elf_phdr *eppnt;
0598 unsigned long load_addr = 0;
0599 int load_addr_set = 0;
0600 unsigned long last_bss = 0, elf_bss = 0;
0601 int bss_prot = 0;
0602 unsigned long error = ~0UL;
0603 unsigned long total_size;
0604 int i;
0605
0606
0607 if (interp_elf_ex->e_type != ET_EXEC &&
0608 interp_elf_ex->e_type != ET_DYN)
0609 goto out;
0610 if (!elf_check_arch(interp_elf_ex) ||
0611 elf_check_fdpic(interp_elf_ex))
0612 goto out;
0613 if (!interpreter->f_op->mmap)
0614 goto out;
0615
0616 total_size = total_mapping_size(interp_elf_phdata,
0617 interp_elf_ex->e_phnum);
0618 if (!total_size) {
0619 error = -EINVAL;
0620 goto out;
0621 }
0622
0623 eppnt = interp_elf_phdata;
0624 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
0625 if (eppnt->p_type == PT_LOAD) {
0626 int elf_type = MAP_PRIVATE;
0627 int elf_prot = make_prot(eppnt->p_flags, arch_state,
0628 true, true);
0629 unsigned long vaddr = 0;
0630 unsigned long k, map_addr;
0631
0632 vaddr = eppnt->p_vaddr;
0633 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
0634 elf_type |= MAP_FIXED;
0635 else if (no_base && interp_elf_ex->e_type == ET_DYN)
0636 load_addr = -vaddr;
0637
0638 map_addr = elf_map(interpreter, load_addr + vaddr,
0639 eppnt, elf_prot, elf_type, total_size);
0640 total_size = 0;
0641 error = map_addr;
0642 if (BAD_ADDR(map_addr))
0643 goto out;
0644
0645 if (!load_addr_set &&
0646 interp_elf_ex->e_type == ET_DYN) {
0647 load_addr = map_addr - ELF_PAGESTART(vaddr);
0648 load_addr_set = 1;
0649 }
0650
/*
 * Check to see if the section's size will overflow the
 * allowed task size. Note that p_filesz must always be
 * <= p_memsz so it's only necessary to check p_memsz.
 */
0656 k = load_addr + eppnt->p_vaddr;
0657 if (BAD_ADDR(k) ||
0658 eppnt->p_filesz > eppnt->p_memsz ||
0659 eppnt->p_memsz > TASK_SIZE ||
0660 TASK_SIZE - eppnt->p_memsz < k) {
0661 error = -ENOMEM;
0662 goto out;
0663 }
0664
0665
0666
0667
0668
0669 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
0670 if (k > elf_bss)
0671 elf_bss = k;
0672
0673
0674
0675
0676
0677 k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
0678 if (k > last_bss) {
0679 last_bss = k;
0680 bss_prot = elf_prot;
0681 }
0682 }
0683 }
0684
/*
 * Now fill out the bss section: first pad the last page from
 * the file up to the page boundary, and zero it from elf_bss
 * up to the end of the page.
 */
0690 if (padzero(elf_bss)) {
0691 error = -EFAULT;
0692 goto out;
0693 }
0694
0695
0696
0697
0698
0699 elf_bss = ELF_PAGEALIGN(elf_bss);
0700 last_bss = ELF_PAGEALIGN(last_bss);
0701
0702 if (last_bss > elf_bss) {
0703 error = vm_brk_flags(elf_bss, last_bss - elf_bss,
0704 bss_prot & PROT_EXEC ? VM_EXEC : 0);
0705 if (error)
0706 goto out;
0707 }
0708
0709 error = load_addr;
0710 out:
0711 return error;
0712 }
0713
/*
 * Decode one entry of the PT_GNU_PROPERTY note at @data/@datasz, starting at
 * *@off, and hand it to the architecture via arch_parse_elf_property().
 * Returns -ENOENT once the whole descriptor has been consumed, -ENOEXEC on
 * malformed input, or whatever the arch hook returns.
 */
0719 static int parse_elf_property(const char *data, size_t *off, size_t datasz,
0720 struct arch_elf_state *arch,
0721 bool have_prev_type, u32 *prev_type)
0722 {
0723 size_t o, step;
0724 const struct gnu_property *pr;
0725 int ret;
0726
0727 if (*off == datasz)
0728 return -ENOENT;
0729
0730 if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
0731 return -EIO;
0732 o = *off;
0733 datasz -= *off;
0734
0735 if (datasz < sizeof(*pr))
0736 return -ENOEXEC;
0737 pr = (const struct gnu_property *)(data + o);
0738 o += sizeof(*pr);
0739 datasz -= sizeof(*pr);
0740
0741 if (pr->pr_datasz > datasz)
0742 return -ENOEXEC;
0743
0744 WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
0745 step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
0746 if (step > datasz)
0747 return -ENOEXEC;
0748
0749
0750 if (have_prev_type && pr->pr_type <= *prev_type)
0751 return -ENOEXEC;
0752 *prev_type = pr->pr_type;
0753
0754 ret = arch_parse_elf_property(pr->pr_type, data + o,
0755 pr->pr_datasz, ELF_COMPAT, arch);
0756 if (ret)
0757 return ret;
0758
0759 *off = o + step;
0760 return 0;
0761 }
0762
0763 #define NOTE_DATA_SZ SZ_1K
0764 #define GNU_PROPERTY_TYPE_0_NAME "GNU"
0765 #define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
0766
0767 static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
0768 struct arch_elf_state *arch)
0769 {
0770 union {
0771 struct elf_note nhdr;
0772 char data[NOTE_DATA_SZ];
0773 } note;
0774 loff_t pos;
0775 ssize_t n;
0776 size_t off, datasz;
0777 int ret;
0778 bool have_prev_type;
0779 u32 prev_type;
0780
0781 if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
0782 return 0;
0783
0784
0785 if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
0786 return -ENOEXEC;
0787
0788
0789 if (phdr->p_filesz > sizeof(note))
0790 return -ENOEXEC;
0791
0792 pos = phdr->p_offset;
n = kernel_read(f, &note, phdr->p_filesz, &pos);
0794
0795 BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
0796 if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
0797 return -EIO;
0798
0799 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
0800 note.nhdr.n_namesz != NOTE_NAME_SZ ||
0801 strncmp(note.data + sizeof(note.nhdr),
0802 GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
0803 return -ENOEXEC;
0804
0805 off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
0806 ELF_GNU_PROPERTY_ALIGN);
0807 if (off > n)
0808 return -ENOEXEC;
0809
0810 if (note.nhdr.n_descsz > n - off)
0811 return -ENOEXEC;
0812 datasz = off + note.nhdr.n_descsz;
0813
0814 have_prev_type = false;
0815 do {
0816 ret = parse_elf_property(note.data, &off, datasz, arch,
0817 have_prev_type, &prev_type);
0818 have_prev_type = true;
0819 } while (!ret);
0820
0821 return ret == -ENOENT ? 0 : ret;
0822 }
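/*
 * The PT_GNU_PROPERTY segment parsed above holds one NT_GNU_PROPERTY_TYPE_0
 * note: an Elf_Nhdr, the 4-byte name "GNU\0", then a descriptor made of
 * { u32 pr_type; u32 pr_datasz; u8 data[pr_datasz]; } entries, each padded
 * to ELF_GNU_PROPERTY_ALIGN and sorted by increasing pr_type.  Architectures
 * consume the entries via arch_parse_elf_property() (e.g. arm64 BTI or x86
 * shadow-stack/IBT markers).
 */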
0823
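/*
 * Overview: load_elf_binary() sanity-checks the ELF header, reads the
 * program headers, opens any PT_INTERP interpreter, flushes the old
 * executable (begin_new_exec), sets up the stack, mmaps every PT_LOAD
 * segment (applying the ET_DYN load_bias), establishes the bss/brk, maps
 * the interpreter if present, and finally builds the argv/envp/auxv tables
 * and starts the new thread at the chosen entry point.
 */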
0824 static int load_elf_binary(struct linux_binprm *bprm)
0825 {
0826 struct file *interpreter = NULL;
0827 unsigned long load_bias = 0, phdr_addr = 0;
0828 int first_pt_load = 1;
0829 unsigned long error;
0830 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
0831 struct elf_phdr *elf_property_phdata = NULL;
0832 unsigned long elf_bss, elf_brk;
0833 int bss_prot = 0;
0834 int retval, i;
0835 unsigned long elf_entry;
0836 unsigned long e_entry;
0837 unsigned long interp_load_addr = 0;
0838 unsigned long start_code, end_code, start_data, end_data;
0839 unsigned long reloc_func_desc __maybe_unused = 0;
0840 int executable_stack = EXSTACK_DEFAULT;
0841 struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
0842 struct elfhdr *interp_elf_ex = NULL;
0843 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
0844 struct mm_struct *mm;
0845 struct pt_regs *regs;
0846
0847 retval = -ENOEXEC;
0848
0849 if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
0850 goto out;
0851
0852 if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
0853 goto out;
0854 if (!elf_check_arch(elf_ex))
0855 goto out;
0856 if (elf_check_fdpic(elf_ex))
0857 goto out;
0858 if (!bprm->file->f_op->mmap)
0859 goto out;
0860
0861 elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
0862 if (!elf_phdata)
0863 goto out;
0864
0865 elf_ppnt = elf_phdata;
0866 for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
0867 char *elf_interpreter;
0868
0869 if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
0870 elf_property_phdata = elf_ppnt;
0871 continue;
0872 }
0873
0874 if (elf_ppnt->p_type != PT_INTERP)
0875 continue;
0876
/*
 * This segment names the program interpreter (dynamic linker)
 * used for shared libraries; read its path and open it below.
 */
0881 retval = -ENOEXEC;
0882 if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
0883 goto out_free_ph;
0884
0885 retval = -ENOMEM;
0886 elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
0887 if (!elf_interpreter)
0888 goto out_free_ph;
0889
0890 retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
0891 elf_ppnt->p_offset);
0892 if (retval < 0)
0893 goto out_free_interp;
0894
0895 retval = -ENOEXEC;
0896 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
0897 goto out_free_interp;
0898
0899 interpreter = open_exec(elf_interpreter);
0900 kfree(elf_interpreter);
0901 retval = PTR_ERR(interpreter);
0902 if (IS_ERR(interpreter))
0903 goto out_free_ph;
0904
/*
 * If the binary is not readable then enforce mm->dumpable = 0
 * regardless of the interpreter's permissions.
 */
0909 would_dump(bprm, interpreter);
0910
0911 interp_elf_ex = kmalloc(sizeof(*interp_elf_ex), GFP_KERNEL);
0912 if (!interp_elf_ex) {
0913 retval = -ENOMEM;
0914 goto out_free_ph;
0915 }
0916
0917
0918 retval = elf_read(interpreter, interp_elf_ex,
0919 sizeof(*interp_elf_ex), 0);
0920 if (retval < 0)
0921 goto out_free_dentry;
0922
0923 break;
0924
0925 out_free_interp:
0926 kfree(elf_interpreter);
0927 goto out_free_ph;
0928 }
0929
0930 elf_ppnt = elf_phdata;
0931 for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
0932 switch (elf_ppnt->p_type) {
0933 case PT_GNU_STACK:
0934 if (elf_ppnt->p_flags & PF_X)
0935 executable_stack = EXSTACK_ENABLE_X;
0936 else
0937 executable_stack = EXSTACK_DISABLE_X;
0938 break;
0939
0940 case PT_LOPROC ... PT_HIPROC:
0941 retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
0942 bprm->file, false,
0943 &arch_state);
0944 if (retval)
0945 goto out_free_dentry;
0946 break;
0947 }
0948
0949
0950 if (interpreter) {
0951 retval = -ELIBBAD;
0952
0953 if (memcmp(interp_elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
0954 goto out_free_dentry;
0955
0956 if (!elf_check_arch(interp_elf_ex) ||
0957 elf_check_fdpic(interp_elf_ex))
0958 goto out_free_dentry;
0959
0960
0961 interp_elf_phdata = load_elf_phdrs(interp_elf_ex,
0962 interpreter);
0963 if (!interp_elf_phdata)
0964 goto out_free_dentry;
0965
0966
0967 elf_property_phdata = NULL;
0968 elf_ppnt = interp_elf_phdata;
0969 for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
0970 switch (elf_ppnt->p_type) {
0971 case PT_GNU_PROPERTY:
0972 elf_property_phdata = elf_ppnt;
0973 break;
0974
0975 case PT_LOPROC ... PT_HIPROC:
0976 retval = arch_elf_pt_proc(interp_elf_ex,
0977 elf_ppnt, interpreter,
0978 true, &arch_state);
0979 if (retval)
0980 goto out_free_dentry;
0981 break;
0982 }
0983 }
0984
0985 retval = parse_elf_properties(interpreter ?: bprm->file,
0986 elf_property_phdata, &arch_state);
0987 if (retval)
0988 goto out_free_dentry;
0989
/*
 * Allow arch code to reject the ELF at this point, whilst it's
 * still possible to return an error to the code that invoked
 * the exec syscall.
 */
0995 retval = arch_check_elf(elf_ex,
0996 !!interpreter, interp_elf_ex,
0997 &arch_state);
0998 if (retval)
0999 goto out_free_dentry;
1000
1001
1002 retval = begin_new_exec(bprm);
1003 if (retval)
1004 goto out_free_dentry;
1005
1006
1007
1008 SET_PERSONALITY2(*elf_ex, &arch_state);
1009 if (elf_read_implies_exec(*elf_ex, executable_stack))
1010 current->personality |= READ_IMPLIES_EXEC;
1011
1012 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1013 current->flags |= PF_RANDOMIZE;
1014
1015 setup_new_exec(bprm);
1016
1017
1018
1019 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
1020 executable_stack);
1021 if (retval < 0)
1022 goto out_free_dentry;
1023
1024 elf_bss = 0;
1025 elf_brk = 0;
1026
1027 start_code = ~0UL;
1028 end_code = 0;
1029 start_data = 0;
1030 end_data = 0;
1031
/* Now we do a little grungy work by mmapping the ELF image into
   the correct location in memory. */
1034 for(i = 0, elf_ppnt = elf_phdata;
1035 i < elf_ex->e_phnum; i++, elf_ppnt++) {
1036 int elf_prot, elf_flags;
1037 unsigned long k, vaddr;
1038 unsigned long total_size = 0;
1039 unsigned long alignment;
1040
1041 if (elf_ppnt->p_type != PT_LOAD)
1042 continue;
1043
1044 if (unlikely (elf_brk > elf_bss)) {
1045 unsigned long nbyte;
1046
/* There was a PT_LOAD segment with p_memsz > p_filesz
   before this one. Map anonymous pages, if needed,
   and clear the area.  */
1050 retval = set_brk(elf_bss + load_bias,
1051 elf_brk + load_bias,
1052 bss_prot);
1053 if (retval)
1054 goto out_free_dentry;
1055 nbyte = ELF_PAGEOFFSET(elf_bss);
1056 if (nbyte) {
1057 nbyte = ELF_MIN_ALIGN - nbyte;
1058 if (nbyte > elf_brk - elf_bss)
1059 nbyte = elf_brk - elf_bss;
1060 if (clear_user((void __user *)elf_bss +
1061 load_bias, nbyte)) {
/*
 * This bss-zeroing can fail if the ELF
 * file specifies odd protections. So
 * we don't check the return value.
 */
1067 }
1068 }
1069 }
1070
1071 elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
1072 !!interpreter, false);
1073
1074 elf_flags = MAP_PRIVATE;
1075
1076 vaddr = elf_ppnt->p_vaddr;
1077
/*
 * The first time through the loop, first_pt_load is true: the layout
 * is calculated. Once set, use MAP_FIXED since we know we've already
 * safely mapped the entire region with total_size.
 */
1083 if (!first_pt_load) {
1084 elf_flags |= MAP_FIXED;
1085 } else if (elf_ex->e_type == ET_EXEC) {
/*
 * This logic is run once for the first LOAD Program
 * Header for ET_EXEC binaries. No special handling
 * is needed.
 */
1091 elf_flags |= MAP_FIXED_NOREPLACE;
1092 } else if (elf_ex->e_type == ET_DYN) {
/*
 * This logic is run once for the first LOAD Program
 * Header for ET_DYN binaries to calculate the
 * randomization (load_bias) for all the LOAD
 * Program Headers.
 *
 * There are effectively two types of ET_DYN
 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
 * and loaders (ET_DYN without INTERP, since they
 * _are_ the ELF interpreter). The loaders must
 * be loaded away from programs since the program
 * may otherwise collide with the loader (especially
 * for ET_EXEC which does not have a randomized
 * position). For example to handle invocations of
 * "./ld.so someprog" to test out a new version of
 * the loader, the subsequent program that the
 * loader loads must avoid the loader itself, so
 * they cannot share the same load range. Sufficient
 * room for the brk must be allocated with the
 * loader as well, since brk must be available with
 * the loader.
 *
 * Therefore, programs are loaded offset from
 * ELF_ET_DYN_BASE and loaders are loaded into the
 * independently randomized mmap region (0 load_bias
 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
 */
1120 if (interpreter) {
1121 load_bias = ELF_ET_DYN_BASE;
1122 if (current->flags & PF_RANDOMIZE)
1123 load_bias += arch_mmap_rnd();
1124 alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
1125 if (alignment)
1126 load_bias &= ~(alignment - 1);
1127 elf_flags |= MAP_FIXED_NOREPLACE;
1128 } else
1129 load_bias = 0;
1130
/*
 * Since load_bias is used for all subsequent loading
 * calculations, we must lower it by the first vaddr
 * so that the remaining calculations based on the
 * ELF vaddrs will be correctly offset. The result
 * is then page aligned.
 */
1138 load_bias = ELF_PAGESTART(load_bias - vaddr);
1139
/*
 * Calculate the entire size of the ELF mapping (total_size),
 * used for the initial mapping, since load_addr_set is only
 * set to true once that first mapping has been performed.
 *
 * Note that this is only sensible when the LOAD segments are
 * contiguous (or overlapping). If used for LOADs that are far
 * apart, the higher LOAD segment would be truncated by the
 * vm_munmap() that elf_map() performs on the tail of the
 * initial reservation.
 */
1158 total_size = total_mapping_size(elf_phdata,
1159 elf_ex->e_phnum);
1160 if (!total_size) {
1161 retval = -EINVAL;
1162 goto out_free_dentry;
1163 }
1164 }
1165
1166 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
1167 elf_prot, elf_flags, total_size);
1168 if (BAD_ADDR(error)) {
1169 retval = IS_ERR((void *)error) ?
1170 PTR_ERR((void*)error) : -EINVAL;
1171 goto out_free_dentry;
1172 }
1173
1174 if (first_pt_load) {
1175 first_pt_load = 0;
1176 if (elf_ex->e_type == ET_DYN) {
1177 load_bias += error -
1178 ELF_PAGESTART(load_bias + vaddr);
1179 reloc_func_desc = load_bias;
1180 }
1181 }
1182
/*
 * Figure out which segment in the file contains the Program
 * Header table, and map to the associated virtual address.
 */
1187 if (elf_ppnt->p_offset <= elf_ex->e_phoff &&
1188 elf_ex->e_phoff < elf_ppnt->p_offset + elf_ppnt->p_filesz) {
1189 phdr_addr = elf_ex->e_phoff - elf_ppnt->p_offset +
1190 elf_ppnt->p_vaddr;
1191 }
1192
1193 k = elf_ppnt->p_vaddr;
1194 if ((elf_ppnt->p_flags & PF_X) && k < start_code)
1195 start_code = k;
1196 if (start_data < k)
1197 start_data = k;
1198
/*
 * Check to see if the section's size will overflow the
 * allowed task size. Note that p_filesz must always be
 * <= p_memsz so it is only necessary to check p_memsz.
 */
1204 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
1205 elf_ppnt->p_memsz > TASK_SIZE ||
1206 TASK_SIZE - elf_ppnt->p_memsz < k) {
1207
1208 retval = -EINVAL;
1209 goto out_free_dentry;
1210 }
1211
1212 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1213
1214 if (k > elf_bss)
1215 elf_bss = k;
1216 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1217 end_code = k;
1218 if (end_data < k)
1219 end_data = k;
1220 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1221 if (k > elf_brk) {
1222 bss_prot = elf_prot;
1223 elf_brk = k;
1224 }
1225 }
1226
1227 e_entry = elf_ex->e_entry + load_bias;
1228 phdr_addr += load_bias;
1229 elf_bss += load_bias;
1230 elf_brk += load_bias;
1231 start_code += load_bias;
1232 end_code += load_bias;
1233 start_data += load_bias;
1234 end_data += load_bias;
1235
/* Calling set_brk effectively mmaps the pages that we need
 * for the bss and break sections.  We must do this before
 * mapping in the interpreter, to make sure it doesn't wind
 * up getting placed where the bss needs to go.
 */
1241 retval = set_brk(elf_bss, elf_brk, bss_prot);
1242 if (retval)
1243 goto out_free_dentry;
1244 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
1245 retval = -EFAULT;
1246 goto out_free_dentry;
1247 }
1248
1249 if (interpreter) {
1250 elf_entry = load_elf_interp(interp_elf_ex,
1251 interpreter,
1252 load_bias, interp_elf_phdata,
1253 &arch_state);
1254 if (!IS_ERR((void *)elf_entry)) {
1255
1256
1257
1258
1259 interp_load_addr = elf_entry;
1260 elf_entry += interp_elf_ex->e_entry;
1261 }
1262 if (BAD_ADDR(elf_entry)) {
1263 retval = IS_ERR((void *)elf_entry) ?
1264 (int)elf_entry : -EINVAL;
1265 goto out_free_dentry;
1266 }
1267 reloc_func_desc = interp_load_addr;
1268
1269 allow_write_access(interpreter);
1270 fput(interpreter);
1271
1272 kfree(interp_elf_ex);
1273 kfree(interp_elf_phdata);
1274 } else {
1275 elf_entry = e_entry;
1276 if (BAD_ADDR(elf_entry)) {
1277 retval = -EINVAL;
1278 goto out_free_dentry;
1279 }
1280 }
1281
1282 kfree(elf_phdata);
1283
1284 set_binfmt(&elf_format);
1285
1286 #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
1287 retval = ARCH_SETUP_ADDITIONAL_PAGES(bprm, elf_ex, !!interpreter);
1288 if (retval < 0)
1289 goto out;
1290 #endif
1291
1292 retval = create_elf_tables(bprm, elf_ex, interp_load_addr,
1293 e_entry, phdr_addr);
1294 if (retval < 0)
1295 goto out;
1296
1297 mm = current->mm;
1298 mm->end_code = end_code;
1299 mm->start_code = start_code;
1300 mm->start_data = start_data;
1301 mm->end_data = end_data;
1302 mm->start_stack = bprm->p;
1303
1304 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
/*
 * For architectures with ELF randomization, when executing a
 * loader directly (i.e. no interpreter listed in ELF headers),
 * move the brk area out of the mmap region (since it grows up,
 * and may collide early with the stack in the same area). Use
 * the start of the brk area as the entropy base.
 */
1312 if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
1313 elf_ex->e_type == ET_DYN && !interpreter) {
1314 mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
1315 }
1316
1317 mm->brk = mm->start_brk = arch_randomize_brk(mm);
1318 #ifdef compat_brk_randomized
1319 current->brk_randomized = 1;
1320 #endif
1321 }
1322
1323 if (current->personality & MMAP_PAGE_ZERO) {
/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
   and some applications "depend" upon this behavior.
   Since we do not have the power to recompile these, we
   emulate the SVr4 behavior. Sigh. */
1328 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
1329 MAP_FIXED | MAP_PRIVATE, 0);
1330 }
1331
1332 regs = current_pt_regs();
1333 #ifdef ELF_PLAT_INIT
/*
 * The ABI may specify that certain registers be set up in special
 * ways (on i386 %edx is the address of a DT_FINI function, for
 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
 * that the e_entry point is the function descriptor not the actual
 * entry point.  This doesn't mean anything for the interpreter,
 * whose entry was computed above.  The ELF_PLAT_INIT macro performs
 * whatever initialization of the regs structure is required, as
 * well as any relocations to the function descriptor entries when
 * executing dynamically linked applications.
 */
1344 ELF_PLAT_INIT(regs, reloc_func_desc);
1345 #endif
1346
1347 finalize_exec(bprm);
1348 START_THREAD(elf_ex, regs, elf_entry, bprm->p);
1349 retval = 0;
1350 out:
1351 return retval;
1352
1353
1354 out_free_dentry:
1355 kfree(interp_elf_ex);
1356 kfree(interp_elf_phdata);
1357 allow_write_access(interpreter);
1358 if (interpreter)
1359 fput(interpreter);
1360 out_free_ph:
1361 kfree(elf_phdata);
1362 goto out;
1363 }
1364
1365 #ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
1368 static int load_elf_library(struct file *file)
1369 {
1370 struct elf_phdr *elf_phdata;
1371 struct elf_phdr *eppnt;
1372 unsigned long elf_bss, bss, len;
1373 int retval, error, i, j;
1374 struct elfhdr elf_ex;
1375
1376 error = -ENOEXEC;
1377 retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
1378 if (retval < 0)
1379 goto out;
1380
1381 if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
1382 goto out;
1383
1384
1385 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
1386 !elf_check_arch(&elf_ex) || !file->f_op->mmap)
1387 goto out;
1388 if (elf_check_fdpic(&elf_ex))
1389 goto out;
1390
1391
1392
1393 j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
1394
1395
1396 error = -ENOMEM;
1397 elf_phdata = kmalloc(j, GFP_KERNEL);
1398 if (!elf_phdata)
1399 goto out;
1400
1401 eppnt = elf_phdata;
1402 error = -ENOEXEC;
1403 retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
1404 if (retval < 0)
1405 goto out_free_ph;
1406
1407 for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
1408 if ((eppnt + i)->p_type == PT_LOAD)
1409 j++;
1410 if (j != 1)
1411 goto out_free_ph;
1412
1413 while (eppnt->p_type != PT_LOAD)
1414 eppnt++;
1415
1416
1417 error = vm_mmap(file,
1418 ELF_PAGESTART(eppnt->p_vaddr),
1419 (eppnt->p_filesz +
1420 ELF_PAGEOFFSET(eppnt->p_vaddr)),
1421 PROT_READ | PROT_WRITE | PROT_EXEC,
1422 MAP_FIXED_NOREPLACE | MAP_PRIVATE,
1423 (eppnt->p_offset -
1424 ELF_PAGEOFFSET(eppnt->p_vaddr)));
1425 if (error != ELF_PAGESTART(eppnt->p_vaddr))
1426 goto out_free_ph;
1427
1428 elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
1429 if (padzero(elf_bss)) {
1430 error = -EFAULT;
1431 goto out_free_ph;
1432 }
1433
1434 len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
1435 bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
1436 if (bss > len) {
1437 error = vm_brk(len, bss - len);
1438 if (error)
1439 goto out_free_ph;
1440 }
1441 error = 0;
1442
1443 out_free_ph:
1444 kfree(elf_phdata);
1445 out:
1446 return error;
1447 }
1448 #endif
1449
1450 #ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
1458
1459 struct memelfnote
1460 {
1461 const char *name;
1462 int type;
1463 unsigned int datasz;
1464 void *data;
1465 };
1466
1467 static int notesize(struct memelfnote *en)
1468 {
1469 int sz;
1470
1471 sz = sizeof(struct elf_note);
1472 sz += roundup(strlen(en->name) + 1, 4);
1473 sz += roundup(en->datasz, 4);
1474
1475 return sz;
1476 }
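/*
 * Both the note name and its descriptor are padded to 4-byte boundaries,
 * e.g. the "CORE" name (5 bytes including the NUL) contributes 8 bytes on
 * top of the elf_note header and the rounded-up payload size.
 */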
1477
1478 static int writenote(struct memelfnote *men, struct coredump_params *cprm)
1479 {
1480 struct elf_note en;
1481 en.n_namesz = strlen(men->name) + 1;
1482 en.n_descsz = men->datasz;
1483 en.n_type = men->type;
1484
1485 return dump_emit(cprm, &en, sizeof(en)) &&
1486 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1487 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
1488 }
1489
1490 static void fill_elf_header(struct elfhdr *elf, int segs,
1491 u16 machine, u32 flags)
1492 {
1493 memset(elf, 0, sizeof(*elf));
1494
1495 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1496 elf->e_ident[EI_CLASS] = ELF_CLASS;
1497 elf->e_ident[EI_DATA] = ELF_DATA;
1498 elf->e_ident[EI_VERSION] = EV_CURRENT;
1499 elf->e_ident[EI_OSABI] = ELF_OSABI;
1500
1501 elf->e_type = ET_CORE;
1502 elf->e_machine = machine;
1503 elf->e_version = EV_CURRENT;
1504 elf->e_phoff = sizeof(struct elfhdr);
1505 elf->e_flags = flags;
1506 elf->e_ehsize = sizeof(struct elfhdr);
1507 elf->e_phentsize = sizeof(struct elf_phdr);
1508 elf->e_phnum = segs;
1509 }
1510
1511 static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
1512 {
1513 phdr->p_type = PT_NOTE;
1514 phdr->p_offset = offset;
1515 phdr->p_vaddr = 0;
1516 phdr->p_paddr = 0;
1517 phdr->p_filesz = sz;
1518 phdr->p_memsz = 0;
1519 phdr->p_flags = 0;
1520 phdr->p_align = 0;
1521 }
1522
1523 static void fill_note(struct memelfnote *note, const char *name, int type,
1524 unsigned int sz, void *data)
1525 {
1526 note->name = name;
1527 note->type = type;
1528 note->datasz = sz;
1529 note->data = data;
1530 }
1531
/*
 * Fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
1536 static void fill_prstatus(struct elf_prstatus_common *prstatus,
1537 struct task_struct *p, long signr)
1538 {
1539 prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
1540 prstatus->pr_sigpend = p->pending.signal.sig[0];
1541 prstatus->pr_sighold = p->blocked.sig[0];
1542 rcu_read_lock();
1543 prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1544 rcu_read_unlock();
1545 prstatus->pr_pid = task_pid_vnr(p);
1546 prstatus->pr_pgrp = task_pgrp_vnr(p);
1547 prstatus->pr_sid = task_session_vnr(p);
1548 if (thread_group_leader(p)) {
1549 struct task_cputime cputime;
1550
1551
1552
1553
1554
1555 thread_group_cputime(p, &cputime);
1556 prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
1557 prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
1558 } else {
1559 u64 utime, stime;
1560
1561 task_cputime(p, &utime, &stime);
1562 prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
1563 prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
1564 }
1565
1566 prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
1567 prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
1568 }
1569
1570 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
1571 struct mm_struct *mm)
1572 {
1573 const struct cred *cred;
1574 unsigned int i, len;
1575 unsigned int state;
1576
1577
1578 memset(psinfo, 0, sizeof(struct elf_prpsinfo));
1579
1580 len = mm->arg_end - mm->arg_start;
1581 if (len >= ELF_PRARGSZ)
1582 len = ELF_PRARGSZ-1;
1583 if (copy_from_user(&psinfo->pr_psargs,
1584 (const char __user *)mm->arg_start, len))
1585 return -EFAULT;
1586 for(i = 0; i < len; i++)
1587 if (psinfo->pr_psargs[i] == 0)
1588 psinfo->pr_psargs[i] = ' ';
1589 psinfo->pr_psargs[len] = 0;
1590
1591 rcu_read_lock();
1592 psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
1593 rcu_read_unlock();
1594 psinfo->pr_pid = task_pid_vnr(p);
1595 psinfo->pr_pgrp = task_pgrp_vnr(p);
1596 psinfo->pr_sid = task_session_vnr(p);
1597
1598 state = READ_ONCE(p->__state);
1599 i = state ? ffz(~state) + 1 : 0;
1600 psinfo->pr_state = i;
1601 psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
1602 psinfo->pr_zomb = psinfo->pr_sname == 'Z';
1603 psinfo->pr_nice = task_nice(p);
1604 psinfo->pr_flag = p->flags;
1605 rcu_read_lock();
1606 cred = __task_cred(p);
1607 SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
1608 SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
1609 rcu_read_unlock();
1610 get_task_comm(psinfo->pr_fname, p);
1611
1612 return 0;
1613 }
1614
1615 static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1616 {
1617 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1618 int i = 0;
1619 do
1620 i += 2;
1621 while (auxv[i - 2] != AT_NULL);
1622 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1623 }
1624
1625 static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
1626 const kernel_siginfo_t *siginfo)
1627 {
1628 copy_siginfo_to_external(csigdata, siginfo);
1629 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1630 }
1631
1632 #define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
1644 static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm)
1645 {
1646 unsigned count, size, names_ofs, remaining, n;
1647 user_long_t *data;
1648 user_long_t *start_end_ofs;
1649 char *name_base, *name_curpos;
1650 int i;
1651
1652
1653 count = cprm->vma_count;
1654 if (count > UINT_MAX / 64)
1655 return -EINVAL;
1656 size = count * 64;
1657
1658 names_ofs = (2 + 3 * count) * sizeof(data[0]);
1659 alloc:
1660 if (size >= MAX_FILE_NOTE_SIZE)
1661 return -EINVAL;
1662 size = round_up(size, PAGE_SIZE);
1663
1664
1665
1666
1667 data = kvmalloc(size, GFP_KERNEL);
1668 if (ZERO_OR_NULL_PTR(data))
1669 return -ENOMEM;
1670
1671 start_end_ofs = data + 2;
1672 name_base = name_curpos = ((char *)data) + names_ofs;
1673 remaining = size - names_ofs;
1674 count = 0;
1675 for (i = 0; i < cprm->vma_count; i++) {
1676 struct core_vma_metadata *m = &cprm->vma_meta[i];
1677 struct file *file;
1678 const char *filename;
1679
1680 file = m->file;
1681 if (!file)
1682 continue;
1683 filename = file_path(file, name_curpos, remaining);
1684 if (IS_ERR(filename)) {
1685 if (PTR_ERR(filename) == -ENAMETOOLONG) {
1686 kvfree(data);
1687 size = size * 5 / 4;
1688 goto alloc;
1689 }
1690 continue;
1691 }
1692
1693
1694
1695 n = (name_curpos + remaining) - filename;
1696 remaining = filename - name_curpos;
1697 memmove(name_curpos, filename, n);
1698 name_curpos += n;
1699
1700 *start_end_ofs++ = m->start;
1701 *start_end_ofs++ = m->end;
1702 *start_end_ofs++ = m->pgoff;
1703 count++;
1704 }
1705
1706
1707 data[0] = count;
1708 data[1] = PAGE_SIZE;
1709
1710
1711
1712
1713 n = cprm->vma_count - count;
1714 if (n != 0) {
1715 unsigned shift_bytes = n * 3 * sizeof(data[0]);
1716 memmove(name_base - shift_bytes, name_base,
1717 name_curpos - name_base);
1718 name_curpos -= shift_bytes;
1719 }
1720
1721 size = name_curpos - (char *)data;
1722 fill_note(note, "CORE", NT_FILE, size, data);
1723 return 0;
1724 }
1725
1726 #ifdef CORE_DUMP_USE_REGSET
1727 #include <linux/regset.h>
1728
1729 struct elf_thread_core_info {
1730 struct elf_thread_core_info *next;
1731 struct task_struct *task;
1732 struct elf_prstatus prstatus;
1733 struct memelfnote notes[];
1734 };
1735
1736 struct elf_note_info {
1737 struct elf_thread_core_info *thread;
1738 struct memelfnote psinfo;
1739 struct memelfnote signote;
1740 struct memelfnote auxv;
1741 struct memelfnote files;
1742 user_siginfo_t csigdata;
1743 size_t size;
1744 int thread_notes;
1745 };
1746
/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory.  On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
1752 static void do_thread_regset_writeback(struct task_struct *task,
1753 const struct user_regset *regset)
1754 {
1755 if (regset->writeback)
1756 regset->writeback(task, regset, 1);
1757 }
1758
1759 #ifndef PRSTATUS_SIZE
1760 #define PRSTATUS_SIZE sizeof(struct elf_prstatus)
1761 #endif
1762
1763 #ifndef SET_PR_FPVALID
1764 #define SET_PR_FPVALID(S) ((S)->pr_fpvalid = 1)
1765 #endif
1766
1767 static int fill_thread_core_info(struct elf_thread_core_info *t,
1768 const struct user_regset_view *view,
1769 long signr, struct elf_note_info *info)
1770 {
1771 unsigned int note_iter, view_iter;

/*
 * NT_PRSTATUS is the one special case, because the regset data
 * goes into the pr_reg field inside the note contents, rather
 * than being the whole note contents.  We fill the regset in here.
 * We assume that regset 0 is NT_PRSTATUS.
 */
1779 fill_prstatus(&t->prstatus.common, t->task, signr);
1780 regset_get(t->task, &view->regsets[0],
1781 sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg);
1782
1783 fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
1784 PRSTATUS_SIZE, &t->prstatus);
1785 info->size += notesize(&t->notes[0]);
1786
1787 do_thread_regset_writeback(t->task, &view->regsets[0]);
1788
1789
1790
1791
1792
1793 note_iter = 1;
1794 for (view_iter = 1; view_iter < view->n; ++view_iter) {
1795 const struct user_regset *regset = &view->regsets[view_iter];
1796 int note_type = regset->core_note_type;
1797 bool is_fpreg = note_type == NT_PRFPREG;
1798 void *data;
1799 int ret;
1800
1801 do_thread_regset_writeback(t->task, regset);
1802 if (!note_type)
1803 continue;
1804 if (regset->active && regset->active(t->task, regset) <= 0)
1805 continue;
1806
1807 ret = regset_get_alloc(t->task, regset, ~0U, &data);
1808 if (ret < 0)
1809 continue;
1810
1811 if (WARN_ON_ONCE(note_iter >= info->thread_notes))
1812 break;
1813
1814 if (is_fpreg)
1815 SET_PR_FPVALID(&t->prstatus);
1816
1817 fill_note(&t->notes[note_iter], is_fpreg ? "CORE" : "LINUX",
1818 note_type, ret, data);
1819
1820 info->size += notesize(&t->notes[note_iter]);
1821 note_iter++;
1822 }
1823
1824 return 1;
1825 }
1826
1827 static int fill_note_info(struct elfhdr *elf, int phdrs,
1828 struct elf_note_info *info,
1829 struct coredump_params *cprm)
1830 {
1831 struct task_struct *dump_task = current;
1832 const struct user_regset_view *view = task_user_regset_view(dump_task);
1833 struct elf_thread_core_info *t;
1834 struct elf_prpsinfo *psinfo;
1835 struct core_thread *ct;
1836 unsigned int i;
1837
1838 info->size = 0;
1839 info->thread = NULL;
1840
1841 psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
1842 if (psinfo == NULL) {
1843 info->psinfo.data = NULL;
1844 return 0;
1845 }
1846
1847 fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);
1848
1849
1850
1851
1852 info->thread_notes = 0;
1853 for (i = 0; i < view->n; ++i)
1854 if (view->regsets[i].core_note_type != 0)
1855 ++info->thread_notes;
1856
1857
1858
1859
1860
1861 if (unlikely(info->thread_notes == 0) ||
1862 unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
1863 WARN_ON(1);
1864 return 0;
1865 }
1866
1867
1868
1869
1870 fill_elf_header(elf, phdrs,
1871 view->e_machine, view->e_flags);
1872
1873
1874
1875
1876 for (ct = &dump_task->signal->core_state->dumper; ct; ct = ct->next) {
1877 t = kzalloc(offsetof(struct elf_thread_core_info,
1878 notes[info->thread_notes]),
1879 GFP_KERNEL);
1880 if (unlikely(!t))
1881 return 0;
1882
1883 t->task = ct->task;
1884 if (ct->task == dump_task || !info->thread) {
1885 t->next = info->thread;
1886 info->thread = t;
1887 } else {
1888
1889
1890
1891
1892 t->next = info->thread->next;
1893 info->thread->next = t;
1894 }
1895 }
1896
1897
1898
1899
1900 for (t = info->thread; t != NULL; t = t->next)
1901 if (!fill_thread_core_info(t, view, cprm->siginfo->si_signo, info))
1902 return 0;
1903
1904
1905
1906
1907 fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
1908 info->size += notesize(&info->psinfo);
1909
1910 fill_siginfo_note(&info->signote, &info->csigdata, cprm->siginfo);
1911 info->size += notesize(&info->signote);
1912
1913 fill_auxv_note(&info->auxv, current->mm);
1914 info->size += notesize(&info->auxv);
1915
1916 if (fill_files_note(&info->files, cprm) == 0)
1917 info->size += notesize(&info->files);
1918
1919 return 1;
1920 }
1921
1922 static size_t get_note_info_size(struct elf_note_info *info)
1923 {
1924 return info->size;
1925 }
1926
/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
1931 static int write_note_info(struct elf_note_info *info,
1932 struct coredump_params *cprm)
1933 {
1934 bool first = true;
1935 struct elf_thread_core_info *t = info->thread;
1936
1937 do {
1938 int i;
1939
1940 if (!writenote(&t->notes[0], cprm))
1941 return 0;
1942
1943 if (first && !writenote(&info->psinfo, cprm))
1944 return 0;
1945 if (first && !writenote(&info->signote, cprm))
1946 return 0;
1947 if (first && !writenote(&info->auxv, cprm))
1948 return 0;
1949 if (first && info->files.data &&
1950 !writenote(&info->files, cprm))
1951 return 0;
1952
1953 for (i = 1; i < info->thread_notes; ++i)
1954 if (t->notes[i].data &&
1955 !writenote(&t->notes[i], cprm))
1956 return 0;
1957
1958 first = false;
1959 t = t->next;
1960 } while (t);
1961
1962 return 1;
1963 }
1964
1965 static void free_note_info(struct elf_note_info *info)
1966 {
1967 struct elf_thread_core_info *threads = info->thread;
1968 while (threads) {
1969 unsigned int i;
1970 struct elf_thread_core_info *t = threads;
1971 threads = t->next;
1972 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1973 for (i = 1; i < info->thread_notes; ++i)
1974 kfree(t->notes[i].data);
1975 kfree(t);
1976 }
1977 kfree(info->psinfo.data);
1978 kvfree(info->files.data);
1979 }
1980
1981 #else
1982
1983
1984 struct elf_thread_status
1985 {
1986 struct list_head list;
1987 struct elf_prstatus prstatus;
1988 elf_fpregset_t fpu;
1989 struct task_struct *thread;
1990 struct memelfnote notes[3];
1991 int num_notes;
1992 };
1993
/*
 * In order to add the specific thread information for the elf
 * file format, we need to keep a linked list of every thread's
 * pr_status and then create a single section for them in the
 * final core file.
 */
1999 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
2000 {
2001 int sz = 0;
2002 struct task_struct *p = t->thread;
2003 t->num_notes = 0;
2004
2005 fill_prstatus(&t->prstatus.common, p, signr);
2006 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
2007
2008 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
2009 &(t->prstatus));
2010 t->num_notes++;
2011 sz += notesize(&t->notes[0]);
2012
2013 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
2014 &t->fpu))) {
2015 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
2016 &(t->fpu));
2017 t->num_notes++;
2018 sz += notesize(&t->notes[1]);
2019 }
2020 return sz;
2021 }
2022
2023 struct elf_note_info {
2024 struct memelfnote *notes;
2025 struct memelfnote *notes_files;
2026 struct elf_prstatus *prstatus;
2027 struct elf_prpsinfo *psinfo;
2028 struct list_head thread_list;
2029 elf_fpregset_t *fpu;
2030 user_siginfo_t csigdata;
2031 int thread_status_size;
2032 int numnote;
2033 };
2034
2035 static int elf_note_info_init(struct elf_note_info *info)
2036 {
2037 memset(info, 0, sizeof(*info));
2038 INIT_LIST_HEAD(&info->thread_list);
2039
2040
2041 info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
2042 if (!info->notes)
2043 return 0;
2044 info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
2045 if (!info->psinfo)
2046 return 0;
2047 info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
2048 if (!info->prstatus)
2049 return 0;
2050 info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
2051 if (!info->fpu)
2052 return 0;
2053 return 1;
2054 }
2055
2056 static int fill_note_info(struct elfhdr *elf, int phdrs,
2057 struct elf_note_info *info,
2058 struct coredump_params *cprm)
2059 {
2060 struct core_thread *ct;
2061 struct elf_thread_status *ets;
2062
2063 if (!elf_note_info_init(info))
2064 return 0;
2065
2066 for (ct = current->signal->core_state->dumper.next;
2067 ct; ct = ct->next) {
2068 ets = kzalloc(sizeof(*ets), GFP_KERNEL);
2069 if (!ets)
2070 return 0;
2071
2072 ets->thread = ct->task;
2073 list_add(&ets->list, &info->thread_list);
2074 }
2075
2076 list_for_each_entry(ets, &info->thread_list, list) {
2077 int sz;
2078
2079 sz = elf_dump_thread_status(cprm->siginfo->si_signo, ets);
2080 info->thread_status_size += sz;
2081 }
2082
2083 memset(info->prstatus, 0, sizeof(*info->prstatus));
2084 fill_prstatus(&info->prstatus->common, current, cprm->siginfo->si_signo);
2085 elf_core_copy_regs(&info->prstatus->pr_reg, cprm->regs);
2086
2087
2088 fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);
2089
2090
2091
2092
2093
2094
2095 fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
2096 sizeof(*info->prstatus), info->prstatus);
2097 fill_psinfo(info->psinfo, current->group_leader, current->mm);
2098 fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
2099 sizeof(*info->psinfo), info->psinfo);
2100
2101 fill_siginfo_note(info->notes + 2, &info->csigdata, cprm->siginfo);
2102 fill_auxv_note(info->notes + 3, current->mm);
2103 info->numnote = 4;
2104
2105 if (fill_files_note(info->notes + info->numnote, cprm) == 0) {
2106 info->notes_files = info->notes + info->numnote;
2107 info->numnote++;
2108 }
2109
2110
2111 info->prstatus->pr_fpvalid =
2112 elf_core_copy_task_fpregs(current, cprm->regs, info->fpu);
2113 if (info->prstatus->pr_fpvalid)
2114 fill_note(info->notes + info->numnote++,
2115 "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
2116 return 1;
2117 }
2118
2119 static size_t get_note_info_size(struct elf_note_info *info)
2120 {
2121 int sz = 0;
2122 int i;
2123
2124 for (i = 0; i < info->numnote; i++)
2125 sz += notesize(info->notes + i);
2126
2127 sz += info->thread_status_size;
2128
2129 return sz;
2130 }
2131
2132 static int write_note_info(struct elf_note_info *info,
2133 struct coredump_params *cprm)
2134 {
2135 struct elf_thread_status *ets;
2136 int i;
2137
2138 for (i = 0; i < info->numnote; i++)
2139 if (!writenote(info->notes + i, cprm))
2140 return 0;
2141
2142
2143 list_for_each_entry(ets, &info->thread_list, list) {
2144 for (i = 0; i < ets->num_notes; i++)
2145 if (!writenote(&ets->notes[i], cprm))
2146 return 0;
2147 }
2148
2149 return 1;
2150 }
2151
2152 static void free_note_info(struct elf_note_info *info)
2153 {
2154 while (!list_empty(&info->thread_list)) {
2155 struct list_head *tmp = info->thread_list.next;
2156 list_del(tmp);
2157 kfree(list_entry(tmp, struct elf_thread_status, list));
2158 }
2159
2160
2161 if (info->notes_files)
2162 kvfree(info->notes_files->data);
2163
2164 kfree(info->prstatus);
2165 kfree(info->psinfo);
2166 kfree(info->notes);
2167 kfree(info->fpu);
2168 }
2169
2170 #endif
2171
2172 static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
2173 elf_addr_t e_shoff, int segs)
2174 {
2175 elf->e_shoff = e_shoff;
2176 elf->e_shentsize = sizeof(*shdr4extnum);
2177 elf->e_shnum = 1;
2178 elf->e_shstrndx = SHN_UNDEF;
2179
2180 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2181
2182 shdr4extnum->sh_type = SHT_NULL;
2183 shdr4extnum->sh_size = elf->e_shnum;
2184 shdr4extnum->sh_link = elf->e_shstrndx;
2185 shdr4extnum->sh_info = segs;
2186 }
2187
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
2195 static int elf_core_dump(struct coredump_params *cprm)
2196 {
2197 int has_dumped = 0;
2198 int segs, i;
2199 struct elfhdr elf;
2200 loff_t offset = 0, dataoff;
2201 struct elf_note_info info = { };
2202 struct elf_phdr *phdr4note = NULL;
2203 struct elf_shdr *shdr4extnum = NULL;
2204 Elf_Half e_phnum;
2205 elf_addr_t e_shoff;
2206
2207
2208
2209
2210
2211 segs = cprm->vma_count + elf_core_extra_phdrs();
2212
2213
2214 segs++;
2215
/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
 * this, the kernel supports extended numbering. Have a look at
 * include/linux/elf.h for further information. */
2219 e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
2220
/*
 * Collect all the non-memory information about the process for the
 * notes.  This also sets up the file header.
 */
2225 if (!fill_note_info(&elf, e_phnum, &info, cprm))
2226 goto end_coredump;
2227
2228 has_dumped = 1;
2229
2230 offset += sizeof(elf);
2231 offset += segs * sizeof(struct elf_phdr);
2232
2233
2234 {
2235 size_t sz = get_note_info_size(&info);
2236
2237
2238 sz += elf_coredump_extra_notes_size();
2239
2240 phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
2241 if (!phdr4note)
2242 goto end_coredump;
2243
2244 fill_elf_note_phdr(phdr4note, sz, offset);
2245 offset += sz;
2246 }
2247
2248 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2249
2250 offset += cprm->vma_data_size;
2251 offset += elf_core_extra_data_size();
2252 e_shoff = offset;
2253
2254 if (e_phnum == PN_XNUM) {
2255 shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
2256 if (!shdr4extnum)
2257 goto end_coredump;
2258 fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
2259 }
2260
2261 offset = dataoff;
2262
2263 if (!dump_emit(cprm, &elf, sizeof(elf)))
2264 goto end_coredump;
2265
2266 if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
2267 goto end_coredump;
2268
2269
2270 for (i = 0; i < cprm->vma_count; i++) {
2271 struct core_vma_metadata *meta = cprm->vma_meta + i;
2272 struct elf_phdr phdr;
2273
2274 phdr.p_type = PT_LOAD;
2275 phdr.p_offset = offset;
2276 phdr.p_vaddr = meta->start;
2277 phdr.p_paddr = 0;
2278 phdr.p_filesz = meta->dump_size;
2279 phdr.p_memsz = meta->end - meta->start;
2280 offset += phdr.p_filesz;
2281 phdr.p_flags = 0;
2282 if (meta->flags & VM_READ)
2283 phdr.p_flags |= PF_R;
2284 if (meta->flags & VM_WRITE)
2285 phdr.p_flags |= PF_W;
2286 if (meta->flags & VM_EXEC)
2287 phdr.p_flags |= PF_X;
2288 phdr.p_align = ELF_EXEC_PAGESIZE;
2289
2290 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
2291 goto end_coredump;
2292 }
2293
2294 if (!elf_core_write_extra_phdrs(cprm, offset))
2295 goto end_coredump;
2296
2297
2298 if (!write_note_info(&info, cprm))
2299 goto end_coredump;
2300
2301
2302 if (elf_coredump_extra_notes_write(cprm))
2303 goto end_coredump;
2304
2305
2306 dump_skip_to(cprm, dataoff);
2307
2308 for (i = 0; i < cprm->vma_count; i++) {
2309 struct core_vma_metadata *meta = cprm->vma_meta + i;
2310
2311 if (!dump_user_range(cprm, meta->start, meta->dump_size))
2312 goto end_coredump;
2313 }
2314
2315 if (!elf_core_write_extra_data(cprm))
2316 goto end_coredump;
2317
2318 if (e_phnum == PN_XNUM) {
2319 if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
2320 goto end_coredump;
2321 }
2322
2323 end_coredump:
2324 free_note_info(&info);
2325 kfree(shdr4extnum);
2326 kfree(phdr4note);
2327 return has_dumped;
2328 }
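/*
 * The resulting core file layout is: the ELF header, one PT_NOTE program
 * header followed by a PT_LOAD header per dumped VMA (plus any arch extra
 * phdrs), the note data, then the page-aligned memory contents of each VMA
 * at the offsets recorded in their headers.  When more than PN_XNUM segments
 * exist, a trailing section header carries the real segment count.
 */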
2329
2330 #endif
2331
2332 static int __init init_elf_binfmt(void)
2333 {
2334 register_binfmt(&elf_format);
2335 return 0;
2336 }
2337
2338 static void __exit exit_elf_binfmt(void)
2339 {
2340
2341 unregister_binfmt(&elf_format);
2342 }
2343
2344 core_initcall(init_elf_binfmt);
2345 module_exit(exit_elf_binfmt);
2346 MODULE_LICENSE("GPL");
2347
2348 #ifdef CONFIG_BINFMT_ELF_KUNIT_TEST
2349 #include "binfmt_elf_test.c"
2350 #endif