/* (removed: LXR web-viewer navigation residue — not part of the source file) */
0001 /*
0002  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
0003  * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
0004  * Licensed under the GPL
0005  */
0006 
0007 #include <linux/mm.h>
0008 #include <linux/sched.h>
0009 #include <linux/slab.h>
0010 #include <asm/pgalloc.h>
0011 #include <asm/pgtable.h>
0012 #include <asm/sections.h>
0013 #include <as-layout.h>
0014 #include <os.h>
0015 #include <skas.h>
0016 
0017 static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
0018              unsigned long kernel)
0019 {
0020     pgd_t *pgd;
0021     pud_t *pud;
0022     pmd_t *pmd;
0023     pte_t *pte;
0024 
0025     pgd = pgd_offset(mm, proc);
0026     pud = pud_alloc(mm, pgd, proc);
0027     if (!pud)
0028         goto out;
0029 
0030     pmd = pmd_alloc(mm, pud, proc);
0031     if (!pmd)
0032         goto out_pmd;
0033 
0034     pte = pte_alloc_map(mm, pmd, proc);
0035     if (!pte)
0036         goto out_pte;
0037 
0038     *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
0039     *pte = pte_mkread(*pte);
0040     return 0;
0041 
0042  out_pte:
0043     pmd_free(mm, pmd);
0044  out_pmd:
0045     pud_free(mm, pud);
0046  out:
0047     return -ENOMEM;
0048 }
0049 
0050 int init_new_context(struct task_struct *task, struct mm_struct *mm)
0051 {
0052     struct mm_context *from_mm = NULL;
0053     struct mm_context *to_mm = &mm->context;
0054     unsigned long stack = 0;
0055     int ret = -ENOMEM;
0056 
0057     stack = get_zeroed_page(GFP_KERNEL);
0058     if (stack == 0)
0059         goto out;
0060 
0061     to_mm->id.stack = stack;
0062     if (current->mm != NULL && current->mm != &init_mm)
0063         from_mm = &current->mm->context;
0064 
0065     block_signals();
0066     if (from_mm)
0067         to_mm->id.u.pid = copy_context_skas0(stack,
0068                              from_mm->id.u.pid);
0069     else to_mm->id.u.pid = start_userspace(stack);
0070     unblock_signals();
0071 
0072     if (to_mm->id.u.pid < 0) {
0073         ret = to_mm->id.u.pid;
0074         goto out_free;
0075     }
0076 
0077     ret = init_new_ldt(to_mm, from_mm);
0078     if (ret < 0) {
0079         printk(KERN_ERR "init_new_context_skas - init_ldt"
0080                " failed, errno = %d\n", ret);
0081         goto out_free;
0082     }
0083 
0084     return 0;
0085 
0086  out_free:
0087     if (to_mm->id.stack != 0)
0088         free_page(to_mm->id.stack);
0089  out:
0090     return ret;
0091 }
0092 
0093 void uml_setup_stubs(struct mm_struct *mm)
0094 {
0095     int err, ret;
0096 
0097     ret = init_stub_pte(mm, STUB_CODE,
0098                 (unsigned long) __syscall_stub_start);
0099     if (ret)
0100         goto out;
0101 
0102     ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
0103     if (ret)
0104         goto out;
0105 
0106     mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
0107     mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);
0108 
0109     /* dup_mmap already holds mmap_sem */
0110     err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
0111                       VM_READ | VM_MAYREAD | VM_EXEC |
0112                       VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
0113                       mm->context.stub_pages);
0114     if (err) {
0115         printk(KERN_ERR "install_special_mapping returned %d\n", err);
0116         goto out;
0117     }
0118     return;
0119 
0120 out:
0121     force_sigsegv(SIGSEGV, current);
0122 }
0123 
0124 void arch_exit_mmap(struct mm_struct *mm)
0125 {
0126     pte_t *pte;
0127 
0128     pte = virt_to_pte(mm, STUB_CODE);
0129     if (pte != NULL)
0130         pte_clear(mm, STUB_CODE, pte);
0131 
0132     pte = virt_to_pte(mm, STUB_DATA);
0133     if (pte == NULL)
0134         return;
0135 
0136     pte_clear(mm, STUB_DATA, pte);
0137 }
0138 
0139 void destroy_context(struct mm_struct *mm)
0140 {
0141     struct mm_context *mmu = &mm->context;
0142 
0143     /*
0144      * If init_new_context wasn't called, this will be
0145      * zero, resulting in a kill(0), which will result in the
0146      * whole UML suddenly dying.  Also, cover negative and
0147      * 1 cases, since they shouldn't happen either.
0148      */
0149     if (mmu->id.u.pid < 2) {
0150         printk(KERN_ERR "corrupt mm_context - pid = %d\n",
0151                mmu->id.u.pid);
0152         return;
0153     }
0154     os_kill_ptraced_process(mmu->id.u.pid, 1);
0155 
0156     free_page(mmu->id.stack);
0157     free_ldt(mmu);
0158 }