/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

int do_set_thread_area(struct user_desc *info)
{
    int ret;
    u32 cpu;

    cpu = get_cpu();
    ret = os_set_thread_area(info, userspace_pid[cpu]);
    put_cpu();

    if (ret)
        printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
               "index = %d\n", ret, info->entry_number);

    return ret;
}

int do_get_thread_area(struct user_desc *info)
{
    int ret;
    u32 cpu;

    cpu = get_cpu();
    ret = os_get_thread_area(info, userspace_pid[cpu]);
    put_cpu();

    if (ret)
        printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
               "index = %d\n", ret, info->entry_number);

    return ret;
}
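
/*
 * Both wrappers pin the caller to its current CPU with get_cpu()/put_cpu()
 * so the task cannot migrate while userspace_pid[cpu] is read. A minimal
 * sketch of that pattern (illustrative; "per_cpu_data" and use() are
 * hypothetical names, not part of this file):
 */
#if 0
u32 cpu = get_cpu();    /* disables preemption and returns this CPU's id */
use(per_cpu_data[cpu]); /* safe: the task cannot migrate while pinned */
put_cpu();              /* re-enables preemption */
#endif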

/*
 * get_free_idx: get a yet-unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing the GDT_ENTRY_TLS_* macros) and nowhere
 * else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
    struct thread_struct *t = &task->thread;
    int idx;

    for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
        if (!t->arch.tls_array[idx].present)
            return idx + GDT_ENTRY_TLS_MIN;
    return -ESRCH;
}
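
/*
 * For scale: on i386, GDT_ENTRY_TLS_MIN is 6 and GDT_ENTRY_TLS_ENTRIES is 3,
 * so the indexes handed out here are 6, 7 or 8, i.e. the three TLS slots
 * reserved in the GDT.
 */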

static inline void clear_user_desc(struct user_desc *info)
{
    /* Postcondition: LDT_empty(info) returns true. */
    memset(info, 0, sizeof(*info));

    /*
     * Check LDT_empty or the i386 sys_get_thread_area code: we do indeed
     * obtain an empty user_desc this way.
     */
    info->read_exec_only = 1;
    info->seg_not_present = 1;
}
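
/*
 * For reference, LDT_empty() tests for exactly the state produced above. A
 * sketch of the i386 check (reproduced from memory; the precise field list
 * may vary between kernel versions):
 */
#if 0
#define LDT_empty(info)                    \
    ((info)->base_addr       == 0   &&     \
     (info)->limit           == 0   &&     \
     (info)->contents        == 0   &&     \
     (info)->read_exec_only  == 1   &&     \
     (info)->seg_32bit       == 0   &&     \
     (info)->limit_in_pages  == 0   &&     \
     (info)->seg_not_present == 1   &&     \
     (info)->useable         == 0)
#endif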

#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
    int ret = 0;
    int idx;

    for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
        struct uml_tls_struct *curr =
            &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

        /*
         * If the entry is not present and was never flushed, clear it
         * here and push the cleared descriptor to the host below. If it
         * was already flushed, the host copy is already empty and we
         * can skip it.
         */
        if (!curr->present) {
            if (!curr->flushed) {
                clear_user_desc(&curr->tls);
                curr->tls.entry_number = idx;
            } else {
                WARN_ON(!LDT_empty(&curr->tls));
                continue;
            }
        }

        if (!(flags & O_FORCE) && curr->flushed)
            continue;

        ret = do_set_thread_area(&curr->tls);
        if (ret)
            goto out;

        curr->flushed = 1;
    }
out:
    return ret;
}
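
/*
 * O_FORCE makes load_TLS() rewrite even entries that were already flushed;
 * without it, only stale (unflushed) entries are pushed to the host. Two
 * call styles, for illustration (only the O_FORCE form appears in this
 * file, in arch_switch_tls() below):
 */
#if 0
load_TLS(O_FORCE, to);  /* full reload, e.g. on a context switch */
load_TLS(0, to);        /* lazy: skip entries already flushed */
#endif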

/*
 * Check whether the new process needs a TLS flush, i.e. whether any
 * descriptors have not yet been flushed to the host (present or not; a
 * cleared descriptor may still need to be pushed out).
 */
static inline int needs_TLS_update(struct task_struct *task)
{
    int i;
    int ret = 0;

    for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
        struct uml_tls_struct *curr =
            &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

        /*
         * Can't test curr->present: we may need to clear a descriptor
         * which previously had a value.
         */
        if (curr->flushed)
            continue;
        ret = 1;
        break;
    }
    return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and let the first switch_to do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
    int i;

    for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
        struct uml_tls_struct *curr =
            &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

        /*
         * Still correct to do this; if the entry wasn't present on the
         * host it will simply stay flushed, as it was.
         */
        if (!curr->present)
            continue;

        curr->flushed = 0;
    }
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have
 * a common host process, so this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support), this wouldn't be needed.
 *
 * And this won't be needed when (and if) we add support for the host SKAS
 * patch.
 */

int arch_switch_tls(struct task_struct *to)
{
    if (!host_supports_tls)
        return 0;

    /*
     * We have no need whatsoever to switch TLS for kernel threads; beyond
     * that, doing so would also result in us calling os_set_thread_area
     * with userspace_pid[cpu] == 0, which gives an error.
     */
    if (likely(to->mm))
        return load_TLS(O_FORCE, to);

    return 0;
}
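
/*
 * A hedged sketch of where this hooks in: the UML context-switch path is
 * assumed to call arch_switch_tls() for the incoming task, so the host
 * process carries the right TLS descriptors before userspace runs again.
 * The exact call site lives outside this file:
 */
#if 0
/* in the switch_to() path (assumed caller, for illustration only) */
arch_switch_tls(to);
#endif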

static int set_tls_entry(struct task_struct *task, struct user_desc *info,
             int idx, int flushed)
{
    struct thread_struct *t = &task->thread;

    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

    t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
    t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
    t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

    return 0;
}

int arch_set_tls(struct task_struct *new, unsigned long tls)
{
    struct user_desc info;
    int idx, ret = -EFAULT;

    if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
        goto out;

    ret = -EINVAL;
    if (LDT_empty(&info))
        goto out;

    idx = info.entry_number;

    ret = set_tls_entry(new, &info, idx, 0);
out:
    return ret;
}
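
/*
 * arch_set_tls() runs on the clone() path when the child passes CLONE_SETTLS;
 * on 32-bit x86 the tls argument points to a struct user_desc. Note that the
 * code above does not allocate a slot: entry_number must already be valid.
 * A minimal userspace sketch (illustrative; error handling omitted, and
 * "tls_block" and "child_fn" are hypothetical):
 */
#if 0
#include <asm/ldt.h>
#include <sched.h>

static char child_stack[65536];

struct user_desc desc = {
    .entry_number   = 6,    /* a valid slot: GDT_ENTRY_TLS_MIN on i386 */
    .base_addr      = (unsigned long) tls_block,
    .limit          = 0xfffff,
    .seg_32bit      = 1,
    .limit_in_pages = 1,
    .useable        = 1,
};

clone(child_fn, child_stack + sizeof(child_stack),
      CLONE_VM | CLONE_SETTLS | SIGCHLD, NULL,
      NULL, &desc, NULL);
#endif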

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
             int idx)
{
    struct thread_struct *t = &task->thread;

    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

    if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
        goto clear;

    *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
    /*
     * Temporary debugging check, to make sure that things have been
     * flushed. This could be triggered if load_TLS() failed.
     */
    if (unlikely(task == current &&
             !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
        printk(KERN_ERR "get_tls_entry: task with pid %d got here "
                "without flushed TLS.\n", current->pid);
    }

    return 0;
clear:
    /*
     * When the TLS entry has not been set, the values returned to
     * userspace from the tls_array are 0 (because it's cleared at boot,
     * see arch/i386/kernel/head.S:cpu_gdt_table). Emulate that here.
     */
    clear_user_desc(info);
    info->entry_number = idx;
    goto out;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
    struct user_desc info;
    int idx, ret;

    if (!host_supports_tls)
        return -ENOSYS;

    if (copy_from_user(&info, user_desc, sizeof(info)))
        return -EFAULT;

    idx = info.entry_number;

    if (idx == -1) {
        idx = get_free_idx(current);
        if (idx < 0)
            return idx;
        info.entry_number = idx;
        /* Tell the user which slot we chose. */
        if (put_user(idx, &user_desc->entry_number))
            return -EFAULT;
    }

    ret = do_set_thread_area(&info);
    if (ret)
        return ret;
    return set_tls_entry(current, &info, idx, 1);
}
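
/*
 * A minimal userspace sketch of this syscall (illustrative; assumes glibc's
 * syscall(2) wrapper and <asm/ldt.h> for struct user_desc). Passing
 * entry_number == -1 asks the kernel to pick a free slot and write the
 * chosen index back into the struct, as implemented above:
 */
#if 0
#include <asm/ldt.h>
#include <sys/syscall.h>
#include <unistd.h>

static int alloc_tls_slot(void *base)
{
    struct user_desc desc = {
        .entry_number   = -1,   /* let the kernel choose a free slot */
        .base_addr      = (unsigned long) base,
        .limit          = 0xfffff,
        .seg_32bit      = 1,
        .limit_in_pages = 1,
        .useable        = 1,
    };

    if (syscall(SYS_set_thread_area, &desc) != 0)
        return -1;
    return desc.entry_number;   /* the index the kernel picked */
}
#endif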

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: unlike on i386, no error handling is done on the deferred load;
 * however, the only possible errors would be caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
               struct user_desc __user *user_desc)
{
    struct user_desc info;

    if (!host_supports_tls)
        return -EIO;

    if (copy_from_user(&info, user_desc, sizeof(info)))
        return -EFAULT;

    return set_tls_entry(child, &info, idx, 0);
}
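
/*
 * From the tracer's side this is reached via the x86 PTRACE_SET_THREAD_AREA
 * request, with the GDT index passed in the addr argument. A hedged sketch
 * (illustrative; error handling omitted):
 */
#if 0
#include <asm/ldt.h>
#include <sys/ptrace.h>

struct user_desc desc;
/* ... fill in desc for TLS slot idx ... */
ptrace(PTRACE_SET_THREAD_AREA, child_pid, idx, &desc);
#endif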

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
    struct user_desc info;
    int idx, ret;

    if (!host_supports_tls)
        return -ENOSYS;

    if (get_user(idx, &user_desc->entry_number))
        return -EFAULT;

    ret = get_tls_entry(current, &info, idx);
    if (ret < 0)
        goto out;

    if (copy_to_user(user_desc, &info, sizeof(info)))
        ret = -EFAULT;

out:
    return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
        struct user_desc __user *user_desc)
{
    struct user_desc info;
    int ret;

    if (!host_supports_tls)
        return -EIO;

    ret = get_tls_entry(child, &info, idx);
    if (ret < 0)
        goto out;

    if (copy_to_user(user_desc, &info, sizeof(info)))
        ret = -EFAULT;
out:
    return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
    check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
    if (host_supports_tls) {
        printk(KERN_INFO "Host TLS support detected\n");
        printk(KERN_INFO "Detected host type: ");
        switch (host_gdt_entry_tls_min) {
        case GDT_ENTRY_TLS_MIN_I386:
            printk(KERN_CONT "i386");
            break;
        case GDT_ENTRY_TLS_MIN_X86_64:
            printk(KERN_CONT "x86_64");
            break;
        }
        printk(KERN_CONT " (GDT indexes %d to %d)\n",
               host_gdt_entry_tls_min,
               host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
    } else
        printk(KERN_ERR "  Host TLS support NOT detected! "
                "TLS support inside UML will not work\n");
    return 0;
}

__initcall(__setup_host_supports_tls);
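
/*
 * For reference, on an i386 host the messages above would produce boot
 * output along these lines (illustrative; with GDT_ENTRY_TLS_MIN_I386 == 6
 * and GDT_ENTRY_TLS_ENTRIES == 3):
 *
 *   Host TLS support detected
 *   Detected host type: i386 (GDT indexes 6 to 9)
 */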