0001
0002
0003
0004
0005
0006 #include <linux/mm.h>
0007 #include <linux/sched.h>
0008 #include <linux/slab.h>
0009 #include <linux/syscalls.h>
0010 #include <linux/uaccess.h>
0011 #include <asm/unistd.h>
0012 #include <os.h>
0013 #include <skas.h>
0014 #include <sysdep/tls.h>
0015
0016 static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
0017 {
0018 return syscall(__NR_modify_ldt, func, ptr, bytecount);
0019 }
0020
0021 static long write_ldt_entry(struct mm_id *mm_idp, int func,
0022 struct user_desc *desc, void **addr, int done)
0023 {
0024 long res;
0025 void *stub_addr;
0026
0027 BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
0028
0029 res = syscall_stub_data(mm_idp, (unsigned long *)desc,
0030 sizeof(*desc) / sizeof(long),
0031 addr, &stub_addr);
0032 if (!res) {
0033 unsigned long args[] = { func,
0034 (unsigned long)stub_addr,
0035 sizeof(*desc),
0036 0, 0, 0 };
0037 res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
0038 0, addr, done);
0039 }
0040
0041 return res;
0042 }
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056 static int read_ldt(void __user * ptr, unsigned long bytecount)
0057 {
0058 int i, err = 0;
0059 unsigned long size;
0060 uml_ldt_t *ldt = ¤t->mm->context.arch.ldt;
0061
0062 if (!ldt->entry_count)
0063 goto out;
0064 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
0065 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
0066 err = bytecount;
0067
0068 mutex_lock(&ldt->lock);
0069 if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
0070 size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
0071 if (size > bytecount)
0072 size = bytecount;
0073 if (copy_to_user(ptr, ldt->u.entries, size))
0074 err = -EFAULT;
0075 bytecount -= size;
0076 ptr += size;
0077 }
0078 else {
0079 for (i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
0080 i++) {
0081 size = PAGE_SIZE;
0082 if (size > bytecount)
0083 size = bytecount;
0084 if (copy_to_user(ptr, ldt->u.pages[i], size)) {
0085 err = -EFAULT;
0086 break;
0087 }
0088 bytecount -= size;
0089 ptr += size;
0090 }
0091 }
0092 mutex_unlock(&ldt->lock);
0093
0094 if (bytecount == 0 || err == -EFAULT)
0095 goto out;
0096
0097 if (clear_user(ptr, bytecount))
0098 err = -EFAULT;
0099
0100 out:
0101 return err;
0102 }
0103
0104 static int read_default_ldt(void __user * ptr, unsigned long bytecount)
0105 {
0106 int err;
0107
0108 if (bytecount > 5*LDT_ENTRY_SIZE)
0109 bytecount = 5*LDT_ENTRY_SIZE;
0110
0111 err = bytecount;
0112
0113
0114
0115
0116
0117 if (clear_user(ptr, bytecount))
0118 err = -EFAULT;
0119
0120 return err;
0121 }
0122
0123 static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
0124 {
0125 uml_ldt_t *ldt = ¤t->mm->context.arch.ldt;
0126 struct mm_id * mm_idp = ¤t->mm->context.id;
0127 int i, err;
0128 struct user_desc ldt_info;
0129 struct ldt_entry entry0, *ldt_p;
0130 void *addr = NULL;
0131
0132 err = -EINVAL;
0133 if (bytecount != sizeof(ldt_info))
0134 goto out;
0135 err = -EFAULT;
0136 if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
0137 goto out;
0138
0139 err = -EINVAL;
0140 if (ldt_info.entry_number >= LDT_ENTRIES)
0141 goto out;
0142 if (ldt_info.contents == 3) {
0143 if (func == 1)
0144 goto out;
0145 if (ldt_info.seg_not_present == 0)
0146 goto out;
0147 }
0148
0149 mutex_lock(&ldt->lock);
0150
0151 err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
0152 if (err)
0153 goto out_unlock;
0154
0155 if (ldt_info.entry_number >= ldt->entry_count &&
0156 ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
0157 for (i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
0158 i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
0159 i++) {
0160 if (i == 0)
0161 memcpy(&entry0, ldt->u.entries,
0162 sizeof(entry0));
0163 ldt->u.pages[i] = (struct ldt_entry *)
0164 __get_free_page(GFP_KERNEL|__GFP_ZERO);
0165 if (!ldt->u.pages[i]) {
0166 err = -ENOMEM;
0167
0168 memset(&ldt_info, 0, sizeof(ldt_info));
0169 write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
0170 goto out_unlock;
0171 }
0172 if (i == 0) {
0173 memcpy(ldt->u.pages[0], &entry0,
0174 sizeof(entry0));
0175 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
0176 sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
0177 }
0178 ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
0179 }
0180 }
0181 if (ldt->entry_count <= ldt_info.entry_number)
0182 ldt->entry_count = ldt_info.entry_number + 1;
0183
0184 if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
0185 ldt_p = ldt->u.entries + ldt_info.entry_number;
0186 else
0187 ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
0188 ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
0189
0190 if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
0191 (func == 1 || LDT_empty(&ldt_info))) {
0192 ldt_p->a = 0;
0193 ldt_p->b = 0;
0194 }
0195 else{
0196 if (func == 1)
0197 ldt_info.useable = 0;
0198 ldt_p->a = LDT_entry_a(&ldt_info);
0199 ldt_p->b = LDT_entry_b(&ldt_info);
0200 }
0201 err = 0;
0202
0203 out_unlock:
0204 mutex_unlock(&ldt->lock);
0205 out:
0206 return err;
0207 }
0208
0209 static long do_modify_ldt_skas(int func, void __user *ptr,
0210 unsigned long bytecount)
0211 {
0212 int ret = -ENOSYS;
0213
0214 switch (func) {
0215 case 0:
0216 ret = read_ldt(ptr, bytecount);
0217 break;
0218 case 1:
0219 case 0x11:
0220 ret = write_ldt(ptr, bytecount, func);
0221 break;
0222 case 2:
0223 ret = read_default_ldt(ptr, bytecount);
0224 break;
0225 }
0226 return ret;
0227 }
0228
/* Serializes the one-time probe of the host LDT in ldt_get_host_info(). */
static DEFINE_SPINLOCK(host_ldt_lock);
/*
 * Static fallback buffer for the list of host LDT slots in use,
 * -1 terminated.  dummy_list+1 points at the terminator, i.e. an
 * empty list; ldt_get_host_info() may also fill dummy_list in place
 * when the host uses fewer than ARRAY_SIZE(dummy_list) slots.
 */
static short dummy_list[9] = {0, -1};
/* NULL until the host LDT has been probed (see ldt_get_host_info()). */
static short * host_ldt_entries = NULL;
0232
/*
 * Probe the host process' own LDT once and record which slot numbers
 * it uses, leaving a -1 terminated list in host_ldt_entries.
 * init_new_ldt() uses that list to clear exactly those slots in a
 * fresh address space.
 */
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry * ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	/* Already probed (or a probe is in flight) - nothing to do. */
	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	/*
	 * Claim the probe before dropping the lock: point at the empty
	 * tail of dummy_list so concurrent callers immediately see a
	 * valid (terminated) list.
	 */
	host_ldt_entries = dummy_list+1;

	spin_unlock(&host_ldt_lock);

	/* Smallest page order covering LDT_PAGES_MAX pages. */
	for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
		;

	ldt = (struct ldt_entry *)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	/* modify_ldt(0) returns the number of bytes read. */
	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* Host has an empty LDT: dummy_list's leading 0 slot
		 * followed by -1 describes that adequately. */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	/* Count the populated descriptors. */
	for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if (size < ARRAY_SIZE(dummy_list))
		/* Small enough to store in the static buffer. */
		host_ldt_entries = dummy_list;
	else {
		/* +1 for the -1 terminator. */
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	/* Record the slot number of each populated descriptor. */
	for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}
0299
/*
 * Initialize the LDT state of a new mm.  Without a parent mm, clear
 * every LDT slot the host process itself uses; with one, duplicate the
 * parent's shadow LDT.  Returns 0 or a negative errno.
 */
long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short * num_p;
	int i;
	long page, err=0;
	void *addr = NULL;


	mutex_init(&new_mm->arch.ldt.lock);

	if (!from_mm) {
		/*
		 * A zeroed user_desc written with func == 1 clears an
		 * LDT entry (see write_ldt()).  Reset every slot the
		 * host process uses so the new address space starts
		 * with an empty LDT.
		 */
		memset(&desc, 0, sizeof(desc));
		ldt_get_host_info();
		for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
			desc.entry_number = *num_p;
			/* Flush the stub batch only on the last entry. */
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, *(num_p + 1) == -1);
			if (err)
				break;
		}
		new_mm->arch.ldt.entry_count = 0;

		goto out;
	}

	/*
	 * Only the in-kernel shadow copy is duplicated here.
	 * NOTE(review): this presumably relies on the host-side LDT
	 * being inherited by the new host process - confirm against
	 * the skas fork/clone path.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
		/* Direct entries live inline in the context. */
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));
	else {
		/* Page-backed shadow: copy each backing page. */
		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-->0) {
			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!page) {
				err = -ENOMEM;
				break;
			}
			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *) page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
		}
	}
	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);

out:
	return err;
}
0361
0362
0363 void free_ldt(struct mm_context *mm)
0364 {
0365 int i;
0366
0367 if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
0368 i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
0369 while (i-- > 0)
0370 free_page((long) mm->arch.ldt.u.pages[i]);
0371 }
0372 mm->arch.ldt.entry_count = 0;
0373 }
0374
/* modify_ldt system call entry point: dispatch to the skas handler. */
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	/*
	 * NOTE(review): the cast narrows the result to 32 bits before
	 * it is widened back to long - presumably to match the native
	 * x86 modify_ldt() return-value ABI; confirm against the
	 * arch/x86 implementation.
	 */
	return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}