// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
#include <kern_util.h>
#include <asm/futex.h>
#include <os.h>

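/*
 * Walk the page tables of @mm down to the PTE that maps @addr.
 * Returns NULL if any level of the walk is not present.
 */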
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == NULL)
		return NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}

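/*
 * Find the PTE for @virt in the current process, faulting the page in
 * (writably, if @is_write is set) when it is not already mapped with
 * the required access.  Returns NULL if the page cannot be made
 * accessible.
 */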
static pte_t *maybe_map(unsigned long virt, int is_write)
{
	pte_t *pte = virt_to_pte(current->mm, virt);
	int err, dummy_code;

	if ((pte == NULL) || !pte_present(*pte) ||
	    (is_write && !pte_write(*pte))) {
		err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
		if (err)
			return NULL;
		pte = virt_to_pte(current->mm, virt);
		/* The walk can still fail even after a successful fault-in. */
		if (pte == NULL)
			return NULL;
	}
	if (!pte_present(*pte))
		pte = NULL;

	return pte;
}

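/*
 * Run @op on a range that is known not to cross a page boundary: map
 * the page into the kernel, invoke the callback on the kernel alias of
 * the address, then drop the temporary mapping again.
 */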
static int do_op_one_page(unsigned long addr, int len, int is_write,
			  int (*op)(unsigned long addr, int len, void *arg),
			  void *arg)
{
	struct page *page;
	pte_t *pte;
	int n;

	pte = maybe_map(addr, is_write);
	if (pte == NULL)
		return -1;

	page = pte_page(*pte);
#ifdef CONFIG_64BIT
	pagefault_disable();
	addr = (unsigned long) page_address(page) +
		(addr & ~PAGE_MASK);
#else
	addr = (unsigned long) kmap_atomic(page) +
		(addr & ~PAGE_MASK);
#endif
	n = (*op)(addr, len, arg);

#ifdef CONFIG_64BIT
	pagefault_enable();
#else
	kunmap_atomic((void *) addr);
#endif

	return n;
}

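/*
 * Run @op over an arbitrary user range by splitting it into a leading
 * partial page, a run of whole pages, and a trailing partial page.
 * Returns 0 on success, or the number of bytes left unprocessed if a
 * page could not be accessed.
 */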
static long buffer_op(unsigned long addr, int len, int is_write,
		      int (*op)(unsigned long, int, void *), void *arg)
{
	long size, remain, n;

	/* First, the bytes up to the next page boundary ... */
	size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
	remain = len;

	n = do_op_one_page(addr, size, is_write, op, arg);
	if (n != 0) {
		remain = (n < 0 ? remain : 0);
		goto out;
	}

	addr += size;
	remain -= size;
	if (remain == 0)
		goto out;

	/* ... then all of the fully contained pages ... */
	while (addr < ((addr + remain) & PAGE_MASK)) {
		n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
		if (n != 0) {
			remain = (n < 0 ? remain : 0);
			goto out;
		}

		addr += PAGE_SIZE;
		remain -= PAGE_SIZE;
	}
	if (remain == 0)
		goto out;

	/* ... and finally the trailing partial page. */
	n = do_op_one_page(addr, remain, is_write, op, arg);
	if (n != 0) {
		remain = (n < 0 ? remain : 0);
		goto out;
	}

	return 0;
out:
	return remain;
}

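/*
 * Per-chunk callback for raw_copy_from_user(): @arg tracks the current
 * destination address, which advances as each chunk is copied.
 */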
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
	unsigned long *to_ptr = arg, to = *to_ptr;

	memcpy((void *) to, (void *) from, len);
	*to_ptr += len;
	return 0;
}

unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to);
}
EXPORT_SYMBOL(raw_copy_from_user);

/* Mirror image of copy_chunk_from_user(): @arg tracks the source address. */
static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
	unsigned long *from_ptr = arg, from = *from_ptr;

	memcpy((void *) to, (void *) from, len);
	*from_ptr += len;
	return 0;
}

unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from);
}
EXPORT_SYMBOL(raw_copy_to_user);

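/*
 * Per-chunk callback for strncpy_from_user(): copies up to @len bytes
 * and returns 1 to stop the page walk once the terminating NUL has
 * been copied.
 */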
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
	char **to_ptr = arg, *to = *to_ptr;
	int n;

	strncpy(to, (void *) from, len);
	n = strnlen(to, len);
	*to_ptr += n;

	if (n < len)
		return 1;
	return 0;
}

long strncpy_from_user(char *dst, const char __user *src, long count)
{
	long n;
	char *ptr = dst;

	if (!access_ok(src, 1))
		return -EFAULT;
	n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
		      &ptr);
	if (n != 0)
		return -EFAULT;
	return strnlen(dst, count);
}
EXPORT_SYMBOL(strncpy_from_user);

static int clear_chunk(unsigned long addr, int len, void *unused)
{
	memset((void *) addr, 0, len);
	return 0;
}

unsigned long __clear_user(void __user *mem, unsigned long len)
{
	return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
}
EXPORT_SYMBOL(__clear_user);

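/*
 * Per-chunk callback for strnlen_user(): accumulates the string length
 * in @arg and returns 1 to stop the page walk once the terminating NUL
 * is found.
 */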
static int strnlen_chunk(unsigned long str, int len, void *arg)
{
	int *len_ptr = arg, n;

	n = strnlen((void *) str, len);
	*len_ptr += n;

	if (n < len)
		return 1;
	return 0;
}

long strnlen_user(const char __user *str, long len)
{
	int count = 0, n;

	if (!access_ok(str, 1))
		return -EFAULT;
	n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
	if (n == 0)
		return count + 1;
	return 0;
}
EXPORT_SYMBOL(strnlen_user);

/**
 * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constraints
 * @op:		operation to execute
 * @oparg:	argument to operation
 * @oval:	old value at uaddr
 * @uaddr:	pointer to user space address
 *
 * Return:
 * 0 - On success
 * -EFAULT - User access resulted in a page fault
 * -ENOSYS - Operation not supported
 */
int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
	int oldval, ret;
	struct page *page;
	unsigned long addr = (unsigned long) uaddr;
	pte_t *pte;

	ret = -EFAULT;
	if (!access_ok(uaddr, sizeof(*uaddr)))
		return -EFAULT;
	preempt_disable();
	pte = maybe_map(addr, 1);
	if (pte == NULL)
		goto out_inuser;

	page = pte_page(*pte);
#ifdef CONFIG_64BIT
	pagefault_disable();
	addr = (unsigned long) page_address(page) +
		(((unsigned long) addr) & ~PAGE_MASK);
#else
	addr = (unsigned long) kmap_atomic(page) +
		((unsigned long) addr & ~PAGE_MASK);
#endif
	uaddr = (u32 *) addr;
	oldval = *uaddr;

	ret = 0;

	switch (op) {
	case FUTEX_OP_SET:
		*uaddr = oparg;
		break;
	case FUTEX_OP_ADD:
		*uaddr += oparg;
		break;
	case FUTEX_OP_OR:
		*uaddr |= oparg;
		break;
	case FUTEX_OP_ANDN:
		*uaddr &= ~oparg;
		break;
	case FUTEX_OP_XOR:
		*uaddr ^= oparg;
		break;
	default:
		ret = -ENOSYS;
	}
#ifdef CONFIG_64BIT
	pagefault_enable();
#else
	kunmap_atomic((void *) addr);
#endif

out_inuser:
	preempt_enable();

	if (ret == 0)
		*oval = oldval;

	return ret;
}
EXPORT_SYMBOL(arch_futex_atomic_op_inuser);

/**
 * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
 *				uaddr with newval if the current value is
 *				oldval.
 * @uval:	pointer to store content of @uaddr
 * @uaddr:	pointer to user space address
 * @oldval:	old value
 * @newval:	new value to store to @uaddr
 *
 * Return:
 * 0 - On success
 * -EFAULT - User access resulted in a page fault
 */
int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
				  u32 oldval, u32 newval)
{
	struct page *page;
	pte_t *pte;
	int ret = -EFAULT;

	if (!access_ok(uaddr, sizeof(*uaddr)))
		return -EFAULT;

	preempt_disable();
	pte = maybe_map((unsigned long) uaddr, 1);
	if (pte == NULL)
		goto out_inatomic;

	page = pte_page(*pte);
#ifdef CONFIG_64BIT
	pagefault_disable();
	uaddr = page_address(page) + (((unsigned long) uaddr) & ~PAGE_MASK);
#else
	uaddr = kmap_atomic(page) + ((unsigned long) uaddr & ~PAGE_MASK);
#endif

	*uval = *uaddr;

	/*
	 * cmpxchg() returns the value it found at *uaddr, not an error code;
	 * the caller detects success or failure by comparing *uval against
	 * oldval, so the return value can be discarded here.
	 */
	(void) cmpxchg(uaddr, oldval, newval);

#ifdef CONFIG_64BIT
	pagefault_enable();
#else
	kunmap_atomic(uaddr);
#endif
	ret = 0;

out_inatomic:
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(futex_atomic_cmpxchg_inatomic);