// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		"	bras	1,0f\n"			/* r1 = address of the mvc template below */
		"	mvc	0(1,%4),0(%5)\n"	/* template: copy from src into &tmp[offset] */
		"0:	mvc	0(8,%3),0(%0)\n"	/* read 8 aligned bytes from the destination into tmp */
		"	ex	%1,0(1)\n"		/* execute template with length "count", i.e. copy size bytes */
		"	lg	%1,0(%3)\n"		/* load the modified 8 bytes from tmp */
		"	lra	%0,0(%0)\n"		/* get the real address of the aligned destination */
		"	sturg	%1,%0\n"		/* store 8 bytes via real address, bypassing DAT */
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight-byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	if (!(flags & PSW_MASK_DAT)) {
		/* DAT already disabled: page table protection does not apply */
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

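/*
 * Illustrative example, not part of the original file: s390_kernel_write()
 * is the helper used when otherwise read-only kernel memory has to be
 * patched (e.g. by ftrace or jump labels on s390). A hypothetical caller
 * replacing "len" bytes at "insn" with the contents of "new_insn" would do:
 *
 *	s390_kernel_write(insn, new_insn, len);
 *
 * All callers are serialized by s390_kernel_write_lock, so their
 * read-modify-write sequences on the same eight-byte block cannot race.
 */
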
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	union register_pair _dst, _src;
	int rc = -EFAULT;

	_dst.even = (unsigned long) dest;
	_dst.odd  = (unsigned long) count;
	_src.even = (unsigned long) src;
	_src.odd  = (unsigned long) count;
	asm volatile (
		"0:	mvcle	%[dst],%[src],0\n"	/* interruptible copy, cc == 3 means not finished */
		"1:	jo	0b\n"			/* restart mvcle until it completes */
		"	lhi	%[rc],0\n"		/* success */
		"2:\n"
		EX_TABLE(1b,2b)				/* on an exception continue at 2: with rc still -EFAULT */
		: [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
		: : "cc", "memory");
	return rc;
}

static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							 unsigned long src,
							 unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8);	/* disable DAT */
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04);	/* enable DAT */
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, unsigned long src, size_t count)
{
	unsigned long _dest  = (unsigned long)dest;
	unsigned long _src   = (unsigned long)src;
	unsigned long _count = (unsigned long)count;
	int rc;

	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = call_on_stack(3, S390_lowcore.nodat_stack,
				   unsigned long, _memcpy_real,
				   unsigned long, _dest,
				   unsigned long, _src,
				   unsigned long, _count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack.
	 */
	return _memcpy_real(_dest, _src, _count);
}

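/*
 * Illustrative example, not part of the original file: memcpy_real() is
 * used to access storage through real addresses, e.g. when reading memory
 * of the previous kernel in a dump scenario. A hypothetical caller copying
 * "len" bytes from real address "src" into a buffer "buf" would do:
 *
 *	if (memcpy_real(buf, src, len))
 *		return -EFAULT;
 *
 * The copy runs on the nodat stack, presumably because the current task
 * stack may not be usable once DAT is switched off, while the nodat stack
 * is set up to remain accessible in real mode.
 */
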
/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

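/*
 * Illustrative note, not part of the original file: real and absolute
 * addresses differ only through prefixing, which swaps the first 8KB of
 * storage with the CPU's lowcore area. Setting the prefix to 0 makes real
 * and absolute addresses coincide, so a plain memcpy() then reaches the
 * absolute storage. Machine checks are disabled for that window, presumably
 * because a machine check arriving with a zero prefix would store its state
 * into the wrong lowcore.
 */
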
/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

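/*
 * Illustrative note, not part of the original file: addresses below
 * sizeof(struct lowcore) and addresses inside any online CPU's lowcore are
 * "swapped" by prefixing, i.e. the data visible there through the normal
 * kernel mapping is not the underlying absolute storage. Such ranges have
 * to be read with memcpy_absolute() instead of a direct mapping.
 */
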
/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	unsigned long size;

	cpus_read_lock();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, ptr, size);
	}
	preempt_enable();
	cpus_read_unlock();
	return bounce;
}

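/*
 * Illustrative example, not part of the original file: the /dev/mem read
 * path in drivers/char/mem.c is the expected caller and already iterates
 * page by page, which is why a single bounce page is sufficient. A
 * hypothetical read of "count" bytes at physical address "p" would look
 * roughly like:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, count))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */
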
/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
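
/*
 * Illustrative note, not part of the original file: the pointer returned by
 * xlate_dev_mem_ptr() differs from phys_to_virt(addr) only when a bounce
 * page was allocated, so comparing addr with virt_to_phys(ptr) is enough to
 * decide whether a page has to be freed here.
 */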