// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>

int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable();
	return 0;
}

int enter_vmx_ops(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_ops(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
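The user-copy pair above implements a simple protocol: enter_vmx_usercopy() returns 0 when VMX cannot be used (interrupt context), and exit_vmx_usercopy() deliberately returns 0 so that a successful copy can end in a tail call whose return value doubles as "0 bytes not copied". The real consumer, __copy_tofrom_user_power7, is PowerPC assembly; the following is only a minimal C sketch of that calling pattern, in which copy_bytes_vmx() and copy_bytes_fallback() are illustrative names (both assumed to return the number of bytes not copied), not real kernel symbols.

/*
 * Hypothetical sketch of the user-copy pattern. copy_bytes_vmx() and
 * copy_bytes_fallback() are illustrative helpers, not kernel symbols;
 * both return the number of bytes NOT copied (0 on success). Assumes
 * kernel context with <linux/uaccess.h> available for __user.
 */
unsigned long copy_user_vmx_sketch(void *to, const void __user *from,
				   unsigned long n)
{
	/* In interrupt context VMX is unavailable: take the plain path */
	if (!enter_vmx_usercopy())
		return copy_bytes_fallback(to, from, n);

	if (copy_bytes_vmx(to, from, n)) {
		/*
		 * A page fault hit while faults were disabled: give up
		 * the VMX context and retry with the normal non-vmx copy,
		 * as the comment in enter_vmx_usercopy() describes.
		 */
		exit_vmx_usercopy();
		return copy_bytes_fallback(to, from, n);
	}

	/*
	 * Success: exit_vmx_usercopy() returns 0, so this compiles to a
	 * tail call and 0 becomes the final "all bytes copied" result.
	 */
	return exit_vmx_usercopy();
}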
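The ops pair works the same way, except that exit_vmx_ops() passes the destination pointer through because memcpy-style routines must return it. A minimal caller sketch follows; vmx_copy_loop() and copy_fallback() are illustrative names, not real kernel symbols.

/*
 * Hypothetical sketch: how a VMX-accelerated memcpy-style routine might
 * use enter_vmx_ops()/exit_vmx_ops(). vmx_copy_loop() and copy_fallback()
 * are illustrative helpers, not kernel symbols.
 */
void *vmx_memcpy_sketch(void *dest, const void *src, unsigned long n)
{
	/* enter_vmx_ops() returns 0 in interrupt context: slow path */
	if (!enter_vmx_ops())
		return copy_fallback(dest, src, n);

	vmx_copy_loop(dest, src, n);	/* bulk copy via Altivec registers */

	/*
	 * exit_vmx_ops() hands dest back, so this compiles to a tail call
	 * while still satisfying memcpy's return-the-destination contract.
	 */
	return exit_vmx_ops(dest);
}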