0001
0002 #ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
0003 #define _ASM_POWERPC_ASM_PROTOTYPES_H
/*
 * C prototypes for functions and symbols that are implemented in
 * assembly (or otherwise invisible to C), so the compiler and sparse
 * can type-check their callers.
 */
0011
0012 #include <linux/threads.h>
0013 #include <asm/cacheflush.h>
0014 #include <asm/checksum.h>
0015 #include <linux/uaccess.h>
0016 #include <asm/epapr_hcalls.h>
0017 #include <asm/dcr.h>
0018 #include <asm/mmu_context.h>
0019 #include <asm/ultravisor-api.h>
0020
0021 #include <uapi/asm/ucontext.h>
0022
0023
/*
 * Ultravisor call entry point.  The real implementation is in assembly;
 * when neither PowerNV nor secure-VM support is configured the stub
 * reports U_NOT_AVAILABLE (from asm/ultravisor-api.h, included above).
 */
#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
long ucall_norets(unsigned long opcode, ...);
#else
static inline long ucall_norets(unsigned long opcode, ...)
{
	return U_NOT_AVAILABLE;
}
#endif
0032
0033
/*
 * Low-level OPAL firmware call, implemented in assembly.  a0..a7 are the
 * call arguments; the trailing parameters are the OPAL opcode and the
 * caller's MSR value (presumably restored on return — see the asm side).
 */
int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
		    int64_t a4, int64_t a5, int64_t a6, int64_t a7,
		    int64_t opcode, uint64_t msr);
0037
0038
/* libgcc-style 64-bit compiler support routines provided by the kernel. */
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);
extern s64 __ashldi3(s64, int);
extern s64 __ashrdi3(s64, int);
extern int __cmpdi2(s64, s64);
extern int __ucmpdi2(u64, u64);

/* Tracing hook inserted by the compiler at function entry (ftrace). */
void _mcount(void);
0048
0049
/* Transactional-memory helpers implemented in assembly. */
void tm_enable(void);
void tm_disable(void);
void tm_abort(uint8_t cause);

/* Forward declaration only — a pointer suffices for these prototypes. */
struct kvm_vcpu;
/* PR-KVM TM state save/restore around guest entry/exit (asm). */
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
0057
0058
/*
 * Patch-site offsets: locations in assembly that the runtime
 * code-patching machinery fixes up (branch-cache / link-stack flush
 * mitigations, cacheable memset/memcpy variants).
 */
extern s32 patch__call_flush_branch_caches1;
extern s32 patch__call_flush_branch_caches2;
extern s32 patch__call_flush_branch_caches3;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
extern s32 patch__call_kvm_flush_link_stack_p9;
extern s32 patch__memset_nocache, patch__memcpy_nocache;

/* Assembly entry points referenced by the patch sites above. */
extern long flush_branch_caches;
extern long kvm_flush_link_stack;
0070
/*
 * HV-KVM TM state save/restore (implemented in assembly).  Compiled to
 * empty inline stubs when transactional memory support is not built in.
 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
#else
static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
				     bool preserve_nv) { }
static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
					bool preserve_nv) { }
#endif
0080
/* POWER9 guest entry path (asm). */
void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);

/* H_SET_DABR / H_SET_XDABR hypercall handlers (asm). */
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
			unsigned long dabrx);

#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */