0001
0002
0003
0004
0005
0006
0007
0008 #ifndef _ASM_POWERPC_KEYS_H
0009 #define _ASM_POWERPC_KEYS_H
0010
0011 #include <linux/jump_label.h>
0012 #include <asm/firmware.h>
0013
0014 extern int num_pkey;
0015 extern u32 reserved_allocation_mask;
0016
/*
 * The five vma flag bits that encode a vma's protection key (enough for
 * the 32 keys this architecture can expose).
 */
#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
VM_PKEY_BIT3 | VM_PKEY_BIT4)


/*
 * Beyond the generic access/write disables, powerpc keys can also deny
 * instruction fetch; PKEY_ACCESS_MASK is every restriction bit userspace
 * may request.
 */
#define PKEY_DISABLE_EXECUTE 0x4
#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS | \
PKEY_DISABLE_WRITE | \
PKEY_DISABLE_EXECUTE)
0025
0026 #ifdef CONFIG_PPC_BOOK3S_64
0027 #include <asm/book3s/64/pkeys.h>
0028 #else
0029 #error "Not supported"
0030 #endif
0031
0032
0033 static inline u64 pkey_to_vmflag_bits(u16 pkey)
0034 {
0035 return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
0036 }
0037
0038 static inline int vma_pkey(struct vm_area_struct *vma)
0039 {
0040 if (!mmu_has_feature(MMU_FTR_PKEY))
0041 return 0;
0042 return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
0043 }
0044
/* Number of keys currently usable; set at boot (see num_pkey above). */
static inline int arch_max_pkey(void)
{
return num_pkey;
}
0049
/*
 * Single-bit mask for @pkey in the per-mm allocation bitmap.  The
 * argument is parenthesized so expression arguments (e.g. "c ? a : b")
 * expand correctly.
 */
#define pkey_alloc_mask(pkey) (0x1 << (pkey))

#define mm_pkey_allocation_map(mm) (mm->context.pkey_allocation_map)

/*
 * Mark/clear @pkey in @mm's allocation bitmap.  Wrapped in
 * do { } while (0) so each expands to a single statement and stays
 * safe inside unbraced if/else bodies.
 */
#define __mm_pkey_allocated(mm, pkey) do { \
mm_pkey_allocation_map(mm) |= pkey_alloc_mask(pkey); \
} while (0)

#define __mm_pkey_free(mm, pkey) do { \
mm_pkey_allocation_map(mm) &= ~pkey_alloc_mask(pkey); \
} while (0)

#define __mm_pkey_is_allocated(mm, pkey) \
(mm_pkey_allocation_map(mm) & pkey_alloc_mask(pkey))

/* Keys claimed by firmware/kernel; never handed to userspace. */
#define __mm_pkey_is_reserved(pkey) (reserved_allocation_mask & \
pkey_alloc_mask(pkey))
0067
0068 static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
0069 {
0070 if (pkey < 0 || pkey >= arch_max_pkey())
0071 return false;
0072
0073
0074 if (__mm_pkey_is_reserved(pkey))
0075 return false;
0076
0077 return __mm_pkey_is_allocated(mm, pkey);
0078 }
0079
0080
0081
0082
0083
0084
0085 static inline int mm_pkey_alloc(struct mm_struct *mm)
0086 {
0087
0088
0089
0090
0091
0092 u32 all_pkeys_mask = (u32)(~(0x0));
0093 int ret;
0094
0095 if (!mmu_has_feature(MMU_FTR_PKEY))
0096 return -1;
0097
0098
0099
0100
0101 if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
0102 return -1;
0103
0104 ret = ffz((u32)mm_pkey_allocation_map(mm));
0105 __mm_pkey_allocated(mm, ret);
0106
0107 return ret;
0108 }
0109
0110 static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
0111 {
0112 if (!mmu_has_feature(MMU_FTR_PKEY))
0113 return -1;
0114
0115 if (!mm_pkey_is_allocated(mm, pkey))
0116 return -EINVAL;
0117
0118 __mm_pkey_free(mm, pkey);
0119
0120 return 0;
0121 }
0122
0123
0124
0125
0126
0127 extern int execute_only_pkey(struct mm_struct *mm);
0128 extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
0129 int prot, int pkey);
0130 static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
0131 int prot, int pkey)
0132 {
0133 if (!mmu_has_feature(MMU_FTR_PKEY))
0134 return 0;
0135
0136
0137
0138
0139
0140 if (pkey != -1)
0141 return pkey;
0142
0143 return __arch_override_mprotect_pkey(vma, prot, pkey);
0144 }
0145
0146 extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
0147 unsigned long init_val);
0148 static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
0149 unsigned long init_val)
0150 {
0151 if (!mmu_has_feature(MMU_FTR_PKEY))
0152 return -EINVAL;
0153
0154
0155
0156
0157
0158
0159
0160 if (pkey == 0)
0161 return init_val ? -EINVAL : 0;
0162
0163 return __arch_set_user_pkey_access(tsk, pkey, init_val);
0164 }
0165
/* Generic-code hook: pkeys are available iff the MMU advertises them. */
static inline bool arch_pkeys_enabled(void)
{
return mmu_has_feature(MMU_FTR_PKEY);
}
0170
0171 extern void pkey_mm_init(struct mm_struct *mm);
0172 #endif