0001
0002 #ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
0003 #define _ASM_POWERPC_BOOK3S_64_KUP_H
0004
0005 #include <linux/const.h>
0006 #include <asm/reg.h>
0007
0008 #define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
0009 #define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa)
0010 #define AMR_KUEP_BLOCKED UL(0x5455555555555555)
0011 #define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
0012
0013 #ifdef __ASSEMBLY__
0014
/*
 * kuap_user_restore: restore the user AMR/IAMR from the values saved in
 * pt_regs on the stack when returning to userspace.
 *
 * \gpr1, \gpr2 are scratch registers.  The whole sequence is patched
 * out (branch straight to 100f) when the CPU has no pkey support.
 */
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  /* skip entirely when MMU_FTR_PKEY is clear */
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If the KUAP feature is not enabled, the live AMR may already
	 * match the saved user value; do the (expensive) mtspr only when
	 * the values actually differ.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Same dance for IAMR: restore it only when returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If the KUEP feature is not enabled, write IAMR only when the
	 * saved value differs from the live SPR.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100: /* skip_restore */
	/*
	 * No trailing isync: presumably the interrupt return to userspace
	 * that follows is itself context synchronising — NOTE(review):
	 * confirm against the C kuap_user_restore() path below.
	 */
#endif
.endm
0061
/*
 * kuap_kernel_restore: restore the AMR saved on interrupt entry when
 * returning to a kernel context.  IAMR is not touched here — only
 * kernel data-access state (KUAP) matters on return-to-kernel.
 *
 * \gpr1, \gpr2 are scratch registers.  The whole body is patched in
 * only when MMU_FTR_BOOK3S_KUAP is set.
 */
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is usually unchanged when returning to the kernel, so
	 * compare the saved value against the live SPR and skip the
	 * (expensive) mtspr when they already match.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync after the mtspr: presumably the interrupt return that
	 * follows is context synchronising — NOTE(review): confirm (same
	 * pattern as kuap_user_restore above).
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
0084
#ifdef CONFIG_PPC_KUAP
/*
 * kuap_check_amr: debug-only (CONFIG_PPC_KUAP_DEBUG) sanity check that
 * the AMR is in the fully-blocked state at a point where it must be.
 * Emits a conditional trap (tdne) plus a WARN-once bug entry that fires
 * when the live AMR differs from AMR_KUAP_BLOCKED.
 *
 * \gpr1, \gpr2 are scratch registers.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Expected value: all kernel read+write access to user blocked */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
/*
 * kuap_save_amr_and_lock: interrupt-entry path.  Save the interrupted
 * context's AMR (and, for entry from userspace, IAMR) into the pt_regs
 * area on the stack, then switch the registers to their fully-blocked
 * kernel values.
 *
 * \gpr1, \gpr2  - scratch registers
 * \use_cr       - cr field used for the AMR compare below
 * \msr_pr_cr    - if non-blank, a cr field reflecting the interrupted
 *                 context's MSR_PR (EQ = entry from kernel, NE = entry
 *                 from userspace); blank for interrupts that can only
 *                 come from the kernel.
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * If both pkey and KUAP are disabled there is nothing to do.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  /* skip_save_amr */
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * If pkey is disabled and we are entering from userspace we
	 * don't need to switch the AMR value, so skip the save/lock.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/* NE in \msr_pr_cr means the interrupt came from userspace */
	bne	\msr_pr_cr, 100f
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * Save the AMR unconditionally.
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * Lock the kernel AMR to AMR_KUAP_BLOCKED, but only when the KUAP
	 * feature is enabled, and only if it is not already blocked.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * No isync before the mtspr — NOTE(review): presumably the very
	 * recent interrupt entry is itself context synchronising; confirm.
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * IAMR only needs saving/locking when entering from userspace.
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  /* EQ: from kernel — skip IAMR */
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * Lock the kernel IAMR to AMR_KUEP_BLOCKED, but only when the
	 * KUEP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: /* skip_save_amr */
#endif
.endm
0193
0194 #else
0195
0196 #include <linux/jump_label.h>
0197
0198 DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
0199
0200 #ifdef CONFIG_PPC_PKEY
0201
0202 extern u64 __ro_after_init default_uamor;
0203 extern u64 __ro_after_init default_amr;
0204 extern u64 __ro_after_init default_iamr;
0205
0206 #include <asm/mmu.h>
0207 #include <asm/ptrace.h>
0208
0209
0210
0211
0212
0213
0214
0215 static inline u64 current_thread_amr(void)
0216 {
0217 if (current->thread.regs)
0218 return current->thread.regs->amr;
0219 return default_amr;
0220 }
0221
0222 static inline u64 current_thread_iamr(void)
0223 {
0224 if (current->thread.regs)
0225 return current->thread.regs->iamr;
0226 return default_iamr;
0227 }
0228 #endif
0229
0230 #ifdef CONFIG_PPC_KUAP
0231
0232 static __always_inline bool kuap_is_disabled(void)
0233 {
0234 return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
0235 }
0236
/*
 * Restore the user AMR/IAMR from the values saved in pt_regs when
 * returning to userspace.  Only relevant on CPUs with pkey support.
 *
 * When KUAP (resp. KUEP) is enabled the register was switched to the
 * blocked value on kernel entry, so the user value must always be
 * written back.  When the feature is disabled the SPR may still hold
 * the user value, so write it only if it actually differs.
 */
static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	/* One leading isync covers both SPR updates */
	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}

	/*
	 * No trailing isync: presumably the interrupt return to userspace
	 * that follows is itself context synchronising — NOTE(review):
	 * confirm (matches the assembly kuap_user_restore macro).
	 */
}
0275
/*
 * Restore the kernel AMR saved on interrupt entry when returning to a
 * kernel context.  @amr is the live SPR value sampled by the caller;
 * the common case is that nothing changed, so skip the (expensive)
 * mtspr entirely when the saved and live values already match.
 */
static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
	if (likely(regs->amr == amr))
		return;

	isync();
	mtspr(SPRN_AMR, regs->amr);

	/*
	 * No isync after the mtspr: presumably the interrupt return that
	 * follows is context synchronising — NOTE(review): confirm (same
	 * pattern as kuap_user_restore() above).
	 */
}
0291
0292 static inline unsigned long __kuap_get_and_assert_locked(void)
0293 {
0294 unsigned long amr = mfspr(SPRN_AMR);
0295
0296 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
0297 WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
0298 return amr;
0299 }
0300
0301
/*
 * Nothing to do on Book3S-64: entry-time locking is performed by the
 * kuap_save_amr_and_lock assembly macro earlier in this file.
 */
static inline void __kuap_lock(void)
{
}
0305
/*
 * Nothing to do on Book3S-64: the save+lock is done in assembly on the
 * interrupt entry path (see kuap_save_amr_and_lock), not from C.
 */
static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
}
0309
0310
0311
0312
0313
0314
0315 static inline unsigned long get_kuap(void)
0316 {
0317
0318
0319
0320
0321
0322
0323
0324
0325 if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
0326 return AMR_KUAP_BLOCKED;
0327
0328 return mfspr(SPRN_AMR);
0329 }
0330
/*
 * Write a new AMR value; no-op when the KUAP feature is not active.
 */
static __always_inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	/*
	 * The mtspr is deliberately bracketed by isync on both sides, so
	 * no surrounding load/store can be reordered across the
	 * permission change — NOTE(review): presumably required by the
	 * ISA's context-synchronisation rules for AMR updates; confirm.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}
0344
0345 static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
0346 {
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
0357
0358
0359 if (is_write) {
0360 return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
0361 }
0362 return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
0363 }
0364
0365 static __always_inline void allow_user_access(void __user *to, const void __user *from,
0366 unsigned long size, unsigned long dir)
0367 {
0368 unsigned long thread_amr = 0;
0369
0370
0371 BUILD_BUG_ON(!__builtin_constant_p(dir));
0372
0373 if (mmu_has_feature(MMU_FTR_PKEY))
0374 thread_amr = current_thread_amr();
0375
0376 if (dir == KUAP_READ)
0377 set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
0378 else if (dir == KUAP_WRITE)
0379 set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
0380 else if (dir == KUAP_READ_WRITE)
0381 set_kuap(thread_amr);
0382 else
0383 BUILD_BUG();
0384 }
0385
0386 #else
0387
/*
 * KUAP disabled at build time: report the fully-blocked value so the
 * flags-based flush test in restore_user_access() still fires.
 */
static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}
0392
/* KUAP disabled at build time: nothing to do. */
static inline void set_kuap(unsigned long value) { }
0394
/* KUAP disabled at build time: user access needs no unlocking. */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }
0398
0399 #endif
0400
/*
 * Close the user-access window: re-block all kernel access to
 * userspace, then run the uaccess flush if that mitigation is enabled
 * (static key patched at boot).
 */
static __always_inline void prevent_user_access(unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}
0407
/*
 * Like prevent_user_access(), but first capture the current AMR state
 * so the caller can reinstate it later with restore_user_access().
 * Returns the pre-lock value from get_kuap().
 */
static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}
0418
/*
 * Reinstate an AMR state previously returned by
 * prevent_user_access_return().  If that saved state was the
 * fully-blocked value we are effectively closing the window again, so
 * run the uaccess flush (when enabled) exactly as prevent_user_access()
 * would.
 */
static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
0425 #endif
0426
0427 #endif