Back to home page

OSCL-LXR

 
 

    


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

/*
 * AMR/IAMR values used to block kernel access to / execution of
 * userspace. NOTE(review): the alternating bit patterns presumably set
 * the per-pkey read-disable (0x54...) and write-disable (0xa8...) bit
 * pairs for the keys covering user mappings — confirm against the ISA
 * AMR layout. AMR_KUAP_BLOCKED blocks both reads and writes.
 */
#define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE    UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED    UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED    (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
0012 
0013 #ifdef __ASSEMBLY__
0014 
/*
 * Restore the user AMR/IAMR saved in the stack frame
 * (STACK_REGS_AMR / STACK_REGS_IAMR) when returning to userspace.
 * Entire body is skipped at runtime unless MMU_FTR_PKEY is enabled.
 * \gpr1 and \gpr2 are scratch GPRs.
 */
.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
    BEGIN_MMU_FTR_SECTION_NESTED(67)
    b   100f  // skip_restore_amr
    END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
    /*
     * AMR and IAMR are going to be different when
     * returning to userspace.
     */
    ld  \gpr1, STACK_REGS_AMR(r1)

    /*
     * If kuap feature is not enabled, do the mtspr
     * only if AMR value is different (with KUAP enabled the kernel
     * value is known to differ, so skip the compare entirely).
     */
    BEGIN_MMU_FTR_SECTION_NESTED(68)
    mfspr   \gpr2, SPRN_AMR
    cmpd    \gpr1, \gpr2
    beq 99f
    END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

    isync
    mtspr   SPRN_AMR, \gpr1
99:
    /*
     * Restore IAMR only when returning to userspace
     */
    ld  \gpr1, STACK_REGS_IAMR(r1)

    /*
     * If kuep feature is not enabled, do the mtspr
     * only if IAMR value is different.
     */
    BEGIN_MMU_FTR_SECTION_NESTED(69)
    mfspr   \gpr2, SPRN_IAMR
    cmpd    \gpr1, \gpr2
    beq 100f
    END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

    isync
    mtspr   SPRN_IAMR, \gpr1

100: //skip_restore_amr
    /* No isync required, see kuap_user_restore() */
#endif
.endm
0061 
/*
 * Restore the kernel AMR saved by kuap_save_amr_and_lock when an
 * interrupt returns to kernel space. Only emitted live when
 * MMU_FTR_BOOK3S_KUAP is set; the IAMR is deliberately left alone on
 * the kernel-to-kernel path. \gpr1 and \gpr2 are scratch GPRs.
 */
.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

    BEGIN_MMU_FTR_SECTION_NESTED(67)
    /*
     * AMR is going to be mostly the same since we are
     * returning to the kernel. Compare and do a mtspr.
     */
    ld  \gpr2, STACK_REGS_AMR(r1)
    mfspr   \gpr1, SPRN_AMR
    cmpd    \gpr1, \gpr2
    beq 100f
    isync
    mtspr   SPRN_AMR, \gpr2
    /*
     * No isync required, see kuap_restore_amr()
     * No need to restore IAMR when returning to kernel space.
     */
100:
    END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
0084 
#ifdef CONFIG_PPC_KUAP
/*
 * Debug-only sanity check (CONFIG_PPC_KUAP_DEBUG): emit a one-shot
 * WARN via a conditional trap (tdne) if the AMR does not hold the
 * fully-blocked value at a point where KUAP is expected to be locked.
 * \gpr1 and \gpr2 are scratch GPRs.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
    BEGIN_MMU_FTR_SECTION_NESTED(67)
    mfspr   \gpr1, SPRN_AMR
    /* Prevent access to userspace using any key values */
    LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:    tdne    \gpr1, \gpr2
    EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
    END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
0098 
/*
 * Save AMR (and, on entry from userspace, IAMR) into the stack frame
 * and lock them down on interrupt/exception entry. Pseudo-code:
 *
 *  if (pkey) {
 *
 *      save AMR -> stack;
 *      if (kuap) {
 *          if (AMR != BLOCKED)
 *              KUAP_BLOCKED -> AMR;
 *      }
 *      if (from_user) {
 *          save IAMR -> stack;
 *          if (kuep) {
 *              KUEP_BLOCKED -> IAMR;
 *          }
 *      }
 *      return;
 *  }
 *
 *  if (kuap) {
 *      if (from_kernel) {
 *          save AMR -> stack;
 *          if (AMR != BLOCKED)
 *              KUAP_BLOCKED -> AMR;
 *      }
 *
 *  }
 */
/*
 * \gpr1, \gpr2  — scratch GPRs
 * \use_cr       — CR field used for the AMR compare below
 * \msr_pr_cr    — CR field testing the interrupted MSR[PR]
 *                 (bne -> entered from userspace); leave blank when the
 *                 entry is known to be from kernel
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

    /*
     * if both pkey and kuap is disabled, nothing to do
     */
    BEGIN_MMU_FTR_SECTION_NESTED(68)
    b   100f  // skip_save_amr
    END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

    /*
     * if pkey is disabled and we are entering from userspace
     * don't do anything.
     */
    BEGIN_MMU_FTR_SECTION_NESTED(67)
    .ifnb \msr_pr_cr
    /*
     * Without pkey we are not changing AMR outside the kernel
     * hence skip this completely.
     */
    bne \msr_pr_cr, 100f  // from userspace
    .endif
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

    /*
     * pkey is enabled or pkey is disabled but entering from kernel
     */
    mfspr   \gpr1, SPRN_AMR
    std \gpr1, STACK_REGS_AMR(r1)

    /*
     * update kernel AMR with AMR_KUAP_BLOCKED only
     * if KUAP feature is enabled
     */
    BEGIN_MMU_FTR_SECTION_NESTED(69)
    LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
    cmpd    \use_cr, \gpr1, \gpr2
    beq \use_cr, 102f
    /*
     * We don't isync here because we very recently entered via an interrupt
     */
    mtspr   SPRN_AMR, \gpr2
    isync
102:
    END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

    /*
     * if entering from kernel we don't need save IAMR
     */
    .ifnb \msr_pr_cr
    beq \msr_pr_cr, 100f // from kernel space
    mfspr   \gpr1, SPRN_IAMR
    std \gpr1, STACK_REGS_IAMR(r1)

    /*
     * update kernel IAMR with AMR_KUEP_BLOCKED only
     * if KUEP feature is enabled
     */
    BEGIN_MMU_FTR_SECTION_NESTED(70)
    LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
    mtspr   SPRN_IAMR, \gpr2
    isync
    END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
    .endif

100: // skip_save_amr
#endif
.endm
0193 
0194 #else /* !__ASSEMBLY__ */
0195 
0196 #include <linux/jump_label.h>
0197 
0198 DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
0199 
0200 #ifdef CONFIG_PPC_PKEY
0201 
0202 extern u64 __ro_after_init default_uamor;
0203 extern u64 __ro_after_init default_amr;
0204 extern u64 __ro_after_init default_iamr;
0205 
0206 #include <asm/mmu.h>
0207 #include <asm/ptrace.h>
0208 
/*
 * A kthread using kthread_use_mm() should arguably inherit the AMR
 * value of the address space it operates on. But the AMR value is
 * thread-specific, and we inherit the address space — not another
 * thread's access restrictions. Because of this, ignore the AMR value
 * when accessing userspace from a kernel thread.
 */
0215 static inline u64 current_thread_amr(void)
0216 {
0217     if (current->thread.regs)
0218         return current->thread.regs->amr;
0219     return default_amr;
0220 }
0221 
0222 static inline u64 current_thread_iamr(void)
0223 {
0224     if (current->thread.regs)
0225         return current->thread.regs->iamr;
0226     return default_iamr;
0227 }
0228 #endif /* CONFIG_PPC_PKEY */
0229 
0230 #ifdef CONFIG_PPC_KUAP
0231 
0232 static __always_inline bool kuap_is_disabled(void)
0233 {
0234     return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
0235 }
0236 
/*
 * Restore the user AMR/IAMR from pt_regs when returning to userspace
 * (C counterpart of the kuap_user_restore asm macro). Only does work
 * when pkeys are enabled. When KUAP/KUEP is disabled the SPR is read
 * first so the (expensive) mtspr can be skipped if the value already
 * matches; with the feature enabled the kernel value is known to
 * differ, so the restore is unconditional.
 */
static inline void kuap_user_restore(struct pt_regs *regs)
{
    bool restore_amr = false, restore_iamr = false;
    unsigned long amr, iamr;

    if (!mmu_has_feature(MMU_FTR_PKEY))
        return;

    if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
        amr = mfspr(SPRN_AMR);
        if (amr != regs->amr)
            restore_amr = true;
    } else {
        restore_amr = true;
    }

    if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
        iamr = mfspr(SPRN_IAMR);
        if (iamr != regs->iamr)
            restore_iamr = true;
    } else {
        restore_iamr = true;
    }


    if (restore_amr || restore_iamr) {
        /* One isync covers both SPR updates. */
        isync();
        if (restore_amr)
            mtspr(SPRN_AMR, regs->amr);
        if (restore_iamr)
            mtspr(SPRN_IAMR, regs->iamr);
    }
    /*
     * No isync required here because we are about to rfi
     * back to previous context before any user accesses
     * would be made, which is a CSI.
     */
}
0275 
0276 static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
0277 {
0278     if (likely(regs->amr == amr))
0279         return;
0280 
0281     isync();
0282     mtspr(SPRN_AMR, regs->amr);
0283     /*
0284      * No isync required here because we are about to rfi
0285      * back to previous context before any user accesses
0286      * would be made, which is a CSI.
0287      *
0288      * No need to restore IAMR when returning to kernel space.
0289      */
0290 }
0291 
/*
 * Return the current AMR value. With CONFIG_PPC_KUAP_DEBUG, also warn
 * once if the AMR is not in the fully-blocked state — callers expect
 * KUAP to be locked at this point (C analogue of kuap_check_amr).
 */
static inline unsigned long __kuap_get_and_assert_locked(void)
{
    unsigned long amr = mfspr(SPRN_AMR);

    if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
        WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
    return amr;
}
0300 
/*
 * Do nothing, book3s/64 does that in ASM — see kuap_save_amr_and_lock
 * above, which runs in the interrupt entry path before C is reached.
 */
static inline void __kuap_lock(void)
{
}

/* Same as __kuap_lock(): save+lock already happened in asm entry. */
static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
}
0309 
0310 /*
0311  * We support individually allowing read or write, but we don't support nesting
0312  * because that would require an expensive read/modify write of the AMR.
0313  */
0314 
0315 static inline unsigned long get_kuap(void)
0316 {
0317     /*
0318      * We return AMR_KUAP_BLOCKED when we don't support KUAP because
0319      * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
0320      * cause restore_user_access to do a flush.
0321      *
0322      * This has no effect in terms of actually blocking things on hash,
0323      * so it doesn't break anything.
0324      */
0325     if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
0326         return AMR_KUAP_BLOCKED;
0327 
0328     return mfspr(SPRN_AMR);
0329 }
0330 
0331 static __always_inline void set_kuap(unsigned long value)
0332 {
0333     if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
0334         return;
0335 
0336     /*
0337      * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
0338      * before and after the move to AMR. See table 6 on page 1134.
0339      */
0340     isync();
0341     mtspr(SPRN_AMR, value);
0342     isync();
0343 }
0344 
0345 static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
0346 {
0347     /*
0348      * For radix this will be a storage protection fault (DSISR_PROTFAULT).
0349      * For hash this will be a key fault (DSISR_KEYFAULT)
0350      */
0351     /*
0352      * We do have exception table entry, but accessing the
0353      * userspace results in fault.  This could be because we
0354      * didn't unlock the AMR or access is denied by userspace
0355      * using a key value that blocks access. We are only interested
0356      * in catching the use case of accessing without unlocking
0357      * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
0358      */
0359     if (is_write) {
0360         return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
0361     }
0362     return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
0363 }
0364 
/*
 * Open a user-access window for the given direction(s). 'dir' must be
 * a compile-time constant (enforced by the BUILD_BUG_ON) so the chain
 * below folds down to a single set_kuap() call. With pkeys enabled the
 * thread's own AMR restrictions are kept and only the KUAP block bit
 * for the requested direction is left cleared (allow read => keep
 * writes blocked, and vice versa).
 */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
                          unsigned long size, unsigned long dir)
{
    unsigned long thread_amr = 0;

    // This is written so we can resolve to a single case at build time
    BUILD_BUG_ON(!__builtin_constant_p(dir));

    if (mmu_has_feature(MMU_FTR_PKEY))
        thread_amr = current_thread_amr();

    if (dir == KUAP_READ)
        set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
    else if (dir == KUAP_WRITE)
        set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
    else if (dir == KUAP_READ_WRITE)
        set_kuap(thread_amr);
    else
        BUILD_BUG();
}
0385 
0386 #else /* CONFIG_PPC_KUAP */
0387 
/*
 * !CONFIG_PPC_KUAP stub: still report the blocked value so that the
 * uaccess-flush logic in restore_user_access() keeps working (see the
 * comment in the CONFIG_PPC_KUAP version of get_kuap()).
 */
static inline unsigned long get_kuap(void)
{
    return AMR_KUAP_BLOCKED;
}

/* !CONFIG_PPC_KUAP stub: AMR is not managed, nothing to write. */
static inline void set_kuap(unsigned long value) { }

/* !CONFIG_PPC_KUAP stub: user access is never blocked. */
static __always_inline void allow_user_access(void __user *to, const void __user *from,
                          unsigned long size, unsigned long dir)
{ }
0398 
0399 #endif /* !CONFIG_PPC_KUAP */
0400 
/*
 * Close the user-access window: lock the AMR to the fully-blocked
 * value, then run the uaccess flush if its static key is enabled.
 * 'dir' is unused — blocking is always total.
 */
static __always_inline void prevent_user_access(unsigned long dir)
{
    set_kuap(AMR_KUAP_BLOCKED);
    if (static_branch_unlikely(&uaccess_flush_key))
        do_uaccess_flush();
}
0407 
0408 static inline unsigned long prevent_user_access_return(void)
0409 {
0410     unsigned long flags = get_kuap();
0411 
0412     set_kuap(AMR_KUAP_BLOCKED);
0413     if (static_branch_unlikely(&uaccess_flush_key))
0414         do_uaccess_flush();
0415 
0416     return flags;
0417 }
0418 
0419 static inline void restore_user_access(unsigned long flags)
0420 {
0421     set_kuap(flags);
0422     if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
0423         do_uaccess_flush();
0424 }
0425 #endif /* __ASSEMBLY__ */
0426 
0427 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */