// SPDX-License-Identifier: GPL-2.0+
//
// Security related flags and so on.
//
// Copyright 2018, Michael Ellerman, IBM Corporation.

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>
#include <linux/debugfs.h>

#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
#include <asm/security_features.h>
#include <asm/setup.h>
#include <asm/inst.h>

#include "setup.h"

u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
0025 
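/*
 * The branch cache flush types cover both the count cache (indirect branch
 * prediction) and the link stack (return address prediction): NONE means no
 * flushing, SW a software instruction sequence, HW a hardware-assisted flush.
 */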
enum branch_cache_flush_type {
    BRANCH_CACHE_FLUSH_NONE = 0x1,
    BRANCH_CACHE_FLUSH_SW   = 0x2,
    BRANCH_CACHE_FLUSH_HW   = 0x4,
};
static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif

static void enable_barrier_nospec(bool enable)
{
    barrier_nospec_enabled = enable;
    do_barrier_nospec_fixups(enable);
}

void __init setup_barrier_nospec(void)
{
    bool enable;
0050 
    /*
     * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
     * But there's a good reason not to. The two flags we check below are
     * both enabled by default in the kernel, so if the hcall is not
     * functional they will be enabled.
     * On a system where the host firmware has been updated (so the ori
     * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
     * not been updated, we would like to enable the barrier. Dropping the
     * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
     * we potentially enable the barrier on systems where the host firmware
     * is not updated, but that's harmless as it's a no-op.
     */
    enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
         security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

    if (!no_nospec && !cpu_mitigations_off())
        enable_barrier_nospec(enable);
}

static int __init handle_nospectre_v1(char *p)
{
    no_nospec = true;

    return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);
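/*
 * Example: booting with "nospectre_v1" on the kernel command line sets
 * no_nospec, so setup_barrier_nospec() leaves the barrier disabled
 * regardless of what firmware reports.
 */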
0077 
#ifdef CONFIG_DEBUG_FS
static int barrier_nospec_set(void *data, u64 val)
{
    switch (val) {
    case 0:
    case 1:
        break;
    default:
        return -EINVAL;
    }

    if (!!val == !!barrier_nospec_enabled)
        return 0;

    enable_barrier_nospec(!!val);

    return 0;
}

static int barrier_nospec_get(void *data, u64 *val)
{
    *val = barrier_nospec_enabled ? 1 : 0;
    return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
             barrier_nospec_set, "%llu\n");

static __init int barrier_nospec_debugfs_init(void)
{
    debugfs_create_file_unsafe("barrier_nospec", 0600,
                   arch_debugfs_dir, NULL,
                   &fops_barrier_nospec);
    return 0;
}
device_initcall(barrier_nospec_debugfs_init);
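/*
 * The barrier can then be toggled at runtime, e.g. (illustrative, assuming
 * debugfs is mounted at /sys/kernel/debug, where arch_debugfs_dir is the
 * powerpc subdirectory):
 *
 *   # cat /sys/kernel/debug/powerpc/barrier_nospec
 *   1
 *   # echo 0 > /sys/kernel/debug/powerpc/barrier_nospec
 */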
0114 
static __init int security_feature_debugfs_init(void)
{
    debugfs_create_x64("security_features", 0400, arch_debugfs_dir,
               &powerpc_security_features);
    return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
    no_spectrev2 = true;

    return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
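/*
 * On FSL Book3E the Spectre v2 mitigation is a branch target buffer (BTB)
 * flush on kernel entry. When the user opts out (nospectre_v2 or
 * mitigations=off) the flush sites are patched out; otherwise
 * btb_flush_enabled records that the flush is active, which
 * cpu_show_spectre_v2() reports below.
 */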
void __init setup_spectre_v2(void)
{
    if (no_spectrev2 || cpu_mitigations_off())
        do_btb_flush_fixups();
    else
        btb_flush_enabled = true;
}
#endif /* CONFIG_PPC_FSL_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
    bool thread_priv;

    thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

    if (rfi_flush) {
        struct seq_buf s;
        seq_buf_init(&s, buf, PAGE_SIZE - 1);

        seq_buf_printf(&s, "Mitigation: RFI Flush");
        if (thread_priv)
            seq_buf_printf(&s, ", L1D private per thread");

        seq_buf_printf(&s, "\n");

        return s.len;
    }

    if (thread_priv)
        return sprintf(buf, "Vulnerable: L1D private per thread\n");

    if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
        !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
        return sprintf(buf, "Not affected\n");

    return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
    return cpu_show_meltdown(dev, attr, buf);
}
#endif

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct seq_buf s;

    seq_buf_init(&s, buf, PAGE_SIZE - 1);

    if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
        if (barrier_nospec_enabled)
            seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
        else
            seq_buf_printf(&s, "Vulnerable");

        if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
            seq_buf_printf(&s, ", ori31 speculation barrier enabled");

        seq_buf_printf(&s, "\n");
    } else
        seq_buf_printf(&s, "Not affected\n");

    return s.len;
}
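/*
 * Surfaced as /sys/devices/system/cpu/vulnerabilities/spectre_v1; a
 * mitigated system might report, e.g.:
 *
 *   Mitigation: __user pointer sanitization, ori31 speculation barrier enabled
 */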
0201 
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
    struct seq_buf s;
    bool bcs, ccd;

    seq_buf_init(&s, buf, PAGE_SIZE - 1);

    bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
    ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);

    if (bcs || ccd) {
        seq_buf_printf(&s, "Mitigation: ");

        if (bcs)
            seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");

        if (bcs && ccd)
            seq_buf_printf(&s, ", ");

        if (ccd)
            seq_buf_printf(&s, "Indirect branch cache disabled");

    } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
        seq_buf_printf(&s, "Mitigation: Software count cache flush");

        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
            seq_buf_printf(&s, " (hardware accelerated)");

    } else if (btb_flush_enabled) {
        seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
    } else {
        seq_buf_printf(&s, "Vulnerable");
    }

    if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
        if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
            seq_buf_printf(&s, ", Software link stack flush");
        if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
            seq_buf_printf(&s, " (hardware accelerated)");
    }

    seq_buf_printf(&s, "\n");

    return s.len;
}
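/*
 * Surfaced as /sys/devices/system/cpu/vulnerabilities/spectre_v2; with the
 * software count cache flush and link stack flush active this would read,
 * e.g.:
 *
 *   Mitigation: Software count cache flush, Software link stack flush
 */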
0247 
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
static bool stf_barrier;
0256 
static int __init handle_no_stf_barrier(char *p)
{
    pr_info("stf-barrier: disabled on command line.\n");
    no_stf_barrier = true;
    return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);
0265 
enum stf_barrier_type stf_barrier_type_get(void)
{
    return stf_enabled_flush_types;
}

/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
    if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0) {
        /* Until firmware tells us, we have the barrier with auto */
        return 0;
    } else if (strncmp(p, "off", 3) == 0) {
        handle_no_stf_barrier(NULL);
        return 0;
    }

    return 1;
}
early_param("spec_store_bypass_disable", handle_ssbd);

/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
    handle_no_stf_barrier(NULL);
    return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
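/*
 * Example: "spec_store_bypass_disable=off" and "nospec_store_bypass_disable"
 * on the kernel command line both funnel into handle_no_stf_barrier(), so
 * the store-forwarding barrier is never enabled at boot.
 */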
0294 
static void stf_barrier_enable(bool enable)
{
    if (enable)
        do_stf_barrier_fixups(stf_enabled_flush_types);
    else
        do_stf_barrier_fixups(STF_BARRIER_NONE);

    stf_barrier = enable;
}

void setup_stf_barrier(void)
{
    enum stf_barrier_type type;
    bool enable;

    /* Default to fallback in case fw-features are not available */
    if (cpu_has_feature(CPU_FTR_ARCH_300))
        type = STF_BARRIER_EIEIO;
    else if (cpu_has_feature(CPU_FTR_ARCH_207S))
        type = STF_BARRIER_SYNC_ORI;
    else if (cpu_has_feature(CPU_FTR_ARCH_206))
        type = STF_BARRIER_FALLBACK;
    else
        type = STF_BARRIER_NONE;

    enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
         security_ftr_enabled(SEC_FTR_STF_BARRIER);

    if (type == STF_BARRIER_FALLBACK) {
        pr_info("stf-barrier: fallback barrier available\n");
    } else if (type == STF_BARRIER_SYNC_ORI) {
        pr_info("stf-barrier: hwsync barrier available\n");
    } else if (type == STF_BARRIER_EIEIO) {
        pr_info("stf-barrier: eieio barrier available\n");
    }

    stf_enabled_flush_types = type;

    if (!no_stf_barrier && !cpu_mitigations_off())
        stf_barrier_enable(enable);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
    if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
        const char *type;
        switch (stf_enabled_flush_types) {
        case STF_BARRIER_EIEIO:
            type = "eieio";
            break;
        case STF_BARRIER_SYNC_ORI:
            type = "hwsync";
            break;
        case STF_BARRIER_FALLBACK:
            type = "fallback";
            break;
        default:
            type = "unknown";
        }
        return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
    }

    if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
        !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
        return sprintf(buf, "Not affected\n");

    return sprintf(buf, "Vulnerable\n");
}
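/*
 * Surfaced as /sys/devices/system/cpu/vulnerabilities/spec_store_bypass;
 * on an ISA v3.0 (CPU_FTR_ARCH_300) machine with the barrier enabled this
 * would read, e.g.:
 *
 *   Mitigation: Kernel entry/exit barrier (eieio)
 */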
0363 
static int ssb_prctl_get(struct task_struct *task)
{
    if (stf_enabled_flush_types == STF_BARRIER_NONE)
        /*
         * We don't have an explicit signal from firmware that we're
         * vulnerable or not, we only have certain CPU revisions that
         * are known to be vulnerable.
         *
         * We assume that if we're on another CPU, where the barrier is
         * NONE, then we are not vulnerable.
         */
        return PR_SPEC_NOT_AFFECTED;

    /*
     * If we do have a barrier type then we are vulnerable. The barrier
     * is not a global or per-process mitigation, so the only value we
     * can report here is PR_SPEC_ENABLE, which appears as "vulnerable"
     * in /proc.
     */
    return PR_SPEC_ENABLE;
}
0387 
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
    switch (which) {
    case PR_SPEC_STORE_BYPASS:
        return ssb_prctl_get(task);
    default:
        return -ENODEV;
    }
}
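/*
 * Userspace reaches this through prctl(2). A minimal sketch of a caller
 * (illustrative only, not part of this file):
 *
 *   #include <sys/prctl.h>
 *
 *   int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *   // On powerpc, ret is PR_SPEC_NOT_AFFECTED or PR_SPEC_ENABLE, per
 *   // ssb_prctl_get() above.
 */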
0397 
#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
    bool enable;

    if (val == 1)
        enable = true;
    else if (val == 0)
        enable = false;
    else
        return -EINVAL;

    /* Only do anything if we're changing state */
    if (enable != stf_barrier)
        stf_barrier_enable(enable);

    return 0;
}

static int stf_barrier_get(void *data, u64 *val)
{
    *val = stf_barrier ? 1 : 0;
    return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
             "%llu\n");

static __init int stf_barrier_debugfs_init(void)
{
    debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir,
                   NULL, &fops_stf_barrier);
    return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
0434 
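/*
 * Live-patch the branch cache flush call sites. The ordering appears
 * deliberate: when tearing down, the bcctr is patched out first; when
 * enabling the hardware flush, it is patched in last, so a CPU should never
 * execute a partially written li/mtctr/bcctr sequence.
 */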
static void update_branch_cache_flush(void)
{
    u32 *site, __maybe_unused *site2;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
    site = &patch__call_kvm_flush_link_stack;
    site2 = &patch__call_kvm_flush_link_stack_p9;
    // This controls the branch from guest_exit_cont to kvm_flush_link_stack
    if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
        patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
        patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
    } else {
        // Could use HW flush, but that could also flush count cache
        patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
        patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
    }
#endif

    // Patch out the bcctr first, then nop the rest
    site = &patch__call_flush_branch_caches3;
    patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
    site = &patch__call_flush_branch_caches2;
    patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
    site = &patch__call_flush_branch_caches1;
    patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));

    // This controls the branch from _switch to flush_branch_caches
    if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
        link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
        // Nothing to be done

    } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
           link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
        // Patch in the bcctr last
        site = &patch__call_flush_branch_caches1;
        patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
        site = &patch__call_flush_branch_caches2;
        patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
        site = &patch__call_flush_branch_caches3;
        patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));

    } else {
        patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);

        // If we just need to flush the link stack, early return
        if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
            patch_instruction_site(&patch__flush_link_stack_return,
                           ppc_inst(PPC_RAW_BLR()));

        // If we have flush instruction, early return
        } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
            patch_instruction_site(&patch__flush_count_cache_return,
                           ppc_inst(PPC_RAW_BLR()));
        }
    }
}
0491 
static void toggle_branch_cache_flush(bool enable)
{
    if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
        if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
            count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;

        pr_info("count-cache-flush: flush disabled.\n");
    } else {
        if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
            count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
            pr_info("count-cache-flush: hardware flush enabled.\n");
        } else {
            count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
            pr_info("count-cache-flush: software flush enabled.\n");
        }
    }

    if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
        if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
            link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;

        pr_info("link-stack-flush: flush disabled.\n");
    } else {
        if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
            link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
            pr_info("link-stack-flush: hardware flush enabled.\n");
        } else {
            link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
            pr_info("link-stack-flush: software flush enabled.\n");
        }
    }

    update_branch_cache_flush();
}

void setup_count_cache_flush(void)
{
    bool enable = true;

    if (no_spectrev2 || cpu_mitigations_off()) {
        if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
            security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
            pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");

        enable = false;
    }

    /*
     * There's no firmware feature flag/hypervisor bit to tell us we need to
     * flush the link stack on context switch. So we set it here if we see
     * either of the Spectre v2 mitigations that aim to protect userspace.
     */
    if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
        security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
        security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);

    toggle_branch_cache_flush(enable);
}

static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
static bool no_entry_flush;
static bool no_uaccess_flush;
bool rfi_flush;
static bool entry_flush;
static bool uaccess_flush;
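/*
 * Static branch consumed by the user-access exit paths; it is flipped
 * together with the patched flush sites in uaccess_flush_enable() below.
 */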
DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
EXPORT_SYMBOL(uaccess_flush_key);

static int __init handle_no_rfi_flush(char *p)
{
    pr_info("rfi-flush: disabled on command line.\n");
    no_rfi_flush = true;
    return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

static int __init handle_no_entry_flush(char *p)
{
    pr_info("entry-flush: disabled on command line.\n");
    no_entry_flush = true;
    return 0;
}
early_param("no_entry_flush", handle_no_entry_flush);

static int __init handle_no_uaccess_flush(char *p)
{
    pr_info("uaccess-flush: disabled on command line.\n");
    no_uaccess_flush = true;
    return 0;
}
early_param("no_uaccess_flush", handle_no_uaccess_flush);
0585 
/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
    pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
    handle_no_rfi_flush(NULL);
    return 0;
}
early_param("nopti", handle_no_pti);

static void do_nothing(void *unused)
{
    /*
     * We don't need to do the flush explicitly, just enter+exit kernel is
     * sufficient, the RFI exit handlers will do the right thing.
     */
}

void rfi_flush_enable(bool enable)
{
    if (enable) {
        do_rfi_flush_fixups(enabled_flush_types);
        on_each_cpu(do_nothing, NULL, 1);
    } else
        do_rfi_flush_fixups(L1D_FLUSH_NONE);

    rfi_flush = enable;
}

static void entry_flush_enable(bool enable)
{
    if (enable) {
        do_entry_flush_fixups(enabled_flush_types);
        on_each_cpu(do_nothing, NULL, 1);
    } else {
        do_entry_flush_fixups(L1D_FLUSH_NONE);
    }

    entry_flush = enable;
}

static void uaccess_flush_enable(bool enable)
{
    if (enable) {
        do_uaccess_flush_fixups(enabled_flush_types);
        static_branch_enable(&uaccess_flush_key);
        on_each_cpu(do_nothing, NULL, 1);
    } else {
        static_branch_disable(&uaccess_flush_key);
        do_uaccess_flush_fixups(L1D_FLUSH_NONE);
    }

    uaccess_flush = enable;
}

static void __ref init_fallback_flush(void)
{
    u64 l1d_size, limit;
    int cpu;

    /* Only allocate the fallback flush area once (at boot time). */
    if (l1d_flush_fallback_area)
        return;

    l1d_size = ppc64_caches.l1d.size;

    /*
     * If there is no d-cache-size property in the device tree, l1d_size
     * could be zero. That leads to the loop in the asm wrapping around to
     * 2^64-1, and then walking off the end of the fallback area and
     * eventually causing a page fault which is fatal. Just default to
     * something vaguely sane.
     */
    if (!l1d_size)
        l1d_size = (64 * 1024);

    limit = min(ppc64_bolted_size(), ppc64_rma_size);

    /*
     * Align to L1d size, and size it at 2x L1d size, to catch possible
     * hardware prefetch runoff. We don't have a recipe for load patterns to
     * reliably avoid the prefetcher.
     */
    l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
                        l1d_size, MEMBLOCK_LOW_LIMIT,
                        limit, NUMA_NO_NODE);
    if (!l1d_flush_fallback_area)
        panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
              __func__, l1d_size * 2, l1d_size, &limit);

    for_each_possible_cpu(cpu) {
        struct paca_struct *paca = paca_ptrs[cpu];
        paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
        paca->l1d_flush_size = l1d_size;
    }
}
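/*
 * The fallback "displacement flush" reads through an area twice the L1D
 * size, evicting any resident cache lines; the low-level entry/exit
 * assembly is expected to pick the area and size up from the paca fields
 * set above.
 */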
0685 
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
    if (types & L1D_FLUSH_FALLBACK) {
        pr_info("rfi-flush: fallback displacement flush available\n");
        init_fallback_flush();
    }

    if (types & L1D_FLUSH_ORI)
        pr_info("rfi-flush: ori type flush available\n");

    if (types & L1D_FLUSH_MTTRIG)
        pr_info("rfi-flush: mttrig type flush available\n");

    enabled_flush_types = types;

    if (!cpu_mitigations_off() && !no_rfi_flush)
        rfi_flush_enable(enable);
}

void setup_entry_flush(bool enable)
{
    if (cpu_mitigations_off())
        return;

    if (!no_entry_flush)
        entry_flush_enable(enable);
}

void setup_uaccess_flush(bool enable)
{
    if (cpu_mitigations_off())
        return;

    if (!no_uaccess_flush)
        uaccess_flush_enable(enable);
}

#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
    bool enable;

    if (val == 1)
        enable = true;
    else if (val == 0)
        enable = false;
    else
        return -EINVAL;

    toggle_branch_cache_flush(enable);

    return 0;
}

static int count_cache_flush_get(void *data, u64 *val)
{
    if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
        *val = 0;
    else
        *val = 1;

    return 0;
}

static int link_stack_flush_get(void *data, u64 *val)
{
    if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE)
        *val = 0;
    else
        *val = 1;

    return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
             count_cache_flush_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get,
             count_cache_flush_set, "%llu\n");

static __init int count_cache_flush_debugfs_init(void)
{
    debugfs_create_file_unsafe("count_cache_flush", 0600,
                   arch_debugfs_dir, NULL,
                   &fops_count_cache_flush);
    debugfs_create_file_unsafe("link_stack_flush", 0600,
                   arch_debugfs_dir, NULL,
                   &fops_link_stack_flush);
    return 0;
}
device_initcall(count_cache_flush_debugfs_init);
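/*
 * Note both files share count_cache_flush_set() as their write handler:
 * toggle_branch_cache_flush() always recomputes the count cache and link
 * stack flush types together, so there is no independent toggle.
 */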
0776 
static int rfi_flush_set(void *data, u64 val)
{
    bool enable;

    if (val == 1)
        enable = true;
    else if (val == 0)
        enable = false;
    else
        return -EINVAL;

    /* Only do anything if we're changing state */
    if (enable != rfi_flush)
        rfi_flush_enable(enable);

    return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
    *val = rfi_flush ? 1 : 0;
    return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static int entry_flush_set(void *data, u64 val)
{
    bool enable;

    if (val == 1)
        enable = true;
    else if (val == 0)
        enable = false;
    else
        return -EINVAL;

    /* Only do anything if we're changing state */
    if (enable != entry_flush)
        entry_flush_enable(enable);

    return 0;
}

static int entry_flush_get(void *data, u64 *val)
{
    *val = entry_flush ? 1 : 0;
    return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");

static int uaccess_flush_set(void *data, u64 val)
{
    bool enable;

    if (val == 1)
        enable = true;
    else if (val == 0)
        enable = false;
    else
        return -EINVAL;

    /* Only do anything if we're changing state */
    if (enable != uaccess_flush)
        uaccess_flush_enable(enable);

    return 0;
}

static int uaccess_flush_get(void *data, u64 *val)
{
    *val = uaccess_flush ? 1 : 0;
    return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
0854 
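/*
 * As with the other switches above, these land in the powerpc debugfs
 * directory, e.g. (illustrative):
 *
 *   # echo 1 > /sys/kernel/debug/powerpc/rfi_flush
 *   # cat /sys/kernel/debug/powerpc/entry_flush
 */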
static __init int rfi_flush_debugfs_init(void)
{
    debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush);
    debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush);
    debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush);
    return 0;
}
device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */