Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 
0003 /*
0004  * Copyright 2018-2019 IBM Corporation.
0005  */
0006 
0007 #define __SANE_USERSPACE_TYPES__
0008 
0009 #include <sys/types.h>
0010 #include <stdint.h>
0011 #include <malloc.h>
0012 #include <unistd.h>
0013 #include <stdlib.h>
0014 #include <string.h>
0015 #include <stdio.h>
0016 #include <sys/prctl.h>
0017 #include "utils.h"
0018 
0019 #include "../pmu/event.h"
0020 
0021 
0022 extern void pattern_cache_loop(void);
0023 extern void indirect_branch_loop(void);
0024 
0025 static int do_count_loop(struct event *events, bool is_p9, s64 *miss_percent)
0026 {
0027     u64 pred, mpred;
0028 
0029     prctl(PR_TASK_PERF_EVENTS_ENABLE);
0030 
0031     if (is_p9)
0032         pattern_cache_loop();
0033     else
0034         indirect_branch_loop();
0035 
0036     prctl(PR_TASK_PERF_EVENTS_DISABLE);
0037 
0038     event_read(&events[0]);
0039     event_read(&events[1]);
0040 
0041     // We could scale all the events by running/enabled but we're lazy
0042     // As long as the PMU is uncontended they should all run
0043     FAIL_IF(events[0].result.running != events[0].result.enabled);
0044     FAIL_IF(events[1].result.running != events[1].result.enabled);
0045 
0046     pred =  events[0].result.value;
0047     mpred = events[1].result.value;
0048 
0049     if (is_p9) {
0050         event_read(&events[2]);
0051         event_read(&events[3]);
0052         FAIL_IF(events[2].result.running != events[2].result.enabled);
0053         FAIL_IF(events[3].result.running != events[3].result.enabled);
0054 
0055         pred  += events[2].result.value;
0056         mpred += events[3].result.value;
0057     }
0058 
0059     *miss_percent = 100 * mpred / pred;
0060 
0061     return 0;
0062 }
0063 
0064 static void setup_event(struct event *e, u64 config, char *name)
0065 {
0066     event_init_named(e, config, name);
0067 
0068     e->attr.disabled = 1;
0069     e->attr.exclude_kernel = 1;
0070     e->attr.exclude_hv = 1;
0071     e->attr.exclude_idle = 1;
0072 }
0073 
/*
 * Mitigation states derived from the sysfs spectre_v2 vulnerability file.
 * Values are parsed by get_sysfs_state() from
 * /sys/devices/system/cpu/vulnerabilities/spectre_v2.
 */
enum spectre_v2_state {
	VULNERABLE = 0,
	UNKNOWN = 1,		// Works with FAIL_IF()
	NOT_AFFECTED,		// "Not affected"
	BRANCH_SERIALISATION,	// "Indirect branch serialisation (kernel only)"
	COUNT_CACHE_DISABLED,	// "Indirect branch cache disabled"
	COUNT_CACHE_FLUSH_SW,	// "Software count cache flush"
	COUNT_CACHE_FLUSH_HW,	// "Software count cache flush (hardware accelerated)"
	BTB_FLUSH,		// "Branch predictor state flush"
};
0084 
0085 static enum spectre_v2_state get_sysfs_state(void)
0086 {
0087     enum spectre_v2_state state = UNKNOWN;
0088     char buf[256];
0089     int len;
0090 
0091     memset(buf, 0, sizeof(buf));
0092     FAIL_IF(read_sysfs_file("devices/system/cpu/vulnerabilities/spectre_v2", buf, sizeof(buf)));
0093 
0094     // Make sure it's NULL terminated
0095     buf[sizeof(buf) - 1] = '\0';
0096 
0097     // Trim the trailing newline
0098     len = strlen(buf);
0099     FAIL_IF(len < 1);
0100     buf[len - 1] = '\0';
0101 
0102     printf("sysfs reports: '%s'\n", buf);
0103 
0104     // Order matters
0105     if (strstr(buf, "Vulnerable"))
0106         state = VULNERABLE;
0107     else if (strstr(buf, "Not affected"))
0108         state = NOT_AFFECTED;
0109     else if (strstr(buf, "Indirect branch serialisation (kernel only)"))
0110         state = BRANCH_SERIALISATION;
0111     else if (strstr(buf, "Indirect branch cache disabled"))
0112         state = COUNT_CACHE_DISABLED;
0113     else if (strstr(buf, "Software count cache flush (hardware accelerated)"))
0114         state = COUNT_CACHE_FLUSH_HW;
0115     else if (strstr(buf, "Software count cache flush"))
0116         state = COUNT_CACHE_FLUSH_SW;
0117     else if (strstr(buf, "Branch predictor state flush"))
0118         state = BTB_FLUSH;
0119 
0120     return state;
0121 }
0122 
0123 #define PM_BR_PRED_CCACHE   0x040a4 // P8 + P9
0124 #define PM_BR_MPRED_CCACHE  0x040ac // P8 + P9
0125 #define PM_BR_PRED_PCACHE   0x048a0 // P9 only
0126 #define PM_BR_MPRED_PCACHE  0x048b0 // P9 only
0127 
/*
 * Cross-check the spectre_v2 mitigation state the kernel reports in sysfs
 * against the branch misprediction rate actually measured with the PMU.
 *
 * Returns 0 on match, 1 on mismatch, -1 if sysfs state can't be parsed,
 * and 4 (skip) for a known benign count-cache-disabled-under-the-covers
 * configuration. SKIP_IF/FAIL_IF may also return early.
 */
int spectre_v2_test(void)
{
	enum spectre_v2_state state;
	struct event events[4];
	s64 miss_percent;
	bool is_p9;

	// The PMU events we use only work on Power8 or later
	SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07));

	state = get_sysfs_state();
	if (state == UNKNOWN) {
		printf("Error: couldn't determine spectre_v2 mitigation state?\n");
		return -1;
	}

	memset(events, 0, sizeof(events));

	// Count cache predicted & mispredicted branches; events[1..] are
	// grouped under events[0] so all count over the same interval.
	setup_event(&events[0], PM_BR_PRED_CCACHE,  "PM_BR_PRED_CCACHE");
	setup_event(&events[1], PM_BR_MPRED_CCACHE, "PM_BR_MPRED_CCACHE");
	FAIL_IF(event_open(&events[0]));
	FAIL_IF(event_open_with_group(&events[1], events[0].fd) == -1);

	// PVR version field 0x4e identifies Power9
	is_p9 = ((mfspr(SPRN_PVR) >>  16) & 0xFFFF) == 0x4e;

	if (is_p9) {
		// Count pattern cache too
		setup_event(&events[2], PM_BR_PRED_PCACHE,  "PM_BR_PRED_PCACHE");
		setup_event(&events[3], PM_BR_MPRED_PCACHE, "PM_BR_MPRED_PCACHE");

		FAIL_IF(event_open_with_group(&events[2], events[0].fd) == -1);
		FAIL_IF(event_open_with_group(&events[3], events[0].fd) == -1);
	}

	FAIL_IF(do_count_loop(events, is_p9, &miss_percent));

	event_report_justified(&events[0], 18, 10);
	event_report_justified(&events[1], 18, 10);
	event_close(&events[0]);
	event_close(&events[1]);

	if (is_p9) {
		event_report_justified(&events[2], 18, 10);
		event_report_justified(&events[3], 18, 10);
		event_close(&events[2]);
		event_close(&events[3]);
	}

	printf("Miss percent %lld %%\n", miss_percent);

	// Compare the measured rate against what each reported mitigation
	// mode implies for userspace branch prediction.
	switch (state) {
	case VULNERABLE:
	case NOT_AFFECTED:
	case COUNT_CACHE_FLUSH_SW:
	case COUNT_CACHE_FLUSH_HW:
		// These should all not affect userspace branch prediction
		if (miss_percent > 15) {
			if (miss_percent > 95) {
				/*
				 * Such a mismatch may be caused by a system being unaware
				 * the count cache is disabled. This may be to enable
				 * guest migration between hosts with different settings.
				 * Return skip code to avoid detecting this as an error.
				 * We are not vulnerable and reporting otherwise, so
				 * missing such a mismatch is safe.
				 */
				printf("Branch misses > 95%% unexpected in this configuration.\n");
				printf("Count cache likely disabled without Linux knowing.\n");
				if (state == COUNT_CACHE_FLUSH_SW)
					printf("WARNING: Kernel performing unnecessary flushes.\n");
				return 4;
			}
			printf("Branch misses > 15%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");

			return 1;
		}
		break;
	case BRANCH_SERIALISATION:
		// This seems to affect userspace branch prediction a bit?
		if (miss_percent > 25) {
			printf("Branch misses > 25%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case COUNT_CACHE_DISABLED:
		// With the count cache off nearly every such branch should miss
		if (miss_percent < 95) {
			printf("Branch misses < 95%% unexpected in this configuration!\n");
			printf("Possible mismatch between reported & actual mitigation\n");
			return 1;
		}
		break;
	case UNKNOWN:
	case BTB_FLUSH:
		// No expectation established for these states
		printf("Not sure!\n");
		return 1;
	}

	printf("OK - Measured branch prediction rates match reported spectre v2 mitigation.\n");

	return 0;
}
0231 
0232 int main(int argc, char *argv[])
0233 {
0234     return test_harness(spectre_v2_test, "spectre_v2");
0235 }