Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
0004  * Copyright (c) 2017, Intel Corporation.
0005  * All rights reserved.
0006  *
0007  * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
0008  */
0009 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0010 
0011 #include <linux/cpufeature.h>
0012 #include <linux/cpuhotplug.h>
0013 #include <linux/init.h>
0014 #include <linux/kernel.h>
0015 #include <linux/topology.h>
0016 #include <linux/workqueue.h>
0017 
0018 #include <asm/cpu_device_id.h>
0019 #include <asm/intel-family.h>
0020 
0021 #define MSR_OC_MAILBOX          0x150
0022 #define MSR_OC_MAILBOX_CMD_OFFSET   32
0023 #define MSR_OC_MAILBOX_RSP_OFFSET   32
0024 #define MSR_OC_MAILBOX_BUSY_BIT     63
0025 #define OC_MAILBOX_FC_CONTROL_CMD   0x1C
0026 
/*
 * Typical latency to get a mailbox response is ~3 us; it takes about
 * another 3 us to process the mailbox read after issuing the mailbox
 * write on a Broadwell 3.4 GHz system. So most of the time the first
 * mailbox read should already have the response, but retry twice to
 * avoid boundary cases.
 */
0033 #define OC_MAILBOX_RETRY_COUNT      2
0034 
0035 static int get_oc_core_priority(unsigned int cpu)
0036 {
0037     u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
0038     int ret, i;
0039 
0040     /* Issue favored core read command */
0041     value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
0042     /* Set the busy bit to indicate OS is trying to issue command */
0043     value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
0044     ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
0045     if (ret) {
0046         pr_debug("cpu %d OC mailbox write failed\n", cpu);
0047         return ret;
0048     }
0049 
0050     for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
0051         ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
0052         if (ret) {
0053             pr_debug("cpu %d OC mailbox read failed\n", cpu);
0054             break;
0055         }
0056 
0057         if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
0058             pr_debug("cpu %d OC mailbox still processing\n", cpu);
0059             ret = -EBUSY;
0060             continue;
0061         }
0062 
0063         if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
0064             pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
0065             ret = -ENXIO;
0066             break;
0067         }
0068 
0069         ret = value & 0xff;
0070         pr_debug("cpu %d max_ratio %d\n", cpu, ret);
0071         break;
0072     }
0073 
0074     return ret;
0075 }
0076 
/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_set_priority() is called from the CPU online callback, so it
 * can't call sched_set_itmt_support() directly, as that function will acquire
 * hotplug locks in its path.
 */
/* Deferred enable of ITMT scheduler support, run from the system workqueue. */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}
0087 
0088 static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
0089 
0090 static int itmt_legacy_cpu_online(unsigned int cpu)
0091 {
0092     static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
0093     int priority;
0094 
0095     priority = get_oc_core_priority(cpu);
0096     if (priority < 0)
0097         return 0;
0098 
0099     sched_set_itmt_core_prio(priority, cpu);
0100 
0101     /* Enable ITMT feature when a core with different priority is found */
0102     if (max_highest_perf <= min_highest_perf) {
0103         if (priority > max_highest_perf)
0104             max_highest_perf = priority;
0105 
0106         if (priority < min_highest_perf)
0107             min_highest_perf = priority;
0108 
0109         if (max_highest_perf > min_highest_perf)
0110             schedule_work(&sched_itmt_work);
0111     }
0112 
0113     return 0;
0114 }
0115 
/*
 * CPU models this legacy (non-HWP) enumeration path applies to; the driver
 * bails out at init time on anything else.
 */
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,	NULL),
	{}
};
0121 
0122 static int __init itmt_legacy_init(void)
0123 {
0124     const struct x86_cpu_id *id;
0125     int ret;
0126 
0127     id = x86_match_cpu(itmt_legacy_cpu_ids);
0128     if (!id)
0129         return -ENODEV;
0130 
0131     ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
0132                 "platform/x86/turbo_max_3:online",
0133                 itmt_legacy_cpu_online, NULL);
0134     if (ret < 0)
0135         return ret;
0136 
0137     return 0;
0138 }
0139 late_initcall(itmt_legacy_init)