Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * CPU Microcode Update Driver for Linux
0004  *
0005  * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
0006  *        2006  Shaohua Li <shaohua.li@intel.com>
0007  *        2013-2016 Borislav Petkov <bp@alien8.de>
0008  *
0009  * X86 CPU microcode early update for Linux:
0010  *
0011  *  Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
0012  *             H Peter Anvin" <hpa@zytor.com>
0013  *        (C) 2015 Borislav Petkov <bp@alien8.de>
0014  *
0015  * This driver allows to upgrade microcode on x86 processors.
0016  */
0017 
0018 #define pr_fmt(fmt) "microcode: " fmt
0019 
0020 #include <linux/platform_device.h>
0021 #include <linux/stop_machine.h>
0022 #include <linux/syscore_ops.h>
0023 #include <linux/miscdevice.h>
0024 #include <linux/capability.h>
0025 #include <linux/firmware.h>
0026 #include <linux/kernel.h>
0027 #include <linux/delay.h>
0028 #include <linux/mutex.h>
0029 #include <linux/cpu.h>
0030 #include <linux/nmi.h>
0031 #include <linux/fs.h>
0032 #include <linux/mm.h>
0033 
0034 #include <asm/microcode_intel.h>
0035 #include <asm/cpu_device_id.h>
0036 #include <asm/microcode_amd.h>
0037 #include <asm/perf_event.h>
0038 #include <asm/microcode.h>
0039 #include <asm/processor.h>
0040 #include <asm/cmdline.h>
0041 #include <asm/setup.h>
0042 
#define DRIVER_VERSION	"2.2"

/* Vendor-specific hooks (Intel or AMD), set once in microcode_init(). */
static struct microcode_ops	*microcode_ops;
/* Loader is disabled until check_loader_disabled_bsp() proves otherwise. */
static bool dis_ucode_ldr = true;

/* Set once the initrd has been jettisoned; see find_microcode_in_initrd(). */
bool initrd_gone;

/* Cache of microcode patches carried over from the initrd. */
LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

/* Per-CPU signature/revision state, indexed by CPU number. */
struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

/* Argument bundle for collect_cpu_info_local() run via IPI. */
struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};
0082 
0083 /*
0084  * Check the current patch level on this CPU.
0085  *
0086  * Returns:
0087  *  - true: if update should stop
0088  *  - false: otherwise
0089  */
0090 static bool amd_check_current_patch_level(void)
0091 {
0092     u32 lvl, dummy, i;
0093     u32 *levels;
0094 
0095     native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
0096 
0097     if (IS_ENABLED(CONFIG_X86_32))
0098         levels = (u32 *)__pa_nodebug(&final_levels);
0099     else
0100         levels = final_levels;
0101 
0102     for (i = 0; levels[i]; i++) {
0103         if (lvl == levels[i])
0104             return true;
0105     }
0106     return false;
0107 }
0108 
/*
 * Decide on the BSP whether the early microcode loader stays disabled.
 * Returns the (possibly updated) value of dis_ucode_ldr.
 */
static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	/*
	 * On 32-bit this runs before paging is enabled, so every global must
	 * be accessed through its physical address.
	 */
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	/*
	 * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
	 * completely accurate as xen pv guests don't see that CPUID bit set but
	 * that's good enough as they don't land on the BSP path anyway.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return *res;

	/* An AMD CPU on a final patch level must not be updated any further. */
	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return *res;
	}

	/* Enable the loader unless "dis_ucode_ldr" is on the command line. */
	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}
0142 
0143 void __init load_ucode_bsp(void)
0144 {
0145     unsigned int cpuid_1_eax;
0146     bool intel = true;
0147 
0148     if (!have_cpuid_p())
0149         return;
0150 
0151     cpuid_1_eax = native_cpuid_eax(1);
0152 
0153     switch (x86_cpuid_vendor()) {
0154     case X86_VENDOR_INTEL:
0155         if (x86_family(cpuid_1_eax) < 6)
0156             return;
0157         break;
0158 
0159     case X86_VENDOR_AMD:
0160         if (x86_family(cpuid_1_eax) < 0x10)
0161             return;
0162         intel = false;
0163         break;
0164 
0165     default:
0166         return;
0167     }
0168 
0169     if (check_loader_disabled_bsp())
0170         return;
0171 
0172     if (intel)
0173         load_ucode_intel_bsp();
0174     else
0175         load_ucode_amd_bsp(cpuid_1_eax);
0176 }
0177 
/*
 * AP-side check of the loader-disabled flag. On 32-bit this may run before
 * paging, so the flag must be read through its physical address.
 */
static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
0186 
0187 void load_ucode_ap(void)
0188 {
0189     unsigned int cpuid_1_eax;
0190 
0191     if (check_loader_disabled_ap())
0192         return;
0193 
0194     cpuid_1_eax = native_cpuid_eax(1);
0195 
0196     switch (x86_cpuid_vendor()) {
0197     case X86_VENDOR_INTEL:
0198         if (x86_family(cpuid_1_eax) >= 6)
0199             load_ucode_intel_ap();
0200         break;
0201     case X86_VENDOR_AMD:
0202         if (x86_family(cpuid_1_eax) >= 0x10)
0203             load_ucode_amd_ap(cpuid_1_eax);
0204         break;
0205     default:
0206         break;
0207     }
0208 }
0209 
0210 static int __init save_microcode_in_initrd(void)
0211 {
0212     struct cpuinfo_x86 *c = &boot_cpu_data;
0213     int ret = -EINVAL;
0214 
0215     switch (c->x86_vendor) {
0216     case X86_VENDOR_INTEL:
0217         if (c->x86 >= 6)
0218             ret = save_microcode_in_initrd_intel();
0219         break;
0220     case X86_VENDOR_AMD:
0221         if (c->x86 >= 0x10)
0222             ret = save_microcode_in_initrd_amd(cpuid_eax(1));
0223         break;
0224     default:
0225         break;
0226     }
0227 
0228     initrd_gone = true;
0229 
0230     return ret;
0231 }
0232 
/*
 * find_microcode_in_initrd - locate a microcode blob inside the initrd cpio.
 * @path:   cpio member path of the microcode file.
 * @use_pa: true when running with physical addresses (early 32-bit path).
 *
 * Returns a cpio_data with NULL data when no initrd is configured, the blob
 * is absent, or the initrd has already been jettisoned.
 */
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *params;

	/* Pre-paging callers must reach boot_params physically. */
	if (use_pa)
		params = (struct boot_params *)__pa_nodebug(&boot_params);
	else
		params = &boot_params;

	size = params->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	if (size)
		start = params->hdr.ramdisk_image;

# else /* CONFIG_X86_64 */
	/* 64-bit: combine the extended and legacy size/address fields. */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;

		start += PAGE_OFFSET;
	}
# endif

	/*
	 * Fixup the start address: after reserve_initrd() runs, initrd_start
	 * has the virtual address of the beginning of the initrd. It also
	 * possibly relocates the ramdisk. In either case, initrd_start contains
	 * the updated address so use that instead.
	 *
	 * initrd_gone is for the hotplug case where we've thrown out initrd
	 * already.
	 */
	if (!use_pa) {
		if (initrd_gone)
			return (struct cpio_data){ NULL, 0, "" };
		if (initrd_start)
			start = initrd_start;
	} else {
		/*
		 * The picture with physical addresses is a bit different: we
		 * need to get the *physical* address to which the ramdisk was
		 * relocated, i.e., relocated_ramdisk (not initrd_start) and
		 * since we're running from physical addresses, we need to access
		 * relocated_ramdisk through its *physical* address too.
		 */
		u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk);
		if (*rr)
			start = *rr;
	}

	return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
0300 
0301 void reload_early_microcode(void)
0302 {
0303     int vendor, family;
0304 
0305     vendor = x86_cpuid_vendor();
0306     family = x86_cpuid_family();
0307 
0308     switch (vendor) {
0309     case X86_VENDOR_INTEL:
0310         if (family >= 6)
0311             reload_ucode_intel();
0312         break;
0313     case X86_VENDOR_AMD:
0314         if (family >= 0x10)
0315             reload_ucode_amd();
0316         break;
0317     default:
0318         break;
0319     }
0320 }
0321 
0322 static void collect_cpu_info_local(void *arg)
0323 {
0324     struct cpu_info_ctx *ctx = arg;
0325 
0326     ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
0327                            ctx->cpu_sig);
0328 }
0329 
0330 static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
0331 {
0332     struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
0333     int ret;
0334 
0335     ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
0336     if (!ret)
0337         ret = ctx.err;
0338 
0339     return ret;
0340 }
0341 
0342 static int collect_cpu_info(int cpu)
0343 {
0344     struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
0345     int ret;
0346 
0347     memset(uci, 0, sizeof(*uci));
0348 
0349     ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
0350     if (!ret)
0351         uci->valid = 1;
0352 
0353     return ret;
0354 }
0355 
0356 static void apply_microcode_local(void *arg)
0357 {
0358     enum ucode_state *err = arg;
0359 
0360     *err = microcode_ops->apply_microcode(smp_processor_id());
0361 }
0362 
0363 static int apply_microcode_on_target(int cpu)
0364 {
0365     enum ucode_state err;
0366     int ret;
0367 
0368     ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
0369     if (!ret) {
0370         if (err == UCODE_ERROR)
0371             ret = 1;
0372     }
0373     return ret;
0374 }
0375 
/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
/* Spin granularity of the rendezvous wait loop, in nanoseconds. */
#define SPINUNIT 100 /* 100 nsec */
0392 
0393 static int check_online_cpus(void)
0394 {
0395     unsigned int cpu;
0396 
0397     /*
0398      * Make sure all CPUs are online.  It's fine for SMT to be disabled if
0399      * all the primary threads are still online.
0400      */
0401     for_each_present_cpu(cpu) {
0402         if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
0403             pr_err("Not all CPUs online, aborting microcode update.\n");
0404             return -EINVAL;
0405         }
0406     }
0407 
0408     return 0;
0409 }
0410 
/* Rendezvous counters for the late-load stop_machine() handshake. */
static atomic_t late_cpus_in;
static atomic_t late_cpus_out;
0413 
0414 static int __wait_for_cpus(atomic_t *t, long long timeout)
0415 {
0416     int all_cpus = num_online_cpus();
0417 
0418     atomic_inc(t);
0419 
0420     while (atomic_read(t) < all_cpus) {
0421         if (timeout < SPINUNIT) {
0422             pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
0423                 all_cpus - atomic_read(t));
0424             return 1;
0425         }
0426 
0427         ndelay(SPINUNIT);
0428         timeout -= SPINUNIT;
0429 
0430         touch_nmi_watchdog();
0431     }
0432     return 0;
0433 }
0434 
0435 /*
0436  * Returns:
0437  * < 0 - on error
0438  *   0 - success (no update done or microcode was updated)
0439  */
/*
 * stop_machine() callback: runs on every online CPU with interrupts off.
 *
 * Returns:
 * < 0 - on error
 *   0 - success (no update done or microcode was updated)
 */
static int __reload_late(void *info)
{
	int cpu = smp_processor_id();
	enum ucode_state err;
	int ret = 0;

	/*
	 * Wait for all CPUs to arrive. A load will not be attempted unless all
	 * CPUs show up.
	 */
	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
		return -1;

	/*
	 * On an SMT system, it suffices to load the microcode on one sibling of
	 * the core because the microcode engine is shared between the threads.
	 * Synchronization still needs to take place so that no concurrent
	 * loading attempts happen on multiple threads of an SMT core. See
	 * below.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu)
		apply_microcode_local(&err);
	else
		goto wait_for_siblings;

	/* UCODE_NFOUND and above (UCODE_ERROR) mean the load went wrong. */
	if (err >= UCODE_NFOUND) {
		if (err == UCODE_ERROR)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);

		ret = -1;
	}

wait_for_siblings:
	/* Second rendezvous; a straggler here leaves CPUs inconsistent. */
	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
		panic("Timeout during microcode update!\n");

	/*
	 * At least one thread has completed update on each core.
	 * For others, simply call the update to make sure the
	 * per-cpu cpuinfo can be updated with right microcode
	 * revision.
	 */
	if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu)
		apply_microcode_local(&err);

	return ret;
}
0487 
0488 /*
0489  * Reload microcode late on all CPUs. Wait for a sec until they
0490  * all gather together.
0491  */
0492 static int microcode_reload_late(void)
0493 {
0494     int ret;
0495 
0496     pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
0497     pr_err("You should switch to early loading, if possible.\n");
0498 
0499     atomic_set(&late_cpus_in,  0);
0500     atomic_set(&late_cpus_out, 0);
0501 
0502     ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
0503     if (ret == 0)
0504         microcode_check();
0505 
0506     pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);
0507 
0508     return ret;
0509 }
0510 
0511 static ssize_t reload_store(struct device *dev,
0512                 struct device_attribute *attr,
0513                 const char *buf, size_t size)
0514 {
0515     enum ucode_state tmp_ret = UCODE_OK;
0516     int bsp = boot_cpu_data.cpu_index;
0517     unsigned long val;
0518     ssize_t ret = 0;
0519 
0520     ret = kstrtoul(buf, 0, &val);
0521     if (ret)
0522         return ret;
0523 
0524     if (val != 1)
0525         return size;
0526 
0527     cpus_read_lock();
0528 
0529     ret = check_online_cpus();
0530     if (ret)
0531         goto put;
0532 
0533     tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
0534     if (tmp_ret != UCODE_NEW)
0535         goto put;
0536 
0537     mutex_lock(&microcode_mutex);
0538     ret = microcode_reload_late();
0539     mutex_unlock(&microcode_mutex);
0540 
0541 put:
0542     cpus_read_unlock();
0543 
0544     if (ret == 0)
0545         ret = size;
0546 
0547     add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
0548 
0549     return ret;
0550 }
0551 
/* Write-only /sys/devices/system/cpu/microcode/reload attribute. */
static DEVICE_ATTR_WO(reload);
#endif
0554 
0555 static ssize_t version_show(struct device *dev,
0556             struct device_attribute *attr, char *buf)
0557 {
0558     struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
0559 
0560     return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
0561 }
0562 
0563 static ssize_t pf_show(struct device *dev,
0564             struct device_attribute *attr, char *buf)
0565 {
0566     struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;
0567 
0568     return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
0569 }
0570 
/* Per-CPU read-only attributes: .../cpuN/microcode/{version,processor_flags}. */
static DEVICE_ATTR(version, 0444, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

/* Grouped under a "microcode" subdirectory of each CPU device. */
static const struct attribute_group mc_attr_group = {
	.attrs			= mc_default_attrs,
	.name			= "microcode",
};
0584 
0585 static void microcode_fini_cpu(int cpu)
0586 {
0587     if (microcode_ops->microcode_fini_cpu)
0588         microcode_ops->microcode_fini_cpu(cpu);
0589 }
0590 
0591 static enum ucode_state microcode_resume_cpu(int cpu)
0592 {
0593     if (apply_microcode_on_target(cpu))
0594         return UCODE_ERROR;
0595 
0596     pr_debug("CPU%d updated upon resume\n", cpu);
0597 
0598     return UCODE_OK;
0599 }
0600 
0601 static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
0602 {
0603     enum ucode_state ustate;
0604     struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
0605 
0606     if (uci->valid)
0607         return UCODE_OK;
0608 
0609     if (collect_cpu_info(cpu))
0610         return UCODE_ERROR;
0611 
0612     /* --dimm. Trigger a delayed update? */
0613     if (system_state != SYSTEM_RUNNING)
0614         return UCODE_NFOUND;
0615 
0616     ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
0617     if (ustate == UCODE_NEW) {
0618         pr_debug("CPU%d updated upon init\n", cpu);
0619         apply_microcode_on_target(cpu);
0620     }
0621 
0622     return ustate;
0623 }
0624 
0625 static enum ucode_state microcode_update_cpu(int cpu)
0626 {
0627     struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
0628 
0629     /* Refresh CPU microcode revision after resume. */
0630     collect_cpu_info(cpu);
0631 
0632     if (uci->valid)
0633         return microcode_resume_cpu(cpu);
0634 
0635     return microcode_init_cpu(cpu, false);
0636 }
0637 
0638 static int mc_device_add(struct device *dev, struct subsys_interface *sif)
0639 {
0640     int err, cpu = dev->id;
0641 
0642     if (!cpu_online(cpu))
0643         return 0;
0644 
0645     pr_debug("CPU%d added\n", cpu);
0646 
0647     err = sysfs_create_group(&dev->kobj, &mc_attr_group);
0648     if (err)
0649         return err;
0650 
0651     if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
0652         return -EINVAL;
0653 
0654     return err;
0655 }
0656 
0657 static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
0658 {
0659     int cpu = dev->id;
0660 
0661     if (!cpu_online(cpu))
0662         return;
0663 
0664     pr_debug("CPU%d removed\n", cpu);
0665     microcode_fini_cpu(cpu);
0666     sysfs_remove_group(&dev->kobj, &mc_attr_group);
0667 }
0668 
/* Hook add/remove callbacks into the CPU subsystem device lifecycle. */
static struct subsys_interface mc_cpu_interface = {
	.name			= "microcode",
	.subsys			= &cpu_subsys,
	.add_dev		= mc_device_add,
	.remove_dev		= mc_device_remove,
};
0675 
0676 /**
0677  * microcode_bsp_resume - Update boot CPU microcode during resume.
0678  */
0679 void microcode_bsp_resume(void)
0680 {
0681     int cpu = smp_processor_id();
0682     struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
0683 
0684     if (uci->valid && uci->mc)
0685         microcode_ops->apply_microcode(cpu);
0686     else if (!uci->mc)
0687         reload_early_microcode();
0688 }
0689 
/* Re-apply microcode on the boot CPU when the system core resumes. */
static struct syscore_ops mc_syscore_ops = {
	.resume			= microcode_bsp_resume,
};
0693 
/* CPU hotplug "starting" callback: update microcode on an incoming CPU. */
static int mc_cpu_starting(unsigned int cpu)
{
	microcode_update_cpu(cpu);
	/* NOTE(review): message says "added" but this is the starting path. */
	pr_debug("CPU%d added\n", cpu);
	return 0;
}
0700 
0701 static int mc_cpu_online(unsigned int cpu)
0702 {
0703     struct device *dev = get_cpu_device(cpu);
0704 
0705     if (sysfs_create_group(&dev->kobj, &mc_attr_group))
0706         pr_err("Failed to create group for CPU%d\n", cpu);
0707     return 0;
0708 }
0709 
0710 static int mc_cpu_down_prep(unsigned int cpu)
0711 {
0712     struct device *dev;
0713 
0714     dev = get_cpu_device(cpu);
0715     /* Suspend is in progress, only remove the interface */
0716     sysfs_remove_group(&dev->kobj, &mc_attr_group);
0717     pr_debug("CPU%d removed\n", cpu);
0718 
0719     return 0;
0720 }
0721 
/* System-wide attributes under /sys/devices/system/cpu/microcode/. */
static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};

static const struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};
0733 
0734 static int __init microcode_init(void)
0735 {
0736     struct cpuinfo_x86 *c = &boot_cpu_data;
0737     int error;
0738 
0739     if (dis_ucode_ldr)
0740         return -EINVAL;
0741 
0742     if (c->x86_vendor == X86_VENDOR_INTEL)
0743         microcode_ops = init_intel_microcode();
0744     else if (c->x86_vendor == X86_VENDOR_AMD)
0745         microcode_ops = init_amd_microcode();
0746     else
0747         pr_err("no support for this CPU vendor\n");
0748 
0749     if (!microcode_ops)
0750         return -ENODEV;
0751 
0752     microcode_pdev = platform_device_register_simple("microcode", -1,
0753                              NULL, 0);
0754     if (IS_ERR(microcode_pdev))
0755         return PTR_ERR(microcode_pdev);
0756 
0757     cpus_read_lock();
0758     mutex_lock(&microcode_mutex);
0759     error = subsys_interface_register(&mc_cpu_interface);
0760     mutex_unlock(&microcode_mutex);
0761     cpus_read_unlock();
0762 
0763     if (error)
0764         goto out_pdev;
0765 
0766     error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
0767                    &cpu_root_microcode_group);
0768 
0769     if (error) {
0770         pr_err("Error creating microcode group!\n");
0771         goto out_driver;
0772     }
0773 
0774     register_syscore_ops(&mc_syscore_ops);
0775     cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
0776                   mc_cpu_starting, NULL);
0777     cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
0778                   mc_cpu_online, mc_cpu_down_prep);
0779 
0780     pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
0781 
0782     return 0;
0783 
0784  out_driver:
0785     cpus_read_lock();
0786     mutex_lock(&microcode_mutex);
0787 
0788     subsys_interface_unregister(&mc_cpu_interface);
0789 
0790     mutex_unlock(&microcode_mutex);
0791     cpus_read_unlock();
0792 
0793  out_pdev:
0794     platform_device_unregister(microcode_pdev);
0795     return error;
0796 
0797 }
/* Stash initrd microcode before the initrd is freed, then init the driver. */
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);