Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-or-later
0002 /*
0003  * Intel CPU Microcode Update Driver for Linux
0004  *
0005  * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
0006  *       2006 Shaohua Li <shaohua.li@intel.com>
0007  *
0008  * Intel CPU microcode early update for Linux
0009  *
0010  * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
0011  *            H Peter Anvin <hpa@zytor.com>
0012  */
0013 
0014 /*
0015  * This needs to be before all headers so that pr_debug in printk.h doesn't turn
0016  * printk calls into no_printk().
0017  *
0018  *#define DEBUG
0019  */
0020 #define pr_fmt(fmt) "microcode: " fmt
0021 
0022 #include <linux/earlycpio.h>
0023 #include <linux/firmware.h>
0024 #include <linux/uaccess.h>
0025 #include <linux/vmalloc.h>
0026 #include <linux/initrd.h>
0027 #include <linux/kernel.h>
0028 #include <linux/slab.h>
0029 #include <linux/cpu.h>
0030 #include <linux/uio.h>
0031 #include <linux/mm.h>
0032 
0033 #include <asm/microcode_intel.h>
0034 #include <asm/intel-family.h>
0035 #include <asm/processor.h>
0036 #include <asm/tlbflush.h>
0037 #include <asm/setup.h>
0038 #include <asm/msr.h>
0039 
/* Path of the Intel microcode blob inside the initrd cpio archive. */
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core; computed once in init_intel_microcode() */
static int llc_size_per_core;
0047 
0048 /*
0049  * Returns 1 if update has been found, 0 otherwise.
0050  */
0051 static int find_matching_signature(void *mc, unsigned int csig, int cpf)
0052 {
0053     struct microcode_header_intel *mc_hdr = mc;
0054     struct extended_sigtable *ext_hdr;
0055     struct extended_signature *ext_sig;
0056     int i;
0057 
0058     if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
0059         return 1;
0060 
0061     /* Look for ext. headers: */
0062     if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
0063         return 0;
0064 
0065     ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
0066     ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
0067 
0068     for (i = 0; i < ext_hdr->count; i++) {
0069         if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
0070             return 1;
0071         ext_sig++;
0072     }
0073     return 0;
0074 }
0075 
0076 /*
0077  * Returns 1 if update has been found, 0 otherwise.
0078  */
0079 static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
0080 {
0081     struct microcode_header_intel *mc_hdr = mc;
0082 
0083     if (mc_hdr->rev <= new_rev)
0084         return 0;
0085 
0086     return find_matching_signature(mc, csig, cpf);
0087 }
0088 
0089 static struct ucode_patch *memdup_patch(void *data, unsigned int size)
0090 {
0091     struct ucode_patch *p;
0092 
0093     p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
0094     if (!p)
0095         return NULL;
0096 
0097     p->data = kmemdup(data, size, GFP_KERNEL);
0098     if (!p->data) {
0099         kfree(p);
0100         return NULL;
0101     }
0102 
0103     return p;
0104 }
0105 
/*
 * Save microcode patch @data of @size bytes into the global microcode_cache
 * list.  An older cached patch covering the same signature is replaced in
 * place; otherwise the patch is appended.  If the cached copy matches the
 * CPU described by @uci, it is also published via intel_ucode_patch for
 * early loading of the APs.
 */
static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
{
    struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
    struct ucode_patch *iter, *tmp, *p = NULL;
    bool prev_found = false;
    unsigned int sig, pf;

    mc_hdr = (struct microcode_header_intel *)data;

    /* Look for a cached entry covering the same signature. */
    list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
        mc_saved_hdr = (struct microcode_header_intel *)iter->data;
        sig      = mc_saved_hdr->sig;
        pf       = mc_saved_hdr->pf;

        if (find_matching_signature(data, sig, pf)) {
            prev_found = true;

            /* Cached entry is at least as new - keep it. */
            if (mc_hdr->rev <= mc_saved_hdr->rev)
                continue;

            p = memdup_patch(data, size);
            if (!p)
                pr_err("Error allocating buffer %p\n", data);
            else {
                /* Swap in the new copy and free the replaced entry. */
                list_replace(&iter->plist, &p->plist);
                kfree(iter->data);
                kfree(iter);
            }
        }
    }

    /*
     * There weren't any previous patches found in the list cache; save the
     * newly found.
     */
    if (!prev_found) {
        p = memdup_patch(data, size);
        if (!p)
            pr_err("Error allocating buffer for %p\n", data);
        else
            list_add_tail(&p->plist, &microcode_cache);
    }

    /* Nothing was cached (allocation failure or not newer than cache). */
    if (!p)
        return;

    /* Only publish a patch that actually fits the current CPU. */
    if (!find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
        return;

    /*
     * Save for early loading. On 32-bit, that needs to be a physical
     * address as the APs are running from physical addresses, before
     * paging has been enabled.
     */
    if (IS_ENABLED(CONFIG_X86_32))
        intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
    else
        intel_ucode_patch = p->data;
}
0165 
/*
 * Validate the layout and checksums of microcode blob @mc: sizes in the
 * header, loader/header versions, extended signature table geometry, and
 * the dword checksums over header+data and over each extended signature.
 * Returns 0 when the blob is well-formed, a negative errno otherwise.
 * Messages are printed only when @print_err is set (quiet scanning passes 0).
 */
static int microcode_sanity_check(void *mc, int print_err)
{
    unsigned long total_size, data_size, ext_table_size;
    struct microcode_header_intel *mc_header = mc;
    struct extended_sigtable *ext_header = NULL;
    u32 sum, orig_sum, ext_sigcount = 0, i;
    struct extended_signature *ext_sig;

    total_size = get_totalsize(mc_header);
    data_size = get_datasize(mc_header);

    /* Header plus data must fit inside the advertised total size. */
    if (data_size + MC_HEADER_SIZE > total_size) {
        if (print_err)
            pr_err("Error: bad microcode data file size.\n");
        return -EINVAL;
    }

    /* Only loader/header version 1 formats are understood. */
    if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
        if (print_err)
            pr_err("Error: invalid/unknown microcode update format.\n");
        return -EINVAL;
    }

    /* Any bytes beyond header+data form the extended signature table. */
    ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
    if (ext_table_size) {
        u32 ext_table_sum = 0;
        u32 *ext_tablep;

        if ((ext_table_size < EXT_HEADER_SIZE)
         || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
            if (print_err)
                pr_err("Error: truncated extended signature table.\n");
            return -EINVAL;
        }

        ext_header = mc + MC_HEADER_SIZE + data_size;
        if (ext_table_size != exttable_size(ext_header)) {
            if (print_err)
                pr_err("Error: extended signature table size mismatch.\n");
            return -EFAULT;
        }

        ext_sigcount = ext_header->count;

        /*
         * Check extended table checksum: the sum of all dwords that
         * comprise a valid table must be 0.
         */
        ext_tablep = (u32 *)ext_header;

        i = ext_table_size / sizeof(u32);
        while (i--)
            ext_table_sum += ext_tablep[i];

        if (ext_table_sum) {
            if (print_err)
                pr_warn("Bad extended signature table checksum, aborting.\n");
            return -EINVAL;
        }
    }

    /*
     * Calculate the checksum of update data and header. The checksum of
     * valid update data and header including the extended signature table
     * must be 0.
     */
    orig_sum = 0;
    i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
    while (i--)
        orig_sum += ((u32 *)mc)[i];

    if (orig_sum) {
        if (print_err)
            pr_err("Bad microcode data checksum, aborting.\n");
        return -EINVAL;
    }

    if (!ext_table_size)
        return 0;

    /*
     * Check extended signature checksum: 0 => valid.
     * Each entry reuses the primary checksum with sig/pf/cksum swapped, so
     * the difference of the two triples must be zero.
     */
    for (i = 0; i < ext_sigcount; i++) {
        ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
              EXT_SIGNATURE_SIZE * i;

        sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
              (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
        if (sum) {
            if (print_err)
                pr_err("Bad extended signature checksum, aborting.\n");
            return -EINVAL;
        }
    }
    return 0;
}
0263 
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walks the concatenated patches in @data (@size bytes total).  When @save
 * is set, every matching patch is stashed into the cache through
 * save_microcode_patch() and NULL is returned; otherwise a pointer to the
 * newest matching patch inside @data is returned (NULL when none matched
 * or the blob is malformed).
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
    struct microcode_header_intel *mc_header;
    struct microcode_intel *patch = NULL;
    unsigned int mc_size;

    while (size) {
        if (size < sizeof(struct microcode_header_intel))
            break;

        mc_header = (struct microcode_header_intel *)data;

        /* Abort the whole scan on the first malformed patch. */
        mc_size = get_totalsize(mc_header);
        if (!mc_size ||
            mc_size > size ||
            microcode_sanity_check(data, 0) < 0)
            break;

        size -= mc_size;

        /* Patch is for some other CPU signature - skip it. */
        if (!find_matching_signature(data, uci->cpu_sig.sig,
                         uci->cpu_sig.pf)) {
            data += mc_size;
            continue;
        }

        if (save) {
            save_microcode_patch(uci, data, mc_size);
            goto next;
        }


        if (!patch) {
            /* First candidate must beat the running revision. */
            if (!has_newer_microcode(data,
                         uci->cpu_sig.sig,
                         uci->cpu_sig.pf,
                         uci->cpu_sig.rev))
                goto next;

        } else {
            struct microcode_header_intel *phdr = &patch->hdr;

            /* Later candidates must beat the best one found so far. */
            if (!has_newer_microcode(data,
                         phdr->sig,
                         phdr->pf,
                         phdr->rev))
                goto next;
        }

        /* We have a newer patch, save it. */
        patch = data;

next:
        data += mc_size;
    }

    /* Leftover bytes mean the scan aborted on a malformed patch. */
    if (size)
        return NULL;

    return patch;
}
0330 
/*
 * Debug helper: dump the current CPU signature and every patch held in
 * microcode_cache via pr_debug().  Compiles to an empty function unless
 * DEBUG is defined (see the note at the top of the file).
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
    int i = 0, j;
    unsigned int sig, pf, rev, total_size, data_size, date;
    struct ucode_cpu_info uci;
    struct ucode_patch *p;

    if (list_empty(&microcode_cache)) {
        pr_debug("no microcode data saved.\n");
        return;
    }

    intel_cpu_collect_info(&uci);

    sig = uci.cpu_sig.sig;
    pf  = uci.cpu_sig.pf;
    rev = uci.cpu_sig.rev;
    pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

    list_for_each_entry(p, &microcode_cache, plist) {
        struct microcode_header_intel *mc_saved_header;
        struct extended_sigtable *ext_header;
        struct extended_signature *ext_sig;
        int ext_sigcount;

        mc_saved_header = (struct microcode_header_intel *)p->data;

        sig = mc_saved_header->sig;
        pf  = mc_saved_header->pf;
        rev = mc_saved_header->rev;
        date    = mc_saved_header->date;

        total_size  = get_totalsize(mc_saved_header);
        data_size   = get_datasize(mc_saved_header);

        /* date packs year in the low word, month/day in the high bytes */
        pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
             i++, sig, pf, rev, total_size,
             date & 0xffff,
             date >> 24,
             (date >> 16) & 0xff);

        /* Look for ext. headers: */
        if (total_size <= data_size + MC_HEADER_SIZE)
            continue;

        ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
        ext_sigcount = ext_header->count;
        ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

        for (j = 0; j < ext_sigcount; j++) {
            sig = ext_sig->sig;
            pf = ext_sig->pf;

            pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
                 j, sig, pf);

            ext_sig++;
        }
    }
#endif
}
0393 
/*
 * Save this microcode patch. It will be loaded early when a CPU is
 * hot-added or resumes.  Serialized against concurrent callers with a
 * local mutex; the actual caching is done by save_microcode_patch().
 */
static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size)
{
    /* Synchronization during CPU hotplug. */
    static DEFINE_MUTEX(x86_cpu_microcode_mutex);

    mutex_lock(&x86_cpu_microcode_mutex);

    save_microcode_patch(uci, mc, size);
    show_saved_mc();

    mutex_unlock(&x86_cpu_microcode_mutex);
}
0410 
0411 static bool load_builtin_intel_microcode(struct cpio_data *cp)
0412 {
0413     unsigned int eax = 1, ebx, ecx = 0, edx;
0414     struct firmware fw;
0415     char name[30];
0416 
0417     if (IS_ENABLED(CONFIG_X86_32))
0418         return false;
0419 
0420     native_cpuid(&eax, &ebx, &ecx, &edx);
0421 
0422     sprintf(name, "intel-ucode/%02x-%02x-%02x",
0423               x86_family(eax), x86_model(eax), x86_stepping(eax));
0424 
0425     if (firmware_request_builtin(&fw, name)) {
0426         cp->size = fw.size;
0427         cp->data = (void *)fw.data;
0428         return true;
0429     }
0430 
0431     return false;
0432 }
0433 
0434 /*
0435  * Print ucode update info.
0436  */
0437 static void
0438 print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
0439 {
0440     pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
0441              uci->cpu_sig.rev,
0442              date & 0xffff,
0443              date >> 24,
0444              (date >> 16) & 0xff);
0445 }
0446 
#ifdef CONFIG_X86_32

/*
 * Set by print_ucode() through physical addresses when the early update
 * info could not be printed at update time; consumed (and cleared) later
 * by show_ucode_info_early().
 */
static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
    struct ucode_cpu_info uci;

    if (delay_ucode_info) {
        intel_cpu_collect_info(&uci);
        print_ucode_info(&uci, current_mc_date);
        delay_ucode_info = 0;
    }
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
    struct microcode_intel *mc;
    int *delay_ucode_info_p;
    int *current_mc_date_p;

    mc = uci->mc;
    if (!mc)
        return;

    /*
     * Write through physical addresses: on 32-bit this runs before paging
     * is enabled, so the statics' virtual addresses are not usable yet.
     */
    delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
    current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

    *delay_ucode_info_p = 1;
    *current_mc_date_p = mc->hdr.date;
}
#else

/* On 64-bit the early environment can print immediately - no delay dance. */
static inline void print_ucode(struct ucode_cpu_info *uci)
{
    struct microcode_intel *mc;

    mc = uci->mc;
    if (!mc)
        return;

    print_ucode_info(uci, mc->hdr.date);
}
#endif
0499 
/*
 * Load uci->mc into the CPU via the microcode-write MSR (0x79).
 * Returns UCODE_OK when the CPU already runs the same or a newer revision,
 * 0 after a successful update (or when there is no patch at all), and -1
 * when the CPU did not accept the update.  @early selects the printing
 * variant that is safe before the console is up.
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
    struct microcode_intel *mc;
    u32 rev;

    mc = uci->mc;
    if (!mc)
        return 0;

    /*
     * Save us the MSR write below - which is a particular expensive
     * operation - when the other hyperthread has updated the microcode
     * already.
     */
    rev = intel_get_microcode_revision();
    if (rev >= mc->hdr.rev) {
        uci->cpu_sig.rev = rev;
        return UCODE_OK;
    }

    /*
     * Writeback and invalidate caches before updating microcode to avoid
     * internal issues depending on what the microcode is updating.
     */
    native_wbinvd();

    /* write microcode via MSR 0x79 */
    native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

    /* The new revision must read back, otherwise the CPU rejected it. */
    rev = intel_get_microcode_revision();
    if (rev != mc->hdr.rev)
        return -1;

    uci->cpu_sig.rev = rev;

    if (early)
        print_ucode(uci);
    else
        print_ucode_info(uci, mc->hdr.date);

    return 0;
}
0542 
/*
 * Scan builtin/initrd microcode one final time before the initrd is
 * jettisoned and save matching patches into the permanent cache; the
 * cache then re-publishes intel_ucode_patch with a stable address for
 * resume/hotplug.  Always returns 0.
 */
int __init save_microcode_in_initrd_intel(void)
{
    struct ucode_cpu_info uci;
    struct cpio_data cp;

    /*
     * initrd is going away, clear patch ptr. We will scan the microcode one
     * last time before jettisoning and save a patch, if found. Then we will
     * update that pointer too, with a stable patch address to use when
     * resuming the cores.
     */
    intel_ucode_patch = NULL;

    if (!load_builtin_intel_microcode(&cp))
        cp = find_microcode_in_initrd(ucode_path, false);

    /* Nothing found anywhere - nothing to save. */
    if (!(cp.data && cp.size))
        return 0;

    intel_cpu_collect_info(&uci);

    /* save=true: stash every matching patch into the cache. */
    scan_microcode(cp.data, cp.size, &uci, true);

    show_saved_mc();

    return 0;
}
0570 
/*
 * Locate microcode for the current CPU: try the builtin firmware blob
 * first, then the initrd.  Fills in @uci's cpu_sig and returns a pointer
 * to the newest matching patch inside the blob, or NULL.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
    static const char *path;
    struct cpio_data cp;
    bool use_pa;

    /*
     * On 32-bit this runs before paging is enabled, so the path string
     * must be referenced through its physical address.
     */
    if (IS_ENABLED(CONFIG_X86_32)) {
        path      = (const char *)__pa_nodebug(ucode_path);
        use_pa    = true;
    } else {
        path      = ucode_path;
        use_pa    = false;
    }

    /* try built-in microcode first */
    if (!load_builtin_intel_microcode(&cp))
        cp = find_microcode_in_initrd(path, use_pa);

    if (!(cp.data && cp.size))
        return NULL;

    intel_cpu_collect_info(uci);

    return scan_microcode(cp.data, cp.size, uci, false);
}
0599 
0600 void __init load_ucode_intel_bsp(void)
0601 {
0602     struct microcode_intel *patch;
0603     struct ucode_cpu_info uci;
0604 
0605     patch = __load_ucode_intel(&uci);
0606     if (!patch)
0607         return;
0608 
0609     uci.mc = patch;
0610 
0611     apply_microcode_early(&uci, true);
0612 }
0613 
/*
 * Early microcode load on an AP.  The cached patch pointer is accessed
 * through its physical address on 32-bit (pre-paging).  If applying the
 * cached patch fails, the cache is dropped and the blob is rescanned for
 * a patch matching this particular CPU (mixed-stepping systems).
 */
void load_ucode_intel_ap(void)
{
    struct microcode_intel *patch, **iup;
    struct ucode_cpu_info uci;

    if (IS_ENABLED(CONFIG_X86_32))
        iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
    else
        iup = &intel_ucode_patch;

reget:
    if (!*iup) {
        /* Nothing cached yet - scan builtin/initrd microcode. */
        patch = __load_ucode_intel(&uci);
        if (!patch)
            return;

        *iup = patch;
    }

    uci.mc = *iup;

    if (apply_microcode_early(&uci, true)) {
        /* Mixed-silicon system? Try to refetch the proper patch: */
        *iup = NULL;

        goto reget;
    }
}
0642 
0643 static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
0644 {
0645     struct microcode_header_intel *phdr;
0646     struct ucode_patch *iter, *tmp;
0647 
0648     list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
0649 
0650         phdr = (struct microcode_header_intel *)iter->data;
0651 
0652         if (phdr->rev <= uci->cpu_sig.rev)
0653             continue;
0654 
0655         if (!find_matching_signature(phdr,
0656                          uci->cpu_sig.sig,
0657                          uci->cpu_sig.pf))
0658             continue;
0659 
0660         return iter->data;
0661     }
0662     return NULL;
0663 }
0664 
/*
 * Re-apply the newest cached patch on the current CPU.  No-op when the
 * cache holds nothing newer than the running revision.
 */
void reload_ucode_intel(void)
{
    struct microcode_intel *p;
    struct ucode_cpu_info uci;

    intel_cpu_collect_info(&uci);

    p = find_patch(&uci);
    if (!p)
        return;

    uci.mc = p;

    apply_microcode_early(&uci, false);
}
0680 
/*
 * Fill @csig with the signature, platform flags and running microcode
 * revision of CPU @cpu_num.  Always returns 0.
 *
 * NOTE(review): cpuid_eax()/rdmsr() act on the calling CPU, so this
 * presumably executes on @cpu_num itself - confirm against the core's
 * calling convention.
 */
static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
{
    static struct cpu_signature prev;
    struct cpuinfo_x86 *c = &cpu_data(cpu_num);
    unsigned int val[2];

    memset(csig, 0, sizeof(*csig));

    csig->sig = cpuid_eax(0x00000001);

    /* Platform flags only on model >= 5 or families newer than 6. */
    if ((c->x86_model >= 5) || (c->x86 > 6)) {
        /* get processor flags from MSR 0x17 */
        rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
        csig->pf = 1 << ((val[1] >> 18) & 7);
    }

    csig->rev = c->microcode;

    /* No extra locking on prev, races are harmless. */
    if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
        pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
            csig->sig, csig->pf, csig->rev);
        prev = *csig;
    }

    return 0;
}
0708 
/*
 * Late-loading callback: apply the newest suitable patch - from the cache
 * if possible, otherwise the one held in uci->mc - on @cpu.  Must execute
 * on @cpu itself (enforced by the WARN_ON below).  On success updates the
 * per-CPU revision and, on the BSP, boot_cpu_data's revision too.
 */
static enum ucode_state apply_microcode_intel(int cpu)
{
    struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
    struct cpuinfo_x86 *c = &cpu_data(cpu);
    bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
    struct microcode_intel *mc;
    enum ucode_state ret;
    static int prev_rev;
    u32 rev;

    /* We should bind the task to the CPU */
    if (WARN_ON(raw_smp_processor_id() != cpu))
        return UCODE_ERROR;

    /* Look for a newer patch in our cache: */
    mc = find_patch(uci);
    if (!mc) {
        mc = uci->mc;
        if (!mc)
            return UCODE_NFOUND;
    }

    /*
     * Save us the MSR write below - which is a particular expensive
     * operation - when the other hyperthread has updated the microcode
     * already.
     */
    rev = intel_get_microcode_revision();
    if (rev >= mc->hdr.rev) {
        ret = UCODE_OK;
        goto out;
    }

    /*
     * Writeback and invalidate caches before updating microcode to avoid
     * internal issues depending on what the microcode is updating.
     */
    native_wbinvd();

    /* write microcode via MSR 0x79 */
    wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

    rev = intel_get_microcode_revision();

    /* The new revision must read back, otherwise the CPU rejected it. */
    if (rev != mc->hdr.rev) {
        pr_err("CPU%d update to revision 0x%x failed\n",
               cpu, mc->hdr.rev);
        return UCODE_ERROR;
    }

    /* Print once per revision bump, from the BSP only. */
    if (bsp && rev != prev_rev) {
        pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
            rev,
            mc->hdr.date & 0xffff,
            mc->hdr.date >> 24,
            (mc->hdr.date >> 16) & 0xff);
        prev_rev = rev;
    }

    ret = UCODE_UPDATED;

out:
    uci->cpu_sig.rev = rev;
    c->microcode     = rev;

    /* Update boot_cpu_data's revision too, if we're on the BSP: */
    if (bsp)
        boot_cpu_data.microcode = rev;

    return ret;
}
0780 
/*
 * Parse a stream of concatenated microcode patches from @iter, sanity-check
 * each one and keep the newest that matches this CPU and beats its current
 * revision.  The winner replaces uci->mc and is also saved for early
 * loading.  Returns UCODE_NEW when a usable patch was found, UCODE_NFOUND
 * when none matched, UCODE_ERROR when the stream was malformed/truncated.
 */
static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
{
    struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
    unsigned int curr_mc_size = 0, new_mc_size = 0;
    enum ucode_state ret = UCODE_OK;
    int new_rev = uci->cpu_sig.rev;
    u8 *new_mc = NULL, *mc = NULL;
    unsigned int csig, cpf;

    while (iov_iter_count(iter)) {
        struct microcode_header_intel mc_header;
        unsigned int mc_size, data_size;
        u8 *data;

        if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
            pr_err("error! Truncated or inaccessible header in microcode data file\n");
            break;
        }

        mc_size = get_totalsize(&mc_header);
        if (mc_size < sizeof(mc_header)) {
            pr_err("error! Bad data in microcode data file (totalsize too small)\n");
            break;
        }
        data_size = mc_size - sizeof(mc_header);
        if (data_size > iov_iter_count(iter)) {
            pr_err("error! Bad data in microcode data file (truncated file?)\n");
            break;
        }

        /* For performance reasons, reuse mc area when possible */
        if (!mc || mc_size > curr_mc_size) {
            vfree(mc);
            mc = vmalloc(mc_size);
            if (!mc)
                break;
            curr_mc_size = mc_size;
        }

        /* Reassemble header + payload into one contiguous buffer. */
        memcpy(mc, &mc_header, sizeof(mc_header));
        data = mc + sizeof(mc_header);
        if (!copy_from_iter_full(data, data_size, iter) ||
            microcode_sanity_check(mc, 1) < 0) {
            break;
        }

        csig = uci->cpu_sig.sig;
        cpf = uci->cpu_sig.pf;
        if (has_newer_microcode(mc, csig, cpf, new_rev)) {
            /* New best candidate: take ownership of the buffer. */
            vfree(new_mc);
            new_rev = mc_header.rev;
            new_mc  = mc;
            new_mc_size = mc_size;
            mc = NULL;  /* trigger new vmalloc */
            ret = UCODE_NEW;
        }
    }

    vfree(mc);

    /* Leftover bytes mean the loop above aborted on bad input. */
    if (iov_iter_count(iter)) {
        vfree(new_mc);
        return UCODE_ERROR;
    }

    if (!new_mc)
        return UCODE_NFOUND;

    /* Replace any previously loaded patch; uci->mc owns new_mc now. */
    vfree(uci->mc);
    uci->mc = (struct microcode_intel *)new_mc;

    /*
     * If early loading microcode is supported, save this mc into
     * permanent memory. So it will be loaded early when a CPU is hot added
     * or resumes.
     */
    save_mc_for_early(uci, new_mc, new_mc_size);

    pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
         cpu, new_rev, uci->cpu_sig.rev);

    return ret;
}
0864 
0865 static bool is_blacklisted(unsigned int cpu)
0866 {
0867     struct cpuinfo_x86 *c = &cpu_data(cpu);
0868 
0869     /*
0870      * Late loading on model 79 with microcode revision less than 0x0b000021
0871      * and LLC size per core bigger than 2.5MB may result in a system hang.
0872      * This behavior is documented in item BDF90, #334165 (Intel Xeon
0873      * Processor E7-8800/4800 v4 Product Family).
0874      */
0875     if (c->x86 == 6 &&
0876         c->x86_model == INTEL_FAM6_BROADWELL_X &&
0877         c->x86_stepping == 0x01 &&
0878         llc_size_per_core > 2621440 &&
0879         c->microcode < 0x0b000021) {
0880         pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
0881         pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
0882         return true;
0883     }
0884 
0885     return false;
0886 }
0887 
0888 static enum ucode_state request_microcode_fw(int cpu, struct device *device,
0889                          bool refresh_fw)
0890 {
0891     struct cpuinfo_x86 *c = &cpu_data(cpu);
0892     const struct firmware *firmware;
0893     struct iov_iter iter;
0894     enum ucode_state ret;
0895     struct kvec kvec;
0896     char name[30];
0897 
0898     if (is_blacklisted(cpu))
0899         return UCODE_NFOUND;
0900 
0901     sprintf(name, "intel-ucode/%02x-%02x-%02x",
0902         c->x86, c->x86_model, c->x86_stepping);
0903 
0904     if (request_firmware_direct(&firmware, name, device)) {
0905         pr_debug("data file %s load failed\n", name);
0906         return UCODE_NFOUND;
0907     }
0908 
0909     kvec.iov_base = (void *)firmware->data;
0910     kvec.iov_len = firmware->size;
0911     iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
0912     ret = generic_load_microcode(cpu, &iter);
0913 
0914     release_firmware(firmware);
0915 
0916     return ret;
0917 }
0918 
0919 static enum ucode_state
0920 request_microcode_user(int cpu, const void __user *buf, size_t size)
0921 {
0922     struct iov_iter iter;
0923     struct iovec iov;
0924 
0925     if (is_blacklisted(cpu))
0926         return UCODE_NFOUND;
0927 
0928     iov.iov_base = (void __user *)buf;
0929     iov.iov_len = size;
0930     iov_iter_init(&iter, WRITE, &iov, 1, size);
0931 
0932     return generic_load_microcode(cpu, &iter);
0933 }
0934 
/* Driver callbacks registered with the generic microcode core. */
static struct microcode_ops microcode_intel_ops = {
    .request_microcode_user       = request_microcode_user,
    .request_microcode_fw             = request_microcode_fw,
    .collect_cpu_info                 = collect_cpu_info,
    .apply_microcode                  = apply_microcode_intel,
};
0941 
/*
 * Compute the last-level cache share per core in bytes
 * (x86_cache_size is presumably in KB, hence the *1024 - TODO confirm).
 * do_div() is a macro that divides llc_size in place, leaving the quotient.
 */
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
    u64 llc_size = c->x86_cache_size * 1024ULL;

    do_div(llc_size, c->x86_max_cores);

    return (int)llc_size;
}
0950 
0951 struct microcode_ops * __init init_intel_microcode(void)
0952 {
0953     struct cpuinfo_x86 *c = &boot_cpu_data;
0954 
0955     if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
0956         cpu_has(c, X86_FEATURE_IA64)) {
0957         pr_err("Intel CPU family 0x%x not supported\n", c->x86);
0958         return NULL;
0959     }
0960 
0961     llc_size_per_core = calc_llc_size_per_core(c);
0962 
0963     return &microcode_intel_ops;
0964 }