// SPDX-License-Identifier: GPL-2.0-only
/*
 *  AMD CPU Microcode Update Driver for Linux
 *
 *  This driver allows to upgrade microcode on F10h AMD
 *  CPUs and later.
 *
 *  Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *	          2013-2018 Borislav Petkov <bp@alien8.de>
 *
 *  Author: Peter Oruba <peter.oruba@amd.com>
 *
 *  Based on work by:
 *  Tigran Aivazian <aivazian.tigran@gmail.com>
 *
 *  early loader:
 *  Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 *  Author: Jacob Shin <jacob.shin@amd.com>
 *  Fixes: Borislav Petkov <bp@suse.de>
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

static struct equiv_cpu_table {
	unsigned int num_entries;
	struct equiv_cpu_entry *entry;
} equiv_table;
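
/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd/builtin before jettisoning its contents. @mc is
 * the microcode patch we found to match.
 */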
struct cont_desc {
	struct microcode_amd *mc;
	u32 cpuid_1_eax;
	u32 psize;
	u8 *data;
	size_t size;
};

static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
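
/*
 * Microcode patch container file is prepended to the initrd in cpio
 * format. See Documentation/x86/microcode.rst
 */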
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	if (!et || !et->num_entries)
		return 0;

	for (i = 0; i < et->num_entries; i++) {
		struct equiv_cpu_entry *e = &et->entry[i];

		if (sig == e->installed_cpu)
			return e->equiv_cpu;
	}

	return 0;
}
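
/*
 * Check whether there is a valid microcode container file at the beginning
 * of @buf of size @buf_size. Set @early to suppress error messages in the
 * early loading path.
 */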
static bool verify_container(const u8 *buf, size_t buf_size, bool early)
{
	u32 cont_magic;

	if (buf_size <= CONTAINER_HDR_SZ) {
		if (!early)
			pr_debug("Truncated microcode container header.\n");

		return false;
	}

	cont_magic = *(const u32 *)buf;
	if (cont_magic != UCODE_MAGIC) {
		if (!early)
			pr_debug("Invalid magic value (0x%08x).\n", cont_magic);

		return false;
	}

	return true;
}
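
/*
 * Check whether there is a valid, non-truncated CPU equivalence table at the
 * beginning of @buf of size @buf_size. Set @early to suppress error messages
 * in the early loading path.
 */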
static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
{
	const u32 *hdr = (const u32 *)buf;
	u32 cont_type, equiv_tbl_len;

	if (!verify_container(buf, buf_size, early))
		return false;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		if (!early)
			pr_debug("Wrong microcode container equivalence table type: %u.\n",
				 cont_type);

		return false;
	}

	buf_size -= CONTAINER_HDR_SZ;

	equiv_tbl_len = hdr[2];
	if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
	    buf_size < equiv_tbl_len) {
		if (!early)
			pr_debug("Truncated equivalence table.\n");

		return false;
	}

	return true;
}
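
/*
 * Check whether there is a valid, non-truncated microcode patch section at the
 * beginning of @buf of size @buf_size. Set @early to suppress error messages
 * in the early loading path.
 *
 * On success, @sh_psize returns the patch size according to the section
 * header, in bytes.
 */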
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
{
	u32 p_type, p_size;
	const u32 *hdr;

	if (buf_size < SECTION_HDR_SIZE) {
		if (!early)
			pr_debug("Truncated patch section.\n");

		return false;
	}

	hdr = (const u32 *)buf;
	p_type = hdr[0];
	p_size = hdr[1];

	if (p_type != UCODE_UCODE_TYPE) {
		if (!early)
			pr_debug("Invalid type field (0x%x) in container file section header.\n",
				 p_type);

		return false;
	}

	if (p_size < sizeof(struct microcode_header_amd)) {
		if (!early)
			pr_debug("Patch of size %u too short.\n", p_size);

		return false;
	}

	*sh_psize = p_size;

	return true;
}
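
/*
 * Check whether the passed remaining file @buf_size is large enough to contain
 * a patch of the indicated @sh_psize (and also whether this size does not
 * exceed the per-family maximum). @sh_psize is the size read from the section
 * header.
 */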
static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
{
	u32 max_size;

	if (family >= 0x15)
		return min_t(u32, sh_psize, buf_size);

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824

	switch (family) {
	case 0x10 ... 0x12:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	default:
		WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
		return 0;
	}

	if (sh_psize > min_t(u32, buf_size, max_size))
		return 0;

	return sh_psize;
}
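
/*
 * Verify the patch in @buf.
 *
 * Returns:
 * negative: on error
 * positive: patch is not for this family, skip it
 * 0: success
 */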
static int
verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
{
	struct microcode_header_amd *mc_hdr;
	unsigned int ret;
	u32 sh_psize;
	u16 proc_id;
	u8 patch_fam;

	if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
		return -1;
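
	/*
	 * The section header length is not included in this indicated size
	 * but is present in the leftover file length so we need to subtract
	 * it before passing this value to the function below.
	 */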
	buf_size -= SECTION_HDR_SIZE;
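
	/*
	 * Check whether the remaining file length can hold a patch of the
	 * size indicated in the section header.
	 */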
	if (buf_size < sh_psize) {
		if (!early)
			pr_debug("Patch of size %u truncated.\n", sh_psize);

		return -1;
	}

	ret = __verify_patch_size(family, sh_psize, buf_size);
	if (!ret) {
		if (!early)
			pr_debug("Per-family patch size mismatch.\n");
		return -1;
	}

	*patch_size = sh_psize;

	mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		if (!early)
			pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
		return -1;
	}

	proc_id = mc_hdr->processor_rev_id;
	patch_fam = 0xf + (proc_id >> 12);
	if (patch_fam != family)
		return 1;

	return 0;
}
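
/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together.
 *
 * Returns the amount of bytes consumed while scanning. @desc contains all the
 * data we're going to use in later stages of the application.
 */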
static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
	struct equiv_cpu_table table;
	size_t orig_size = size;
	u32 *hdr = (u32 *)ucode;
	u16 eq_id;
	u8 *buf;

	if (!verify_equivalence_table(ucode, size, true))
		return 0;

	buf = ucode;

	table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
	table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);
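
	/*
	 * Find the equivalence ID of our CPU in this table. Even if this table
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */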
	eq_id = find_equiv_id(&table, desc->cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;
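
	/*
	 * Scan through the rest of the container to find where it ends. We do
	 * some basic sanity-checking too.
	 */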
	while (size > 0) {
		struct microcode_amd *mc;
		u32 patch_size;
		int ret;

		ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
		if (ret < 0) {
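			/*
			 * Patch verification failed, skip to the next
			 * container, if there's one.
			 */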
			goto out;
		} else if (ret > 0) {
			goto skip;
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (eq_id == mc->hdr.processor_rev_id) {
			desc->psize = patch_size;
			desc->mc = mc;
		}

skip:
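		/* Skip patch section header too: */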
		buf  += patch_size + SECTION_HDR_SIZE;
		size -= patch_size + SECTION_HDR_SIZE;
	}
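
	/*
	 * If we have found a patch (desc->mc), it means we're looking at the
	 * container which has a patch for this CPU so return 0 to mean, @ucode
	 * already points to the proper container. Otherwise, we return the size
	 * we scanned so that we can advance to the next container in the
	 * buffer.
	 */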
	if (desc->mc) {
		desc->data = ucode;
		desc->size = orig_size - size;

		return 0;
	}

out:
	return orig_size - size;
}
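
/*
 * Scan the ucode blob for the proper container as we can have multiple
 * containers glued together.
 */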
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
	while (size) {
		size_t s = parse_container(ucode, size, desc);
		if (!s)
			return;
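
		/* catch wraparound */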
		if (size >= s) {
			ucode += s;
			size  -= s;
		} else {
			return;
		}
	}
}

static int __apply_microcode_amd(struct microcode_amd *mc)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
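
	/* verify patch application was successful */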
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;

	return 0;
}
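
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if container found (sets @desc), false otherwise.
 */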
static bool
apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
{
	struct cont_desc desc = { 0 };
	u8 (*patch)[PATCH_MAX_SIZE];
	struct microcode_amd *mc;
	u32 rev, dummy, *new_rev;
	bool ret = false;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	patch	= &amd_ucode_patch;
#endif

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);

	mc = desc.mc;
	if (!mc)
		return ret;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev >= mc->hdr.patch_id)
		return ret;

	if (!__apply_microcode_amd(mc)) {
		*new_rev = mc->hdr.patch_id;
		ret = true;

		if (save_patch)
			memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
	}

	return ret;
}

static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
		return false;

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	if (firmware_request_builtin(&fw, fw_name)) {
		cp->size = fw.size;
		cp->data = (void *)fw.data;
		return true;
	}

	return false;
}

static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
	struct ucode_cpu_info *uci;
	struct cpio_data cp;
	const char *path;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		uci = ucode_cpu_info;
		path = ucode_path;
		use_pa = false;
	}

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
		cp = find_microcode_in_initrd(path, use_pa);
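
	/* Needed in load_microcode_amd() */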
	uci->cpu_sig.sig = cpuid_1_eax;

	*ret = cp;
}

void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
{
	struct cpio_data cp = { };

	__load_ucode_amd(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
}

void load_ucode_amd_ap(unsigned int cpuid_1_eax)
{
	struct microcode_amd *mc;
	struct cpio_data cp;
	u32 *new_rev, rev, dummy;

	if (IS_ENABLED(CONFIG_X86_32)) {
		mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
		new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	} else {
		mc = (struct microcode_amd *)amd_ucode_patch;
		new_rev = &ucode_new_rev;
	}

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
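
	/* Check whether we have saved a new patch already: */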
	if (*new_rev && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			*new_rev = mc->hdr.patch_id;
			return;
		}
	}

	__load_ucode_amd(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return;

	apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
}

static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);

int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	cp = find_microcode_in_initrd(ucode_path, false);
	if (!(cp.data && cp.size))
		return -EINVAL;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}

void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev, dummy __always_unused;

	mc = (struct microcode_amd *)amd_ucode_patch;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}

static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	return find_equiv_id(&equiv_table, uci->cpu_sig.sig);
}
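
/*
 * a small, trivial cache of per-family microcode patches
 */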
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;

	return NULL;
}

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id) {
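				/* we already have the latest patch */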
				kfree(new_patch->data);
				kfree(new_patch);
				return;
			}

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}

	list_add_tail(&new_patch->plist, &microcode_cache);
}

static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;
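
	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */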
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

static enum ucode_state apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev, dummy __always_unused;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return UCODE_NFOUND;

	mc_amd = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
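
	/* need to apply patch? */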
	if (rev >= mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
		       cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
	}

	rev = mc_amd->hdr.patch_id;
	ret = UCODE_UPDATED;

	pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;
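
	/* Update boot_cpu_data's revision too, if we're on the BSP: */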
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return ret;
}

static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
	u32 equiv_tbl_len;
	const u32 *hdr;

	if (!verify_equivalence_table(buf, buf_size, false))
		return 0;

	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
		return 0;
	}

	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);
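
	/* add header length */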
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
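
/*
 * Return a non-negative value even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */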
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
				unsigned int *patch_size)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	u16 proc_id;
	int ret;

	ret = verify_patch(family, fw, leftover, patch_size, false);
	if (ret)
		return ret;

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);
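
	/* ... and add to cache. */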
	update_cache(patch);

	return 0;
}

static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;

	offset = install_equiv_cpu_table(data, size);
	if (!offset)
		return UCODE_ERROR;

	fw   += offset;
	size -= offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return UCODE_ERROR;
	}

	while (size > 0) {
		unsigned int crnt_size = 0;
		int ret;

		ret = verify_and_add_patch(family, fw, size, &crnt_size);
		if (ret < 0)
			return UCODE_ERROR;

		fw   += crnt_size + SECTION_HDR_SIZE;
		size -= (crnt_size + SECTION_HDR_SIZE);
	}

	return UCODE_OK;
}

static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
	struct ucode_patch *p;
	enum ucode_state ret;
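
	/* free old equiv table */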
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK) {
		cleanup();
		return ret;
	}

	p = find_patch(0);
	if (!p)
		return ret;

	if (boot_cpu_data.microcode >= p->patch_id)
		return ret;

	ret = UCODE_NEW;
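
	/* save BSP's matching patch for early load */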
	if (!save)
		return ret;

	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));

	return ret;
}
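
/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */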
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;
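
	/* reload ucode container only on the boot cpu */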
	if (!refresh_fw || !bsp)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (!verify_container(fw->data, fw->size, false))
		goto fw_release;

	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);

fw_release:
	release_firmware(fw);

out:
	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user = request_microcode_user,
	.request_microcode_fw = request_microcode_amd,
	.collect_cpu_info = collect_cpu_info_amd,
	.apply_microcode = apply_microcode_amd,
	.microcode_fini_cpu = microcode_fini_cpu_amd,
};

struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}