// SPDX-License-Identifier: GPL-2.0-only
/*
 *	Local APIC handling, local APIC timers
 */
0018 #include <linux/perf_event.h>
0019 #include <linux/kernel_stat.h>
0020 #include <linux/mc146818rtc.h>
0021 #include <linux/acpi_pmtmr.h>
0022 #include <linux/clockchips.h>
0023 #include <linux/interrupt.h>
0024 #include <linux/memblock.h>
0025 #include <linux/ftrace.h>
0026 #include <linux/ioport.h>
0027 #include <linux/export.h>
0028 #include <linux/syscore_ops.h>
0029 #include <linux/delay.h>
0030 #include <linux/timex.h>
0031 #include <linux/i8253.h>
0032 #include <linux/dmar.h>
0033 #include <linux/init.h>
0034 #include <linux/cpu.h>
0035 #include <linux/dmi.h>
0036 #include <linux/smp.h>
0037 #include <linux/mm.h>
0038
0039 #include <asm/trace/irq_vectors.h>
0040 #include <asm/irq_remapping.h>
0041 #include <asm/pc-conf-reg.h>
0042 #include <asm/perf_event.h>
0043 #include <asm/x86_init.h>
0044 #include <linux/atomic.h>
0045 #include <asm/barrier.h>
0046 #include <asm/mpspec.h>
0047 #include <asm/i8259.h>
0048 #include <asm/proto.h>
0049 #include <asm/traps.h>
0050 #include <asm/apic.h>
0051 #include <asm/acpi.h>
0052 #include <asm/io_apic.h>
0053 #include <asm/desc.h>
0054 #include <asm/hpet.h>
0055 #include <asm/mtrr.h>
0056 #include <asm/time.h>
0057 #include <asm/smp.h>
0058 #include <asm/mce.h>
0059 #include <asm/tsc.h>
0060 #include <asm/hypervisor.h>
0061 #include <asm/cpu_device_id.h>
0062 #include <asm/intel-family.h>
0063 #include <asm/irq_regs.h>
0064
0065 unsigned int num_processors;
0066
0067 unsigned disabled_cpus;
0068
0069
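/* The local APIC ID of the processor that is doing the boot-up */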
0070 unsigned int boot_cpu_physical_apicid __ro_after_init = -1U;
0071 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
0072
0073 u8 boot_cpu_apic_version __ro_after_init;
0074
0075
0076
0077
0078 static unsigned int max_physical_apicid;
0079
0080
0081
0082
0083 physid_mask_t phys_cpu_present_map;
0084
0085
0086
0087
0088
0089
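/*
 * APIC ID to be excluded from bringup, set via the disable_cpu_apicid=
 * command line parameter (see apic_set_disabled_cpu_apicid() below);
 * BAD_APICID means no CPU is excluded.
 */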
0090 static unsigned int disabled_cpu_apicid __ro_after_init = BAD_APICID;
0091
0092
0093
0094
0095
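/*
 * Which CPUs receive external NMIs: only the BSP by default, overridable
 * with the apic_extnmi= command line parameter.
 */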
0096 static int apic_extnmi __ro_after_init = APIC_EXTNMI_BSP;
0097
0098
0099
0100
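/*
 * Set when the platform can address APIC IDs above 255 in MSI messages via
 * the extended destination ID bits, without interrupt remapping.
 */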
0101 static bool virt_ext_dest_id __ro_after_init;
0102
0103
0104
0105
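/* Per-CPU maps from Linux CPU number to local APIC ID and ACPI processor ID */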
0106 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
0107 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
0108 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
0109 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
0110 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
0111 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
0112
0113 #ifdef CONFIG_X86_32
0114
0115
0116
0117
0118
0119
0120
0121 DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
0122
0123
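/* Non-zero if the local APIC was enabled by us via MSR_IA32_APICBASE, not by the BIOS */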
0124 static int enabled_via_apicbase __ro_after_init;
0125
0126
0127
0128
0129
0130
0131
0132
0133
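/*
 * Interrupt Mode Configuration Register (IMCR) handling: the IMCR selects
 * whether the BSP gets its interrupts straight from the master 8259 PIC or
 * routed through the local APIC.
 */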
0134 static inline void imcr_pic_to_apic(void)
0135 {
0136
0137 pc_conf_set(PC_CONF_MPS_IMCR, 0x01);
0138 }
0139
0140 static inline void imcr_apic_to_pic(void)
0141 {
0142
0143 pc_conf_set(PC_CONF_MPS_IMCR, 0x00);
0144 }
0145 #endif
0146
0147
0148
0149
0150
0151
0152 static int force_enable_local_apic __initdata;
0153
0154
0155
0156
0157 static int __init parse_lapic(char *arg)
0158 {
0159 if (IS_ENABLED(CONFIG_X86_32) && !arg)
0160 force_enable_local_apic = 1;
0161 else if (arg && !strncmp(arg, "notscdeadline", 13))
0162 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
0163 return 0;
0164 }
0165 early_param("lapic", parse_lapic);
0166
0167 #ifdef CONFIG_X86_64
0168 static int apic_calibrate_pmtmr __initdata;
0169 static __init int setup_apicpmtimer(char *s)
0170 {
0171 apic_calibrate_pmtmr = 1;
0172 notsc_setup(NULL);
0173 return 1;
0174 }
0175 __setup("apicpmtimer", setup_apicpmtimer);
0176 #endif
0177
0178 unsigned long mp_lapic_addr __ro_after_init;
0179 int disable_apic __ro_after_init;
0180
0181 static int disable_apic_timer __initdata;
0182
0183 int local_apic_timer_c2_ok __ro_after_init;
0184 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
0185
0186
0187
0188
0189 int apic_verbosity __ro_after_init;
0190
0191 int pic_mode __ro_after_init;
0192
0193
0194 int smp_found_config __ro_after_init;
0195
0196 static struct resource lapic_resource = {
0197 .name = "Local APIC",
0198 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
0199 };
0200
0201 unsigned int lapic_timer_period = 0;
0202
0203 static void apic_pm_activate(void);
0204
0205 static unsigned long apic_phys __ro_after_init;
0206
0207
0208
0209
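/* Read the local APIC version from the version register */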
0210 static inline int lapic_get_version(void)
0211 {
0212 return GET_APIC_VERSION(apic_read(APIC_LVR));
0213 }
0214
0215
0216
0217
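/* Check whether this is an integrated APIC or a discrete 82489DX */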
0218 static inline int lapic_is_integrated(void)
0219 {
0220 return APIC_INTEGRATED(lapic_get_version());
0221 }
0222
0223
0224
0225
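/* Check whether this is a modern APIC (AMD fam 0xf+, Hygon, or version >= 0x14) */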
0226 static int modern_apic(void)
0227 {
0228
0229 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
0230 boot_cpu_data.x86 >= 0xf)
0231 return 1;
0232
0233
0234 if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
0235 return 1;
0236
0237 return lapic_get_version() >= 0x14;
0238 }
0239
0240
0241
0242
0243
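/*
 * Switch to the NOOP apic driver; after this apic_read()/apic_write()
 * do nothing.
 */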
0244 static void __init apic_disable(void)
0245 {
0246 pr_info("APIC: switched to apic NOOP\n");
0247 apic = &apic_noop;
0248 }
0249
0250 void native_apic_wait_icr_idle(void)
0251 {
0252 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
0253 cpu_relax();
0254 }
0255
0256 u32 native_safe_apic_wait_icr_idle(void)
0257 {
0258 u32 send_status;
0259 int timeout;
0260
0261 timeout = 0;
0262 do {
0263 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
0264 if (!send_status)
0265 break;
0266 inc_irq_stat(icr_read_retry_count);
0267 udelay(100);
0268 } while (timeout++ < 1000);
0269
0270 return send_status;
0271 }
0272
0273 void native_apic_icr_write(u32 low, u32 id)
0274 {
0275 unsigned long flags;
0276
0277 local_irq_save(flags);
0278 apic_write(APIC_ICR2, SET_XAPIC_DEST_FIELD(id));
0279 apic_write(APIC_ICR, low);
0280 local_irq_restore(flags);
0281 }
0282
0283 u64 native_apic_icr_read(void)
0284 {
0285 u32 icr1, icr2;
0286
0287 icr2 = apic_read(APIC_ICR2);
0288 icr1 = apic_read(APIC_ICR);
0289
0290 return icr1 | ((u64)icr2 << 32);
0291 }
0292
0293 #ifdef CONFIG_X86_32
0294
0295
0296
0297 int get_physical_broadcast(void)
0298 {
0299 return modern_apic() ? 0xff : 0xf;
0300 }
0301 #endif
0302
0303
0304
0305
0306 int lapic_get_maxlvt(void)
0307 {
	/*
	 * 82489DX parts do not report the number of LVT entries in the
	 * version register, so assume two; integrated APICs report it.
	 */
0312 return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
0313 }
0314
0315
0316
0317
0318
0319
0320 #define APIC_DIVISOR 16
0321 #define TSC_DIVISOR 8
0322
0323
0324 #define I82489DX_BASE_DIVIDER (((0x2) << 18))
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
0337 {
0338 unsigned int lvtt_value, tmp_value;
0339
0340 lvtt_value = LOCAL_TIMER_VECTOR;
0341 if (!oneshot)
0342 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
0343 else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
0344 lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;
0345
	/*
	 * The discrete 82489DX needs its timer base divider configured
	 * through extra bits in the LVTT register; integrated APICs do
	 * not use this field.
	 */
0352 if (!lapic_is_integrated())
0353 lvtt_value |= I82489DX_BASE_DIVIDER;
0354
0355 if (!irqen)
0356 lvtt_value |= APIC_LVT_MASKED;
0357
0358 apic_write(APIC_LVTT, lvtt_value);
0359
0360 if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
		/*
		 * In xAPIC mode the memory-mapped LVTT write above is not
		 * serializing with respect to the TSC_DEADLINE MSR write
		 * that follows later; an MFENCE is sufficient to order them.
		 */
0366 asm volatile("mfence" : : : "memory");
0367 return;
0368 }
0369
0370
0371
0372
0373 tmp_value = apic_read(APIC_TDCR);
0374 apic_write(APIC_TDCR,
0375 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
0376 APIC_TDR_DIV_16);
0377
0378 if (!oneshot)
0379 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
0380 }
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
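/*
 * AMD extended LVT registers: offsets are reserved atomically so that all
 * CPUs (and all users of setup_APIC_eilvt()) end up with an identical
 * offset-to-vector assignment.
 */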
0402 static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
0403
0404 static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
0405 {
0406 return (old & APIC_EILVT_MASKED)
0407 || (new == APIC_EILVT_MASKED)
0408 || ((new & ~APIC_EILVT_MASKED) == old);
0409 }
0410
0411 static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
0412 {
0413 unsigned int rsvd, vector;
0414
0415 if (offset >= APIC_EILVT_NR_MAX)
0416 return ~0;
0417
0418 rsvd = atomic_read(&eilvt_offsets[offset]);
0419 do {
0420 vector = rsvd & ~APIC_EILVT_MASKED;
0421 if (vector && !eilvt_entry_is_changeable(vector, new))
0422
0423 return rsvd;
0424 rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
0425 } while (rsvd != new);
0426
0427 rsvd &= ~APIC_EILVT_MASKED;
0428 if (rsvd && rsvd != vector)
0429 pr_info("LVT offset %d assigned for vector 0x%02x\n",
0430 offset, rsvd);
0431
0432 return new;
0433 }
0434
0435
0436
0437
0438
0439
0440
0441 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
0442 {
0443 unsigned long reg = APIC_EILVTn(offset);
0444 unsigned int new, old, reserved;
0445
0446 new = (mask << 16) | (msg_type << 8) | vector;
0447 old = apic_read(reg);
0448 reserved = reserve_eilvt_offset(offset, new);
0449
0450 if (reserved != new) {
0451 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
0452 "vector 0x%x, but the register is already in use for "
0453 "vector 0x%x on another cpu\n",
0454 smp_processor_id(), reg, offset, new, reserved);
0455 return -EINVAL;
0456 }
0457
0458 if (!eilvt_entry_is_changeable(old, new)) {
0459 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
0460 "vector 0x%x, but the register is already in use for "
0461 "vector 0x%x on this cpu\n",
0462 smp_processor_id(), reg, offset, new, old);
0463 return -EBUSY;
0464 }
0465
0466 apic_write(reg, new);
0467
0468 return 0;
0469 }
0470 EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
0471
0472
0473
0474
0475 static int lapic_next_event(unsigned long delta,
0476 struct clock_event_device *evt)
0477 {
0478 apic_write(APIC_TMICT, delta);
0479 return 0;
0480 }
0481
0482 static int lapic_next_deadline(unsigned long delta,
0483 struct clock_event_device *evt)
0484 {
0485 u64 tsc;
0486
0487
0488 weak_wrmsr_fence();
0489
0490 tsc = rdtsc();
0491 wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
0492 return 0;
0493 }
0494
0495 static int lapic_timer_shutdown(struct clock_event_device *evt)
0496 {
0497 unsigned int v;
0498
0499
0500 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
0501 return 0;
0502
0503 v = apic_read(APIC_LVTT);
0504 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
0505 apic_write(APIC_LVTT, v);
0506 apic_write(APIC_TMICT, 0);
0507 return 0;
0508 }
0509
0510 static inline int
0511 lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
0512 {
0513
0514 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
0515 return 0;
0516
0517 __setup_APIC_LVTT(lapic_timer_period, oneshot, 1);
0518 return 0;
0519 }
0520
0521 static int lapic_timer_set_periodic(struct clock_event_device *evt)
0522 {
0523 return lapic_timer_set_periodic_oneshot(evt, false);
0524 }
0525
0526 static int lapic_timer_set_oneshot(struct clock_event_device *evt)
0527 {
0528 return lapic_timer_set_periodic_oneshot(evt, true);
0529 }
0530
0531
0532
0533
0534 static void lapic_timer_broadcast(const struct cpumask *mask)
0535 {
0536 #ifdef CONFIG_SMP
0537 apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
0538 #endif
0539 }
0540
0541
0542
0543
0544
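/*
 * The local APIC timer clockevent device; it is copied into each CPU's
 * lapic_events instance by setup_APIC_timer().
 */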
0545 static struct clock_event_device lapic_clockevent = {
0546 .name = "lapic",
0547 .features = CLOCK_EVT_FEAT_PERIODIC |
0548 CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
0549 | CLOCK_EVT_FEAT_DUMMY,
0550 .shift = 32,
0551 .set_state_shutdown = lapic_timer_shutdown,
0552 .set_state_periodic = lapic_timer_set_periodic,
0553 .set_state_oneshot = lapic_timer_set_oneshot,
0554 .set_state_oneshot_stopped = lapic_timer_shutdown,
0555 .set_next_event = lapic_next_event,
0556 .broadcast = lapic_timer_broadcast,
0557 .rating = 100,
0558 .irq = -1,
0559 };
0560 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
0561
0562 static const struct x86_cpu_id deadline_match[] __initconst = {
0563 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a),
0564 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f),
0565
0566 X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X, 0x0b000020),
0567
0568 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
0569 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
0570 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
0571 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
0572
0573 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
0574 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
0575 X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
0576
0577 X86_MATCH_INTEL_FAM6_MODEL( HASWELL, 0x22),
0578 X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L, 0x20),
0579 X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G, 0x17),
0580
0581 X86_MATCH_INTEL_FAM6_MODEL( BROADWELL, 0x25),
0582 X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G, 0x17),
0583
0584 X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L, 0xb2),
0585 X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE, 0xb2),
0586
0587 X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L, 0x52),
0588 X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE, 0x52),
0589
0590 {},
0591 };
0592
0593 static __init bool apic_validate_deadline_timer(void)
0594 {
0595 const struct x86_cpu_id *m;
0596 u32 rev;
0597
0598 if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
0599 return false;
0600 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
0601 return true;
0602
0603 m = x86_match_cpu(deadline_match);
0604 if (!m)
0605 return true;
0606
0607 rev = (u32)m->driver_data;
0608
0609 if (boot_cpu_data.microcode >= rev)
0610 return true;
0611
0612 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
0613 pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
0614 "please update microcode to version: 0x%x (or later)\n", rev);
0615 return false;
0616 }
0617
0618
0619
0620
0621
0622 static void setup_APIC_timer(void)
0623 {
0624 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
0625
0626 if (this_cpu_has(X86_FEATURE_ARAT)) {
0627 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
0628
0629 lapic_clockevent.rating = 150;
0630 }
0631
0632 memcpy(levt, &lapic_clockevent, sizeof(*levt));
0633 levt->cpumask = cpumask_of(smp_processor_id());
0634
0635 if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
0636 levt->name = "lapic-deadline";
0637 levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
0638 CLOCK_EVT_FEAT_DUMMY);
0639 levt->set_next_event = lapic_next_deadline;
0640 clockevents_config_and_register(levt,
0641 tsc_khz * (1000 / TSC_DIVISOR),
0642 0xF, ~0UL);
0643 } else
0644 clockevents_register_device(levt);
0645 }
0646
0647
0648
0649
0650
0651 static void __lapic_update_tsc_freq(void *info)
0652 {
0653 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
0654
0655 if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
0656 return;
0657
0658 clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
0659 }
0660
0661 void lapic_update_tsc_freq(void)
0662 {
0663
0664
0665
0666
0667
0668 on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
0669 }
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
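/*
 * Local APIC timer calibration: the free-running APIC timer is sampled at
 * the start and the end of a ~100ms window measured with the TSC (or
 * jiffies), cross-checked against the ACPI PM timer when available, and
 * the resulting delta yields lapic_timer_period.
 */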
0692 #define LAPIC_CAL_LOOPS (HZ/10)
0693
0694 static __initdata int lapic_cal_loops = -1;
0695 static __initdata long lapic_cal_t1, lapic_cal_t2;
0696 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
0697 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
0698 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
0699
0700
0701
0702
0703 static void __init lapic_cal_handler(struct clock_event_device *dev)
0704 {
0705 unsigned long long tsc = 0;
0706 long tapic = apic_read(APIC_TMCCT);
0707 unsigned long pm = acpi_pm_read_early();
0708
0709 if (boot_cpu_has(X86_FEATURE_TSC))
0710 tsc = rdtsc();
0711
0712 switch (lapic_cal_loops++) {
0713 case 0:
0714 lapic_cal_t1 = tapic;
0715 lapic_cal_tsc1 = tsc;
0716 lapic_cal_pm1 = pm;
0717 lapic_cal_j1 = jiffies;
0718 break;
0719
0720 case LAPIC_CAL_LOOPS:
0721 lapic_cal_t2 = tapic;
0722 lapic_cal_tsc2 = tsc;
0723 if (pm < lapic_cal_pm1)
0724 pm += ACPI_PM_OVRRUN;
0725 lapic_cal_pm2 = pm;
0726 lapic_cal_j2 = jiffies;
0727 break;
0728 }
0729 }
0730
0731 static int __init
0732 calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
0733 {
0734 const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
0735 const long pm_thresh = pm_100ms / 100;
0736 unsigned long mult;
0737 u64 res;
0738
0739 #ifndef CONFIG_X86_PM_TIMER
0740 return -1;
0741 #endif
0742
0743 apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
0744
0745
0746 if (!deltapm)
0747 return -1;
0748
0749 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
0750
0751 if (deltapm > (pm_100ms - pm_thresh) &&
0752 deltapm < (pm_100ms + pm_thresh)) {
0753 apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
0754 return 0;
0755 }
0756
0757 res = (((u64)deltapm) * mult) >> 22;
0758 do_div(res, 1000000);
0759 pr_warn("APIC calibration not consistent "
0760 "with PM-Timer: %ldms instead of 100ms\n", (long)res);
0761
0762
0763 res = (((u64)(*delta)) * pm_100ms);
0764 do_div(res, deltapm);
0765 pr_info("APIC delta adjusted to PM-Timer: "
0766 "%lu (%ld)\n", (unsigned long)res, *delta);
0767 *delta = (long)res;
0768
0769
0770 if (boot_cpu_has(X86_FEATURE_TSC)) {
0771 res = (((u64)(*deltatsc)) * pm_100ms);
0772 do_div(res, deltapm);
0773 apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
0774 "PM-Timer: %lu (%ld)\n",
0775 (unsigned long)res, *deltatsc);
0776 *deltatsc = (long)res;
0777 }
0778
0779 return 0;
0780 }
0781
0782 static int __init lapic_init_clockevent(void)
0783 {
0784 if (!lapic_timer_period)
0785 return -1;
0786
0787
0788 lapic_clockevent.mult = div_sc(lapic_timer_period/APIC_DIVISOR,
0789 TICK_NSEC, lapic_clockevent.shift);
0790 lapic_clockevent.max_delta_ns =
0791 clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
0792 lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
0793 lapic_clockevent.min_delta_ns =
0794 clockevent_delta2ns(0xF, &lapic_clockevent);
0795 lapic_clockevent.min_delta_ticks = 0xF;
0796
0797 return 0;
0798 }
0799
0800 bool __init apic_needs_pit(void)
0801 {
0802
0803
0804
0805
0806 if (!tsc_khz || !cpu_khz)
0807 return true;
0808
0809
0810 if (!boot_cpu_has(X86_FEATURE_APIC) || disable_apic)
0811 return true;
0812
0813
0814
0815
0816
0817
0818 if (apic_intr_mode == APIC_PIC ||
0819 apic_intr_mode == APIC_VIRTUAL_WIRE_NO_CONFIG)
0820 return true;
0821
0822
0823 if (!boot_cpu_has(X86_FEATURE_ARAT))
0824 return true;
0825
0826
0827 if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
0828 return false;
0829
0830
0831 if (disable_apic_timer)
0832 return true;
0833
0834
0835
0836
0837 return lapic_timer_period == 0;
0838 }
0839
0840 static int __init calibrate_APIC_clock(void)
0841 {
0842 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
0843 u64 tsc_perj = 0, tsc_start = 0;
0844 unsigned long jif_start;
0845 unsigned long deltaj;
0846 long delta, deltatsc;
0847 int pm_referenced = 0;
0848
0849 if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
0850 return 0;
0851
0852
0853
0854
0855
0856
0857 if (!lapic_init_clockevent()) {
0858 apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
0859 lapic_timer_period);
0860
0861
0862
0863
0864 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
0865 return 0;
0866 }
0867
0868 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
0869 "calibrating APIC timer ...\n");
0870
0871
0872
0873
0874
0875
0876 local_irq_disable();
0877
0878
0879
0880
0881
0882 __setup_APIC_LVTT(0xffffffff, 0, 0);
0883
0884
0885
0886
0887
0888
0889 jif_start = READ_ONCE(jiffies);
0890
0891 if (tsc_khz) {
0892 tsc_start = rdtsc();
0893 tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
0894 }
0895
0896
0897
0898
0899
0900 local_irq_enable();
0901
0902 while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
0903
0904 while (1) {
0905 if (tsc_khz) {
0906 u64 tsc_now = rdtsc();
0907 if ((tsc_now - tsc_start) >= tsc_perj) {
0908 tsc_start += tsc_perj;
0909 break;
0910 }
0911 } else {
0912 unsigned long jif_now = READ_ONCE(jiffies);
0913
0914 if (time_after(jif_now, jif_start)) {
0915 jif_start = jif_now;
0916 break;
0917 }
0918 }
0919 cpu_relax();
0920 }
0921
0922
0923 local_irq_disable();
0924 lapic_cal_handler(NULL);
0925 local_irq_enable();
0926 }
0927
0928 local_irq_disable();
0929
0930
0931 delta = lapic_cal_t1 - lapic_cal_t2;
0932 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
0933
0934 deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
0935
0936
0937 pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
0938 &delta, &deltatsc);
0939
0940 lapic_timer_period = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
0941 lapic_init_clockevent();
0942
0943 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
0944 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
0945 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
0946 lapic_timer_period);
0947
0948 if (boot_cpu_has(X86_FEATURE_TSC)) {
0949 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
0950 "%ld.%04ld MHz.\n",
0951 (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
0952 (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
0953 }
0954
0955 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
0956 "%u.%04u MHz.\n",
0957 lapic_timer_period / (1000000 / HZ),
0958 lapic_timer_period % (1000000 / HZ));
0959
0960
0961
0962
0963 if (lapic_timer_period < (1000000 / HZ)) {
0964 local_irq_enable();
0965 pr_warn("APIC frequency too slow, disabling apic timer\n");
0966 return -1;
0967 }
0968
0969 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
0970
0971
0972
0973
0974
0975
0976 if (!pm_referenced && global_clock_event) {
0977 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
0978
0979
0980
0981
0982 levt->event_handler = lapic_cal_handler;
0983 lapic_timer_set_periodic(levt);
0984 lapic_cal_loops = -1;
0985
0986
0987 local_irq_enable();
0988
0989 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
0990 cpu_relax();
0991
0992
0993 local_irq_disable();
0994 lapic_timer_shutdown(levt);
0995
0996
0997 deltaj = lapic_cal_j2 - lapic_cal_j1;
0998 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
0999
1000
1001 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
1002 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
1003 else
1004 levt->features |= CLOCK_EVT_FEAT_DUMMY;
1005 }
1006 local_irq_enable();
1007
1008 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
1009 pr_warn("APIC timer disabled due to verification failure\n");
1010 return -1;
1011 }
1012
1013 return 0;
1014 }
1015
1016
1017
1018
1019
1020
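/*
 * Set up the boot CPU's local APIC timer: calibrate it and register the
 * clockevent device.
 */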
1021 void __init setup_boot_APIC_clock(void)
1022 {
1023
1024
1025
1026
1027
1028
1029 if (disable_apic_timer) {
1030 pr_info("Disabling APIC timer\n");
1031
1032 if (num_possible_cpus() > 1) {
1033 lapic_clockevent.mult = 1;
1034 setup_APIC_timer();
1035 }
1036 return;
1037 }
1038
1039 if (calibrate_APIC_clock()) {
1040
1041 if (num_possible_cpus() > 1)
1042 setup_APIC_timer();
1043 return;
1044 }
1045
1046
1047
1048
1049
1050
1051 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
1052
1053
1054 setup_APIC_timer();
1055 amd_e400_c1e_apic_setup();
1056 }
1057
1058 void setup_secondary_APIC_clock(void)
1059 {
1060 setup_APIC_timer();
1061 amd_e400_c1e_apic_setup();
1062 }
1063
1064
1065
1066
1067 static void local_apic_timer_interrupt(void)
1068 {
1069 struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 if (!evt->event_handler) {
1083 pr_warn("Spurious LAPIC timer interrupt on cpu %d\n",
1084 smp_processor_id());
1085
1086 lapic_timer_shutdown(evt);
1087 return;
1088 }
1089
1090
1091
1092
1093 inc_irq_stat(apic_timer_irqs);
1094
1095 evt->event_handler(evt);
1096 }
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106 DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
1107 {
1108 struct pt_regs *old_regs = set_irq_regs(regs);
1109
1110 ack_APIC_irq();
1111 trace_local_timer_entry(LOCAL_TIMER_VECTOR);
1112 local_apic_timer_interrupt();
1113 trace_local_timer_exit(LOCAL_TIMER_VECTOR);
1114
1115 set_irq_regs(old_regs);
1116 }
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
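/*
 * Mask and then clear all local APIC LVT entries before the APIC is
 * disabled or reprogrammed.
 */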
1129 void clear_local_APIC(void)
1130 {
1131 int maxlvt;
1132 u32 v;
1133
1134
1135 if (!x2apic_mode && !apic_phys)
1136 return;
1137
1138 maxlvt = lapic_get_maxlvt();
1139
1140
1141
1142
1143 if (maxlvt >= 3) {
1144 v = ERROR_APIC_VECTOR;
1145 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
1146 }
1147
1148
1149
1150
1151 v = apic_read(APIC_LVTT);
1152 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
1153 v = apic_read(APIC_LVT0);
1154 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1155 v = apic_read(APIC_LVT1);
1156 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
1157 if (maxlvt >= 4) {
1158 v = apic_read(APIC_LVTPC);
1159 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
1160 }
1161
1162
1163 #ifdef CONFIG_X86_THERMAL_VECTOR
1164 if (maxlvt >= 5) {
1165 v = apic_read(APIC_LVTTHMR);
1166 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
1167 }
1168 #endif
1169 #ifdef CONFIG_X86_MCE_INTEL
1170 if (maxlvt >= 6) {
1171 v = apic_read(APIC_LVTCMCI);
1172 if (!(v & APIC_LVT_MASKED))
1173 apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
1174 }
1175 #endif
1176
1177
1178
1179
1180 apic_write(APIC_LVTT, APIC_LVT_MASKED);
1181 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1182 apic_write(APIC_LVT1, APIC_LVT_MASKED);
1183 if (maxlvt >= 3)
1184 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
1185 if (maxlvt >= 4)
1186 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
1187
1188
1189 if (lapic_is_integrated()) {
1190 if (maxlvt > 3)
1191
1192 apic_write(APIC_ESR, 0);
1193 apic_read(APIC_ESR);
1194 }
1195 }
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208 void apic_soft_disable(void)
1209 {
1210 u32 value;
1211
1212 clear_local_APIC();
1213
1214
1215 value = apic_read(APIC_SPIV);
1216 value &= ~APIC_SPIV_APIC_ENABLED;
1217 apic_write(APIC_SPIV, value);
1218 }
1219
1220
1221
1222
1223 void disable_local_APIC(void)
1224 {
1225
1226 if (!x2apic_mode && !apic_phys)
1227 return;
1228
1229 apic_soft_disable();
1230
1231 #ifdef CONFIG_X86_32
1232
1233
1234
1235
1236 if (enabled_via_apicbase) {
1237 unsigned int l, h;
1238
1239 rdmsr(MSR_IA32_APICBASE, l, h);
1240 l &= ~MSR_IA32_APICBASE_ENABLE;
1241 wrmsr(MSR_IA32_APICBASE, l, h);
1242 }
1243 #endif
1244 }
1245
1246
1247
1248
1249
1250
1251
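/* Cleanly shut down the local APIC with interrupts disabled */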
1252 void lapic_shutdown(void)
1253 {
1254 unsigned long flags;
1255
1256 if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
1257 return;
1258
1259 local_irq_save(flags);
1260
1261 #ifdef CONFIG_X86_32
1262 if (!enabled_via_apicbase)
1263 clear_local_APIC();
1264 else
1265 #endif
1266 disable_local_APIC();
1267
1268
1269 local_irq_restore(flags);
1270 }
1271
1272
1273
1274
1275 void __init sync_Arb_IDs(void)
1276 {
1277
1278
1279
1280
1281 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1282 return;
1283
1284
1285
1286
1287 apic_wait_icr_idle();
1288
1289 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
1290 apic_write(APIC_ICR, APIC_DEST_ALLINC |
1291 APIC_INT_LEVELTRIG | APIC_DM_INIT);
1292 }
1293
1294 enum apic_intr_mode_id apic_intr_mode __ro_after_init;
1295
1296 static int __init __apic_intr_mode_select(void)
1297 {
1298
1299 if (disable_apic) {
1300 pr_info("APIC disabled via kernel command line\n");
1301 return APIC_PIC;
1302 }
1303
1304
1305 #ifdef CONFIG_X86_64
1306
1307 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1308 disable_apic = 1;
1309 pr_info("APIC disabled by BIOS\n");
1310 return APIC_PIC;
1311 }
1312 #else
1313
1314
1315
1316 if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
1317 disable_apic = 1;
1318 return APIC_PIC;
1319 }
1320
1321
1322 if (!boot_cpu_has(X86_FEATURE_APIC) &&
1323 APIC_INTEGRATED(boot_cpu_apic_version)) {
1324 disable_apic = 1;
1325 pr_err(FW_BUG "Local APIC %d not detected, force emulation\n",
1326 boot_cpu_physical_apicid);
1327 return APIC_PIC;
1328 }
1329 #endif
1330
1331
1332 if (!smp_found_config) {
1333 disable_ioapic_support();
1334 if (!acpi_lapic) {
1335 pr_info("APIC: ACPI MADT or MP tables are not detected\n");
1336 return APIC_VIRTUAL_WIRE_NO_CONFIG;
1337 }
1338 return APIC_VIRTUAL_WIRE;
1339 }
1340
1341 #ifdef CONFIG_SMP
1342
1343 if (!setup_max_cpus) {
1344 pr_info("APIC: SMP mode deactivated\n");
1345 return APIC_SYMMETRIC_IO_NO_ROUTING;
1346 }
1347
1348 if (read_apic_id() != boot_cpu_physical_apicid) {
1349 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1350 read_apic_id(), boot_cpu_physical_apicid);
1351
1352 }
1353 #endif
1354
1355 return APIC_SYMMETRIC_IO;
1356 }
1357
1358
1359 void __init apic_intr_mode_select(void)
1360 {
1361 apic_intr_mode = __apic_intr_mode_select();
1362 }
1363
1364
1365
1366
1367 void __init init_bsp_APIC(void)
1368 {
1369 unsigned int value;
1370
1371
1372
1373
1374
1375 if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
1376 return;
1377
1378
1379
1380
1381 clear_local_APIC();
1382
1383
1384
1385
1386 value = apic_read(APIC_SPIV);
1387 value &= ~APIC_VECTOR_MASK;
1388 value |= APIC_SPIV_APIC_ENABLED;
1389
1390 #ifdef CONFIG_X86_32
1391
1392 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1393 (boot_cpu_data.x86 == 15))
1394 value &= ~APIC_SPIV_FOCUS_DISABLED;
1395 else
1396 #endif
1397 value |= APIC_SPIV_FOCUS_DISABLED;
1398 value |= SPURIOUS_APIC_VECTOR;
1399 apic_write(APIC_SPIV, value);
1400
1401
1402
1403
1404 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1405 value = APIC_DM_NMI;
1406 if (!lapic_is_integrated())
1407 value |= APIC_LVT_LEVEL_TRIGGER;
1408 if (apic_extnmi == APIC_EXTNMI_NONE)
1409 value |= APIC_LVT_MASKED;
1410 apic_write(APIC_LVT1, value);
1411 }
1412
1413 static void __init apic_bsp_setup(bool upmode);
1414
1415
1416 void __init apic_intr_mode_init(void)
1417 {
1418 bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
1419
1420 switch (apic_intr_mode) {
1421 case APIC_PIC:
1422 pr_info("APIC: Keep in PIC mode(8259)\n");
1423 return;
1424 case APIC_VIRTUAL_WIRE:
1425 pr_info("APIC: Switch to virtual wire mode setup\n");
1426 break;
1427 case APIC_VIRTUAL_WIRE_NO_CONFIG:
1428 pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
1429 upmode = true;
1430 break;
1431 case APIC_SYMMETRIC_IO:
1432 pr_info("APIC: Switch to symmetric I/O mode setup\n");
1433 break;
1434 case APIC_SYMMETRIC_IO_NO_ROUTING:
1435 pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
1436 break;
1437 }
1438
1439 default_setup_apic_routing();
1440
1441 if (x86_platform.apic_post_init)
1442 x86_platform.apic_post_init();
1443
1444 apic_bsp_setup(upmode);
1445 }
1446
1447 static void lapic_setup_esr(void)
1448 {
1449 unsigned int oldvalue, value, maxlvt;
1450
1451 if (!lapic_is_integrated()) {
1452 pr_info("No ESR for 82489DX.\n");
1453 return;
1454 }
1455
1456 if (apic->disable_esr) {
1457
1458
1459
1460
1461
1462
1463 pr_info("Leaving ESR disabled.\n");
1464 return;
1465 }
1466
1467 maxlvt = lapic_get_maxlvt();
1468 if (maxlvt > 3)
1469 apic_write(APIC_ESR, 0);
1470 oldvalue = apic_read(APIC_ESR);
1471
1472
1473 value = ERROR_APIC_VECTOR;
1474 apic_write(APIC_LVTERR, value);
1475
1476
1477
1478
1479 if (maxlvt > 3)
1480 apic_write(APIC_ESR, 0);
1481 value = apic_read(APIC_ESR);
1482 if (value != oldvalue)
1483 apic_printk(APIC_VERBOSE, "ESR value before enabling "
1484 "vector: 0x%08x after: 0x%08x\n",
1485 oldvalue, value);
1486 }
1487
1488 #define APIC_IR_REGS APIC_ISR_NR
1489 #define APIC_IR_BITS (APIC_IR_REGS * 32)
1490 #define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
1491
1492 union apic_ir {
1493 unsigned long map[APIC_IR_MAPSIZE];
1494 u32 regs[APIC_IR_REGS];
1495 };
1496
1497 static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
1498 {
1499 int i, bit;
1500
1501
1502 for (i = 0; i < APIC_IR_REGS; i++)
1503 irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
1504
1505
1506 for (i = 0; i < APIC_IR_REGS; i++)
1507 isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
1508
1509
1510
1511
1512
1513
1514 if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
1515
1516
1517
1518
1519
1520 for_each_set_bit(bit, isr->map, APIC_IR_BITS)
1521 ack_APIC_irq();
1522 return true;
1523 }
1524
1525 return !bitmap_empty(irr->map, APIC_IR_BITS);
1526 }
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
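/*
 * After a crash or kexec the APIC can still have interrupts queued up.
 * Ack whatever is left in the ISR and stop once the IRR is empty, giving
 * up (with a warning) after a bounded number of iterations.
 */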
1542 static void apic_pending_intr_clear(void)
1543 {
1544 union apic_ir irr, isr;
1545 unsigned int i;
1546
1547
1548 for (i = 0; i < 512; i++) {
1549 if (!apic_check_and_ack(&irr, &isr))
1550 return;
1551 }
1552
1553 pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
1554 }
1555
1556
1557
1558
1559
1560
1561
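/*
 * Program this CPU's local APIC: logical destination, task priority,
 * spurious interrupt vector, LVT0/LVT1, and clear stale pending interrupts.
 */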
1562 static void setup_local_APIC(void)
1563 {
1564 int cpu = smp_processor_id();
1565 unsigned int value;
1566
1567 if (disable_apic) {
1568 disable_ioapic_support();
1569 return;
1570 }
1571
1572
1573
1574
1575
1576 value = apic_read(APIC_SPIV);
1577 value &= ~APIC_SPIV_APIC_ENABLED;
1578 apic_write(APIC_SPIV, value);
1579
1580 #ifdef CONFIG_X86_32
1581
1582 if (lapic_is_integrated() && apic->disable_esr) {
1583 apic_write(APIC_ESR, 0);
1584 apic_write(APIC_ESR, 0);
1585 apic_write(APIC_ESR, 0);
1586 apic_write(APIC_ESR, 0);
1587 }
1588 #endif
1589
1590
1591
1592
1593 BUG_ON(!apic->apic_id_registered());
1594
1595
1596
1597
1598
1599
1600 apic->init_apic_ldr();
1601
1602 #ifdef CONFIG_X86_32
1603 if (apic->dest_mode_logical) {
1604 int logical_apicid, ldr_apicid;
1605
1606
1607
1608
1609
1610
1611 logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
1612 ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1613 if (logical_apicid != BAD_APICID)
1614 WARN_ON(logical_apicid != ldr_apicid);
1615
1616 early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
1617 }
1618 #endif
1619
1620
1621
1622
1623
1624
1625
1626 value = apic_read(APIC_TASKPRI);
1627 value &= ~APIC_TPRI_MASK;
1628 value |= 0x10;
1629 apic_write(APIC_TASKPRI, value);
1630
1631
1632 apic_pending_intr_clear();
1633
1634
1635
1636
1637 value = apic_read(APIC_SPIV);
1638 value &= ~APIC_VECTOR_MASK;
1639
1640
1641
1642 value |= APIC_SPIV_APIC_ENABLED;
1643
1644 #ifdef CONFIG_X86_32
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669 value &= ~APIC_SPIV_FOCUS_DISABLED;
1670 #endif
1671
1672
1673
1674
1675 value |= SPURIOUS_APIC_VECTOR;
1676 apic_write(APIC_SPIV, value);
1677
1678 perf_events_lapic_init();
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1691 if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1692 value = APIC_DM_EXTINT;
1693 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1694 } else {
1695 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1696 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
1697 }
1698 apic_write(APIC_LVT0, value);
1699
1700
1701
1702
1703
1704 if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
1705 apic_extnmi == APIC_EXTNMI_ALL)
1706 value = APIC_DM_NMI;
1707 else
1708 value = APIC_DM_NMI | APIC_LVT_MASKED;
1709
1710
1711 if (!lapic_is_integrated())
1712 value |= APIC_LVT_LEVEL_TRIGGER;
1713 apic_write(APIC_LVT1, value);
1714
1715 #ifdef CONFIG_X86_MCE_INTEL
1716
1717 if (!cpu)
1718 cmci_recheck();
1719 #endif
1720 }
1721
1722 static void end_local_APIC_setup(void)
1723 {
1724 lapic_setup_esr();
1725
1726 #ifdef CONFIG_X86_32
1727 {
1728 unsigned int value;
1729
1730 value = apic_read(APIC_LVTT);
1731 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1732 apic_write(APIC_LVTT, value);
1733 }
1734 #endif
1735
1736 apic_pm_activate();
1737 }
1738
1739
1740
1741
1742 void apic_ap_setup(void)
1743 {
1744 setup_local_APIC();
1745 end_local_APIC_setup();
1746 }
1747
1748 #ifdef CONFIG_X86_X2APIC
1749 int x2apic_mode;
1750 EXPORT_SYMBOL_GPL(x2apic_mode);
1751
1752 enum {
1753 X2APIC_OFF,
1754 X2APIC_ON,
1755 X2APIC_DISABLED,
1756 };
1757 static int x2apic_state;
1758
1759 static void __x2apic_disable(void)
1760 {
1761 u64 msr;
1762
1763 if (!boot_cpu_has(X86_FEATURE_APIC))
1764 return;
1765
1766 rdmsrl(MSR_IA32_APICBASE, msr);
1767 if (!(msr & X2APIC_ENABLE))
1768 return;
1769
1770 wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1771 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1772 printk_once(KERN_INFO "x2apic disabled\n");
1773 }
1774
1775 static void __x2apic_enable(void)
1776 {
1777 u64 msr;
1778
1779 rdmsrl(MSR_IA32_APICBASE, msr);
1780 if (msr & X2APIC_ENABLE)
1781 return;
1782 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
1783 printk_once(KERN_INFO "x2apic enabled\n");
1784 }
1785
1786 static int __init setup_nox2apic(char *str)
1787 {
1788 if (x2apic_enabled()) {
1789 int apicid = native_apic_msr_read(APIC_ID);
1790
1791 if (apicid >= 255) {
1792 pr_warn("Apicid: %08x, cannot enforce nox2apic\n",
1793 apicid);
1794 return 0;
1795 }
1796 pr_warn("x2apic already enabled.\n");
1797 __x2apic_disable();
1798 }
1799 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
1800 x2apic_state = X2APIC_DISABLED;
1801 x2apic_mode = 0;
1802 return 0;
1803 }
1804 early_param("nox2apic", setup_nox2apic);
1805
1806
1807 void x2apic_setup(void)
1808 {
1809
1810
1811
1812
1813 if (x2apic_state != X2APIC_ON) {
1814 __x2apic_disable();
1815 return;
1816 }
1817 __x2apic_enable();
1818 }
1819
1820 static __init void x2apic_disable(void)
1821 {
1822 u32 x2apic_id, state = x2apic_state;
1823
1824 x2apic_mode = 0;
1825 x2apic_state = X2APIC_DISABLED;
1826
1827 if (state != X2APIC_ON)
1828 return;
1829
1830 x2apic_id = read_apic_id();
1831 if (x2apic_id >= 255)
1832 panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
1833
1834 __x2apic_disable();
1835 register_lapic_address(mp_lapic_addr);
1836 }
1837
1838 static __init void x2apic_enable(void)
1839 {
1840 if (x2apic_state != X2APIC_OFF)
1841 return;
1842
1843 x2apic_mode = 1;
1844 x2apic_state = X2APIC_ON;
1845 __x2apic_enable();
1846 }
1847
1848 static __init void try_to_enable_x2apic(int remap_mode)
1849 {
1850 if (x2apic_state == X2APIC_DISABLED)
1851 return;
1852
1853 if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
1854 u32 apic_limit = 255;
1855
1856
1857
1858
1859
1860 if (!x86_init.hyper.x2apic_available()) {
1861 pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
1862 x2apic_disable();
1863 return;
1864 }
1865
1866
1867
1868
1869
1870
1871 if (x86_init.hyper.msi_ext_dest_id()) {
1872 virt_ext_dest_id = 1;
1873 apic_limit = 32767;
1874 }
1875
1876
1877
1878
1879
1880
1881 x2apic_set_max_apicid(apic_limit);
1882 x2apic_phys = 1;
1883 }
1884 x2apic_enable();
1885 }
1886
1887 void __init check_x2apic(void)
1888 {
1889 if (x2apic_enabled()) {
1890 pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
1891 x2apic_mode = 1;
1892 x2apic_state = X2APIC_ON;
1893 } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
1894 x2apic_state = X2APIC_DISABLED;
1895 }
1896 }
1897 #else
1898 static int __init validate_x2apic(void)
1899 {
1900 if (!apic_is_x2apic_enabled())
1901 return 0;
1902
1903
1904
1905 panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
1906 }
1907 early_initcall(validate_x2apic);
1908
1909 static inline void try_to_enable_x2apic(int remap_mode) { }
1910 static inline void __x2apic_enable(void) { }
1911 #endif
1912
1913 void __init enable_IR_x2apic(void)
1914 {
1915 unsigned long flags;
1916 int ret, ir_stat;
1917
1918 if (skip_ioapic_setup) {
1919 pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1920 return;
1921 }
1922
1923 ir_stat = irq_remapping_prepare();
1924 if (ir_stat < 0 && !x2apic_supported())
1925 return;
1926
1927 ret = save_ioapic_entries();
1928 if (ret) {
1929 pr_info("Saving IO-APIC state failed: %d\n", ret);
1930 return;
1931 }
1932
1933 local_irq_save(flags);
1934 legacy_pic->mask_all();
1935 mask_ioapic_entries();
1936
1937
1938 if (ir_stat >= 0)
1939 ir_stat = irq_remapping_enable();
1940
1941 try_to_enable_x2apic(ir_stat);
1942
1943 if (ir_stat < 0)
1944 restore_ioapic_entries();
1945 legacy_pic->restore_mask();
1946 local_irq_restore(flags);
1947 }
1948
1949 #ifdef CONFIG_X86_64
1950
1951
1952
1953
1954
1955
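/*
 * Detect the local APIC on 64-bit: it is mandatory there, so just check
 * the CPUID flag and record the architectural default base address.
 */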
1956 static int __init detect_init_APIC(void)
1957 {
1958 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1959 pr_info("No local APIC present\n");
1960 return -1;
1961 }
1962
1963 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1964 return 0;
1965 }
1966 #else
1967
1968 static int __init apic_verify(void)
1969 {
1970 u32 features, h, l;
1971
1972
1973
1974
1975
1976 features = cpuid_edx(1);
1977 if (!(features & (1 << X86_FEATURE_APIC))) {
1978 pr_warn("Could not enable APIC!\n");
1979 return -1;
1980 }
1981 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1982 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1983
1984
1985 if (boot_cpu_data.x86 >= 6) {
1986 rdmsr(MSR_IA32_APICBASE, l, h);
1987 if (l & MSR_IA32_APICBASE_ENABLE)
1988 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1989 }
1990
1991 pr_info("Found and enabled local APIC!\n");
1992 return 0;
1993 }
1994
1995 int __init apic_force_enable(unsigned long addr)
1996 {
1997 u32 h, l;
1998
1999 if (disable_apic)
2000 return -1;
2001
2002
2003
2004
2005
2006
2007 if (boot_cpu_data.x86 >= 6) {
2008 rdmsr(MSR_IA32_APICBASE, l, h);
2009 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
2010 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
2011 l &= ~MSR_IA32_APICBASE_BASE;
2012 l |= MSR_IA32_APICBASE_ENABLE | addr;
2013 wrmsr(MSR_IA32_APICBASE, l, h);
2014 enabled_via_apicbase = 1;
2015 }
2016 }
2017 return apic_verify();
2018 }
2019
2020
2021
2022
2023 static int __init detect_init_APIC(void)
2024 {
2025
2026 if (disable_apic)
2027 return -1;
2028
2029 switch (boot_cpu_data.x86_vendor) {
2030 case X86_VENDOR_AMD:
2031 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
2032 (boot_cpu_data.x86 >= 15))
2033 break;
2034 goto no_apic;
2035 case X86_VENDOR_HYGON:
2036 break;
2037 case X86_VENDOR_INTEL:
2038 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
2039 (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
2040 break;
2041 goto no_apic;
2042 default:
2043 goto no_apic;
2044 }
2045
2046 if (!boot_cpu_has(X86_FEATURE_APIC)) {
2047
2048
2049
2050
2051 if (!force_enable_local_apic) {
2052 pr_info("Local APIC disabled by BIOS -- "
2053 "you can enable it with \"lapic\"\n");
2054 return -1;
2055 }
2056 if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
2057 return -1;
2058 } else {
2059 if (apic_verify())
2060 return -1;
2061 }
2062
2063 apic_pm_activate();
2064
2065 return 0;
2066
2067 no_apic:
2068 pr_info("No local APIC present or hardware disabled\n");
2069 return -1;
2070 }
2071 #endif
2072
2073
2074
2075
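/*
 * Map (or detect) the local APIC and read back the boot CPU's APIC ID and
 * version.
 */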
2076 void __init init_apic_mappings(void)
2077 {
2078 unsigned int new_apicid;
2079
2080 if (apic_validate_deadline_timer())
2081 pr_info("TSC deadline timer available\n");
2082
2083 if (x2apic_mode) {
2084 boot_cpu_physical_apicid = read_apic_id();
2085 return;
2086 }
2087
2088
2089 if (!smp_found_config && detect_init_APIC()) {
2090
2091 pr_info("APIC: disable apic facility\n");
2092 apic_disable();
2093 } else {
2094 apic_phys = mp_lapic_addr;
2095
2096
2097
2098
2099
2100 if (!acpi_lapic && !smp_found_config)
2101 register_lapic_address(apic_phys);
2102 }
2103
2104
2105
2106
2107
2108 new_apicid = read_apic_id();
2109 if (boot_cpu_physical_apicid != new_apicid) {
2110 boot_cpu_physical_apicid = new_apicid;
2111
2112
2113
2114
2115
2116
2117
2118 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2119 }
2120 }
2121
2122 void __init register_lapic_address(unsigned long address)
2123 {
2124 mp_lapic_addr = address;
2125
2126 if (!x2apic_mode) {
2127 set_fixmap_nocache(FIX_APIC_BASE, address);
2128 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
2129 APIC_BASE, address);
2130 }
2131 if (boot_cpu_physical_apicid == -1U) {
2132 boot_cpu_physical_apicid = read_apic_id();
2133 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2134 }
2135 }
2136
2137
2138
2139
2140
2141
2142
2143
2144
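/*
 * Common spurious interrupt handler: account the event and ack the APIC
 * only if the vector is actually set in the ISR.
 */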
2145 static noinline void handle_spurious_interrupt(u8 vector)
2146 {
2147 u32 v;
2148
2149 trace_spurious_apic_entry(vector);
2150
2151 inc_irq_stat(irq_spurious_count);
2152
2153
2154
2155
2156 if (vector == SPURIOUS_APIC_VECTOR) {
2157
2158 pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
2159 smp_processor_id());
2160 goto out;
2161 }
2162
2163
2164
2165
2166
2167 v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
2168 if (v & (1 << (vector & 0x1f))) {
2169 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
2170 vector, smp_processor_id());
2171 ack_APIC_irq();
2172 } else {
2173 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
2174 vector, smp_processor_id());
2175 }
2176 out:
2177 trace_spurious_apic_exit(vector);
2178 }
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189 DEFINE_IDTENTRY_IRQ(spurious_interrupt)
2190 {
2191 handle_spurious_interrupt(vector);
2192 }
2193
2194 DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
2195 {
2196 handle_spurious_interrupt(SPURIOUS_APIC_VECTOR);
2197 }
2198
2199
2200
2201
2202 DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
2203 {
2204 static const char * const error_interrupt_reason[] = {
2205 "Send CS error",
2206 "Receive CS error",
2207 "Send accept error",
2208 "Receive accept error",
2209 "Redirectable IPI",
2210 "Send illegal vector",
2211 "Received illegal vector",
2212 "Illegal register address",
2213 };
2214 u32 v, i = 0;
2215
2216 trace_error_apic_entry(ERROR_APIC_VECTOR);
2217
2218
2219 if (lapic_get_maxlvt() > 3)
2220 apic_write(APIC_ESR, 0);
2221 v = apic_read(APIC_ESR);
2222 ack_APIC_irq();
2223 atomic_inc(&irq_err_count);
2224
2225 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
2226 smp_processor_id(), v);
2227
2228 v &= 0xff;
2229 while (v) {
2230 if (v & 0x1)
2231 apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
2232 i++;
2233 v >>= 1;
2234 }
2235
2236 apic_printk(APIC_DEBUG, KERN_CONT "\n");
2237
2238 trace_error_apic_exit(ERROR_APIC_VECTOR);
2239 }
2240
2241
2242
2243
2244 static void __init connect_bsp_APIC(void)
2245 {
2246 #ifdef CONFIG_X86_32
2247 if (pic_mode) {
2248
2249
2250
2251 clear_local_APIC();
2252
2253
2254
2255
2256 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
2257 "enabling APIC mode.\n");
2258 imcr_pic_to_apic();
2259 }
2260 #endif
2261 }
2262
2263
2264
2265
2266
2267
2268
2269
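/*
 * Undo the local APIC setup on the BSP: on 32-bit PIC-mode systems switch
 * the IMCR back to the 8259, otherwise restore virtual wire mode on
 * LVT0/LVT1.
 */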
2270 void disconnect_bsp_APIC(int virt_wire_setup)
2271 {
2272 unsigned int value;
2273
2274 #ifdef CONFIG_X86_32
2275 if (pic_mode) {
2276
2277
2278
2279
2280
2281
2282 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
2283 "entering PIC mode.\n");
2284 imcr_apic_to_pic();
2285 return;
2286 }
2287 #endif
2288
2289
2290
2291
2292 value = apic_read(APIC_SPIV);
2293 value &= ~APIC_VECTOR_MASK;
2294 value |= APIC_SPIV_APIC_ENABLED;
2295 value |= 0xf;
2296 apic_write(APIC_SPIV, value);
2297
2298 if (!virt_wire_setup) {
2299
2300
2301
2302
2303 value = apic_read(APIC_LVT0);
2304 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2305 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2306 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2307 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2308 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
2309 apic_write(APIC_LVT0, value);
2310 } else {
2311
2312 apic_write(APIC_LVT0, APIC_LVT_MASKED);
2313 }
2314
2315
2316
2317
2318
2319 value = apic_read(APIC_LVT1);
2320 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2321 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2322 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2323 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2324 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
2325 apic_write(APIC_LVT1, value);
2326 }
2327
2328
2329
2330
2331
2332
2333
2334
2335
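/*
 * Logical CPU number allocation: CPU 0 is reserved for the boot CPU, every
 * other APIC ID gets the next free id, recorded in cpuid_to_apicid[].
 */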
2336 static int nr_logical_cpuids = 1;
2337
2338
2339
2340
2341 static int cpuid_to_apicid[] = {
2342 [0 ... NR_CPUS - 1] = -1,
2343 };
2344
2345 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
2346 {
2347 return phys_id == cpuid_to_apicid[cpu];
2348 }
2349
2350 #ifdef CONFIG_SMP
2351
2352
2353
2354
2355 bool apic_id_is_primary_thread(unsigned int apicid)
2356 {
2357 u32 mask;
2358
2359 if (smp_num_siblings == 1)
2360 return true;
2361
2362 mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
2363 return !(apicid & mask);
2364 }
2365 #endif
2366
2367
2368
2369
2370
2371 static int allocate_logical_cpuid(int apicid)
2372 {
2373 int i;
2374
2375
2376
2377
2378
2379 for (i = 0; i < nr_logical_cpuids; i++) {
2380 if (cpuid_to_apicid[i] == apicid)
2381 return i;
2382 }
2383
2384
2385 if (nr_logical_cpuids >= nr_cpu_ids) {
2386 WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
2387 "Processor %d/0x%x and the rest are ignored.\n",
2388 nr_cpu_ids, nr_logical_cpuids, apicid);
2389 return -EINVAL;
2390 }
2391
2392 cpuid_to_apicid[nr_logical_cpuids] = apicid;
2393 return nr_logical_cpuids++;
2394 }
2395
2396 int generic_processor_info(int apicid, int version)
2397 {
2398 int cpu, max = nr_cpu_ids;
2399 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
2400 phys_cpu_present_map);
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421 if (disabled_cpu_apicid != BAD_APICID &&
2422 disabled_cpu_apicid != read_apic_id() &&
2423 disabled_cpu_apicid == apicid) {
2424 int thiscpu = num_processors + disabled_cpus;
2425
2426 pr_warn("APIC: Disabling requested cpu."
2427 " Processor %d/0x%x ignored.\n", thiscpu, apicid);
2428
2429 disabled_cpus++;
2430 return -ENODEV;
2431 }
2432
2433
2434
2435
2436
2437 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
2438 apicid != boot_cpu_physical_apicid) {
2439 int thiscpu = max + disabled_cpus - 1;
2440
2441 pr_warn("APIC: NR_CPUS/possible_cpus limit of %i almost"
2442 " reached. Keeping one slot for boot cpu."
2443 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2444
2445 disabled_cpus++;
2446 return -ENODEV;
2447 }
2448
2449 if (num_processors >= nr_cpu_ids) {
2450 int thiscpu = max + disabled_cpus;
2451
2452 pr_warn("APIC: NR_CPUS/possible_cpus limit of %i reached. "
2453 "Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2454
2455 disabled_cpus++;
2456 return -EINVAL;
2457 }
2458
2459 if (apicid == boot_cpu_physical_apicid) {
2460
2461
2462
2463
2464
2465
2466
2467 cpu = 0;
2468
2469
2470 cpuid_to_apicid[0] = apicid;
2471 } else {
2472 cpu = allocate_logical_cpuid(apicid);
2473 if (cpu < 0) {
2474 disabled_cpus++;
2475 return -EINVAL;
2476 }
2477 }
2478
2479
2480
2481
2482 if (version == 0x0) {
2483 pr_warn("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
2484 cpu, apicid);
2485 version = 0x10;
2486 }
2487
2488 if (version != boot_cpu_apic_version) {
2489 pr_warn("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
2490 boot_cpu_apic_version, cpu, version);
2491 }
2492
2493 if (apicid > max_physical_apicid)
2494 max_physical_apicid = apicid;
2495
2496 #if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
2497 early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
2498 early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
2499 #endif
2500 #ifdef CONFIG_X86_32
2501 early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
2502 apic->x86_32_early_logical_apicid(cpu);
2503 #endif
2504 set_cpu_possible(cpu, true);
2505 physid_set(apicid, phys_cpu_present_map);
2506 set_cpu_present(cpu, true);
2507 num_processors++;
2508
2509 return cpu;
2510 }
2511
2512 int hard_smp_processor_id(void)
2513 {
2514 return read_apic_id();
2515 }
2516
2517 void __irq_msi_compose_msg(struct irq_cfg *cfg, struct msi_msg *msg,
2518 bool dmar)
2519 {
2520 memset(msg, 0, sizeof(*msg));
2521
2522 msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
2523 msg->arch_addr_lo.dest_mode_logical = apic->dest_mode_logical;
2524 msg->arch_addr_lo.destid_0_7 = cfg->dest_apicid & 0xFF;
2525
2526 msg->arch_data.delivery_mode = APIC_DELIVERY_MODE_FIXED;
2527 msg->arch_data.vector = cfg->vector;
2528
2529 msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539 if (dmar)
2540 msg->arch_addr_hi.destid_8_31 = cfg->dest_apicid >> 8;
2541 else if (virt_ext_dest_id && cfg->dest_apicid < 0x8000)
2542 msg->arch_addr_lo.virt_destid_8_14 = cfg->dest_apicid >> 8;
2543 else
2544 WARN_ON_ONCE(cfg->dest_apicid > 0xFF);
2545 }
2546
2547 u32 x86_msi_msg_get_destid(struct msi_msg *msg, bool extid)
2548 {
2549 u32 dest = msg->arch_addr_lo.destid_0_7;
2550
2551 if (extid)
2552 dest |= msg->arch_addr_hi.destid_8_31 << 8;
2553 return dest;
2554 }
2555 EXPORT_SYMBOL_GPL(x86_msi_msg_get_destid);
2556
2557 #ifdef CONFIG_X86_64
2558 void __init acpi_wake_cpu_handler_update(wakeup_cpu_handler handler)
2559 {
2560 struct apic **drv;
2561
2562 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++)
2563 (*drv)->wakeup_secondary_cpu_64 = handler;
2564 }
2565 #endif
2566
2567
2568
2569
2570
2571
2572
2573 void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
2574 {
2575 struct apic **drv;
2576
2577 for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
2578
2579 WARN_ON((*drv)->eoi_write == eoi_write);
2580 (*drv)->native_eoi_write = (*drv)->eoi_write;
2581 (*drv)->eoi_write = eoi_write;
2582 }
2583 }
2584
2585 static void __init apic_bsp_up_setup(void)
2586 {
2587 #ifdef CONFIG_X86_64
2588 apic_write(APIC_ID, apic->set_apic_id(boot_cpu_physical_apicid));
2589 #else
2590
2591
2592
2593
2594
2595 # ifdef CONFIG_CRASH_DUMP
2596 boot_cpu_physical_apicid = read_apic_id();
2597 # endif
2598 #endif
2599 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
2600 }
2601
2602
2603
2604
2605
2606 static void __init apic_bsp_setup(bool upmode)
2607 {
2608 connect_bsp_APIC();
2609 if (upmode)
2610 apic_bsp_up_setup();
2611 setup_local_APIC();
2612
2613 enable_IO_APIC();
2614 end_local_APIC_setup();
2615 irq_remap_enable_fault_handling();
2616 setup_IO_APIC();
2617 lapic_update_legacy_vectors();
2618 }
2619
2620 #ifdef CONFIG_UP_LATE_INIT
2621 void __init up_late_init(void)
2622 {
2623 if (apic_intr_mode == APIC_PIC)
2624 return;
2625
2626
2627 x86_init.timers.setup_percpu_clockev();
2628 }
2629 #endif
2630
2631
2632
2633
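/* Power management: save and restore the local APIC state across suspend/resume */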
2634 #ifdef CONFIG_PM
2635
2636 static struct {
2637
2638
2639
2640
2641
2642 int active;
2643
2644 unsigned int apic_id;
2645 unsigned int apic_taskpri;
2646 unsigned int apic_ldr;
2647 unsigned int apic_dfr;
2648 unsigned int apic_spiv;
2649 unsigned int apic_lvtt;
2650 unsigned int apic_lvtpc;
2651 unsigned int apic_lvt0;
2652 unsigned int apic_lvt1;
2653 unsigned int apic_lvterr;
2654 unsigned int apic_tmict;
2655 unsigned int apic_tdcr;
2656 unsigned int apic_thmr;
2657 unsigned int apic_cmci;
2658 } apic_pm_state;
2659
2660 static int lapic_suspend(void)
2661 {
2662 unsigned long flags;
2663 int maxlvt;
2664
2665 if (!apic_pm_state.active)
2666 return 0;
2667
2668 maxlvt = lapic_get_maxlvt();
2669
2670 apic_pm_state.apic_id = apic_read(APIC_ID);
2671 apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
2672 apic_pm_state.apic_ldr = apic_read(APIC_LDR);
2673 apic_pm_state.apic_dfr = apic_read(APIC_DFR);
2674 apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
2675 apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
2676 if (maxlvt >= 4)
2677 apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
2678 apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
2679 apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
2680 apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
2681 apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
2682 apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
2683 #ifdef CONFIG_X86_THERMAL_VECTOR
2684 if (maxlvt >= 5)
2685 apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
2686 #endif
2687 #ifdef CONFIG_X86_MCE_INTEL
2688 if (maxlvt >= 6)
2689 apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
2690 #endif
2691
2692 local_irq_save(flags);
2693
2694
2695
2696
2697
2698 mask_ioapic_entries();
2699
2700 disable_local_APIC();
2701
2702 irq_remapping_disable();
2703
2704 local_irq_restore(flags);
2705 return 0;
2706 }
2707
2708 static void lapic_resume(void)
2709 {
2710 unsigned int l, h;
2711 unsigned long flags;
2712 int maxlvt;
2713
2714 if (!apic_pm_state.active)
2715 return;
2716
2717 local_irq_save(flags);
2718
2719
2720
2721
2722
2723
2724
2725 mask_ioapic_entries();
2726 legacy_pic->mask_all();
2727
2728 if (x2apic_mode) {
2729 __x2apic_enable();
2730 } else {
2731
2732
2733
2734
2735
2736
2737 if (boot_cpu_data.x86 >= 6) {
2738 rdmsr(MSR_IA32_APICBASE, l, h);
2739 l &= ~MSR_IA32_APICBASE_BASE;
2740 l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
2741 wrmsr(MSR_IA32_APICBASE, l, h);
2742 }
2743 }
2744
2745 maxlvt = lapic_get_maxlvt();
2746 apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
2747 apic_write(APIC_ID, apic_pm_state.apic_id);
2748 apic_write(APIC_DFR, apic_pm_state.apic_dfr);
2749 apic_write(APIC_LDR, apic_pm_state.apic_ldr);
2750 apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
2751 apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
2752 apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
2753 apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
2754 #ifdef CONFIG_X86_THERMAL_VECTOR
2755 if (maxlvt >= 5)
2756 apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
2757 #endif
2758 #ifdef CONFIG_X86_MCE_INTEL
2759 if (maxlvt >= 6)
2760 apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
2761 #endif
2762 if (maxlvt >= 4)
2763 apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
2764 apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
2765 apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
2766 apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
2767 apic_write(APIC_ESR, 0);
2768 apic_read(APIC_ESR);
2769 apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
2770 apic_write(APIC_ESR, 0);
2771 apic_read(APIC_ESR);
2772
2773 irq_remapping_reenable(x2apic_mode);
2774
2775 local_irq_restore(flags);
2776 }
2777
2778
2779
2780
2781
2782
2783 static struct syscore_ops lapic_syscore_ops = {
2784 .resume = lapic_resume,
2785 .suspend = lapic_suspend,
2786 };
2787
2788 static void apic_pm_activate(void)
2789 {
2790 apic_pm_state.active = 1;
2791 }
2792
2793 static int __init init_lapic_sysfs(void)
2794 {
2795
2796 if (boot_cpu_has(X86_FEATURE_APIC))
2797 register_syscore_ops(&lapic_syscore_ops);
2798
2799 return 0;
2800 }
2801
2802
2803 core_initcall(init_lapic_sysfs);
2804
2805 #else
2806
2807 static void apic_pm_activate(void) { }
2808
2809 #endif
2810
2811 #ifdef CONFIG_X86_64
2812
2813 static int multi_checked;
2814 static int multi;
2815
2816 static int set_multi(const struct dmi_system_id *d)
2817 {
2818 if (multi)
2819 return 0;
2820 pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
2821 multi = 1;
2822 return 0;
2823 }
2824
2825 static const struct dmi_system_id multi_dmi_table[] = {
2826 {
2827 .callback = set_multi,
2828 .ident = "IBM System Summit2",
2829 .matches = {
2830 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
2831 DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
2832 },
2833 },
2834 {}
2835 };
2836
2837 static void dmi_check_multi(void)
2838 {
2839 if (multi_checked)
2840 return;
2841
2842 dmi_check_system(multi_dmi_table);
2843 multi_checked = 1;
2844 }
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854 int apic_is_clustered_box(void)
2855 {
2856 dmi_check_multi();
2857 return multi;
2858 }
2859 #endif
2860
2861
2862
2863
2864 static int __init setup_disableapic(char *arg)
2865 {
2866 disable_apic = 1;
2867 setup_clear_cpu_cap(X86_FEATURE_APIC);
2868 return 0;
2869 }
2870 early_param("disableapic", setup_disableapic);
2871
2872
2873 static int __init setup_nolapic(char *arg)
2874 {
2875 return setup_disableapic(arg);
2876 }
2877 early_param("nolapic", setup_nolapic);
2878
2879 static int __init parse_lapic_timer_c2_ok(char *arg)
2880 {
2881 local_apic_timer_c2_ok = 1;
2882 return 0;
2883 }
2884 early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
2885
2886 static int __init parse_disable_apic_timer(char *arg)
2887 {
2888 disable_apic_timer = 1;
2889 return 0;
2890 }
2891 early_param("noapictimer", parse_disable_apic_timer);
2892
2893 static int __init parse_nolapic_timer(char *arg)
2894 {
2895 disable_apic_timer = 1;
2896 return 0;
2897 }
2898 early_param("nolapic_timer", parse_nolapic_timer);
2899
2900 static int __init apic_set_verbosity(char *arg)
2901 {
2902 if (!arg) {
2903 #ifdef CONFIG_X86_64
2904 skip_ioapic_setup = 0;
2905 return 0;
2906 #endif
2907 return -EINVAL;
2908 }
2909
2910 if (strcmp("debug", arg) == 0)
2911 apic_verbosity = APIC_DEBUG;
2912 else if (strcmp("verbose", arg) == 0)
2913 apic_verbosity = APIC_VERBOSE;
2914 #ifdef CONFIG_X86_64
2915 else {
2916 pr_warn("APIC Verbosity level %s not recognised"
2917 " use apic=verbose or apic=debug\n", arg);
2918 return -EINVAL;
2919 }
2920 #endif
2921
2922 return 0;
2923 }
2924 early_param("apic", apic_set_verbosity);
2925
2926 static int __init lapic_insert_resource(void)
2927 {
2928 if (!apic_phys)
2929 return -1;
2930
2931
2932 lapic_resource.start = apic_phys;
2933 lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
2934 insert_resource(&iomem_resource, &lapic_resource);
2935
2936 return 0;
2937 }
2938
2939
2940
2941
2942
2943 late_initcall(lapic_insert_resource);
2944
2945 static int __init apic_set_disabled_cpu_apicid(char *arg)
2946 {
2947 if (!arg || !get_option(&arg, &disabled_cpu_apicid))
2948 return -EINVAL;
2949
2950 return 0;
2951 }
2952 early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
2953
2954 static int __init apic_set_extnmi(char *arg)
2955 {
2956 if (!arg)
2957 return -EINVAL;
2958
2959 if (!strncmp("all", arg, 3))
2960 apic_extnmi = APIC_EXTNMI_ALL;
2961 else if (!strncmp("none", arg, 4))
2962 apic_extnmi = APIC_EXTNMI_NONE;
2963 else if (!strncmp("bsp", arg, 3))
2964 apic_extnmi = APIC_EXTNMI_BSP;
2965 else {
2966 pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
2967 return -EINVAL;
2968 }
2969
2970 return 0;
2971 }
2972 early_param("apic_extnmi", apic_set_extnmi);