/**
 * @file nmi_int.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/oprofile.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "op_counter.h"
#include "op_x86_model.h"

static struct op_x86_model_spec *model;
static DEFINE_PER_CPU(struct op_msrs, cpu_msrs);
static DEFINE_PER_CPU(unsigned long, saved_lvtpc);

/* must be protected with get_online_cpus()/put_online_cpus(): */
static int nmi_enabled;
static int ctr_running;

struct op_counter_config counter_config[OP_MAX_COUNTER];

/* common functions */

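/*
 * Build the event-select (control) MSR value for one counter from its
 * configuration: the INT bit enables the overflow interrupt, USR/OS
 * select the privilege levels to count, the unit mask occupies bits
 * 8-15 and the low event byte bits 0-7.  Extra bits are restricted to
 * INV, EDGE and CMASK, and event bits 8-11 are shifted up to bits
 * 35:32 (the extended event-select field on models whose event_mask
 * allows events wider than 8 bits, e.g. recent AMD CPUs).
 */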
u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
            struct op_counter_config *counter_config)
{
    u64 val = 0;
    u16 event = (u16)counter_config->event;

    val |= ARCH_PERFMON_EVENTSEL_INT;
    val |= counter_config->user ? ARCH_PERFMON_EVENTSEL_USR : 0;
    val |= counter_config->kernel ? ARCH_PERFMON_EVENTSEL_OS : 0;
    val |= (counter_config->unit_mask & 0xFF) << 8;
    counter_config->extra &= (ARCH_PERFMON_EVENTSEL_INV |
                  ARCH_PERFMON_EVENTSEL_EDGE |
                  ARCH_PERFMON_EVENTSEL_CMASK);
    val |= counter_config->extra;
    event &= model->event_mask ? model->event_mask : 0xFF;
    val |= event & 0xFF;
    val |= (u64)(event & 0x0F00) << 24;

    return val;
}


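/*
 * The per-CPU NMI callback.  If the counters are running, let the
 * model check them for overflow and record samples; if profiling is
 * disabled entirely, pass the NMI on to the next handler (NMI_DONE);
 * otherwise the counters are set up but stopped, so silence the
 * source and swallow the NMI (NMI_HANDLED).
 */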
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
    if (ctr_running)
        model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
    else if (!nmi_enabled)
        return NMI_DONE;
    else
        model->stop(this_cpu_ptr(&cpu_msrs));
    return NMI_HANDLED;
}

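/*
 * Snapshot the current counter and control MSR contents so that
 * nmi_cpu_restore_registers() can put them back at shutdown.
 */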
static void nmi_cpu_save_registers(struct op_msrs *msrs)
{
    struct op_msr *counters = msrs->counters;
    struct op_msr *controls = msrs->controls;
    unsigned int i;

    for (i = 0; i < model->num_counters; ++i) {
        if (counters[i].addr)
            rdmsrl(counters[i].addr, counters[i].saved);
    }

    for (i = 0; i < model->num_controls; ++i) {
        if (controls[i].addr)
            rdmsrl(controls[i].addr, controls[i].saved);
    }
}

static void nmi_cpu_start(void *dummy)
{
    struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
    if (!msrs->controls)
        WARN_ON_ONCE(1);
    else
        model->start(msrs);
}

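/*
 * Start/stop run on every CPU via IPI.  ctr_running is flipped under
 * get_online_cpus() and published with smp_mb() so the NMI handler
 * never sees a stale value while counters are being (de)activated.
 */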
static int nmi_start(void)
{
    get_online_cpus();
    ctr_running = 1;
    /* make ctr_running visible to the nmi handler: */
    smp_mb();
    on_each_cpu(nmi_cpu_start, NULL, 1);
    put_online_cpus();
    return 0;
}

static void nmi_cpu_stop(void *dummy)
{
    struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
    if (!msrs->controls)
        WARN_ON_ONCE(1);
    else
        model->stop(msrs);
}

static void nmi_stop(void)
{
    get_online_cpus();
    on_each_cpu(nmi_cpu_stop, NULL, 1);
    ctr_running = 0;
    put_online_cpus();
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

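/*
 * Event multiplexing: the model may expose more virtual counters than
 * the hardware provides.  Each CPU tracks its current window into the
 * virtual counter set in switch_index; nmi_switch_event() rotates all
 * CPUs to the next window, saving and restoring counter values around
 * the switch.
 */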
static DEFINE_PER_CPU(int, switch_index);

static inline int has_mux(void)
{
    return !!model->switch_ctrl;
}

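/*
 * Translate between physical counter slots and virtual counter
 * numbers: phys-to-virt adds this CPU's current window offset,
 * virt-to-phys wraps a virtual number back onto a hardware slot.
 */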
inline int op_x86_phys_to_virt(int phys)
{
    return __this_cpu_read(switch_index) + phys;
}

inline int op_x86_virt_to_phys(int virt)
{
    return virt % model->num_counters;
}

static void nmi_shutdown_mux(void)
{
    int i;

    if (!has_mux())
        return;

    for_each_possible_cpu(i) {
        kfree(per_cpu(cpu_msrs, i).multiplex);
        per_cpu(cpu_msrs, i).multiplex = NULL;
        per_cpu(switch_index, i) = 0;
    }
}

static int nmi_setup_mux(void)
{
    size_t multiplex_size =
        sizeof(struct op_msr) * model->num_virt_counters;
    int i;

    if (!has_mux())
        return 1;

    for_each_possible_cpu(i) {
        per_cpu(cpu_msrs, i).multiplex =
            kzalloc(multiplex_size, GFP_KERNEL);
        if (!per_cpu(cpu_msrs, i).multiplex)
            return 0;
    }

    return 1;
}

static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs)
{
    int i;
    struct op_msr *multiplex = msrs->multiplex;

    if (!has_mux())
        return;

    for (i = 0; i < model->num_virt_counters; ++i) {
        if (counter_config[i].enabled) {
            multiplex[i].saved = -(u64)counter_config[i].count;
        } else {
            multiplex[i].saved = 0;
        }
    }

    per_cpu(switch_index, cpu) = 0;
}

static void nmi_cpu_save_mpx_registers(struct op_msrs *msrs)
{
    struct op_msr *counters = msrs->counters;
    struct op_msr *multiplex = msrs->multiplex;
    int i;

    for (i = 0; i < model->num_counters; ++i) {
        int virt = op_x86_phys_to_virt(i);
        if (counters[i].addr)
            rdmsrl(counters[i].addr, multiplex[virt].saved);
    }
}

static void nmi_cpu_restore_mpx_registers(struct op_msrs *msrs)
{
    struct op_msr *counters = msrs->counters;
    struct op_msr *multiplex = msrs->multiplex;
    int i;

    for (i = 0; i < model->num_counters; ++i) {
        int virt = op_x86_phys_to_virt(i);
        if (counters[i].addr)
            wrmsrl(counters[i].addr, multiplex[virt].saved);
    }
}

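/*
 * Per-CPU rotation step: stop the counters, save the values of the
 * current virtual window, advance switch_index (wrapping to 0 when the
 * next window is out of range or unused), reprogram the controls via
 * the model's switch_ctrl hook, reload the saved values for the new
 * window and restart.
 */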
static void nmi_cpu_switch(void *dummy)
{
    int cpu = smp_processor_id();
    int si = per_cpu(switch_index, cpu);
    struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

    nmi_cpu_stop(NULL);
    nmi_cpu_save_mpx_registers(msrs);

    /* move to next set */
    si += model->num_counters;
    if ((si >= model->num_virt_counters) || (counter_config[si].count == 0))
        per_cpu(switch_index, cpu) = 0;
    else
        per_cpu(switch_index, cpu) = si;

    model->switch_ctrl(model, msrs);
    nmi_cpu_restore_mpx_registers(msrs);

    nmi_cpu_start(NULL);
}


/*
 * Quick check to see if multiplexing is necessary.
 * The check should be sufficient since counters are used
 * in order.
 */
static int nmi_multiplex_on(void)
{
    return counter_config[model->num_counters].count ? 0 : -EINVAL;
}

static int nmi_switch_event(void)
{
    if (!has_mux())
        return -ENOSYS;     /* not implemented */
    if (nmi_multiplex_on() < 0)
        return -EINVAL;     /* not necessary */

    get_online_cpus();
    if (ctr_running)
        on_each_cpu(nmi_cpu_switch, NULL, 1);
    put_online_cpus();

    return 0;
}

static inline void mux_init(struct oprofile_operations *ops)
{
    if (has_mux())
        ops->switch_events = nmi_switch_event;
}

static void mux_clone(int cpu)
{
    if (!has_mux())
        return;

    memcpy(per_cpu(cpu_msrs, cpu).multiplex,
           per_cpu(cpu_msrs, 0).multiplex,
           sizeof(struct op_msr) * model->num_virt_counters);
}

#else

inline int op_x86_phys_to_virt(int phys) { return phys; }
inline int op_x86_virt_to_phys(int virt) { return virt; }
static inline void nmi_shutdown_mux(void) { }
static inline int nmi_setup_mux(void) { return 1; }
static inline void
nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { }
static inline void mux_init(struct oprofile_operations *ops) { }
static void mux_clone(int cpu) { }

#endif

static void free_msrs(void)
{
    int i;
    for_each_possible_cpu(i) {
        kfree(per_cpu(cpu_msrs, i).counters);
        per_cpu(cpu_msrs, i).counters = NULL;
        kfree(per_cpu(cpu_msrs, i).controls);
        per_cpu(cpu_msrs, i).controls = NULL;
    }
    nmi_shutdown_mux();
}

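/*
 * Allocate the per-CPU counter and control shadow arrays (plus, with
 * multiplexing, the virtual counter array).  Any partial allocation is
 * unwound through free_msrs() on failure.
 */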
static int allocate_msrs(void)
{
    size_t controls_size = sizeof(struct op_msr) * model->num_controls;
    size_t counters_size = sizeof(struct op_msr) * model->num_counters;

    int i;
    for_each_possible_cpu(i) {
        per_cpu(cpu_msrs, i).counters = kzalloc(counters_size,
                            GFP_KERNEL);
        if (!per_cpu(cpu_msrs, i).counters)
            goto fail;
        per_cpu(cpu_msrs, i).controls = kzalloc(controls_size,
                            GFP_KERNEL);
        if (!per_cpu(cpu_msrs, i).controls)
            goto fail;
    }

    if (!nmi_setup_mux())
        goto fail;

    return 1;

fail:
    free_msrs();
    return 0;
}

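/*
 * Per-CPU setup: save the current MSR state, program the counters
 * under oprofilefs_lock, and switch the local APIC performance counter
 * interrupt to NMI delivery, remembering the previous LVTPC value for
 * nmi_cpu_shutdown().
 */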
static void nmi_cpu_setup(void)
{
    int cpu = smp_processor_id();
    struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

    nmi_cpu_save_registers(msrs);
    raw_spin_lock(&oprofilefs_lock);
    model->setup_ctrs(model, msrs);
    nmi_cpu_setup_mux(cpu, msrs);
    raw_spin_unlock(&oprofilefs_lock);
    per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC);
    apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static void nmi_cpu_restore_registers(struct op_msrs *msrs)
{
    struct op_msr *counters = msrs->counters;
    struct op_msr *controls = msrs->controls;
    unsigned int i;

    for (i = 0; i < model->num_controls; ++i) {
        if (controls[i].addr)
            wrmsrl(controls[i].addr, controls[i].saved);
    }

    for (i = 0; i < model->num_counters; ++i) {
        if (counters[i].addr)
            wrmsrl(counters[i].addr, counters[i].saved);
    }
}

static void nmi_cpu_shutdown(void)
{
    unsigned int v;
    int cpu = smp_processor_id();
    struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);

    /* Restoring APIC_LVTPC can trigger an APIC error because the
     * delivery mode and vector number combination can be illegal.
     * That's by design: at power-on the APIC LVT entries contain a
     * zero vector number, which is legal only for NMI delivery mode.
     * So mask APIC errors before restoring the LVTPC.
     */
    v = apic_read(APIC_LVTERR);
    apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
    apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu));
    apic_write(APIC_LVTERR, v);
    nmi_cpu_restore_registers(msrs);
}

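/*
 * CPU hotplug callbacks: a CPU coming online is brought in line with
 * the current profiling state (setup and, if needed, start); one going
 * down is stopped and has its APIC and MSR state restored first.
 */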
static int nmi_cpu_online(unsigned int cpu)
{
    local_irq_disable();
    if (nmi_enabled)
        nmi_cpu_setup();
    if (ctr_running)
        nmi_cpu_start(NULL);
    local_irq_enable();
    return 0;
}

static int nmi_cpu_down_prep(unsigned int cpu)
{
    local_irq_disable();
    if (ctr_running)
        nmi_cpu_stop(NULL);
    if (nmi_enabled)
        nmi_cpu_shutdown();
    local_irq_enable();
    return 0;
}

static int nmi_create_files(struct dentry *root)
{
    unsigned int i;

    for (i = 0; i < model->num_virt_counters; ++i) {
        struct dentry *dir;
        char buf[4];

        /* quick little hack to _not_ expose a counter if it is not
         * available for use.  This should protect the userspace app.
         * NOTE:  assumes 1:1 mapping here (that counters are organized
         *        sequentially in their struct assignment).
         */
        if (!avail_to_resrv_perfctr_nmi_bit(op_x86_virt_to_phys(i)))
            continue;

        snprintf(buf,  sizeof(buf), "%d", i);
        dir = oprofilefs_mkdir(root, buf);
        oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
        oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
        oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
        oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
        oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
        oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
        oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
    }

    return 0;
}

static enum cpuhp_state cpuhp_nmi_online;

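/*
 * Bring profiling up: fill in the MSR addresses once on CPU 0, clone
 * them to all other CPUs, register the NMI handler while the counters
 * are still disabled, then install the hotplug callbacks.  Installing
 * the cpuhp state also invokes nmi_cpu_online() on every CPU that is
 * already up, which performs the actual per-CPU setup.
 */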
static int nmi_setup(void)
{
    int err = 0;
    int cpu;

    if (!allocate_msrs())
        return -ENOMEM;

    /* We need to serialize save and setup for HT because the subsets
     * of MSRs are distinct for the save and setup operations.
     */

    /* Assume saved/restored counters are the same on all CPUs */
    err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
    if (err)
        goto fail;

    for_each_possible_cpu(cpu) {
        if (!cpu)
            continue;

        memcpy(per_cpu(cpu_msrs, cpu).counters,
               per_cpu(cpu_msrs, 0).counters,
               sizeof(struct op_msr) * model->num_counters);

        memcpy(per_cpu(cpu_msrs, cpu).controls,
               per_cpu(cpu_msrs, 0).controls,
               sizeof(struct op_msr) * model->num_controls);

        mux_clone(cpu);
    }

    nmi_enabled = 0;
    ctr_running = 0;
    /* make variables visible to the nmi handler: */
    smp_mb();
    err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
                    0, "oprofile");
    if (err)
        goto fail;

    nmi_enabled = 1;
    /* make nmi_enabled visible to the nmi handler: */
    smp_mb();
    err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
                nmi_cpu_online, nmi_cpu_down_prep);
    if (err < 0)
        goto fail_nmi;
    cpuhp_nmi_online = err;
    return 0;
fail_nmi:
    unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
    free_msrs();
    return err;
}

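/*
 * Tear down in reverse: removing the cpuhp state runs
 * nmi_cpu_down_prep() on each online CPU (stopping counters and
 * restoring APIC/MSR state), then the NMI handler is unregistered and
 * the model and MSR arrays are cleaned up.
 */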
static void nmi_shutdown(void)
{
    struct op_msrs *msrs;

    cpuhp_remove_state(cpuhp_nmi_online);
    nmi_enabled = 0;
    ctr_running = 0;

    /* make variables visible to the nmi handler: */
    smp_mb();
    unregister_nmi_handler(NMI_LOCAL, "oprofile");
    msrs = &get_cpu_var(cpu_msrs);
    model->shutdown(msrs);
    free_msrs();
    put_cpu_var(cpu_msrs);
}

#ifdef CONFIG_PM

static int nmi_suspend(void)
{
    /* Only one CPU left, just stop that one */
    if (nmi_enabled == 1)
        nmi_cpu_stop(NULL);
    return 0;
}

static void nmi_resume(void)
{
    if (nmi_enabled == 1)
        nmi_cpu_start(NULL);
}

static struct syscore_ops oprofile_syscore_ops = {
    .resume     = nmi_resume,
    .suspend    = nmi_suspend,
};

static void __init init_suspend_resume(void)
{
    register_syscore_ops(&oprofile_syscore_ops);
}

static void exit_suspend_resume(void)
{
    unregister_syscore_ops(&oprofile_syscore_ops);
}

#else

static inline void init_suspend_resume(void) { }
static inline void exit_suspend_resume(void) { }

#endif /* CONFIG_PM */

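/*
 * Pentium 4 detection: choose the HT or non-HT P4 model spec based on
 * the number of hardware threads per core; more than two siblings is
 * unsupported and profiling reverts to timer mode.
 */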
static int __init p4_init(char **cpu_type)
{
    __u8 cpu_model = boot_cpu_data.x86_model;

    if (cpu_model > 6 || cpu_model == 5)
        return 0;

#ifndef CONFIG_SMP
    *cpu_type = "i386/p4";
    model = &op_p4_spec;
    return 1;
#else
    switch (smp_num_siblings) {
    case 1:
        *cpu_type = "i386/p4";
        model = &op_p4_spec;
        return 1;

    case 2:
        *cpu_type = "i386/p4-ht";
        model = &op_p4_ht2_spec;
        return 1;
    }
#endif

    printk(KERN_INFO "oprofile: P4 HyperThreading detected with > 2 threads\n");
    printk(KERN_INFO "oprofile: Reverting to timer mode.\n");
    return 0;
}

enum __force_cpu_type {
    reserved = 0,       /* do not force */
    timer,
    arch_perfmon,
};

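/*
 * Handler for the "cpu_type" module parameter: "timer" makes
 * op_nmi_init() bail out so the oprofile core falls back to its timer
 * interrupt, "arch_perfmon" forces the architectural perfmon model on
 * Intel P6-class CPUs; anything else leaves auto-detection alone.
 */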
static int force_cpu_type;

static int set_cpu_type(const char *str, struct kernel_param *kp)
{
    if (!strcmp(str, "timer")) {
        force_cpu_type = timer;
        printk(KERN_INFO "oprofile: forcing NMI timer mode\n");
    } else if (!strcmp(str, "arch_perfmon")) {
        force_cpu_type = arch_perfmon;
        printk(KERN_INFO "oprofile: forcing architectural perfmon\n");
    } else {
        force_cpu_type = 0;
    }

    return 0;
}
module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);

static int __init ppro_init(char **cpu_type)
{
    __u8 cpu_model = boot_cpu_data.x86_model;
    struct op_x86_model_spec *spec = &op_ppro_spec; /* default */

    if (force_cpu_type == arch_perfmon && boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
        return 0;

    /*
     * Documentation on identifying Intel processors by CPU family
     * and model can be found in the Intel Software Developer's
     * Manuals (SDM):
     *
     *  http://www.intel.com/products/processor/manuals/
     *
     * As of May 2010 the documentation for this was in the:
     * "Intel 64 and IA-32 Architectures Software Developer's
     * Manual Volume 3B: System Programming Guide", "Table B-1
     * CPUID Signature Values of DisplayFamily_DisplayModel".
     */
    switch (cpu_model) {
    case 0 ... 2:
        *cpu_type = "i386/ppro";
        break;
    case 3 ... 5:
        *cpu_type = "i386/pii";
        break;
    case 6 ... 8:
    case 10 ... 11:
        *cpu_type = "i386/piii";
        break;
    case 9:
    case 13:
        *cpu_type = "i386/p6_mobile";
        break;
    case 14:
        *cpu_type = "i386/core";
        break;
    case 0x0f:
    case 0x16:
    case 0x17:
    case 0x1d:
        *cpu_type = "i386/core_2";
        break;
    case 0x1a:
    case 0x1e:
    case 0x2e:
        spec = &op_arch_perfmon_spec;
        *cpu_type = "i386/core_i7";
        break;
    case 0x1c:
        *cpu_type = "i386/atom";
        break;
    default:
        /* Unknown */
        return 0;
    }

    model = spec;
    return 1;
}

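/*
 * Entry point, called from the oprofile core: detect the CPU vendor
 * and family, select the matching op_x86_model_spec and cpu_type
 * string, wire up the default oprofile_operations, and let the model's
 * init hook override them where needed.
 */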
int __init op_nmi_init(struct oprofile_operations *ops)
{
    __u8 vendor = boot_cpu_data.x86_vendor;
    __u8 family = boot_cpu_data.x86;
    char *cpu_type = NULL;
    int ret = 0;

    if (!boot_cpu_has(X86_FEATURE_APIC))
        return -ENODEV;

    if (force_cpu_type == timer)
        return -ENODEV;

    switch (vendor) {
    case X86_VENDOR_AMD:
        /* Needs to be at least an Athlon (or hammer in 32-bit mode) */

        switch (family) {
        case 6:
            cpu_type = "i386/athlon";
            break;
        case 0xf:
            /*
             * Actually it could be i386/hammer too, but
             * give user space a consistent name.
             */
            cpu_type = "x86-64/hammer";
            break;
        case 0x10:
            cpu_type = "x86-64/family10";
            break;
        case 0x11:
            cpu_type = "x86-64/family11h";
            break;
        case 0x12:
            cpu_type = "x86-64/family12h";
            break;
        case 0x14:
            cpu_type = "x86-64/family14h";
            break;
        case 0x15:
            cpu_type = "x86-64/family15h";
            break;
        default:
            return -ENODEV;
        }
        model = &op_amd_spec;
        break;

    case X86_VENDOR_INTEL:
        switch (family) {
            /* Pentium IV */
        case 0xf:
            p4_init(&cpu_type);
            break;

            /* A P6-class processor */
        case 6:
            ppro_init(&cpu_type);
            break;

        default:
            break;
        }

        if (cpu_type)
            break;

        if (!boot_cpu_has(X86_FEATURE_ARCH_PERFMON))
            return -ENODEV;

        /* use arch perfmon as fallback */
        cpu_type = "i386/arch_perfmon";
        model = &op_arch_perfmon_spec;
        break;

    default:
        return -ENODEV;
    }

    /* default values, can be overwritten by model */
    ops->create_files   = nmi_create_files;
    ops->setup      = nmi_setup;
    ops->shutdown       = nmi_shutdown;
    ops->start      = nmi_start;
    ops->stop       = nmi_stop;
    ops->cpu_type       = cpu_type;

    if (model->init)
        ret = model->init(ops);
    if (ret)
        return ret;

    if (!model->num_virt_counters)
        model->num_virt_counters = model->num_counters;

    mux_init(ops);

    init_suspend_resume();

    printk(KERN_INFO "oprofile: using NMI interrupt.\n");
    return 0;
}

void op_nmi_exit(void)
{
    exit_suspend_resume();
}