#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/stop_machine.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/sclp.h>
#include <asm/smp.h>

unsigned long __read_mostly elf_hwcap;
char elf_platform[ELF_PLATFORM_SIZE];

struct cpu_info {
	unsigned int cpu_mhz_dynamic;
	unsigned int cpu_mhz_static;
	struct cpuid cpu_id;
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
static DEFINE_PER_CPU(int, cpu_relax_retry);

static bool machine_has_cpu_mhz;

void __init cpu_detect_mhz_feature(void)
{
	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
		machine_has_cpu_mhz = true;
}

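/*
 * update_cpu_mhz() - refresh the cached CPU speed of the local CPU.
 * The ECAG CPU-attribute query returns a doubleword with the dynamic
 * (current) speed in the upper 32 bits and the static (rated) speed
 * in the lower 32 bits; the result is split accordingly below.
 */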
static void update_cpu_mhz(void *arg)
{
	unsigned long mhz;
	struct cpu_info *c;

	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
	c = this_cpu_ptr(&cpu_info);
	c->cpu_mhz_dynamic = mhz >> 32;
	c->cpu_mhz_static = mhz & 0xffffffff;
}

void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}

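/*
 * stop_machine_yield() - called while a CPU busy-waits for other CPUs
 * during stop_machine. After spin_retry unsuccessful iterations, pick
 * the next CPU in the mask (wrapping around) and, if its backing vCPU
 * has been preempted by the hypervisor, yield to it.
 */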
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
		if (cpu >= nr_cpu_ids)
			return;
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}

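/*
 * cpu_init() - initialize per-CPU state for the CPU that is coming up:
 * read its CPU id, refresh the MHz information if available, and attach
 * the idle task to init_mm.
 */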
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}

static void show_facilities(struct seq_file *m)
{
	unsigned int bit;

	seq_puts(m, "facilities      :");
	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}

static void show_cpu_summary(struct seq_file *m, void *v)
{
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3]	= "esan3",
		[HWCAP_NR_ZARCH]	= "zarch",
		[HWCAP_NR_STFLE]	= "stfle",
		[HWCAP_NR_MSA]		= "msa",
		[HWCAP_NR_LDISP]	= "ldisp",
		[HWCAP_NR_EIMM]		= "eimm",
		[HWCAP_NR_DFP]		= "dfp",
		[HWCAP_NR_HPAGE]	= "edat",
		[HWCAP_NR_ETF3EH]	= "etf3eh",
		[HWCAP_NR_HIGH_GPRS]	= "highgprs",
		[HWCAP_NR_TE]		= "te",
		[HWCAP_NR_VXRS]		= "vx",
		[HWCAP_NR_VXRS_BCD]	= "vxd",
		[HWCAP_NR_VXRS_EXT]	= "vxe",
		[HWCAP_NR_GS]		= "gs",
		[HWCAP_NR_VXRS_EXT2]	= "vxe2",
		[HWCAP_NR_VXRS_PDE]	= "vxp",
		[HWCAP_NR_SORT]		= "sort",
		[HWCAP_NR_DFLT]		= "dflt",
		[HWCAP_NR_VXRS_PDE2]	= "vxp2",
		[HWCAP_NR_NNPA]		= "nnpa",
		[HWCAP_NR_PCI_MIO]	= "pcimio",
		[HWCAP_NR_SIE]		= "sie",
	};
	int i, cpu;

	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}

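/*
 * setup_hwcaps() - translate installed facilities and machine flags into
 * the ELF hardware capability bits that are reported to user space via
 * AT_HWCAP.
 */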
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;

	/*
	 * Vector extension can be disabled on the command line ("novx"),
	 * therefore check MACHINE_HAS_VX instead of the raw facility bit.
	 */
	if (MACHINE_HAS_VX) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;

	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);

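/*
 * setup_elf_platform() - derive the AT_PLATFORM string from the machine
 * type in the CPU id; unrecognized machine types fall back to "z10".
 * The CPU id is also mixed into the random pool as device randomness.
 */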
static int __init setup_elf_platform(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	default:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	case 0x3931:
	case 0x3932:
		strcpy(elf_platform, "z16");
		break;
	}
	return 0;
}
arch_initcall(setup_elf_platform);

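/*
 * show_cpu_topology() only prints the per-CPU topology fields when
 * CONFIG_SCHED_TOPOLOGY is enabled; otherwise it produces no output.
 */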
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id         : %d\n", topology_core_id(n));
	seq_printf(m, "book id         : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}

static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);

	seq_printf(m, "version         : %02X\n", id->version);
	seq_printf(m, "identification  : %06X\n", id->ident);
	seq_printf(m, "machine         : %04X\n", id->machine);
}

static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);

	if (!machine_has_cpu_mhz)
		return;
	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
	seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
}

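/*
 * show_cpuinfo() - emit the /proc/cpuinfo block for one online CPU; the
 * machine-wide summary is printed together with the first online CPU.
 */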
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long n = (unsigned long) v - 1;
	unsigned long first = cpumask_first(cpu_online_mask);

	if (n == first)
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number      : %ld\n", n);
	show_cpu_topology(m, n);
	show_cpu_ids(m, n);
	show_cpu_mhz(m, n);
	return 0;
}

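/*
 * seq_file iterator: *pos encodes an online CPU number plus one so that
 * a return value of NULL terminates the walk. The CPU hotplug lock is
 * held from c_start() to c_stop() to keep the online mask stable.
 */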
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}

static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

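/*
 * s390_isolate_bp() and s390_isolate_bp_guest() enable branch prediction
 * isolation for the current task respectively for its guest execution.
 * Both require facility 82 and return -EOPNOTSUPP if it is not installed.
 */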
int s390_isolate_bp(void)
{
	if (!test_facility(82))
		return -EOPNOTSUPP;
	set_thread_flag(TIF_ISOLATE_BP);
	return 0;
}
EXPORT_SYMBOL(s390_isolate_bp);

int s390_isolate_bp_guest(void)
{
	if (!test_facility(82))
		return -EOPNOTSUPP;
	set_thread_flag(TIF_ISOLATE_BP_GUEST);
	return 0;
}
EXPORT_SYMBOL(s390_isolate_bp_guest);