// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC64 LPAR Configuration Information Driver
 *
 * Dave Engebretsen engebret@us.ibm.com
 *    Copyright (c) 2003 Dave Engebretsen
 * Will Schmidt willschm@us.ibm.com
 *    SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.
 *    seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.
 * Nathan Lynch nathanl@austin.ibm.com
 *    Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.
 *
 * This driver creates a proc file at /proc/ppc64/lparcfg which contains
 * keyword - value pairs that specify the configuration of the partition.
 */
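
/*
 * Illustrative only (not part of the original source): the proc file is a
 * sequence of keyword=value lines produced by the seq_printf() calls below,
 * one entry per line.  On a shared-processor LPAR the output might look
 * roughly like this (keywords come from this file; the values are made-up
 * examples):
 *
 *   lparcfg 1.9
 *   serial_number=IBM,0212345678
 *   system_type=IBM,9009-42A
 *   partition_id=5
 *   partition_entitled_capacity=100
 *   capacity_weight=128
 *   partition_active_processors=4
 *   shared_processor_mode=1
 *
 * Which keywords actually appear depends on firmware features (SPLPAR, CMO,
 * XCMO) as decided in pseries_lparcfg_data().
 */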

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/time.h>
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/drmem.h>

#include "pseries.h"

/*
 * This isn't a module but we expose that to userspace
 * via /proc so leave the definitions here
 */
#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"

/* #define LPARCFG_DEBUG */

/*
 * Track sum of all purrs across all processors. This is used to further
 * calculate usage values by different applications
 */
static void cpu_get_purr(void *arg)
{
    atomic64_t *sum = arg;

    atomic64_add(mfspr(SPRN_PURR), sum);
}

static unsigned long get_purr(void)
{
    atomic64_t purr = ATOMIC64_INIT(0);

    on_each_cpu(cpu_get_purr, &purr, 1);

    return atomic64_read(&purr);
}

/*
 * Methods used to fetch LPAR data when running on a pSeries platform.
 */

struct hvcall_ppp_data {
    u64 entitlement;
    u64 unallocated_entitlement;
    u16 group_num;
    u16 pool_num;
    u8  capped;
    u8  weight;
    u8  unallocated_weight;
    u16 active_procs_in_pool;
    u16 active_system_procs;
    u16 phys_platform_procs;
    u32 max_proc_cap_avail;
    u32 entitled_proc_cap_avail;
};

/*
 * H_GET_PPP hcall returns info in 4 parms
 * (entitled_capacity, unallocated_capacity,
 *  aggregation, resource_capability):
 *
 *  R4 = Entitled Processor Capacity Percentage.
 *  R5 = Unallocated Processor Capacity Percentage.
 *  R6 (AABBCCDDEEFFGGHH).
 *      XXXX - reserved (0)
 *          XXXX - reserved (0)
 *              XXXX - Group Number
 *                  XXXX - Pool Number.
 *  R7 (IIJJKKLLMMNNOOPP).
 *      XX - reserved. (0)
 *        XX - bit 0-6 reserved (0).   bit 7 is Capped indicator.
 *          XX - variable processor Capacity Weight
 *            XX - Unallocated Variable Processor Capacity Weight.
 *              XXXX - Active processors in Physical Processor Pool.
 *                  XXXX  - Processors active on platform.
 *  R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
 *  XXXX - Physical platform procs allocated to virtualization.
 *      XXXXXX - Max procs capacity % available to the partitions pool.
 *            XXXXXX - Entitled procs capacity % available to the
 *             partitions pool.
 */
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
    unsigned long rc;
    unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

    rc = plpar_hcall9(H_GET_PPP, retbuf);

    ppp_data->entitlement = retbuf[0];
    ppp_data->unallocated_entitlement = retbuf[1];

    ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
    ppp_data->pool_num = retbuf[2] & 0xffff;

    ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
    ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
    ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
    ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
    ppp_data->active_system_procs = retbuf[3] & 0xffff;

    ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
    ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
    ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;

    return rc;
}
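
/*
 * Worked example (illustrative value, not from the original source): the
 * shifts above mirror the R7 byte layout described in the comment before
 * h_get_ppp().  If retbuf[3] were 0x0001804000080010, the fields would
 * decode as:
 *
 *   capped               = (retbuf[3] >> 6 * 8) & 0x01   = 1
 *   weight               = (retbuf[3] >> 5 * 8) & 0xff   = 0x80 (128)
 *   unallocated_weight   = (retbuf[3] >> 4 * 8) & 0xff   = 0x40 (64)
 *   active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff = 8
 *   active_system_procs  =  retbuf[3]           & 0xffff = 16
 */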

static void show_gpci_data(struct seq_file *m)
{
    struct hv_gpci_request_buffer *buf;
    unsigned int affinity_score;
    long ret;

    buf = kmalloc(sizeof(*buf), GFP_KERNEL);
    if (buf == NULL)
        return;

    /*
     * Show the local LPAR's affinity score.
     *
     * 0xB1 selects the Affinity_Domain_Info_By_Partition subcall.
     * The score is at byte 0xB in the output buffer.
     */
    memset(&buf->params, 0, sizeof(buf->params));
    buf->params.counter_request = cpu_to_be32(0xB1);
    buf->params.starting_index = cpu_to_be32(-1);   /* local LPAR */
    buf->params.counter_info_version_in = 0x5;  /* v5+ for score */
    ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(buf),
                 sizeof(*buf));
    if (ret != H_SUCCESS) {
        pr_debug("hcall failed: H_GET_PERF_COUNTER_INFO: %ld, %x\n",
             ret, be32_to_cpu(buf->params.detail_rc));
        goto out;
    }
    affinity_score = buf->bytes[0xB];
    seq_printf(m, "partition_affinity_score=%u\n", affinity_score);
out:
    kfree(buf);
}

static unsigned h_pic(unsigned long *pool_idle_time,
              unsigned long *num_procs)
{
    unsigned long rc;
    unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

    rc = plpar_hcall(H_PIC, retbuf);

    *pool_idle_time = retbuf[0];
    *num_procs = retbuf[1];

    return rc;
}

/*
 * parse_ppp_data
 * Parse out the data returned from h_get_ppp and h_pic
 */
static void parse_ppp_data(struct seq_file *m)
{
    struct hvcall_ppp_data ppp_data;
    struct device_node *root;
    const __be32 *perf_level;
    int rc;

    rc = h_get_ppp(&ppp_data);
    if (rc)
        return;

    seq_printf(m, "partition_entitled_capacity=%lld\n",
               ppp_data.entitlement);
    seq_printf(m, "group=%d\n", ppp_data.group_num);
    seq_printf(m, "system_active_processors=%d\n",
               ppp_data.active_system_procs);

    /* pool related entries are appropriate for shared configs */
    if (lppaca_shared_proc(get_lppaca())) {
        unsigned long pool_idle_time, pool_procs;

        seq_printf(m, "pool=%d\n", ppp_data.pool_num);

        /* report pool_capacity in percentage */
        seq_printf(m, "pool_capacity=%d\n",
               ppp_data.active_procs_in_pool * 100);

        h_pic(&pool_idle_time, &pool_procs);
        seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
        seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
    }

    seq_printf(m, "unallocated_capacity_weight=%d\n",
           ppp_data.unallocated_weight);
    seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
    seq_printf(m, "capped=%d\n", ppp_data.capped);
    seq_printf(m, "unallocated_capacity=%lld\n",
           ppp_data.unallocated_entitlement);

    /* The last bits of information returned from h_get_ppp are only
     * valid if the ibm,partition-performance-parameters-level
     * property is >= 1.
     */
    root = of_find_node_by_path("/");
    if (root) {
        perf_level = of_get_property(root,
                "ibm,partition-performance-parameters-level",
                         NULL);
        if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
            seq_printf(m,
                "physical_procs_allocated_to_virtualization=%d\n",
                   ppp_data.phys_platform_procs);
            seq_printf(m, "max_proc_capacity_available=%d\n",
                   ppp_data.max_proc_cap_avail);
            seq_printf(m, "entitled_proc_capacity_available=%d\n",
                   ppp_data.entitled_proc_cap_avail);
        }

        of_node_put(root);
    }
}

/**
 * parse_mpp_data
 * Parse out data returned from h_get_mpp
 */
static void parse_mpp_data(struct seq_file *m)
{
    struct hvcall_mpp_data mpp_data;
    int rc;

    rc = h_get_mpp(&mpp_data);
    if (rc)
        return;

    seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);

    if (mpp_data.mapped_mem != -1)
        seq_printf(m, "mapped_entitled_memory=%ld\n",
                   mpp_data.mapped_mem);

    seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
    seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);

    seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
    seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
               mpp_data.unallocated_mem_weight);
    seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
               mpp_data.unallocated_entitlement);

    if (mpp_data.pool_size != -1)
        seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
                   mpp_data.pool_size);

    seq_printf(m, "entitled_memory_loan_request=%ld\n",
               mpp_data.loan_request);

    seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
}

/**
 * parse_mpp_x_data
 * Parse out data returned from h_get_mpp_x
 */
static void parse_mpp_x_data(struct seq_file *m)
{
    struct hvcall_mpp_x_data mpp_x_data;

    if (!firmware_has_feature(FW_FEATURE_XCMO))
        return;
    if (h_get_mpp_x(&mpp_x_data))
        return;

    seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);

    if (mpp_x_data.pool_coalesced_bytes)
        seq_printf(m, "pool_coalesced_bytes=%ld\n",
               mpp_x_data.pool_coalesced_bytes);
    if (mpp_x_data.pool_purr_cycles)
        seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
    if (mpp_x_data.pool_spurr_cycles)
        seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
}

/*
 * PAPR defines, in section "7.3.16 System Parameters Option", token 55 for
 * reading the LPAR name, and limits the output data to 4000 + 2 bytes.
 */
#define SPLPAR_LPAR_NAME_TOKEN  55
#define GET_SYS_PARM_BUF_SIZE   4002
#if GET_SYS_PARM_BUF_SIZE > RTAS_DATA_BUF_SIZE
#error "GET_SYS_PARM_BUF_SIZE is larger than RTAS_DATA_BUF_SIZE"
#endif

/*
 * Read the LPAR name using the RTAS ibm,get-system-parameter call.
 *
 * The name read through this call is updated if changes are made by the end
 * user on the hypervisor side.
 *
 * Some hypervisors (like QEMU) may not provide this value; in that case a
 * non-zero value is returned.
 */
static int read_rtas_lpar_name(struct seq_file *m)
{
    int rc, len, token;
    union {
        char raw_buffer[GET_SYS_PARM_BUF_SIZE];
        struct {
            __be16 len;
            char name[GET_SYS_PARM_BUF_SIZE-2];
        };
    } *local_buffer;

    token = rtas_token("ibm,get-system-parameter");
    if (token == RTAS_UNKNOWN_SERVICE)
        return -EINVAL;

    local_buffer = kmalloc(sizeof(*local_buffer), GFP_KERNEL);
    if (!local_buffer)
        return -ENOMEM;

    do {
        spin_lock(&rtas_data_buf_lock);
        memset(rtas_data_buf, 0, sizeof(*local_buffer));
        rc = rtas_call(token, 3, 1, NULL, SPLPAR_LPAR_NAME_TOKEN,
                   __pa(rtas_data_buf), sizeof(*local_buffer));
        if (!rc)
            memcpy(local_buffer->raw_buffer, rtas_data_buf,
                   sizeof(local_buffer->raw_buffer));
        spin_unlock(&rtas_data_buf_lock);
    } while (rtas_busy_delay(rc));

    if (!rc) {
        /* Force end of string */
        len = min((int) be16_to_cpu(local_buffer->len),
              (int) sizeof(local_buffer->name)-1);
        local_buffer->name[len] = '\0';

        seq_printf(m, "partition_name=%s\n", local_buffer->name);
    } else
        rc = -ENODATA;

    kfree(local_buffer);
    return rc;
}

/*
 * Read the LPAR name from the Device Tree.
 *
 * The value read in the DT is not updated if the end-user is touching the LPAR
 * name on the hypervisor side.
 */
static int read_dt_lpar_name(struct seq_file *m)
{
    const char *name;

    if (of_property_read_string(of_root, "ibm,partition-name", &name))
        return -ENOENT;

    seq_printf(m, "partition_name=%s\n", name);
    return 0;
}

static void read_lpar_name(struct seq_file *m)
{
    if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
        pr_err_once("Error can't get the LPAR name");
}

#define SPLPAR_CHARACTERISTICS_TOKEN 20
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))

/*
 * parse_system_parameter_string()
 * Retrieve the potential_processors, max_entitled_capacity and friends
 * through the get-system-parameter rtas call.  Replace keyword strings as
 * necessary.
 */
static void parse_system_parameter_string(struct seq_file *m)
{
    int call_status;

    unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
    if (!local_buffer) {
        printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
               __FILE__, __func__, __LINE__);
        return;
    }

    spin_lock(&rtas_data_buf_lock);
    memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
    call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
                NULL,
                SPLPAR_CHARACTERISTICS_TOKEN,
                __pa(rtas_data_buf),
                RTAS_DATA_BUF_SIZE);
    memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
    local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
    spin_unlock(&rtas_data_buf_lock);

    if (call_status != 0) {
        printk(KERN_INFO
               "%s %s Error calling get-system-parameter (0x%x)\n",
               __FILE__, __func__, call_status);
    } else {
        int splpar_strlen;
        int idx, w_idx;
        char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
        if (!workbuffer) {
            printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
                   __FILE__, __func__, __LINE__);
            kfree(local_buffer);
            return;
        }
#ifdef LPARCFG_DEBUG
        printk(KERN_INFO "success calling get-system-parameter\n");
#endif
        splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
        local_buffer += 2;  /* step over strlen value */

        w_idx = 0;
        idx = 0;
        while ((*local_buffer) && (idx < splpar_strlen)) {
            workbuffer[w_idx++] = local_buffer[idx++];
            if ((local_buffer[idx] == ',')
                || (local_buffer[idx] == '\0')) {
                workbuffer[w_idx] = '\0';
                if (w_idx) {
                    /* avoid the empty string */
                    seq_printf(m, "%s\n", workbuffer);
                }
                memset(workbuffer, 0, SPLPAR_MAXLENGTH);
                idx++;  /* skip the comma */
                w_idx = 0;
            } else if (local_buffer[idx] == '=') {
                /* code here to replace workbuffer contents
                   with different keyword strings */
                if (0 == strcmp(workbuffer, "MaxEntCap")) {
                    strcpy(workbuffer,
                           "partition_max_entitled_capacity");
                    w_idx = strlen(workbuffer);
                }
                if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
                    strcpy(workbuffer,
                           "system_potential_processors");
                    w_idx = strlen(workbuffer);
                }
            }
        }
        kfree(workbuffer);
        local_buffer -= 2;  /* back up over strlen value */
    }
    kfree(local_buffer);
}
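
/*
 * Illustrative only (keywords and values are examples, not from the original
 * source): the SPLPAR characteristics parsed above arrive as one
 * comma-separated keyword=value string, e.g.
 *
 *   MaxEntCap=400,MaxPlatProcs=16,DesEntCap=100,DesProcs=4
 *
 * Each pair is printed on its own line, with "MaxEntCap" rewritten to
 * "partition_max_entitled_capacity" and "MaxPlatProcs" rewritten to
 * "system_potential_processors" by the loop above, giving:
 *
 *   partition_max_entitled_capacity=400
 *   system_potential_processors=16
 *   DesEntCap=100
 *   DesProcs=4
 */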

/* Return the number of processors in the system.
 * This function reads through the device tree and counts
 * the virtual processors; it does not include threads.
 */
static int lparcfg_count_active_processors(void)
{
    struct device_node *cpus_dn;
    int count = 0;

    for_each_node_by_type(cpus_dn, "cpu") {
#ifdef LPARCFG_DEBUG
        printk(KERN_ERR "cpus_dn %p\n", cpus_dn);
#endif
        count++;
    }
    return count;
}

static void pseries_cmo_data(struct seq_file *m)
{
    int cpu;
    unsigned long cmo_faults = 0;
    unsigned long cmo_fault_time = 0;

    seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));

    if (!firmware_has_feature(FW_FEATURE_CMO))
        return;

    for_each_possible_cpu(cpu) {
        cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
        cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
    }

    seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
    seq_printf(m, "cmo_fault_time_usec=%lu\n",
           cmo_fault_time / tb_ticks_per_usec);
    seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());
    seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());
    seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());
}

static void splpar_dispatch_data(struct seq_file *m)
{
    int cpu;
    unsigned long dispatches = 0;
    unsigned long dispatch_dispersions = 0;

    for_each_possible_cpu(cpu) {
        dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
        dispatch_dispersions +=
            be32_to_cpu(lppaca_of(cpu).dispersion_count);
    }

    seq_printf(m, "dispatches=%lu\n", dispatches);
    seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
}

static void parse_em_data(struct seq_file *m)
{
    unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

    if (firmware_has_feature(FW_FEATURE_LPAR) &&
        plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
        seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}

static void maxmem_data(struct seq_file *m)
{
    unsigned long maxmem = 0;

    maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size;
    maxmem += hugetlb_total_pages() * PAGE_SIZE;

    seq_printf(m, "MaxMem=%lu\n", maxmem);
}

static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
    int partition_potential_processors;
    int partition_active_processors;
    struct device_node *rtas_node;
    const __be32 *lrdrp = NULL;

    rtas_node = of_find_node_by_path("/rtas");
    if (rtas_node)
        lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);

    if (lrdrp == NULL) {
        partition_potential_processors = vdso_data->processorCount;
    } else {
        partition_potential_processors = be32_to_cpup(lrdrp + 4);
    }
    of_node_put(rtas_node);

    partition_active_processors = lparcfg_count_active_processors();

    if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
        /* this call handles the ibm,get-system-parameter contents */
        read_lpar_name(m);
        parse_system_parameter_string(m);
        parse_ppp_data(m);
        parse_mpp_data(m);
        parse_mpp_x_data(m);
        pseries_cmo_data(m);
        splpar_dispatch_data(m);

        seq_printf(m, "purr=%ld\n", get_purr());
        seq_printf(m, "tbr=%ld\n", mftb());
    } else {        /* non SPLPAR case */

        seq_printf(m, "system_active_processors=%d\n",
               partition_potential_processors);

        seq_printf(m, "system_potential_processors=%d\n",
               partition_potential_processors);

        seq_printf(m, "partition_max_entitled_capacity=%d\n",
               partition_potential_processors * 100);

        seq_printf(m, "partition_entitled_capacity=%d\n",
               partition_active_processors * 100);
    }

    show_gpci_data(m);

    seq_printf(m, "partition_active_processors=%d\n",
           partition_active_processors);

    seq_printf(m, "partition_potential_processors=%d\n",
           partition_potential_processors);

    seq_printf(m, "shared_processor_mode=%d\n",
           lppaca_shared_proc(get_lppaca()));

#ifdef CONFIG_PPC_64S_HASH_MMU
    if (!radix_enabled())
        seq_printf(m, "slb_size=%d\n", mmu_slb_size);
#endif
    parse_em_data(m);
    maxmem_data(m);

    seq_printf(m, "security_flavor=%u\n", pseries_security_flavor);

    return 0;
}

static ssize_t update_ppp(u64 *entitlement, u8 *weight)
{
    struct hvcall_ppp_data ppp_data;
    u8 new_weight;
    u64 new_entitled;
    ssize_t retval;

    /* Get our current parameters */
    retval = h_get_ppp(&ppp_data);
    if (retval)
        return retval;

    if (entitlement) {
        new_weight = ppp_data.weight;
        new_entitled = *entitlement;
    } else if (weight) {
        new_weight = *weight;
        new_entitled = ppp_data.entitlement;
    } else
        return -EINVAL;

    pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
         __func__, ppp_data.entitlement, ppp_data.weight);

    pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
         __func__, new_entitled, new_weight);

    retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
    return retval;
}

/**
 * update_mpp
 *
 * Update the memory entitlement and weight for the partition.  Caller must
 * specify either a new entitlement or weight, not both, to be updated
 * since the h_set_mpp call takes both entitlement and weight as parameters.
 */
static ssize_t update_mpp(u64 *entitlement, u8 *weight)
{
    struct hvcall_mpp_data mpp_data;
    u64 new_entitled;
    u8 new_weight;
    ssize_t rc;

    if (entitlement) {
        /* Check with vio to ensure the new memory entitlement
         * can be handled.
         */
        rc = vio_cmo_entitlement_update(*entitlement);
        if (rc)
            return rc;
    }

    rc = h_get_mpp(&mpp_data);
    if (rc)
        return rc;

    if (entitlement) {
        new_weight = mpp_data.mem_weight;
        new_entitled = *entitlement;
    } else if (weight) {
        new_weight = *weight;
        new_entitled = mpp_data.entitled_mem;
    } else
        return -EINVAL;

    pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
             __func__, mpp_data.entitled_mem, mpp_data.mem_weight);

    pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
         __func__, new_entitled, new_weight);

    rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
    return rc;
}

/*
 * Interface for changing system parameters (variable capacity weight
 * and entitled capacity).  Format of input is "param_name=value";
 * anything after value is ignored.  Valid parameters at this time are
 * "partition_entitled_capacity", "capacity_weight", "entitled_memory"
 * and "entitled_memory_weight".  We use H_SET_PPP to alter processor
 * parameters and H_SET_MPP to alter memory parameters.
 *
 * This function should be invoked only on systems with
 * FW_FEATURE_SPLPAR.
 */
static ssize_t lparcfg_write(struct file *file, const char __user * buf,
                 size_t count, loff_t * off)
{
    char kbuf[64];
    char *tmp;
    u64 new_entitled, *new_entitled_ptr = &new_entitled;
    u8 new_weight, *new_weight_ptr = &new_weight;
    ssize_t retval;

    if (!firmware_has_feature(FW_FEATURE_SPLPAR))
        return -EINVAL;

    if (count > sizeof(kbuf))
        return -EINVAL;

    if (copy_from_user(kbuf, buf, count))
        return -EFAULT;

    kbuf[count - 1] = '\0';
    tmp = strchr(kbuf, '=');
    if (!tmp)
        return -EINVAL;

    *tmp++ = '\0';

    if (!strcmp(kbuf, "partition_entitled_capacity")) {
        char *endp;
        *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
        if (endp == tmp)
            return -EINVAL;

        retval = update_ppp(new_entitled_ptr, NULL);
    } else if (!strcmp(kbuf, "capacity_weight")) {
        char *endp;
        *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
        if (endp == tmp)
            return -EINVAL;

        retval = update_ppp(NULL, new_weight_ptr);
    } else if (!strcmp(kbuf, "entitled_memory")) {
        char *endp;
        *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
        if (endp == tmp)
            return -EINVAL;

        retval = update_mpp(new_entitled_ptr, NULL);
    } else if (!strcmp(kbuf, "entitled_memory_weight")) {
        char *endp;
        *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
        if (endp == tmp)
            return -EINVAL;

        retval = update_mpp(NULL, new_weight_ptr);
    } else
        return -EINVAL;

    if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
        retval = count;
    } else if (retval == H_BUSY) {
        retval = -EBUSY;
    } else if (retval == H_HARDWARE) {
        retval = -EIO;
    } else if (retval == H_PARAMETER) {
        retval = -EINVAL;
    }

    return retval;
}
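
/*
 * Illustrative userspace usage (not part of the original source): when the
 * partition has FW_FEATURE_SPLPAR the proc file created below is writable,
 * so a parameter can be changed with a single "param_name=value" write,
 * for example (values are examples only and must be valid for the platform):
 *
 *   echo "partition_entitled_capacity=150" > /proc/powerpc/lparcfg
 *   echo "capacity_weight=128" > /proc/powerpc/lparcfg
 *
 * or from C (error handling omitted; note the trailing newline, since the
 * last byte of the write is overwritten with a NUL terminator above):
 *
 *   int fd = open("/proc/powerpc/lparcfg", O_WRONLY);
 *   write(fd, "entitled_memory_weight=64\n", 26);
 *   close(fd);
 *
 * A successful (or H_CONSTRAINED) hcall makes the write return the byte
 * count; H_BUSY, H_HARDWARE and H_PARAMETER are mapped to -EBUSY, -EIO and
 * -EINVAL as shown above.
 */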

static int lparcfg_data(struct seq_file *m, void *v)
{
    struct device_node *rootdn;
    const char *model = "";
    const char *system_id = "";
    const char *tmp;
    const __be32 *lp_index_ptr;
    unsigned int lp_index = 0;

    seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);

    rootdn = of_find_node_by_path("/");
    if (rootdn) {
        tmp = of_get_property(rootdn, "model", NULL);
        if (tmp)
            model = tmp;
        tmp = of_get_property(rootdn, "system-id", NULL);
        if (tmp)
            system_id = tmp;
        lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
                    NULL);
        if (lp_index_ptr)
            lp_index = be32_to_cpup(lp_index_ptr);
        of_node_put(rootdn);
    }
    seq_printf(m, "serial_number=%s\n", system_id);
    seq_printf(m, "system_type=%s\n", model);
    seq_printf(m, "partition_id=%d\n", (int)lp_index);

    return pseries_lparcfg_data(m, v);
}

static int lparcfg_open(struct inode *inode, struct file *file)
{
    return single_open(file, lparcfg_data, NULL);
}

static const struct proc_ops lparcfg_proc_ops = {
    .proc_read  = seq_read,
    .proc_write = lparcfg_write,
    .proc_open  = lparcfg_open,
    .proc_release   = single_release,
    .proc_lseek = seq_lseek,
};

static int __init lparcfg_init(void)
{
    umode_t mode = 0444;

    /* Allow writing if we have FW_FEATURE_SPLPAR */
    if (firmware_has_feature(FW_FEATURE_SPLPAR))
        mode |= 0200;

    if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_proc_ops)) {
        printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
        return -EIO;
    }
    return 0;
}
machine_device_initcall(pseries, lparcfg_init);