// SPDX-License-Identifier: GPL-2.0
/* ds.c: Sun LDOM "Domain Services" driver.
 *
 * Author: David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/cpu.h>

#include <asm/hypervisor.h>
#include <asm/ldc.h>
#include <asm/vio.h>
#include <asm/mdesc.h>
#include <asm/head.h>
#include <asm/irq.h>

#include "kernel.h"

#define DRV_MODULE_NAME		"ds"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"Jul 11, 2007"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM domain services driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

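/* Every domain services message starts with this tag: a 32-bit message
 * type followed by the length, in bytes, of the payload that follows
 * the tag itself.
 */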
struct ds_msg_tag {
	__u32			type;
#define DS_INIT_REQ		0x00
#define DS_INIT_ACK		0x01
#define DS_INIT_NACK		0x02
#define DS_REG_REQ		0x03
#define DS_REG_ACK		0x04
#define DS_REG_NACK		0x05
#define DS_UNREG_REQ		0x06
#define DS_UNREG_ACK		0x07
#define DS_UNREG_NACK		0x08
#define DS_DATA			0x09
#define DS_NACK			0x0a

	__u32			len;
};

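/* Result codes */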
#define DS_OK			0x00
#define DS_REG_VER_NACK		0x01
#define DS_REG_DUP		0x02
#define DS_INV_HDL		0x03
#define DS_TYPE_UNKNOWN		0x04

struct ds_version {
	__u16			major;
	__u16			minor;
};

struct ds_ver_req {
	struct ds_msg_tag	tag;
	struct ds_version	ver;
};

struct ds_ver_ack {
	struct ds_msg_tag	tag;
	__u16			minor;
};

struct ds_ver_nack {
	struct ds_msg_tag	tag;
	__u16			major;
};

struct ds_reg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
	__u16			minor;
	char			svc_id[];
};

struct ds_reg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			minor;
};

struct ds_reg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u16			major;
};

struct ds_unreg_req {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_ack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_unreg_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_data {
	struct ds_msg_tag	tag;
	__u64			handle;
};

struct ds_data_nack {
	struct ds_msg_tag	tag;
	__u64			handle;
	__u64			result;
};

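/* Per-service capability state.  The ->data() callback is invoked from
 * the ds_thread worker for each DS_DATA packet addressed to the
 * service's handle.
 */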
struct ds_info;
struct ds_cap_state {
	__u64			handle;

	void			(*data)(struct ds_info *dp,
					struct ds_cap_state *cp,
					void *buf, int len);

	const char		*service_id;

	u8			state;
#define CAP_STATE_UNKNOWN	0x00
#define CAP_STATE_REG_SENT	0x01
#define CAP_STATE_REGISTERED	0x02
};

static void md_update_data(struct ds_info *dp, struct ds_cap_state *cp,
			   void *buf, int len);
static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len);
static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len);
#ifdef CONFIG_HOTPLUG_CPU
static void dr_cpu_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
#endif
static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);
static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len);

static struct ds_cap_state ds_states_template[] = {
	{
		.service_id	= "md-update",
		.data		= md_update_data,
	},
	{
		.service_id	= "domain-shutdown",
		.data		= domain_shutdown_data,
	},
	{
		.service_id	= "domain-panic",
		.data		= domain_panic_data,
	},
#ifdef CONFIG_HOTPLUG_CPU
	{
		.service_id	= "dr-cpu",
		.data		= dr_cpu_data,
	},
#endif
	{
		.service_id	= "pri",
		.data		= ds_pri_data,
	},
	{
		.service_id	= "var-config",
		.data		= ds_var_data,
	},
	{
		.service_id	= "var-config-backup",
		.data		= ds_var_data,
	},
};

static DEFINE_SPINLOCK(ds_lock);

struct ds_info {
	struct ldc_channel	*lp;
	u8			hs_state;
#define DS_HS_START		0x01
#define DS_HS_DONE		0x02

	u64			id;

	void			*rcv_buf;
	int			rcv_buf_len;

	struct ds_cap_state	*ds_states;
	int			num_ds_states;

	struct ds_info		*next;
};

static struct ds_info *ds_info_list;

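/* The upper 32 bits of a service handle index into dp->ds_states (see
 * register_services()); the low 32 bits are a per-registration nonce.
 */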
static struct ds_cap_state *find_cap(struct ds_info *dp, u64 handle)
{
	unsigned int index = handle >> 32;

	if (index >= dp->num_ds_states)
		return NULL;
	return &dp->ds_states[index];
}

static struct ds_cap_state *find_cap_by_string(struct ds_info *dp,
					       const char *name)
{
	int i;

	for (i = 0; i < dp->num_ds_states; i++) {
		if (strcmp(dp->ds_states[i].service_id, name))
			continue;

		return &dp->ds_states[i];
	}
	return NULL;
}

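/* Low-level send.  The caller must hold ds_lock; ldc_write() is retried
 * for up to 1000 iterations as long as it keeps returning -EAGAIN.
 */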
static int __ds_send(struct ldc_channel *lp, void *data, int len)
{
	int err, limit = 1000;

	err = -EINVAL;
	while (limit-- > 0) {
		err = ldc_write(lp, data, len);
		if (!err || (err != -EAGAIN))
			break;
		udelay(1);
	}

	return err;
}

static int ds_send(struct ldc_channel *lp, void *data, int len)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);
	err = __ds_send(lp, data, len);
	spin_unlock_irqrestore(&ds_lock, flags);

	return err;
}

struct ds_md_update_req {
	__u64				req_num;
};

struct ds_md_update_res {
	__u64				req_num;
	__u32				result;
};

static void md_update_data(struct ds_info *dp,
			   struct ds_cap_state *cp,
			   void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_md_update_req *rp;
	struct {
		struct ds_data		data;
		struct ds_md_update_res	res;
	} pkt;

	rp = (struct ds_md_update_req *) (dpkt + 1);

	printk(KERN_INFO "ds-%llu: Machine description update.\n", dp->id);

	mdesc_update();

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;

	ds_send(lp, &pkt, sizeof(pkt));
}

struct ds_shutdown_req {
	__u64				req_num;
	__u32				ms_delay;
};

struct ds_shutdown_res {
	__u64				req_num;
	__u32				result;
	char				reason[1];
};

static void domain_shutdown_data(struct ds_info *dp,
				 struct ds_cap_state *cp,
				 void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_shutdown_req *rp;
	struct {
		struct ds_data		data;
		struct ds_shutdown_res	res;
	} pkt;

	rp = (struct ds_shutdown_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%llu: Shutdown request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	orderly_poweroff(true);
}

struct ds_panic_req {
	__u64				req_num;
};

struct ds_panic_res {
	__u64				req_num;
	__u32				result;
	char				reason[1];
};

static void domain_panic_data(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      void *buf, int len)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_data *dpkt = buf;
	struct ds_panic_req *rp;
	struct {
		struct ds_data		data;
		struct ds_panic_res	res;
	} pkt;

	rp = (struct ds_panic_req *) (dpkt + 1);

	printk(KERN_ALERT "ds-%llu: Panic request from "
	       "LDOM manager received.\n", dp->id);

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag);
	pkt.data.handle = cp->handle;
	pkt.res.req_num = rp->req_num;
	pkt.res.result = DS_OK;
	pkt.res.reason[0] = 0;

	ds_send(lp, &pkt, sizeof(pkt));

	panic("PANIC requested by LDOM manager.");
}

#ifdef CONFIG_HOTPLUG_CPU
struct dr_cpu_tag {
	__u64				req_num;
	__u32				type;
#define DR_CPU_CONFIGURE		0x43
#define DR_CPU_UNCONFIGURE		0x55
#define DR_CPU_FORCE_UNCONFIGURE	0x46
#define DR_CPU_STATUS			0x53

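	/* Response codes (used when replying) */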
#define DR_CPU_OK			0x6f
#define DR_CPU_ERROR			0x65

	__u32				num_records;
};

struct dr_cpu_resp_entry {
	__u32				cpu;
	__u32				result;
#define DR_CPU_RES_OK			0x00
#define DR_CPU_RES_FAILURE		0x01
#define DR_CPU_RES_BLOCKED		0x02
#define DR_CPU_RES_CPU_NOT_RESPONDING	0x03
#define DR_CPU_RES_NOT_IN_MD		0x04

	__u32				stat;
#define DR_CPU_STAT_NOT_PRESENT		0x00
#define DR_CPU_STAT_UNCONFIGURED	0x01
#define DR_CPU_STAT_CONFIGURED		0x02

	__u32				str_off;
};

static void __dr_cpu_send_error(struct ds_info *dp,
				struct ds_cap_state *cp,
				struct ds_data *data)
{
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	struct {
		struct ds_data		data;
		struct dr_cpu_tag	tag;
	} pkt;
	int msg_len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.data.tag.type = DS_DATA;
	pkt.data.handle = cp->handle;
	pkt.tag.req_num = tag->req_num;
	pkt.tag.type = DR_CPU_ERROR;
	pkt.tag.num_records = 0;

	msg_len = (sizeof(struct ds_data) +
		   sizeof(struct dr_cpu_tag));

	pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

	__ds_send(dp->lp, &pkt, msg_len);
}

static void dr_cpu_send_error(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      struct ds_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&ds_lock, flags);
	__dr_cpu_send_error(dp, cp, data);
	spin_unlock_irqrestore(&ds_lock, flags);
}

#define CPU_SENTINEL			0xffffffff

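/* Duplicate CPU ids in the request list are overwritten with
 * CPU_SENTINEL so that each CPU is handled at most once.
 */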
static void purge_dups(u32 *list, u32 num_ents)
{
	unsigned int i;

	for (i = 0; i < num_ents; i++) {
		u32 cpu = list[i];
		unsigned int j;

		if (cpu == CPU_SENTINEL)
			continue;

		for (j = i + 1; j < num_ents; j++) {
			if (list[j] == cpu)
				list[j] = CPU_SENTINEL;
		}
	}
}

static int dr_cpu_size_response(int ncpus)
{
	return (sizeof(struct ds_data) +
		sizeof(struct dr_cpu_tag) +
		(sizeof(struct dr_cpu_resp_entry) * ncpus));
}

static void dr_cpu_init_response(struct ds_data *resp, u64 req_num,
				 u64 handle, int resp_len, int ncpus,
				 cpumask_t *mask, u32 default_stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i, cpu;

	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	resp->tag.type = DS_DATA;
	resp->tag.len = resp_len - sizeof(struct ds_msg_tag);
	resp->handle = handle;
	tag->req_num = req_num;
	tag->type = DR_CPU_OK;
	tag->num_records = ncpus;

	i = 0;
	for_each_cpu(cpu, mask) {
		ent[i].cpu = cpu;
		ent[i].result = DR_CPU_RES_OK;
		ent[i].stat = default_stat;
		i++;
	}
	BUG_ON(i != ncpus);
}

static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
			u32 res, u32 stat)
{
	struct dr_cpu_resp_entry *ent;
	struct dr_cpu_tag *tag;
	int i;

	tag = (struct dr_cpu_tag *) (resp + 1);
	ent = (struct dr_cpu_resp_entry *) (tag + 1);

	for (i = 0; i < ncpus; i++) {
		if (ent[i].cpu != cpu)
			continue;
		ent[i].result = res;
		ent[i].stat = stat;
		break;
	}
}

static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp,
			    u64 req_num, cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_CONFIGURED);

	mdesc_populate_present_mask(mask);
	mdesc_fill_in_cpu_data(mask);

	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Starting cpu %d...\n",
		       dp->id, cpu);
		err = add_cpu(cpu);
		if (err) {
			__u32 res = DR_CPU_RES_FAILURE;
			__u32 stat = DR_CPU_STAT_UNCONFIGURED;

			if (!cpu_present(cpu)) {
				/* CPU not present in the MD */
				res = DR_CPU_RES_NOT_IN_MD;
				stat = DR_CPU_STAT_NOT_PRESENT;
			} else if (err == -ENODEV) {
				/* CPU did not respond to the start request */
				res = DR_CPU_RES_CPU_NOT_RESPONDING;
			}

			printk(KERN_INFO "ds-%llu: CPU startup failed err=%d\n",
			       dp->id, err);
			dr_cpu_mark(resp, cpu, ncpus, res, stat);
		}
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

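	/* Redistribute IRQs, taking into account the new cpus. */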
	fixup_irqs();

	return 0;
}

static int dr_cpu_unconfigure(struct ds_info *dp,
			      struct ds_cap_state *cp,
			      u64 req_num,
			      cpumask_t *mask)
{
	struct ds_data *resp;
	int resp_len, ncpus, cpu;
	unsigned long flags;

	ncpus = cpumask_weight(mask);
	resp_len = dr_cpu_size_response(ncpus);
	resp = kzalloc(resp_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	dr_cpu_init_response(resp, req_num, cp->handle,
			     resp_len, ncpus, mask,
			     DR_CPU_STAT_UNCONFIGURED);

	for_each_cpu(cpu, mask) {
		int err;

		printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n",
		       dp->id, cpu);
		err = remove_cpu(cpu);
		if (err)
			dr_cpu_mark(resp, cpu, ncpus,
				    DR_CPU_RES_FAILURE,
				    DR_CPU_STAT_CONFIGURED);
	}

	spin_lock_irqsave(&ds_lock, flags);
	__ds_send(dp->lp, resp, resp_len);
	spin_unlock_irqrestore(&ds_lock, flags);

	kfree(resp);

	return 0;
}

static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf,
			int len)
{
	struct ds_data *data = buf;
	struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
	u32 *cpu_list = (u32 *) (tag + 1);
	u64 req_num = tag->req_num;
	cpumask_t mask;
	unsigned int i;
	int err;

	switch (tag->type) {
	case DR_CPU_CONFIGURE:
	case DR_CPU_UNCONFIGURE:
	case DR_CPU_FORCE_UNCONFIGURE:
		break;

	default:
		dr_cpu_send_error(dp, cp, data);
		return;
	}

	purge_dups(cpu_list, tag->num_records);

	cpumask_clear(&mask);
	for (i = 0; i < tag->num_records; i++) {
		if (cpu_list[i] == CPU_SENTINEL)
			continue;

		if (cpu_list[i] < nr_cpu_ids)
			cpumask_set_cpu(cpu_list[i], &mask);
	}

	if (tag->type == DR_CPU_CONFIGURE)
		err = dr_cpu_configure(dp, cp, req_num, &mask);
	else
		err = dr_cpu_unconfigure(dp, cp, req_num, &mask);

	if (err)
		dr_cpu_send_error(dp, cp, data);
}
#endif

struct ds_pri_msg {
	__u64				req_num;
	__u64				type;
#define DS_PRI_REQUEST			0x00
#define DS_PRI_DATA			0x01
#define DS_PRI_UPDATE			0x02
};

static void ds_pri_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_pri_msg *rp;

	rp = (struct ds_pri_msg *) (dpkt + 1);

	printk(KERN_INFO "ds-%llu: PRI REQ [%llx:%llx], len=%d\n",
	       dp->id, rp->req_num, rp->type, len);
}

struct ds_var_hdr {
	__u32				type;
#define DS_VAR_SET_REQ			0x00
#define DS_VAR_DELETE_REQ		0x01
#define DS_VAR_SET_RESP			0x02
#define DS_VAR_DELETE_RESP		0x03
};

struct ds_var_set_msg {
	struct ds_var_hdr		hdr;
	char				name_and_value[];
};

struct ds_var_delete_msg {
	struct ds_var_hdr		hdr;
	char				name[];
};

struct ds_var_resp {
	struct ds_var_hdr		hdr;
	__u32				result;
#define DS_VAR_SUCCESS			0x00
#define DS_VAR_NO_SPACE			0x01
#define DS_VAR_INVALID_VAR		0x02
#define DS_VAR_INVALID_VAL		0x03
#define DS_VAR_NOT_PRESENT		0x04
};

static DEFINE_MUTEX(ds_var_mutex);
static int ds_var_doorbell;
static int ds_var_response;

static void ds_var_data(struct ds_info *dp,
			struct ds_cap_state *cp,
			void *buf, int len)
{
	struct ds_data *dpkt = buf;
	struct ds_var_resp *rp;

	rp = (struct ds_var_resp *) (dpkt + 1);

	if (rp->hdr.type != DS_VAR_SET_RESP &&
	    rp->hdr.type != DS_VAR_DELETE_RESP)
		return;

	ds_var_response = rp->result;
	wmb();
	ds_var_doorbell = 1;
}

void ldom_set_var(const char *var, const char *value)
{
	struct ds_cap_state *cp;
	struct ds_info *dp;
	unsigned long flags;

	spin_lock_irqsave(&ds_lock, flags);
	cp = NULL;
	for (dp = ds_info_list; dp; dp = dp->next) {
		struct ds_cap_state *tmp;

		tmp = find_cap_by_string(dp, "var-config");
		if (tmp && tmp->state == CAP_STATE_REGISTERED) {
			cp = tmp;
			break;
		}
	}
	if (!cp) {
		for (dp = ds_info_list; dp; dp = dp->next) {
			struct ds_cap_state *tmp;

			tmp = find_cap_by_string(dp, "var-config-backup");
			if (tmp && tmp->state == CAP_STATE_REGISTERED) {
				cp = tmp;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&ds_lock, flags);

	if (cp) {
		union {
			struct {
				struct ds_data		data;
				struct ds_var_set_msg	msg;
			} header;
			char			all[512];
		} pkt;
		char *base, *p;
		int msg_len, loops;

		if (strlen(var) + strlen(value) + 2 >
		    sizeof(pkt) - sizeof(pkt.header)) {
			printk(KERN_ERR PFX
			       "content length %zu exceeds the maximum of %lu, "
			       "so could not set (%s) variable to (%s).\n",
			       strlen(var) + strlen(value) + 2,
			       sizeof(pkt) - sizeof(pkt.header), var, value);
			return;
		}

		memset(&pkt, 0, sizeof(pkt));
		pkt.header.data.tag.type = DS_DATA;
		pkt.header.data.handle = cp->handle;
		pkt.header.msg.hdr.type = DS_VAR_SET_REQ;
		base = p = &pkt.header.msg.name_and_value[0];
		strcpy(p, var);
		p += strlen(var) + 1;
		strcpy(p, value);
		p += strlen(value) + 1;

		msg_len = (sizeof(struct ds_data) +
			   sizeof(struct ds_var_set_msg) +
			   (p - base));
		msg_len = (msg_len + 3) & ~3;
		pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag);

		mutex_lock(&ds_var_mutex);

		spin_lock_irqsave(&ds_lock, flags);
		ds_var_doorbell = 0;
		ds_var_response = -1;

		__ds_send(dp->lp, &pkt, msg_len);
		spin_unlock_irqrestore(&ds_lock, flags);

		loops = 1000;
		while (ds_var_doorbell == 0) {
			if (loops-- < 0)
				break;
			barrier();
			udelay(100);
		}

		mutex_unlock(&ds_var_mutex);

		if (ds_var_doorbell == 0 ||
		    ds_var_response != DS_VAR_SUCCESS)
			printk(KERN_ERR "ds-%llu: var-config [%s:%s] "
			       "failed, response(%d).\n",
			       dp->id, var, value,
			       ds_var_response);
	} else {
		printk(KERN_ERR PFX "var-config not registered so "
		       "could not set (%s) variable to (%s).\n",
		       var, value);
	}
}

static char full_boot_str[256] __attribute__((aligned(32)));
static int reboot_data_supported;

void ldom_reboot(const char *boot_command)
{
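	/* Don't bother with anything below if the boot command
	 * string is empty.
	 */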
	if (boot_command && strlen(boot_command)) {
		unsigned long len;

		snprintf(full_boot_str, sizeof(full_boot_str), "boot %s",
			 boot_command);
		len = strlen(full_boot_str);

		if (reboot_data_supported) {
			unsigned long ra = kimage_addr_to_ra(full_boot_str);
			unsigned long hv_ret;

			hv_ret = sun4v_reboot_data_set(ra, len);
			if (hv_ret != HV_EOK)
				pr_err("SUN4V: Unable to set reboot data "
				       "hv_ret=%lu\n", hv_ret);
		} else {
			ldom_set_var("reboot-command", full_boot_str);
		}
	}
	sun4v_mach_sir();
}

void ldom_power_off(void)
{
	sun4v_mach_exit(0);
}

static void ds_conn_reset(struct ds_info *dp)
{
	printk(KERN_ERR "ds-%llu: ds_conn_reset() from %ps\n",
	       dp->id, __builtin_return_address(0));
}

static int register_services(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	int i;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct {
			struct ds_reg_req req;
			u8 id_buf[256];
		} pbuf;
		struct ds_cap_state *cp = &dp->ds_states[i];
		int err, msg_len;
		u64 new_count;

		if (cp->state == CAP_STATE_REGISTERED)
			continue;

		new_count = sched_clock() & 0xffffffff;
		cp->handle = ((u64) i << 32) | new_count;

		msg_len = (sizeof(struct ds_reg_req) +
			   strlen(cp->service_id));

		memset(&pbuf, 0, sizeof(pbuf));
		pbuf.req.tag.type = DS_REG_REQ;
		pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag));
		pbuf.req.handle = cp->handle;
		pbuf.req.major = 1;
		pbuf.req.minor = 0;
		strcpy(pbuf.id_buf, cp->service_id);

		err = __ds_send(lp, &pbuf, msg_len);
		if (err > 0)
			cp->state = CAP_STATE_REG_SENT;
	}
	return 0;
}

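/* Handshake state machine: while in DS_HS_START only a DS_INIT_ACK is
 * accepted; it completes version negotiation and triggers service
 * registration.  Once in DS_HS_DONE, DS_REG_ACK/DS_REG_NACK replies
 * update the per-service registration state.
 */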
static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt)
{
	if (dp->hs_state == DS_HS_START) {
		if (pkt->type != DS_INIT_ACK)
			goto conn_reset;

		dp->hs_state = DS_HS_DONE;

		return register_services(dp);
	}

	if (dp->hs_state != DS_HS_DONE)
		goto conn_reset;

	if (pkt->type == DS_REG_ACK) {
		struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, ap->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%llu: REG ACK for unknown "
			       "handle %llx\n", dp->id, ap->handle);
			return 0;
		}
		printk(KERN_INFO "ds-%llu: Registered %s service.\n",
		       dp->id, cp->service_id);
		cp->state = CAP_STATE_REGISTERED;
	} else if (pkt->type == DS_REG_NACK) {
		struct ds_reg_nack *np = (struct ds_reg_nack *) pkt;
		struct ds_cap_state *cp = find_cap(dp, np->handle);

		if (!cp) {
			printk(KERN_ERR "ds-%llu: REG NACK for "
			       "unknown handle %llx\n",
			       dp->id, np->handle);
			return 0;
		}
		cp->state = CAP_STATE_UNKNOWN;
	}

	return 0;

conn_reset:
	ds_conn_reset(dp);
	return -ECONNRESET;
}

static void __send_ds_nack(struct ds_info *dp, u64 handle)
{
	struct ds_data_nack nack = {
		.tag = {
			.type = DS_NACK,
			.len = (sizeof(struct ds_data_nack) -
				sizeof(struct ds_msg_tag)),
		},
		.handle = handle,
		.result = DS_INV_HDL,
	};

	__ds_send(dp->lp, &nack, sizeof(nack));
}

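/* DS_DATA packets are not handled in the LDC event path (ds_event() runs
 * with ds_lock held).  They are queued here and processed by the "kldomd"
 * kernel thread, since service handlers such as dr-cpu may sleep.
 */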
static LIST_HEAD(ds_work_list);
static DECLARE_WAIT_QUEUE_HEAD(ds_wait);

struct ds_queue_entry {
	struct list_head		list;
	struct ds_info			*dp;
	int				req_len;
	int				__pad;
	u64				req[];
};

static void process_ds_work(void)
{
	struct ds_queue_entry *qp, *tmp;
	unsigned long flags;
	LIST_HEAD(todo);

	spin_lock_irqsave(&ds_lock, flags);
	list_splice_init(&ds_work_list, &todo);
	spin_unlock_irqrestore(&ds_lock, flags);

	list_for_each_entry_safe(qp, tmp, &todo, list) {
		struct ds_data *dpkt = (struct ds_data *) qp->req;
		struct ds_info *dp = qp->dp;
		struct ds_cap_state *cp = find_cap(dp, dpkt->handle);
		int req_len = qp->req_len;

		if (!cp) {
			printk(KERN_ERR "ds-%llu: Data for unknown "
			       "handle %llu\n",
			       dp->id, dpkt->handle);

			spin_lock_irqsave(&ds_lock, flags);
			__send_ds_nack(dp, dpkt->handle);
			spin_unlock_irqrestore(&ds_lock, flags);
		} else {
			cp->data(dp, cp, dpkt, req_len);
		}

		list_del(&qp->list);
		kfree(qp);
	}
}

static int ds_thread(void *__unused)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(&ds_wait, &wait, TASK_INTERRUPTIBLE);
		if (list_empty(&ds_work_list))
			schedule();
		finish_wait(&ds_wait, &wait);

		if (kthread_should_stop())
			break;

		process_ds_work();
	}

	return 0;
}

static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len)
{
	struct ds_data *dpkt = (struct ds_data *) pkt;
	struct ds_queue_entry *qp;

	qp = kmalloc(sizeof(struct ds_queue_entry) + len, GFP_ATOMIC);
	if (!qp) {
		__send_ds_nack(dp, dpkt->handle);
	} else {
		qp->dp = dp;
		/* Record the request length; process_ds_work() passes it
		 * on to the service's ->data() handler.
		 */
		qp->req_len = len;
		memcpy(&qp->req, pkt, len);
		list_add_tail(&qp->list, &ds_work_list);
		wake_up(&ds_wait);
	}
	return 0;
}

static void ds_up(struct ds_info *dp)
{
	struct ldc_channel *lp = dp->lp;
	struct ds_ver_req req;
	int err;

	req.tag.type = DS_INIT_REQ;
	req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag);
	req.ver.major = 1;
	req.ver.minor = 0;

	err = __ds_send(lp, &req, sizeof(req));
	if (err > 0)
		dp->hs_state = DS_HS_START;
}

static void ds_reset(struct ds_info *dp)
{
	int i;

	dp->hs_state = 0;

	for (i = 0; i < dp->num_ds_states; i++) {
		struct ds_cap_state *cp = &dp->ds_states[i];

		cp->state = CAP_STATE_UNKNOWN;
	}
}

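/* LDC event callback: handles channel up/reset events and drains incoming
 * packets, dispatching handshake messages directly and queueing DS_DATA
 * packets for the worker thread.
 */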
static void ds_event(void *arg, int event)
{
	struct ds_info *dp = arg;
	struct ldc_channel *lp = dp->lp;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&ds_lock, flags);

	if (event == LDC_EVENT_UP) {
		ds_up(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event == LDC_EVENT_RESET) {
		ds_reset(dp);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	if (event != LDC_EVENT_DATA_READY) {
		printk(KERN_WARNING "ds-%llu: Unexpected LDC event %d\n",
		       dp->id, event);
		spin_unlock_irqrestore(&ds_lock, flags);
		return;
	}

	err = 0;
	while (1) {
		struct ds_msg_tag *tag;

		err = ldc_read(lp, dp->rcv_buf, sizeof(*tag));

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err == 0)
			break;

		tag = dp->rcv_buf;
		err = ldc_read(lp, tag + 1, tag->len);

		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				ds_conn_reset(dp);
			break;
		}
		if (err < tag->len)
			break;

		if (tag->type < DS_DATA)
			err = ds_handshake(dp, dp->rcv_buf);
		else
			err = ds_data(dp, dp->rcv_buf,
				      sizeof(*tag) + err);
		if (err == -ECONNRESET)
			break;
	}

	spin_unlock_irqrestore(&ds_lock, flags);
}

static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	static int ds_version_printed;
	struct ldc_channel_config ds_cfg = {
		.event		= ds_event,
		.mtu		= 4096,
		.mode		= LDC_MODE_STREAM,
	};
	struct mdesc_handle *hp;
	struct ldc_channel *lp;
	struct ds_info *dp;
	const u64 *val;
	int err, i;

	if (ds_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	err = -ENOMEM;
	if (!dp)
		goto out_err;

	hp = mdesc_grab();
	val = mdesc_get_property(hp, vdev->mp, "id", NULL);
	if (val)
		dp->id = *val;
	mdesc_release(hp);

	dp->rcv_buf = kzalloc(4096, GFP_KERNEL);
	if (!dp->rcv_buf)
		goto out_free_dp;

	dp->rcv_buf_len = 4096;

	dp->ds_states = kmemdup(ds_states_template,
				sizeof(ds_states_template), GFP_KERNEL);
	if (!dp->ds_states)
		goto out_free_rcv_buf;

	dp->num_ds_states = ARRAY_SIZE(ds_states_template);

	for (i = 0; i < dp->num_ds_states; i++)
		dp->ds_states[i].handle = ((u64)i << 32);

	ds_cfg.tx_irq = vdev->tx_irq;
	ds_cfg.rx_irq = vdev->rx_irq;

	lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp, "DS");
	if (IS_ERR(lp)) {
		err = PTR_ERR(lp);
		goto out_free_ds_states;
	}
	dp->lp = lp;

	err = ldc_bind(lp);
	if (err)
		goto out_free_ldc;

	spin_lock_irq(&ds_lock);
	dp->next = ds_info_list;
	ds_info_list = dp;
	spin_unlock_irq(&ds_lock);

	return err;

out_free_ldc:
	ldc_free(dp->lp);

out_free_ds_states:
	kfree(dp->ds_states);

out_free_rcv_buf:
	kfree(dp->rcv_buf);

out_free_dp:
	kfree(dp);

out_err:
	return err;
}

static const struct vio_device_id ds_match[] = {
	{
		.type = "domain-services-port",
	},
	{},
};

static struct vio_driver ds_driver = {
	.id_table	= ds_match,
	.probe		= ds_probe,
	.name		= "ds",
};

static int __init ds_init(void)
{
	unsigned long hv_ret, major, minor;

	if (tlb_type == hypervisor) {
		hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor);
		if (hv_ret == HV_EOK) {
			pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n",
				major, minor);
			reboot_data_supported = 1;
		}
	}
	kthread_run(ds_thread, NULL, "kldomd");

	return vio_register_driver(&ds_driver);
}

fs_initcall(ds_init);