0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #define KMSG_COMPONENT "ap"
0015 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
0016
0017 #include <linux/kernel_stat.h>
0018 #include <linux/moduleparam.h>
0019 #include <linux/init.h>
0020 #include <linux/delay.h>
0021 #include <linux/err.h>
0022 #include <linux/freezer.h>
0023 #include <linux/interrupt.h>
0024 #include <linux/workqueue.h>
0025 #include <linux/slab.h>
0026 #include <linux/notifier.h>
0027 #include <linux/kthread.h>
0028 #include <linux/mutex.h>
0029 #include <asm/airq.h>
0030 #include <asm/tpi.h>
0031 #include <linux/atomic.h>
0032 #include <asm/isc.h>
0033 #include <linux/hrtimer.h>
0034 #include <linux/ktime.h>
0035 #include <asm/facility.h>
0036 #include <linux/crypto.h>
0037 #include <linux/mod_devicetable.h>
0038 #include <linux/debugfs.h>
0039 #include <linux/ctype.h>
0040 #include <linux/module.h>
0041
0042 #include "ap_bus.h"
0043 #include "ap_debug.h"
0044
0045
0046
0047
/* Default AP domain index; -1 means autodetect. */
int ap_domain_index = -1;
static DEFINE_SPINLOCK(ap_domain_lock);	/* serializes writes to ap_domain_index */
module_param_named(domain, ap_domain_index, int, 0440);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

/* Module parameter: run a polling kernel thread instead of/next to IRQs. */
static int ap_thread_flag;
module_param_named(poll_thread, ap_thread_flag, int, 0440);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

/* Module parameter: initial adapter mask (apmask sysfs attribute). */
static char *apm_str;
module_param_named(apmask, apm_str, charp, 0440);
MODULE_PARM_DESC(apmask, "AP bus adapter mask.");

/* Module parameter: initial domain mask (aqmask sysfs attribute). */
static char *aqm_str;
module_param_named(aqmask, aqm_str, charp, 0440);
MODULE_PARM_DESC(aqmask, "AP bus domain mask.");

/* Module parameter: use AP interrupts if the facility is available. */
static int ap_useirq = 1;
module_param_named(useirq, ap_useirq, int, 0440);
MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");

/* Current maximum AP message size supported by the bus. */
atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
EXPORT_SYMBOL(ap_max_msg_size);

/* Root device of the AP bus; parent of all card devices. */
static struct device *ap_root_device;

/* Hashtable of all registered AP queues, keyed by qid. */
DEFINE_HASHTABLE(ap_queues, 8);

/* Lock protecting additions/removals/lookups in the ap_queues hashtable. */
DEFINE_SPINLOCK(ap_queues_lock);

/* Default permissions: which adapters/domains the default drivers own. */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);

/* Number of completed bus scans since module load. */
static atomic64_t ap_scan_bus_count;

/* Number of "bindings complete" uevents sent so far. */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);

/* Completion signaled once all initial APQNs are bound to drivers. */
static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);

/* Current and previous QCI (query configuration information) snapshots. */
static struct ap_config_info *ap_qci_info;
static struct ap_config_info *ap_qci_info_old;

/* AP bus s390 debug feature handle. */
debug_info_t *ap_dbf_info;

/* Periodic bus rescan: timer kicks ap_scan_work every ap_config_time secs. */
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static void ap_scan_bus(struct work_struct *);
static DECLARE_WORK(ap_scan_work, ap_scan_bus);

/* Tasklet & poll-thread machinery driving the queue state machines. */
static void ap_tasklet_fn(unsigned long);
static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;

/* Poll interval in nanoseconds (default 250 us = 4 kHz). */
static unsigned long long poll_timeout = 250000;

/* Maximum domain id if no QCI info is available. */
static int ap_max_domain_id = 15;

/* Maximum adapter id if no QCI info is available. */
static int ap_max_adapter_id = 63;

static struct bus_type ap_bus_type;

/* Adapter interrupt definitions. */
static void ap_interrupt_handler(struct airq_struct *airq,
				 struct tpi_info *tpi_info);

/* Flag indicating whether the AP bus currently uses interrupts. */
static bool ap_irq_flag;

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};
0144
0145
0146
0147
0148
0149
0150
0151
0152 void *ap_airq_ptr(void)
0153 {
0154 if (ap_irq_flag)
0155 return ap_airq.lsi_ptr;
0156 return NULL;
0157 }
0158
0159
0160
0161
0162
0163
/**
 * ap_interrupts_available() - Test if AP interrupts are available.
 *
 * Returns the state of facility bit 65 (AP-interruption facility).
 */
static int ap_interrupts_available(void)
{
	return test_facility(65);
}
0168
0169
0170
0171
0172
0173
0174
/**
 * ap_qci_available() - Test if AP configuration information can be queried.
 *
 * Returns the state of facility bit 12 (QCI facility).
 */
static int ap_qci_available(void)
{
	return test_facility(12);
}
0179
0180
0181
0182
0183
0184
0185
/**
 * ap_apft_available() - Test if the AP Facilities Test (APFT) facility
 * is available.
 *
 * Returns the state of facility bit 15.
 */
static int ap_apft_available(void)
{
	return test_facility(15);
}
0190
0191
0192
0193
0194
0195
0196 static inline int ap_qact_available(void)
0197 {
0198 if (ap_qci_info)
0199 return ap_qci_info->qact;
0200 return 0;
0201 }
0202
0203
0204
0205
0206
0207
0208
0209
0210
0211 static inline int ap_fetch_qci_info(struct ap_config_info *info)
0212 {
0213 if (!ap_qci_available())
0214 return -EOPNOTSUPP;
0215 if (!info)
0216 return -EINVAL;
0217 return ap_qci(info);
0218 }
0219
0220
0221
0222
0223
0224
0225 static void __init ap_init_qci_info(void)
0226 {
0227 if (!ap_qci_available()) {
0228 AP_DBF_INFO("%s QCI not supported\n", __func__);
0229 return;
0230 }
0231
0232 ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
0233 if (!ap_qci_info)
0234 return;
0235 ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
0236 if (!ap_qci_info_old)
0237 return;
0238 if (ap_fetch_qci_info(ap_qci_info) != 0) {
0239 kfree(ap_qci_info);
0240 kfree(ap_qci_info_old);
0241 ap_qci_info = NULL;
0242 ap_qci_info_old = NULL;
0243 return;
0244 }
0245 AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
0246
0247 if (ap_qci_info->apxa) {
0248 if (ap_qci_info->Na) {
0249 ap_max_adapter_id = ap_qci_info->Na;
0250 AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
0251 __func__, ap_max_adapter_id);
0252 }
0253 if (ap_qci_info->Nd) {
0254 ap_max_domain_id = ap_qci_info->Nd;
0255 AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
0256 __func__, ap_max_domain_id);
0257 }
0258 }
0259
0260 memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
0261 }
0262
0263
0264
0265
0266
/*
 * ap_test_config(): helper function to extract the nrth bit
 * within the unsigned int array field.
 */
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	/* word index = nr / 32, bit index within word = nr % 32 */
	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}
0271
0272
0273
0274
0275
0276
0277
0278
0279 static inline int ap_test_config_card_id(unsigned int id)
0280 {
0281 if (id > ap_max_adapter_id)
0282 return 0;
0283 if (ap_qci_info)
0284 return ap_test_config(ap_qci_info->apm, id);
0285 return 1;
0286 }
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296 int ap_test_config_usage_domain(unsigned int domain)
0297 {
0298 if (domain > ap_max_domain_id)
0299 return 0;
0300 if (ap_qci_info)
0301 return ap_test_config(ap_qci_info->aqm, domain);
0302 return 1;
0303 }
0304 EXPORT_SYMBOL(ap_test_config_usage_domain);
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314 int ap_test_config_ctrl_domain(unsigned int domain)
0315 {
0316 if (!ap_qci_info || domain > ap_max_domain_id)
0317 return 0;
0318 return ap_test_config(ap_qci_info->adm, domain);
0319 }
0320 EXPORT_SYMBOL(ap_test_config_ctrl_domain);
0321
0322
0323
0324
0325
0326
/*
 * ap_queue_info(): Check and get AP queue info via TAPQ.
 *
 * Returns true and fills the output parameters if TAPQ delivered
 * queue info, false otherwise (invalid APQN or no info available).
 */
static bool ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
			  int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
{
	struct ap_queue_status status;
	/* Layout of GR2 as returned by TAPQ with the info request bit set. */
	union {
		unsigned long value;
		struct {
			unsigned int fac   : 32;	/* facility bits */
			unsigned int at	   :  8;	/* AP type */
			unsigned int _res1 :  8;
			unsigned int _res2 :  4;
			unsigned int ml	   :  4;	/* message limit */
			unsigned int _res3 :  4;
			unsigned int qd	   :  4;	/* queue depth */
		} tapq_gr2;
	} tapq_info;

	tapq_info.value = 0;

	/* Make sure we don't run into a specification exception. */
	if (AP_QID_CARD(qid) > ap_max_adapter_id ||
	    AP_QID_QUEUE(qid) > ap_max_domain_id)
		return false;

	/* Call TAPQ on this APQN. */
	status = ap_test_queue(qid, ap_apft_available(), &tapq_info.value);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
	case AP_RESPONSE_BUSY:
		/*
		 * For all these response codes the info should have been
		 * filled in. An all-zero GR2 would be inconsistent, so
		 * warn once and bail out.
		 */
		if (WARN_ON_ONCE(!tapq_info.value))
			return false;
		*q_type = tapq_info.tapq_gr2.at;
		*q_fac = tapq_info.tapq_gr2.fac;
		*q_depth = tapq_info.tapq_gr2.qd;
		*q_ml = tapq_info.tapq_gr2.ml;
		*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
		*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
		switch (*q_type) {
		/*
		 * For CEX2 and CEX3 the available functions are not
		 * reflected by the facility bits; they are implied by
		 * the device type, so patch the function bits here.
		 */
		case AP_DEVICE_TYPE_CEX2A:
		case AP_DEVICE_TYPE_CEX3A:
			*q_fac |= 0x08000000;
			break;
		case AP_DEVICE_TYPE_CEX2C:
		case AP_DEVICE_TYPE_CEX3C:
			*q_fac |= 0x10000000;
			break;
		default:
			break;
		}
		return true;
	default:
		/*
		 * A response code which indicates there is no info available.
		 */
		return false;
	}
}
0397
/**
 * ap_wait() - Arrange for the next poll event according to the wait
 * hint returned by a queue state machine.
 * @wait: requested wait behaviour
 */
void ap_wait(enum ap_sm_wait wait)
{
	ktime_t hr_time;

	switch (wait) {
	case AP_SM_WAIT_AGAIN:
	case AP_SM_WAIT_INTERRUPT:
		/* With IRQ support the next event arrives via interrupt. */
		if (ap_irq_flag)
			break;
		/* A running poll thread does the polling; just wake it. */
		if (ap_poll_kthread) {
			wake_up(&ap_poll_wait);
			break;
		}
		fallthrough;
	case AP_SM_WAIT_TIMEOUT:
		/* (Re)arm the high resolution poll timer if not pending. */
		spin_lock_bh(&ap_poll_timer_lock);
		if (!hrtimer_is_queued(&ap_poll_timer)) {
			hr_time = poll_timeout;
			hrtimer_forward_now(&ap_poll_timer, hr_time);
			hrtimer_restart(&ap_poll_timer);
		}
		spin_unlock_bh(&ap_poll_timer_lock);
		break;
	case AP_SM_WAIT_NONE:
	default:
		break;
	}
}
0426
0427
0428
0429
0430
0431
0432
/**
 * ap_request_timeout() - Handling of request timeouts
 * @t: timer making this callback
 *
 * Handles request timeouts by feeding a TIMEOUT event into the
 * queue's state machine under the queue lock.
 */
void ap_request_timeout(struct timer_list *t)
{
	struct ap_queue *aq = from_timer(aq, t, timeout);

	spin_lock_bh(&aq->lock);
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
	spin_unlock_bh(&aq->lock);
}
0441
0442
0443
0444
0445
0446
0447
/**
 * ap_poll_timeout() - AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet. The timer is re-armed by ap_wait() when
 * further polling is requested, hence HRTIMER_NORESTART here.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}
0453
0454
0455
0456
0457
0458
/**
 * ap_interrupt_handler() - Schedule ap_tasklet on interrupt
 * @airq: pointer to adapter interrupt descriptor
 * @tpi_info: ignored
 */
static void ap_interrupt_handler(struct airq_struct *airq,
				 struct tpi_info *tpi_info)
{
	inc_irq_stat(IRQIO_APB);
	tasklet_schedule(&ap_tasklet);
}
0465
0466
0467
0468
0469
0470
0471
/**
 * ap_tasklet_fn() - Tasklet to poll all AP devices.
 * @dummy: Unused variable
 *
 * Polls all AP devices on the bus.
 */
static void ap_tasklet_fn(unsigned long dummy)
{
	int bkt;
	struct ap_queue *aq;
	enum ap_sm_wait wait = AP_SM_WAIT_NONE;

	/*
	 * Reset the indicator if interrupts are used. Thus new interrupts
	 * can be received. Doing it at the beginning of the tasklet is
	 * important so that no requests on any AP get lost.
	 */
	if (ap_irq_flag)
		xchg(ap_airq.lsi_ptr, 0);

	spin_lock_bh(&ap_queues_lock);
	hash_for_each(ap_queues, bkt, aq, hnode) {
		spin_lock_bh(&aq->lock);
		/* Keep the strongest (smallest) wait requirement seen. */
		wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
		spin_unlock_bh(&aq->lock);
	}
	spin_unlock_bh(&ap_queues_lock);

	ap_wait(wait);
}
0495
0496 static int ap_pending_requests(void)
0497 {
0498 int bkt;
0499 struct ap_queue *aq;
0500
0501 spin_lock_bh(&ap_queues_lock);
0502 hash_for_each(ap_queues, bkt, aq, hnode) {
0503 if (aq->queue_count == 0)
0504 continue;
0505 spin_unlock_bh(&ap_queues_lock);
0506 return 1;
0507 }
0508 spin_unlock_bh(&ap_queues_lock);
0509 return 0;
0510 }
0511
0512
0513
0514
0515
0516
0517
0518
0519
0520
0521
/**
 * ap_poll_thread() - Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is (at least one) interrupt
 * not working AP card.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);

	set_user_nice(current, MAX_NICE);
	set_freezable();
	while (!kthread_should_stop()) {
		/*
		 * Register on the wait queue *before* checking for pending
		 * work so a wake-up between check and schedule() is not lost.
		 */
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (!ap_pending_requests()) {
			schedule();
			try_to_freeze();
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);
		if (need_resched()) {
			schedule();
			try_to_freeze();
			continue;
		}
		/* Drive the queue state machines via the tasklet body. */
		ap_tasklet_fn(0);
	}

	return 0;
}
0547
/*
 * ap_poll_thread_start() - Start the AP bus poll thread.
 *
 * No-op (returns 0) if interrupts are in use or a poll thread is
 * already running. Returns the kthread_run() error code on failure.
 */
static int ap_poll_thread_start(void)
{
	int rc;

	if (ap_irq_flag || ap_poll_kthread)
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
	rc = PTR_ERR_OR_ZERO(ap_poll_kthread);
	if (rc)
		ap_poll_kthread = NULL;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}
0562
/*
 * ap_poll_thread_stop() - Stop the AP bus poll thread, if running.
 */
static void ap_poll_thread_stop(void)
{
	if (!ap_poll_kthread)
		return;
	mutex_lock(&ap_poll_thread_mutex);
	kthread_stop(ap_poll_kthread);
	ap_poll_kthread = NULL;
	mutex_unlock(&ap_poll_thread_mutex);
}
0572
/* A device is a card iff its parent on the bus is the AP root device;
 * otherwise it is a queue device (child of a card).
 */
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
0575
0576
0577
0578
0579
0580
0581
0582
0583 static int ap_bus_match(struct device *dev, struct device_driver *drv)
0584 {
0585 struct ap_driver *ap_drv = to_ap_drv(drv);
0586 struct ap_device_id *id;
0587
0588
0589
0590
0591
0592 for (id = ap_drv->ids; id->match_flags; id++) {
0593 if (is_card_dev(dev) &&
0594 id->match_flags & AP_DEVICE_ID_MATCH_CARD_TYPE &&
0595 id->dev_type == to_ap_dev(dev)->device_type)
0596 return 1;
0597 if (is_queue_dev(dev) &&
0598 id->match_flags & AP_DEVICE_ID_MATCH_QUEUE_TYPE &&
0599 id->dev_type == to_ap_dev(dev)->device_type)
0600 return 1;
0601 }
0602 return 0;
0603 }
0604
0605
0606
0607
0608
0609
0610
0611
0612
/**
 * ap_uevent() - Uevent function for AP devices.
 * @dev: Pointer to device.
 * @env: Pointer to kobj_uevent_env.
 *
 * Adds DEV_TYPE, MODALIAS and MODE environment variables for card
 * devices and a MODE variable for queue devices.
 */
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	int rc = 0;
	struct ap_device *ap_dev = to_ap_dev(dev);

	/* Uevents from the AP bus root device carry no extra variables. */
	if (dev == ap_root_device)
		return 0;

	if (is_card_dev(dev)) {
		struct ap_card *ac = to_ap_card(&ap_dev->device);

		/* Set up DEV_TYPE environment variable. */
		rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
		if (rc)
			return rc;
		/* Add MODALIAS= */
		rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
		if (rc)
			return rc;

		/* Add MODE=<accel|cca|ep11> based on the card functions. */
		if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
			rc = add_uevent_var(env, "MODE=accel");
		else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
			rc = add_uevent_var(env, "MODE=cca");
		else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	} else {
		struct ap_queue *aq = to_ap_queue(&ap_dev->device);

		/* Add MODE=<accel|cca|ep11> based on the card functions. */
		if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
			rc = add_uevent_var(env, "MODE=accel");
		else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
			rc = add_uevent_var(env, "MODE=cca");
		else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
			rc = add_uevent_var(env, "MODE=ep11");
		if (rc)
			return rc;
	}

	return 0;
}
0659
/* Emit an INITSCAN=done uevent on the AP bus root device after the
 * very first bus scan has completed.
 */
static void ap_send_init_scan_done_uevent(void)
{
	char *envp[] = { "INITSCAN=done", NULL };

	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
0666
/* Emit a BINDINGS=complete uevent (with a monotonically increasing
 * COMPLETECOUNT) whenever all APQNs are bound to device drivers.
 */
static void ap_send_bindings_complete_uevent(void)
{
	char buf[32];
	char *envp[] = { "BINDINGS=complete", buf, NULL };

	snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
		 atomic64_inc_return(&ap_bindings_complete_count));
	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
0676
/* Emit a CONFIG=<0|1> uevent on the given AP device when its
 * configuration state changes.
 */
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
{
	char buf[16];
	char *envp[] = { buf, NULL };

	snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);

	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_config_uevent);
0687
/* Emit an ONLINE=<0|1> uevent on the given AP device when its
 * online state changes.
 */
void ap_send_online_uevent(struct ap_device *ap_dev, int online)
{
	char buf[16];
	char *envp[] = { buf, NULL };

	snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);

	kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(ap_send_online_uevent);
0698
/* Emit an APMASK= or AQMASK= uevent on the bus root device after one
 * of the masks changed. Exactly one of @newapm/@newaqm is non-NULL;
 * each mask is 256 bits given as four 64 bit words.
 */
static void ap_send_mask_changed_uevent(unsigned long *newapm,
					unsigned long *newaqm)
{
	char buf[100];
	char *envp[] = { buf, NULL };

	if (newapm)
		snprintf(buf, sizeof(buf),
			 "APMASK=0x%016lx%016lx%016lx%016lx\n",
			 newapm[0], newapm[1], newapm[2], newapm[3]);
	else
		snprintf(buf, sizeof(buf),
			 "AQMASK=0x%016lx%016lx%016lx%016lx\n",
			 newaqm[0], newaqm[1], newaqm[2], newaqm[3]);

	kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
0716
0717
0718
0719
0720
/* Accumulator for counting queue devices and how many of them are
 * bound to a device driver (used by ap_calc_bound_apqns()).
 */
struct __ap_calc_ctrs {
	unsigned int apqns;	/* total number of queue devices seen */
	unsigned int bound;	/* number of those bound to a driver */
};
0725
0726 static int __ap_calc_helper(struct device *dev, void *arg)
0727 {
0728 struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;
0729
0730 if (is_queue_dev(dev)) {
0731 pctrs->apqns++;
0732 if (dev->driver)
0733 pctrs->bound++;
0734 }
0735
0736 return 0;
0737 }
0738
0739 static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
0740 {
0741 struct __ap_calc_ctrs ctrs;
0742
0743 memset(&ctrs, 0, sizeof(ctrs));
0744 bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper);
0745
0746 *apqns = ctrs.apqns;
0747 *bound = ctrs.bound;
0748 }
0749
0750
0751
0752
0753
0754 static void ap_check_bindings_complete(void)
0755 {
0756 unsigned int apqns, bound;
0757
0758 if (atomic64_read(&ap_scan_bus_count) >= 1) {
0759 ap_calc_bound_apqns(&apqns, &bound);
0760 if (bound == apqns) {
0761 if (!completion_done(&ap_init_apqn_bindings_complete)) {
0762 complete_all(&ap_init_apqn_bindings_complete);
0763 AP_DBF_INFO("%s complete\n", __func__);
0764 }
0765 ap_send_bindings_complete_uevent();
0766 }
0767 }
0768 }
0769
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
/**
 * ap_wait_init_apqn_bindings_complete() - Wait for the initial APQN
 * bindings to complete.
 * @timeout: timeout in jiffies, 0 means wait without timeout
 *
 * Interruptible wait until all initially detected APQNs are bound to
 * device drivers.
 *
 * Returns 0 on success, -EINTR when interrupted, -ETIME on timeout.
 */
int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
{
	long l;

	if (completion_done(&ap_init_apqn_bindings_complete))
		return 0;

	if (timeout)
		l = wait_for_completion_interruptible_timeout(
			&ap_init_apqn_bindings_complete, timeout);
	else
		l = wait_for_completion_interruptible(
			&ap_init_apqn_bindings_complete);
	if (l < 0)
		/* map -ERESTARTSYS to -EINTR for userspace visibility */
		return l == -ERESTARTSYS ? -EINTR : l;
	else if (l == 0 && timeout)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
0801
0802 static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
0803 {
0804 if (is_queue_dev(dev) &&
0805 AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
0806 device_unregister(dev);
0807 return 0;
0808 }
0809
/* bus_for_each_dev() callback: re-probe a queue device if its
 * reserved-for-default-driver state no longer matches the bound
 * driver's default flag (called after apmask/aqmask changes).
 *
 * NOTE(review): dev->driver is dereferenced without a NULL check -
 * presumably every queue device iterated here is bound; confirm
 * against the bus scan/bind logic.
 */
static int __ap_revise_reserved(struct device *dev, void *dummy)
{
	int rc, card, queue, devres, drvres;

	if (is_queue_dev(dev)) {
		card = AP_QID_CARD(to_ap_queue(dev)->qid);
		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
		mutex_lock(&ap_perms_mutex);
		/* devres: APQN is reserved for the default drivers */
		devres = test_bit_inv(card, ap_perms.apm) &&
			test_bit_inv(queue, ap_perms.aqm);
		mutex_unlock(&ap_perms_mutex);
		/* drvres: bound driver is a default driver */
		drvres = to_ap_drv(dev->driver)->flags
			& AP_DRIVER_FLAG_DEFAULT;
		if (!!devres != !!drvres) {
			AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
				   __func__, card, queue);
			rc = device_reprobe(dev);
			if (rc)
				AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
					    __func__, card, queue);
		}
	}

	return 0;
}
0835
/* Revise the driver bindings of all queue devices on the bus after a
 * change to the apmask/aqmask reservations.
 */
static void ap_bus_revise_bindings(void)
{
	bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
0840
0841
0842
0843
0844
0845
0846
0847
0848
0849
0850
0851
0852 int ap_owned_by_def_drv(int card, int queue)
0853 {
0854 int rc = 0;
0855
0856 if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
0857 return -EINVAL;
0858
0859 if (test_bit_inv(card, ap_perms.apm) &&
0860 test_bit_inv(queue, ap_perms.aqm))
0861 rc = 1;
0862
0863 return rc;
0864 }
0865 EXPORT_SYMBOL(ap_owned_by_def_drv);
0866
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879 int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
0880 unsigned long *aqm)
0881 {
0882 int card, queue, rc = 0;
0883
0884 for (card = 0; !rc && card < AP_DEVICES; card++)
0885 if (test_bit_inv(card, apm) &&
0886 test_bit_inv(card, ap_perms.apm))
0887 for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
0888 if (test_bit_inv(queue, aqm) &&
0889 test_bit_inv(queue, ap_perms.aqm))
0890 rc = 1;
0891
0892 return rc;
0893 }
0894 EXPORT_SYMBOL(ap_apqn_in_matrix_owned_by_def_drv);
0895
/* Bus probe callback: bind an AP device to its driver. Queue devices
 * are added to the ap_queues hashtable on success; the device
 * reference taken here is dropped again on any failure path.
 */
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int card, queue, devres, drvres, rc = -ENODEV;

	if (!get_device(dev))
		return rc;

	if (is_queue_dev(dev)) {
		/*
		 * If the apqn is marked as reserved/used by ap bus and
		 * default drivers, only probe with drivers with the default
		 * flag set. If it is not marked, only probe with drivers
		 * with the default flag not set.
		 */
		card = AP_QID_CARD(to_ap_queue(dev)->qid);
		queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
		mutex_lock(&ap_perms_mutex);
		devres = test_bit_inv(card, ap_perms.apm) &&
			test_bit_inv(queue, ap_perms.aqm);
		mutex_unlock(&ap_perms_mutex);
		drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
		if (!!devres != !!drvres)
			goto out;
	}

	/* Add queue/card to list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_add(ap_queues, &to_ap_queue(dev)->hnode,
			 to_ap_queue(dev)->qid);
	spin_unlock_bh(&ap_queues_lock);

	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;

	if (rc) {
		/* Undo the hashtable insert on probe failure. */
		spin_lock_bh(&ap_queues_lock);
		if (is_queue_dev(dev))
			hash_del(&to_ap_queue(dev)->hnode);
		spin_unlock_bh(&ap_queues_lock);
	} else {
		ap_check_bindings_complete();
	}

out:
	if (rc)
		put_device(dev);
	return rc;
}
0946
/* Bus remove callback: unbind an AP device from its driver. For queue
 * devices the queue is prepared for removal before the driver's remove
 * callback runs and finally removed from the ap_queues hashtable; the
 * reference taken in ap_device_probe() is dropped at the end.
 */
static void ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);

	/* prepare ap queue device removal */
	if (is_queue_dev(dev))
		ap_queue_prepare_remove(to_ap_queue(dev));

	/* driver's chance to clean up gracefully */
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);

	/* now do the ap queue device remove */
	if (is_queue_dev(dev))
		ap_queue_remove(to_ap_queue(dev));

	/* Remove queue/card from list of active queues/cards */
	spin_lock_bh(&ap_queues_lock);
	if (is_queue_dev(dev))
		hash_del(&to_ap_queue(dev)->hnode);
	spin_unlock_bh(&ap_queues_lock);

	put_device(dev);
}
0972
0973 struct ap_queue *ap_get_qdev(ap_qid_t qid)
0974 {
0975 int bkt;
0976 struct ap_queue *aq;
0977
0978 spin_lock_bh(&ap_queues_lock);
0979 hash_for_each(ap_queues, bkt, aq, hnode) {
0980 if (aq->qid == qid) {
0981 get_device(&aq->ap_dev.device);
0982 spin_unlock_bh(&ap_queues_lock);
0983 return aq;
0984 }
0985 }
0986 spin_unlock_bh(&ap_queues_lock);
0987
0988 return NULL;
0989 }
0990 EXPORT_SYMBOL(ap_get_qdev);
0991
/* Register an AP device driver with the AP bus.
 * @owner: the driver's owning module, @name: the driver name.
 * Returns the driver_register() result.
 */
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);
1003
/* Unregister an AP device driver from the AP bus. */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
1009
/* Force a synchronous AP bus rescan: cancel the periodic scan timer,
 * queue the scan work and wait for it to finish.
 */
void ap_bus_force_rescan(void)
{
	/* processing a asynchronous bus rescan */
	del_timer(&ap_config_timer);
	queue_work(system_long_wq, &ap_scan_work);
	flush_work(&ap_scan_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);
1018
1019
1020
1021
/*
 * A config change has happened, force an ap bus rescan.
 */
void ap_bus_cfg_chg(void)
{
	AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);

	ap_bus_force_rescan();
}
1028
1029
1030
1031
1032
1033
1034
1035
1036
/*
 * hex2bitmap() - parse a hex mask string and set bits.
 * Valid strings are "0x012345678" with at least one valid hex number.
 * Rest of the bitmap to the right is padded with 0. No spaces allowed
 * within the string, the leading 0x may be omitted.
 * Returns the bitmask with exactly the bits set as given by the hex
 * string (both in big endian order).
 */
static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
{
	int i, n, b;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	/* skip an optional "0x" or bare "x" prefix */
	if (str[0] == '0' && str[1] == 'x')
		str++;
	if (*str == 'x')
		str++;

	for (i = 0; isxdigit(*str) && i < bits; str++) {
		b = hex_to_bin(*str);
		/* each hex digit sets up to 4 bits, MSB first */
		for (n = 0; n < 4; n++)
			if (b & (0x08 >> n))
				set_bit_inv(i + n, bitmap);
		i += 4;
	}

	/* a single trailing newline is tolerated */
	if (*str == '\n')
		str++;
	if (*str)
		return -EINVAL;
	return 0;
}
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
/*
 * modify_bitmap() - parse bitmask argument and modify an existing
 * bit mask accordingly. A concatenation (done with ',') of these
 * terms is recognized: +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
 * <bitnr> may be any valid number (hex, decimal or octal) in the range
 * 0...bits-1; the leading + or - is required. Here are some examples:
 *   +0-15,+32,-128,-0xFF
 *   -0-255,+1-16,+0x128
 *   +1,+2,+3,+4,-5,-7-10
 * Returns the new bitmap after all changes have been applied. Every
 * positive value in the string will set a bit and every negative value
 * in the string will clear a bit. As a bit may be touched more than
 * once, the last 'operation' wins.
 */
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
	int a, i, z;
	char *np, sign;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	while (*str) {
		sign = *str++;
		if (sign != '+' && sign != '-')
			return -EINVAL;
		a = z = simple_strtoul(str, &np, 0);
		if (str == np || a >= bits)
			return -EINVAL;
		str = np;
		/* optional "-<end>" turns a single bit into a range */
		if (*str == '-') {
			z = simple_strtoul(++str, &np, 0);
			if (str == np || a > z || z >= bits)
				return -EINVAL;
			str = np;
		}
		for (i = a; i <= z; i++)
			if (sign == '+')
				set_bit_inv(i, bitmap);
			else
				clear_bit_inv(i, bitmap);
		while (*str == ',' || *str == '\n')
			str++;
	}

	return 0;
}
1116
/* Parse a mask string into @newmap: a string starting with '+' or '-'
 * is a relative modification of the current @bitmap, anything else is
 * an absolute hex mask. @bitmap itself is never modified here.
 */
static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
			       unsigned long *newmap)
{
	unsigned long size = BITS_TO_LONGS(bits) * sizeof(unsigned long);

	if (*str == '+' || *str == '-') {
		/* relative: start from the current mask and modify it */
		memcpy(newmap, bitmap, size);
		return modify_bitmap(str, newmap, bits);
	}

	/* absolute: start from an all-zero mask and set the given bits */
	memset(newmap, 0, size);
	return hex2bitmap(str, newmap, bits);
}
1133
/* Parse a mask string (absolute hex or relative +/- syntax) and, on
 * success, copy the result into @bitmap under the given @lock.
 * Returns 0 on success, -EINVAL on parse errors, -ENOMEM on
 * allocation failure, -ERESTARTSYS if interrupted while locking.
 */
int ap_parse_mask_str(const char *str,
		      unsigned long *bitmap, int bits,
		      struct mutex *lock)
{
	unsigned long *newmap, size;
	int rc;

	/* bits needs to be a multiple of 8 */
	if (bits & 0x07)
		return -EINVAL;

	size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
	newmap = kmalloc(size, GFP_KERNEL);
	if (!newmap)
		return -ENOMEM;
	if (mutex_lock_interruptible(lock)) {
		kfree(newmap);
		return -ERESTARTSYS;
	}
	rc = ap_parse_bitmap_str(str, bitmap, bits, newmap);
	if (rc == 0)
		memcpy(bitmap, newmap, size);
	mutex_unlock(lock);
	kfree(newmap);
	return rc;
}
EXPORT_SYMBOL(ap_parse_mask_str);
1161
1162
1163
1164
1165
/*
 * AP bus attributes.
 */

/* Show the default AP domain index. */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

/* Set a new default AP domain index; the domain must be in range and
 * reserved for the default drivers via ap_perms.aqm.
 */
static ssize_t ap_domain_store(struct bus_type *bus,
			       const char *buf, size_t count)
{
	int domain;

	if (sscanf(buf, "%i\n", &domain) != 1 ||
	    domain < 0 || domain > ap_max_domain_id ||
	    !test_bit_inv(domain, ap_perms.aqm))
		return -EINVAL;

	spin_lock_bh(&ap_domain_lock);
	ap_domain_index = domain;
	spin_unlock_bh(&ap_domain_lock);

	AP_DBF_INFO("%s stored new default domain=%d\n",
		    __func__, domain);

	return count;
}

static BUS_ATTR_RW(ap_domain);
1192
/* Show the control domain mask (adm) from the QCI info, or
 * "not supported" when QCI is unavailable.
 */
static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
	if (!ap_qci_info)	/* QCI not supported */
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			 ap_qci_info->adm[0], ap_qci_info->adm[1],
			 ap_qci_info->adm[2], ap_qci_info->adm[3],
			 ap_qci_info->adm[4], ap_qci_info->adm[5],
			 ap_qci_info->adm[6], ap_qci_info->adm[7]);
}

static BUS_ATTR_RO(ap_control_domain_mask);

/* Show the usage domain mask (aqm) from the QCI info. */
static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
{
	if (!ap_qci_info)	/* QCI not supported */
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			 ap_qci_info->aqm[0], ap_qci_info->aqm[1],
			 ap_qci_info->aqm[2], ap_qci_info->aqm[3],
			 ap_qci_info->aqm[4], ap_qci_info->aqm[5],
			 ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
}

static BUS_ATTR_RO(ap_usage_domain_mask);

/* Show the adapter mask (apm) from the QCI info. */
static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
{
	if (!ap_qci_info)	/* QCI not supported */
		return scnprintf(buf, PAGE_SIZE, "not supported\n");

	return scnprintf(buf, PAGE_SIZE,
			 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
			 ap_qci_info->apm[0], ap_qci_info->apm[1],
			 ap_qci_info->apm[2], ap_qci_info->apm[3],
			 ap_qci_info->apm[4], ap_qci_info->apm[5],
			 ap_qci_info->apm[6], ap_qci_info->apm[7]);
}

static BUS_ATTR_RO(ap_adapter_mask);
1237
/* Show whether the AP bus currently uses interrupts (1) or polling (0). */
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 ap_irq_flag ? 1 : 0);
}

static BUS_ATTR_RO(ap_interrupts);

/* Show the bus rescan interval in seconds. */
static ssize_t config_time_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

/* Set a new bus rescan interval (5..120 seconds) and re-arm the
 * rescan timer accordingly.
 */
static ssize_t config_time_store(struct bus_type *bus,
				 const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	return count;
}

static BUS_ATTR_RW(config_time);
1264
/* Show whether the poll thread is currently running. */
static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

/* Start (non-zero) or stop (zero) the AP bus poll thread. */
static ssize_t poll_thread_store(struct bus_type *bus,
				 const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			count = rc;	/* propagate the start error */
	} else {
		ap_poll_thread_stop();
	}
	return count;
}

static BUS_ATTR_RW(poll_thread);
1288
/* Show the poll timer interval in nanoseconds. */
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

/* Set a new poll timer interval (1 ns .. 120 s) and restart the
 * high resolution poll timer with it.
 */
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = poll_timeout;

	spin_lock_bh(&ap_poll_timer_lock);
	hrtimer_cancel(&ap_poll_timer);
	hrtimer_set_expires(&ap_poll_timer, hr_time);
	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	spin_unlock_bh(&ap_poll_timer_lock);

	return count;
}

static BUS_ATTR_RW(poll_timeout);
1317
/* Show the maximum AP domain id known to the bus. */
static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_domain_id);
}

static BUS_ATTR_RO(ap_max_domain_id);

/* Show the maximum AP adapter id known to the bus. */
static ssize_t ap_max_adapter_id_show(struct bus_type *bus, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", ap_max_adapter_id);
}

static BUS_ATTR_RO(ap_max_adapter_id);
1331
/* Show the adapter mask reserved for the default drivers. */
static ssize_t apmask_show(struct bus_type *bus, char *buf)
{
	int rc;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;
	rc = scnprintf(buf, PAGE_SIZE,
		       "0x%016lx%016lx%016lx%016lx\n",
		       ap_perms.apm[0], ap_perms.apm[1],
		       ap_perms.apm[2], ap_perms.apm[3]);
	mutex_unlock(&ap_perms_mutex);

	return rc;
}
1346
/* bus_for_each_drv() callback: ask an AP driver whether it currently
 * uses any of the adapters about to be released from the default
 * driver reservation (@data). Returns -EBUSY if so, 0 otherwise.
 */
static int __verify_card_reservations(struct device_driver *drv, void *data)
{
	int rc = 0;
	struct ap_driver *ap_drv = to_ap_drv(drv);
	unsigned long *newapm = (unsigned long *)data;

	/*
	 * Pin the driver's module so its in_use() callback stays valid;
	 * if the module is unloading right now there are no reservations
	 * to verify.
	 */
	if (!try_module_get(drv->owner))
		return 0;

	if (ap_drv->in_use) {
		rc = ap_drv->in_use(newapm, ap_perms.aqm);
		if (rc)
			rc = -EBUSY;
	}

	/* release the driver's module */
	module_put(drv->owner);

	return rc;
}
1371
/* Commit a new adapter mask: if the change would remove adapters from
 * the default driver reservation, first verify with all registered AP
 * drivers that none of those adapters is in use. Caller holds
 * ap_perms_mutex. Returns 0 on success, -EBUSY if in use.
 */
static int apmask_commit(unsigned long *newapm)
{
	int rc;
	unsigned long reserved[BITS_TO_LONGS(AP_DEVICES)];

	/*
	 * Check if any bits in the apmask have been set which will
	 * result in queues being removed from non-default drivers
	 */
	if (bitmap_andnot(reserved, newapm, ap_perms.apm, AP_DEVICES)) {
		rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
				      __verify_card_reservations);
		if (rc)
			return rc;
	}

	memcpy(ap_perms.apm, newapm, APMASKSIZE);

	return 0;
}
1392
/* Parse and apply a new adapter mask; on an actual change the driver
 * bindings are revised and an APMASK uevent is sent.
 */
static ssize_t apmask_store(struct bus_type *bus, const char *buf,
			    size_t count)
{
	int rc, changes = 0;
	DECLARE_BITMAP(newapm, AP_DEVICES);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
	if (rc)
		goto done;

	changes = memcmp(ap_perms.apm, newapm, APMASKSIZE);
	if (changes)
		rc = apmask_commit(newapm);

done:
	mutex_unlock(&ap_perms_mutex);
	if (rc)
		return rc;

	/* rebinding and uevent happen outside the ap_perms_mutex */
	if (changes) {
		ap_bus_revise_bindings();
		ap_send_mask_changed_uevent(newapm, NULL);
	}

	return count;
}

static BUS_ATTR_RW(apmask);
1424
1425 static ssize_t aqmask_show(struct bus_type *bus, char *buf)
1426 {
1427 int rc;
1428
1429 if (mutex_lock_interruptible(&ap_perms_mutex))
1430 return -ERESTARTSYS;
1431 rc = scnprintf(buf, PAGE_SIZE,
1432 "0x%016lx%016lx%016lx%016lx\n",
1433 ap_perms.aqm[0], ap_perms.aqm[1],
1434 ap_perms.aqm[2], ap_perms.aqm[3]);
1435 mutex_unlock(&ap_perms_mutex);
1436
1437 return rc;
1438 }
1439
1440 static int __verify_queue_reservations(struct device_driver *drv, void *data)
1441 {
1442 int rc = 0;
1443 struct ap_driver *ap_drv = to_ap_drv(drv);
1444 unsigned long *newaqm = (unsigned long *)data;
1445
1446
1447
1448
1449
1450 if (!try_module_get(drv->owner))
1451 return 0;
1452
1453 if (ap_drv->in_use) {
1454 rc = ap_drv->in_use(ap_perms.apm, newaqm);
1455 if (rc)
1456 rc = -EBUSY;
1457 }
1458
1459
1460 module_put(drv->owner);
1461
1462 return rc;
1463 }
1464
1465 static int aqmask_commit(unsigned long *newaqm)
1466 {
1467 int rc;
1468 unsigned long reserved[BITS_TO_LONGS(AP_DOMAINS)];
1469
1470
1471
1472
1473
1474 if (bitmap_andnot(reserved, newaqm, ap_perms.aqm, AP_DOMAINS)) {
1475 rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
1476 __verify_queue_reservations);
1477 if (rc)
1478 return rc;
1479 }
1480
1481 memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);
1482
1483 return 0;
1484 }
1485
1486 static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
1487 size_t count)
1488 {
1489 int rc, changes = 0;
1490 DECLARE_BITMAP(newaqm, AP_DOMAINS);
1491
1492 if (mutex_lock_interruptible(&ap_perms_mutex))
1493 return -ERESTARTSYS;
1494
1495 rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
1496 if (rc)
1497 goto done;
1498
1499 changes = memcmp(ap_perms.aqm, newaqm, APMASKSIZE);
1500 if (changes)
1501 rc = aqmask_commit(newaqm);
1502
1503 done:
1504 mutex_unlock(&ap_perms_mutex);
1505 if (rc)
1506 return rc;
1507
1508 if (changes) {
1509 ap_bus_revise_bindings();
1510 ap_send_mask_changed_uevent(NULL, newaqm);
1511 }
1512
1513 return count;
1514 }
1515
1516 static BUS_ATTR_RW(aqmask);
1517
1518 static ssize_t scans_show(struct bus_type *bus, char *buf)
1519 {
1520 return scnprintf(buf, PAGE_SIZE, "%llu\n",
1521 atomic64_read(&ap_scan_bus_count));
1522 }
1523
1524 static ssize_t scans_store(struct bus_type *bus, const char *buf,
1525 size_t count)
1526 {
1527 AP_DBF_INFO("%s force AP bus rescan\n", __func__);
1528
1529 ap_bus_force_rescan();
1530
1531 return count;
1532 }
1533
1534 static BUS_ATTR_RW(scans);
1535
1536 static ssize_t bindings_show(struct bus_type *bus, char *buf)
1537 {
1538 int rc;
1539 unsigned int apqns, n;
1540
1541 ap_calc_bound_apqns(&apqns, &n);
1542 if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
1543 rc = scnprintf(buf, PAGE_SIZE, "%u/%u (complete)\n", n, apqns);
1544 else
1545 rc = scnprintf(buf, PAGE_SIZE, "%u/%u\n", n, apqns);
1546
1547 return rc;
1548 }
1549
1550 static BUS_ATTR_RO(bindings);
1551
/* Sysfs attributes published on the AP bus itself (/sys/bus/ap/). */
static struct attribute *ap_bus_attrs[] = {
	&bus_attr_ap_domain.attr,
	&bus_attr_ap_control_domain_mask.attr,
	&bus_attr_ap_usage_domain_mask.attr,
	&bus_attr_ap_adapter_mask.attr,
	&bus_attr_config_time.attr,
	&bus_attr_poll_thread.attr,
	&bus_attr_ap_interrupts.attr,
	&bus_attr_poll_timeout.attr,
	&bus_attr_ap_max_domain_id.attr,
	&bus_attr_ap_max_adapter_id.attr,
	&bus_attr_apmask.attr,
	&bus_attr_aqmask.attr,
	&bus_attr_scans.attr,
	&bus_attr_bindings.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ap_bus);
1570
/* The AP bus type: match/probe/remove and uevent hooks for AP devices. */
static struct bus_type ap_bus_type = {
	.name = "ap",
	.bus_groups = ap_bus_groups,
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.probe = ap_device_probe,
	.remove = ap_device_remove,
};
1579
1580
1581
1582
1583
/*
 * Pick a default AP domain if none has been set yet (ap_domain_index
 * < 0). Scans usage domains permitted by ap_perms.aqm and selects the
 * first one for which at least one permitted, configured adapter
 * answers TAPQ with AP_RESPONSE_NORMAL.
 */
static void ap_select_domain(void)
{
	struct ap_queue_status status;
	int card, dom;

	/* ap_domain_index is protected by ap_domain_lock. */
	spin_lock_bh(&ap_domain_lock);
	if (ap_domain_index >= 0) {
		/* Domain already chosen (or forced via module parameter). */
		goto out;
	}
	for (dom = 0; dom <= ap_max_domain_id; dom++) {
		if (!ap_test_config_usage_domain(dom) ||
		    !test_bit_inv(dom, ap_perms.aqm))
			continue;
		for (card = 0; card <= ap_max_adapter_id; card++) {
			if (!ap_test_config_card_id(card) ||
			    !test_bit_inv(card, ap_perms.apm))
				continue;
			status = ap_test_queue(AP_MKQID(card, dom),
					       ap_apft_available(),
					       NULL);
			if (status.response_code == AP_RESPONSE_NORMAL)
				break;
		}
		/* Inner loop broke early => a working adapter was found. */
		if (card <= ap_max_adapter_id)
			break;
	}
	if (dom <= ap_max_domain_id) {
		ap_domain_index = dom;
		AP_DBF_INFO("%s new default domain is %d\n",
			    __func__, ap_domain_index);
	}
out:
	spin_unlock_bh(&ap_domain_lock);
}
1624
1625
1626
1627
1628
1629
/*
 * Map a raw hardware device type to a type this kernel knows about.
 * Types up to AP_DEVICE_TYPE_CEX8 are returned as-is; for newer
 * (unknown) types the QACT instruction is asked for a compatible type
 * in the CEX2A..CEX8 range. Returns 0 if no usable type can be found.
 */
static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
{
	int comp_type = 0;

	/* Types below CEX2A are not supported at all. */
	if (rawtype < AP_DEVICE_TYPE_CEX2A) {
		AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
			    __func__, AP_QID_CARD(qid),
			    AP_QID_QUEUE(qid), rawtype);
		return 0;
	}
	/* Up to CEX8 the raw type is directly usable. */
	if (rawtype <= AP_DEVICE_TYPE_CEX8)
		return rawtype;
	/*
	 * Unknown newer type: ask QACT (if available) for a compatible
	 * type, requesting CEX8 compatibility.
	 */
	if (ap_qact_available()) {
		struct ap_queue_status status;
		union ap_qact_ap_info apinfo = {0};

		apinfo.mode = (func >> 26) & 0x07;
		apinfo.cat = AP_DEVICE_TYPE_CEX8;
		status = ap_qact(qid, 0, &apinfo);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    apinfo.cat >= AP_DEVICE_TYPE_CEX2A &&
		    apinfo.cat <= AP_DEVICE_TYPE_CEX8)
			comp_type = apinfo.cat;
	}
	if (!comp_type)
		AP_DBF_WARN("%s queue=%02x.%04x unable to map type %d\n",
			    __func__, AP_QID_CARD(qid),
			    AP_QID_QUEUE(qid), rawtype);
	else if (comp_type != rawtype)
		AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
			    __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
			    rawtype, comp_type);
	return comp_type;
}
1671
1672
1673
1674
1675
1676 static int __match_card_device_with_id(struct device *dev, const void *data)
1677 {
1678 return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data;
1679 }
1680
1681
1682
1683
1684
1685 static int __match_queue_device_with_qid(struct device *dev, const void *data)
1686 {
1687 return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
1688 }
1689
1690
1691
1692
1693
1694 static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
1695 {
1696 return is_queue_dev(dev) &&
1697 AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
1698 }
1699
1700
1701 static int __drv_notify_config_changed(struct device_driver *drv, void *data)
1702 {
1703 struct ap_driver *ap_drv = to_ap_drv(drv);
1704
1705 if (try_module_get(drv->owner)) {
1706 if (ap_drv->on_config_changed)
1707 ap_drv->on_config_changed(ap_qci_info, ap_qci_info_old);
1708 module_put(drv->owner);
1709 }
1710
1711 return 0;
1712 }
1713
1714
/* Notify all registered AP bus drivers about a QCI config change. */
static inline void notify_config_changed(void)
{
	bus_for_each_drv(&ap_bus_type, NULL, NULL,
			 __drv_notify_config_changed);
}
1720
1721
1722 static int __drv_notify_scan_complete(struct device_driver *drv, void *data)
1723 {
1724 struct ap_driver *ap_drv = to_ap_drv(drv);
1725
1726 if (try_module_get(drv->owner)) {
1727 if (ap_drv->on_scan_complete)
1728 ap_drv->on_scan_complete(ap_qci_info,
1729 ap_qci_info_old);
1730 module_put(drv->owner);
1731 }
1732
1733 return 0;
1734 }
1735
1736
/* Notify all registered AP bus drivers that a bus scan has finished. */
static inline void notify_scan_complete(void)
{
	bus_for_each_drv(&ap_bus_type, NULL, NULL,
			 __drv_notify_scan_complete);
}
1742
1743
1744
1745
1746
/*
 * Unregister all queue devices belonging to card @ac, then unregister
 * the card device itself. Used during bus scan when a card vanished.
 */
static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
{
	bus_for_each_dev(&ap_bus_type, NULL,
			 (void *)(long)ac->id,
			 __ap_queue_devices_with_id_unregister);
	device_unregister(&ac->ap_dev.device);
}
1754
1755
1756
1757
1758
1759
/*
 * Scan all domains of one AP card: create queue devices for newly
 * appeared APQNs, remove devices for vanished ones, and track each
 * queue's checkstop and config (decfg) state transitions.
 *
 * Reference handling: bus_find_device() returns a held reference which
 * is dropped at put_dev_and_continue (or via put_device() right after
 * device_unregister()); newly created devices get an extra get_device()
 * so the reference drop at the loop end leaves them alive.
 */
static inline void ap_scan_domains(struct ap_card *ac)
{
	bool decfg, chkstop;
	ap_qid_t qid;
	unsigned int func;
	struct device *dev;
	struct ap_queue *aq;
	int rc, dom, depth, type, ml;

	/*
	 * Walk every possible domain and compare the hardware state of
	 * APQN (ac->id, dom) against the existing queue device, if any.
	 */
	for (dom = 0; dom <= ap_max_domain_id; dom++) {
		qid = AP_MKQID(ac->id, dom);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(long)qid,
				      __match_queue_device_with_qid);
		aq = dev ? to_ap_queue(dev) : NULL;
		if (!ap_test_config_usage_domain(dom)) {
			/* Domain dropped from the config: remove its device. */
			if (dev) {
				AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
					    __func__, ac->id, dom);
				device_unregister(dev);
				put_device(dev);
			}
			continue;
		}

		/* Query the APQN; on failure remove a stale queue device. */
		if (!ap_queue_info(qid, &type, &func, &depth,
				   &ml, &decfg, &chkstop)) {
			if (aq) {
				AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
					    __func__, ac->id, dom);
				device_unregister(dev);
				put_device(dev);
			}
			continue;
		}

		/* APQN exists but has no device yet: create and register one. */
		if (!aq) {
			aq = ap_queue_create(qid, ac->ap_dev.device_type);
			if (!aq) {
				AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
					    __func__, ac->id, dom);
				continue;
			}
			aq->card = ac;
			aq->config = !decfg;
			aq->chkstop = chkstop;
			dev = &aq->ap_dev.device;
			dev->bus = &ap_bus_type;
			dev->parent = &ac->ap_dev.device;
			dev_set_name(dev, "%02x.%04x", ac->id, dom);
			/* Register the device; failure releases it via refcount. */
			rc = device_register(dev);
			if (rc) {
				AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
					    __func__, ac->id, dom);
				goto put_dev_and_continue;
			}
			/* Extra ref so put_dev_and_continue keeps the device. */
			get_device(dev);
			if (decfg)
				AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
					    __func__, ac->id, dom);
			else if (chkstop)
				AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
					    __func__, ac->id, dom);
			else
				AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
					    __func__, ac->id, dom);
			goto put_dev_and_continue;
		}

		/* Existing device: reconcile checkstop and config state. */
		spin_lock_bh(&aq->lock);
		if (chkstop && !aq->chkstop) {
			/* Checkstop turned on: error out the queue state. */
			aq->chkstop = true;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_ERROR;
				aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
			}
			spin_unlock_bh(&aq->lock);
			AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
				   __func__, ac->id, dom);
			/* Flush outside the lock; it takes aq->lock itself. */
			ap_flush_queue(aq);
			goto put_dev_and_continue;
		} else if (!chkstop && aq->chkstop) {
			/* Checkstop cleared: restart the queue state machine. */
			aq->chkstop = false;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_OPERATING;
				aq->sm_state = AP_SM_STATE_RESET_START;
			}
			spin_unlock_bh(&aq->lock);
			AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
				   __func__, ac->id, dom);
			goto put_dev_and_continue;
		}
		/* Config state changes are reported via uevent. */
		if (decfg && aq->config) {
			/* Queue deconfigured: error out and flush. */
			aq->config = false;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_ERROR;
				aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
			}
			spin_unlock_bh(&aq->lock);
			AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
				   __func__, ac->id, dom);
			ap_send_config_uevent(&aq->ap_dev, aq->config);
			/* Flush outside the lock; it takes aq->lock itself. */
			ap_flush_queue(aq);
			goto put_dev_and_continue;
		} else if (!decfg && !aq->config) {
			/* Queue (re)configured: restart the state machine. */
			aq->config = true;
			if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
				aq->dev_state = AP_DEV_STATE_OPERATING;
				aq->sm_state = AP_SM_STATE_RESET_START;
			}
			spin_unlock_bh(&aq->lock);
			AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
				   __func__, ac->id, dom);
			ap_send_config_uevent(&aq->ap_dev, aq->config);
			goto put_dev_and_continue;
		}
		/* Configured queue stuck in error state: force a reinit. */
		if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
			spin_unlock_bh(&aq->lock);
			/* Flush and reinit outside the lock. */
			ap_flush_queue(aq);
			/* re-init (with reset) the queue device */
			ap_queue_init_state(aq);
			AP_DBF_INFO("%s(%d,%d) queue dev reinit enforced\n",
				    __func__, ac->id, dom);
			goto put_dev_and_continue;
		}
		spin_unlock_bh(&aq->lock);
put_dev_and_continue:
		put_device(dev);
	}
}
1908
1909
1910
1911
1912
1913 static inline void ap_scan_adapter(int ap)
1914 {
1915 bool decfg, chkstop;
1916 ap_qid_t qid;
1917 unsigned int func;
1918 struct device *dev;
1919 struct ap_card *ac;
1920 int rc, dom, depth, type, comp_type, ml;
1921
1922
1923 dev = bus_find_device(&ap_bus_type, NULL,
1924 (void *)(long)ap,
1925 __match_card_device_with_id);
1926 ac = dev ? to_ap_card(dev) : NULL;
1927
1928
1929 if (!ap_test_config_card_id(ap)) {
1930 if (ac) {
1931 AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devs\n",
1932 __func__, ap);
1933 ap_scan_rm_card_dev_and_queue_devs(ac);
1934 put_device(dev);
1935 }
1936 return;
1937 }
1938
1939
1940
1941
1942
1943
1944
1945
1946 for (dom = 0; dom <= ap_max_domain_id; dom++)
1947 if (ap_test_config_usage_domain(dom)) {
1948 qid = AP_MKQID(ap, dom);
1949 if (ap_queue_info(qid, &type, &func, &depth,
1950 &ml, &decfg, &chkstop))
1951 break;
1952 }
1953 if (dom > ap_max_domain_id) {
1954
1955 if (ac) {
1956 AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
1957 __func__, ap);
1958 ap_scan_rm_card_dev_and_queue_devs(ac);
1959 put_device(dev);
1960 } else {
1961 AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
1962 __func__, ap);
1963 }
1964 return;
1965 }
1966 if (!type) {
1967
1968 if (ac) {
1969 AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
1970 __func__, ap);
1971 ap_scan_rm_card_dev_and_queue_devs(ac);
1972 put_device(dev);
1973 } else {
1974 AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
1975 __func__, ap);
1976 }
1977 return;
1978 }
1979
1980 if (ac) {
1981
1982 if (ac->raw_hwtype != type) {
1983 AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
1984 __func__, ap, type);
1985 ap_scan_rm_card_dev_and_queue_devs(ac);
1986 put_device(dev);
1987 ac = NULL;
1988 } else if (ac->functions != func) {
1989 AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
1990 __func__, ap, type);
1991 ap_scan_rm_card_dev_and_queue_devs(ac);
1992 put_device(dev);
1993 ac = NULL;
1994 } else {
1995
1996 if (chkstop && !ac->chkstop) {
1997
1998 ac->chkstop = true;
1999 AP_DBF_INFO("%s(%d) card dev checkstop on\n",
2000 __func__, ap);
2001 } else if (!chkstop && ac->chkstop) {
2002
2003 ac->chkstop = false;
2004 AP_DBF_INFO("%s(%d) card dev checkstop off\n",
2005 __func__, ap);
2006 }
2007
2008 if (decfg && ac->config) {
2009 ac->config = false;
2010 AP_DBF_INFO("%s(%d) card dev config off\n",
2011 __func__, ap);
2012 ap_send_config_uevent(&ac->ap_dev, ac->config);
2013 } else if (!decfg && !ac->config) {
2014 ac->config = true;
2015 AP_DBF_INFO("%s(%d) card dev config on\n",
2016 __func__, ap);
2017 ap_send_config_uevent(&ac->ap_dev, ac->config);
2018 }
2019 }
2020 }
2021
2022 if (!ac) {
2023
2024 comp_type = ap_get_compatible_type(qid, type, func);
2025 if (!comp_type) {
2026 AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
2027 __func__, ap, type);
2028 return;
2029 }
2030 ac = ap_card_create(ap, depth, type, comp_type, func, ml);
2031 if (!ac) {
2032 AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
2033 __func__, ap);
2034 return;
2035 }
2036 ac->config = !decfg;
2037 ac->chkstop = chkstop;
2038 dev = &ac->ap_dev.device;
2039 dev->bus = &ap_bus_type;
2040 dev->parent = ap_root_device;
2041 dev_set_name(dev, "card%02x", ap);
2042
2043 if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
2044 atomic_set(&ap_max_msg_size, ac->maxmsgsize);
2045 AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n",
2046 __func__, ap,
2047 atomic_read(&ap_max_msg_size));
2048 }
2049
2050 rc = device_register(dev);
2051 if (rc) {
2052 AP_DBF_WARN("%s(%d) device_register() failed\n",
2053 __func__, ap);
2054 put_device(dev);
2055 return;
2056 }
2057
2058 get_device(dev);
2059 if (decfg)
2060 AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
2061 __func__, ap, type, func);
2062 else if (chkstop)
2063 AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
2064 __func__, ap, type, func);
2065 else
2066 AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
2067 __func__, ap, type, func);
2068 }
2069
2070
2071 ap_scan_domains(ac);
2072
2073
2074 put_device(&ac->ap_dev.device);
2075 }
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087 static bool ap_get_configuration(void)
2088 {
2089 if (!ap_qci_info)
2090 return false;
2091
2092 memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
2093 ap_fetch_qci_info(ap_qci_info);
2094
2095 return memcmp(ap_qci_info, ap_qci_info_old,
2096 sizeof(struct ap_config_info)) != 0;
2097 }
2098
2099
2100
2101
2102
2103
/*
 * The AP bus scan worker: refresh QCI info (notifying drivers on
 * change), pick a default domain if needed, scan every adapter, and
 * re-arm the periodic scan timer.
 */
static void ap_scan_bus(struct work_struct *unused)
{
	int ap, config_changed = 0;

	/* config change notify */
	config_changed = ap_get_configuration();
	if (config_changed)
		notify_config_changed();
	ap_select_domain();

	AP_DBF_DBG("%s running\n", __func__);

	/* loop over all possible adapters */
	for (ap = 0; ap <= ap_max_adapter_id; ap++)
		ap_scan_adapter(ap);

	/* scan complete notify */
	if (config_changed)
		notify_scan_complete();

	/* Check if the default domain is still present after the scan. */
	if (ap_domain_index >= 0) {
		struct device *dev =
			bus_find_device(&ap_bus_type, NULL,
					(void *)(long)ap_domain_index,
					__match_queue_device_with_queue_id);
		if (dev)
			put_device(dev);
		else
			AP_DBF_INFO("%s no queue device with default domain %d available\n",
				    __func__, ap_domain_index);
	}

	/* First completed scan: announce it via uevent. */
	if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
		AP_DBF_DBG("%s init scan complete\n", __func__);
		ap_send_init_scan_done_uevent();
		ap_check_bindings_complete();
	}

	/* Re-arm the periodic rescan timer. */
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
}
2145
/* Periodic config timer: defer the actual bus scan to a workqueue. */
static void ap_config_timeout(struct timer_list *unused)
{
	queue_work(system_long_wq, &ap_scan_work);
}
2150
/*
 * Register the "ap" s390 debug feature (sprintf view, level DBF_ERR).
 * NOTE(review): debug_register() result is not checked for NULL here —
 * presumably the debug API tolerates a NULL handle; verify if changed.
 */
static int __init ap_debug_init(void)
{
	ap_dbf_info = debug_register("ap", 2, 1,
				     DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(ap_dbf_info, &debug_sprintf_view);
	debug_set_level(ap_dbf_info, DBF_ERR);

	return 0;
}
2160
/*
 * Initialize the ap_perms masks: everything enabled by default, then
 * narrowed by the apmask/aqmask module parameters if given.
 */
static void __init ap_perms_init(void)
{
	/* Default: all ioctls, adapters and domains are allowed. */
	memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
	memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
	memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));

	/* apm kernel parameter string */
	if (apm_str) {
		memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
		/* NOTE(review): parse errors are silently ignored here. */
		ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
				  &ap_perms_mutex);
	}

	/* aqm kernel parameter string */
	if (aqm_str) {
		memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
		/* NOTE(review): parse errors are silently ignored here. */
		ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
				  &ap_perms_mutex);
	}
}
2182
2183
2184
2185
2186
2187
/*
 * AP bus module initialization: set up debugging, permission masks and
 * QCI info, register the bus, root device and timers, optionally start
 * the poll thread, then kick off the initial bus scan. Error paths
 * unwind in reverse order of setup.
 */
static int __init ap_module_init(void)
{
	int rc;

	rc = ap_debug_init();
	if (rc)
		return rc;

	/* Without AP instructions there is nothing to drive. */
	if (!ap_instructions_available()) {
		pr_warn("The hardware system does not support AP instructions\n");
		return -ENODEV;
	}

	/* init ap_queue hashtable */
	hash_init(ap_queues);

	/* set up the ap permissions (ioctls, ap and aq masks) */
	ap_perms_init();

	/* Get AP configuration data if available */
	ap_init_qci_info();

	/* Validate the ap_domain_index module parameter. */
	if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
	    (ap_domain_index >= 0 &&
	     !test_bit_inv(ap_domain_index, ap_perms.aqm))) {
		pr_warn("%d is not a valid cryptographic domain\n",
			ap_domain_index);
		ap_domain_index = -1;
	}

	/* enable interrupts if available */
	if (ap_interrupts_available() && ap_useirq) {
		rc = register_adapter_interrupt(&ap_airq);
		ap_irq_flag = (rc == 0);
	}

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = PTR_ERR_OR_ZERO(ap_root_device);
	if (rc)
		goto out_bus;
	ap_root_device->bus = &ap_bus_type;

	/* Setup the AP bus rescan timer. */
	timer_setup(&ap_config_timer, ap_config_timeout, 0);

	/*
	 * Setup the high resolution poll timer; under z/VM a longer
	 * default poll timeout is used.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	/* Trigger the initial bus scan. */
	queue_work(system_long_wq, &ap_scan_work);

	return 0;

out_work:
	hrtimer_cancel(&ap_poll_timer);
	root_device_unregister(ap_root_device);
out_bus:
	bus_unregister(&ap_bus_type);
out:
	if (ap_irq_flag)
		unregister_adapter_interrupt(&ap_airq);
	kfree(ap_qci_info);
	return rc;
}
2271 device_initcall(ap_module_init);