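// SPDX-License-Identifier: GPL-2.0+
/*
 * Adjunct processor matrix VFIO device driver callbacks.
 */
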
#include <linux/string.h>
#include <linux/vfio.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>

#include "vfio_ap_private.h"
#include "vfio_ap_debug.h"

#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

#define AP_QUEUE_ASSIGNED	"assigned"
#define AP_QUEUE_UNASSIGNED	"unassigned"
#define AP_QUEUE_IN_USE		"in use"

static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry);
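
/**
 * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
 *			     KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
 *			       guest's APCB.
 * 2. kvm->lock:	       required to update a guest's APCB.
 * 3. matrix_dev->mdevs_lock:  required to access data stored in a matrix_mdev.
 *
 * Note: If @kvm is NULL, the KVM lock will not be taken.
 */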
static inline void get_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (kvm)
		mutex_lock(&kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}
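
/**
 * release_update_locks_for_kvm: Release the locks used to dynamically update a
 *				 KVM guest's APCB in the proper order.
 *
 * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @kvm is NULL, the KVM lock will not be released.
 */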
static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (kvm)
		mutex_unlock(&kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}
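
/**
 * get_update_locks_for_mdev: Acquire the locks required to dynamically update
 *			      a KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
 *		 configuration data to use to update a KVM guest's APCB.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock
 * 2. matrix_mdev->kvm->lock (if the mdev is attached to a KVM guest)
 * 3. matrix_dev->mdevs_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be taken.
 */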
static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}
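
/**
 * release_update_locks_for_mdev: Release the locks used to dynamically update
 *				  a KVM guest's APCB in the proper order.
 *
 * @matrix_mdev: a pointer to the matrix mdev object.
 *
 * The proper unlocking order is:
 * 1. matrix_dev->mdevs_lock
 * 2. matrix_mdev->kvm->lock
 * 3. matrix_dev->guests_lock
 *
 * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
 *	 lock will not be released.
 */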
static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	mutex_unlock(&matrix_dev->mdevs_lock);
	if (matrix_mdev && matrix_mdev->kvm)
		mutex_unlock(&matrix_mdev->kvm->lock);
	mutex_unlock(&matrix_dev->guests_lock);
}
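
/**
 * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
 *			     acquire the locks required to update the APCB of
 *			     the KVM guest to which the mdev is attached.
 *
 * @apqn: the APQN of a queue device.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock
 * 2. matrix_mdev->kvm->lock (if the APQN is assigned to a matrix_mdev that is
 *			      attached to a KVM guest)
 * 3. matrix_dev->mdevs_lock
 *
 * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if
 *	   @apqn is not assigned to an ap_matrix_mdev.
 */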
static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
		    test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
			if (matrix_mdev->kvm)
				mutex_lock(&matrix_mdev->kvm->lock);

			mutex_lock(&matrix_dev->mdevs_lock);

			return matrix_mdev;
		}
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	return NULL;
}
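
/**
 * get_update_locks_for_queue: Acquire the locks required to update the APCB of
 *			       the KVM guest to which the matrix mdev linked to
 *			       a vfio_ap_queue object is attached.
 *
 * @q: a pointer to a vfio_ap_queue object.
 *
 * The proper locking order is:
 * 1. matrix_dev->guests_lock
 * 2. q->matrix_mdev->kvm->lock (if the mdev is attached to a KVM guest)
 * 3. matrix_dev->mdevs_lock
 */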
static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
	mutex_lock(&matrix_dev->guests_lock);
	if (q->matrix_mdev && q->matrix_mdev->kvm)
		mutex_lock(&q->matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);
}
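
/**
 * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
 *			    hash table of queues assigned to a matrix mdev
 * @matrix_mdev: the matrix mdev
 * @apqn: The APQN of a queue device
 *
 * Return: the pointer to the vfio_ap_queue struct representing the queue or
 *	   NULL if the queue is not assigned to @matrix_mdev
 */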
static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
					struct ap_matrix_mdev *matrix_mdev,
					int apqn)
{
	struct vfio_ap_queue *q;

	hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
			       apqn) {
		if (q && q->apqn == apqn)
			return q;
	}

	return NULL;
}
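
/**
 * vfio_ap_wait_for_irqclear - wait for the IRQ bit of a queue to clear
 * @apqn: The AP Queue number
 *
 * Checks the IRQ bit for the status of this APQN using ap_tapq.
 * Returns if the ap_tapq function succeeded and the IRQ bit is clear.
 * Returns if the ap_tapq function failed with invalid, deconfigured or
 * checkstopped AP.
 * Otherwise retries up to 5 times after waiting 20ms.
 */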
static void vfio_ap_wait_for_irqclear(int apqn)
{
	struct ap_queue_status status;
	int retry = 5;

	do {
		status = ap_tapq(apqn, NULL);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			if (!status.irq_enabled)
				return;
			fallthrough;
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		default:
			WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
				  status.response_code, apqn);
			return;
		}
	} while (--retry);

	WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
		  __func__, status.response_code, apqn);
}
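
/**
 * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
 * @q: The vfio_ap_queue
 *
 * Unregisters the ISC in the GIB when the saved ISC is not invalid.
 * Unpins the guest's page holding the NIB when it exists.
 * Resets the saved_iova and saved_isc to invalid values.
 */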
static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
	if (!q)
		return;
	if (q->saved_isc != VFIO_AP_ISC_INVALID &&
	    !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
		kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
		q->saved_isc = VFIO_AP_ISC_INVALID;
	}
	if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
		vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
		q->saved_iova = 0;
	}
}
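
/**
 * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
 * @q: The vfio_ap_queue
 *
 * Uses ap_aqic to disable the interruption and, in case of success, reset in
 * progress or IRQ disable command already proceeded, calls
 * vfio_ap_wait_for_irqclear() to check for the IRQ bit to be clear and calls
 * vfio_ap_free_aqic_resources() to free the resources associated with the AP
 * interrupt handling.
 *
 * In the case the AP is busy, or a reset is in progress, retries after 20ms,
 * up to 5 times.
 *
 * Returns if the ap_aqic function failed with invalid, deconfigured or
 * checkstopped AP.
 *
 * Return: &struct ap_queue_status
 */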
static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
{
	struct ap_qirq_ctrl aqic_gisa = {};
	struct ap_queue_status status;
	int retries = 5;

	do {
		status = ap_aqic(q->apqn, aqic_gisa, 0);
		switch (status.response_code) {
		case AP_RESPONSE_OTHERWISE_CHANGED:
		case AP_RESPONSE_NORMAL:
			vfio_ap_wait_for_irqclear(q->apqn);
			goto end_free;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			msleep(20);
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
		default:
			/* All cases in default mean AP not operational */
			WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
				  status.response_code);
			goto end_free;
		}
	} while (retries--);

	WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
		  status.response_code);
end_free:
	vfio_ap_free_aqic_resources(q);
	return status;
}
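
/**
 * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
 *
 * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
 * @nib: the location for storing the nib address.
 *
 * When the PQAP(AQIC) instruction is executed, general register 2 contains the
 * address of the notification indicator byte (nib) used for IRQ notification.
 * This function parses and validates the nib from gr2.
 *
 * Return: 0 if the nib address is valid; otherwise, -EINVAL.
 */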
static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
	*nib = vcpu->run->s.regs.gprs[2];

	if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
		return -EINVAL;

	return 0;
}
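
/**
 * vfio_ap_irq_enable - Enable Interruption for an APQN
 *
 * @q:	  the vfio_ap_queue holding AQIC parameters
 * @isc:  the guest ISC to register with the GIB interface
 * @vcpu: the vcpu object containing the registers specifying the parameters
 *	  passed to the PQAP(AQIC) instruction.
 *
 * Pin the NIB saved in *q, register the guest ISC with the GIB interface and
 * retrieve the host ISC to issue the host-side PQAP(AQIC).
 *
 * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case the
 * NIB validation or vfio_pin_pages() failed.
 *
 * Otherwise return the ap_queue_status returned by ap_aqic(); all retry
 * handling will be done by the guest.
 *
 * Return: &struct ap_queue_status
 */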
static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
						 int isc,
						 struct kvm_vcpu *vcpu)
{
	struct ap_qirq_ctrl aqic_gisa = {};
	struct ap_queue_status status = {};
	struct kvm_s390_gisa *gisa;
	struct page *h_page;
	int nisc;
	struct kvm *kvm;
	phys_addr_t h_nib;
	dma_addr_t nib;
	int ret;

	/* Verify that the notification indicator byte address is valid */
	if (vfio_ap_validate_nib(vcpu, &nib)) {
		VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
				 __func__, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
			     IOMMU_READ | IOMMU_WRITE, &h_page);
	switch (ret) {
	case 1:
		break;
	default:
		VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
				 "nib=%pad, apqn=%#04x\n",
				 __func__, ret, &nib, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_ADDRESS;
		return status;
	}

	kvm = q->matrix_mdev->kvm;
	gisa = kvm->arch.gisa_int.origin;

	h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
	aqic_gisa.gisc = isc;

	nisc = kvm_s390_gisc_register(kvm, isc);
	if (nisc < 0) {
		VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
				 __func__, nisc, isc, q->apqn);

		status.response_code = AP_RESPONSE_INVALID_GISA;
		return status;
	}

	aqic_gisa.isc = nisc;
	aqic_gisa.ir = 1;
	aqic_gisa.gisa = (uint64_t)gisa >> 4;

	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		/* See if we did clear older IRQ configuration */
		vfio_ap_free_aqic_resources(q);
		q->saved_iova = nib;
		q->saved_isc = isc;
		break;
	case AP_RESPONSE_OTHERWISE_CHANGED:
		/* We could not modify IRQ settings: clear new configuration */
		vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
		kvm_s390_gisc_unregister(kvm, isc);
		break;
	default:
		pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
			status.response_code);
		vfio_ap_irq_disable(q);
		break;
	}

	if (status.response_code != AP_RESPONSE_NORMAL) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
				 "zone=%#x, ir=%#x, gisc=%#x, f=%#x,"
				 "gisa=%#x, isc=%#x, apqn=%#04x\n",
				 __func__, status.response_code,
				 aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
				 aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
				 q->apqn);
	}

	return status;
}
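
/**
 * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an
 *				array of big endian elements suitable for
 *				formatting a UUID string with an s390dbf
 *				sprintf event function.
 *
 * @guid: the object containing the little endian guid.
 * @uuid: a six-element array of long values that can be passed by value as
 *	  arguments for a formatting string specifying a UUID.
 *
 * The s390 debug feature does not recognize the %pUl format specifier, so the
 * UUID is broken up into its component parts, byte-swapped where required, and
 * stored in @uuid for formatting with "%08lx-%04lx-%04lx-%04lx-%04lx%08lx".
 */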
static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
{
	/*
	 * The input guid is ordered in little endian, so it needs to be
	 * reordered for displaying a UUID as a string. This specifies the
	 * guid indices in proper order.
	 */
	uuid[0] = le32_to_cpup((__le32 *)guid);
	uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
	uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
	uuid[3] = *((__u16 *)&guid->b[8]);
	uuid[4] = *((__u16 *)&guid->b[10]);
	uuid[5] = *((__u32 *)&guid->b[12]);
}
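
/**
 * handle_pqap - PQAP instruction callback
 *
 * @vcpu: The vcpu on which we received the PQAP instruction
 *
 * Get the general register contents to initialize internal variables.
 * REG[0]: APQN
 * REG[1]: IR and ISC
 * REG[2]: NIB
 *
 * Response.status may be set to the following response codes:
 * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
 * - AP_RESPONSE_NORMAL (0): in case of success
 *
 * We take the matrix_dev lock to ensure serialization on queues and
 * mediated device access.
 *
 * Return: 0 if we could handle the request inside KVM.
 * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
 */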
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	uint64_t status;
	uint16_t apqn;
	unsigned long uuid[6];
	struct vfio_ap_queue *q;
	struct ap_queue_status qstatus = {
			       .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
	struct ap_matrix_mdev *matrix_mdev;

	apqn = vcpu->run->s.regs.gprs[0] & 0xffff;

	/* If we do not use the AIV facility just go to userland */
	if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
		VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
				 __func__, apqn, vcpu->arch.sie_block->eca);

		return -EOPNOTSUPP;
	}

	mutex_lock(&matrix_dev->mdevs_lock);

	if (!vcpu->kvm->arch.crypto.pqap_hook) {
		VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
				 __func__, apqn);

		goto out_unlock;
	}

	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/* If there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm) {
		vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
		VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
				 __func__, uuid[0], uuid[1], uuid[2],
				 uuid[3], uuid[4], uuid[5], apqn);
		goto out_unlock;
	}

	q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
	if (!q) {
		VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
				 __func__, AP_QID_CARD(apqn),
				 AP_QID_QUEUE(apqn));
		goto out_unlock;
	}

	status = vcpu->run->s.regs.gprs[1];

	/* If IR bit(16) is set we enable the interrupt */
	if ((status >> (63 - 16)) & 0x01)
		qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
	else
		qstatus = vfio_ap_irq_disable(q);

out_unlock:
	memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
	vcpu->run->s.regs.gprs[1] >>= 32;
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;
}

static void vfio_ap_matrix_init(struct ap_config_info *info,
				struct ap_matrix *matrix)
{
	matrix->apm_max = info->apxa ? info->Na : 63;
	matrix->aqm_max = info->apxa ? info->Nd : 15;
	matrix->adm_max = info->apxa ? info->Nd : 15;
}

static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev->kvm)
		kvm_arch_crypto_set_masks(matrix_mdev->kvm,
					  matrix_mdev->shadow_apcb.apm,
					  matrix_mdev->shadow_apcb.aqm,
					  matrix_mdev->shadow_apcb.adm);
}

static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
{
	DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);

	bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
	bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
		   (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);

	return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
			     AP_DOMAINS);
}
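
/*
 * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
 *				to ensure no queue devices are passed through to
 *				a guest that are not bound to the vfio_ap
 *				device driver.
 *
 * @apm: the adapter mask to filter.
 * @aqm: the domain mask to filter.
 * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
 *
 * Note: If an APQN referencing a queue device that is not bound to the vfio_ap
 *	 driver is assigned to the matrix mdev, the APID of that adapter is
 *	 filtered from the guest's APCB; the AP architecture does not permit
 *	 filtering an individual APQN.
 *
 * Return: a boolean value indicating whether the KVM guest's APCB was changed
 *	   by the filtering or not.
 */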
static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
				       struct ap_matrix_mdev *matrix_mdev)
{
	unsigned long apid, apqi, apqn;
	DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
	DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
	struct vfio_ap_queue *q;

	bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
	bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);

	/*
	 * Copy the adapters, domains and control domains to the shadow_apcb
	 * from the matrix mdev, but only those that are assigned to the host's
	 * AP configuration.
	 */
	bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
		   (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
	bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
		   (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);

	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
			/*
			 * If the APQN is not bound to the vfio_ap device
			 * driver, then we can't assign it to the guest's
			 * AP configuration. The AP architecture won't
			 * allow filtering of a single APQN, so let's filter
			 * the APID since an adapter represents a physical
			 * hardware device.
			 */
			apqn = AP_MKQID(apid, apqi);
			q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
			if (!q || q->reset_rc) {
				clear_bit_inv(apid,
					      matrix_mdev->shadow_apcb.apm);
				break;
			}
		}
	}

	return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
			     AP_DEVICES) ||
	       !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
			     AP_DOMAINS);
}

static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
		return -EPERM;

	matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
	if (!matrix_mdev) {
		ret = -ENOMEM;
		goto err_dec_available;
	}
	vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
			    &vfio_ap_matrix_dev_ops);

	matrix_mdev->mdev = mdev;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	matrix_mdev->pqap_hook = handle_pqap;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
	hash_init(matrix_mdev->qtable.queues);

	ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_list;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	mutex_lock(&matrix_dev->mdevs_lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->mdevs_lock);
	return 0;

err_list:
	vfio_uninit_group_dev(&matrix_mdev->vdev);
	kfree(matrix_mdev);
err_dec_available:
	atomic_inc(&matrix_dev->available_instances);
	return ret;
}

static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
				    struct vfio_ap_queue *q)
{
	if (q) {
		q->matrix_mdev = matrix_mdev;
		hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
	}
}

static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
	struct vfio_ap_queue *q;

	q = vfio_ap_find_queue(apqn);
	vfio_ap_mdev_link_queue(matrix_mdev, q);
}

static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
{
	hash_del(&q->mdev_qnode);
}

static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
{
	q->matrix_mdev = NULL;
}

static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
{
	struct vfio_ap_queue *q;
	unsigned long apid, apqi;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
				     AP_DOMAINS) {
			q = vfio_ap_mdev_get_queue(matrix_mdev,
						   AP_MKQID(apid, apqi));
			if (q)
				q->matrix_mdev = NULL;
		}
	}
}

static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
	vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);
	vfio_uninit_group_dev(&matrix_mdev->vdev);
	kfree(matrix_mdev);
	atomic_inc(&matrix_dev->available_instances);
}

static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
}

static MDEV_TYPE_ATTR_RO(name);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n",
		       atomic_read(&matrix_dev->available_instances));
}

static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
}

static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *vfio_ap_mdev_type_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
	.name = VFIO_AP_MDEV_TYPE_HWVIRT,
	.attrs = vfio_ap_mdev_type_attrs,
};

static struct attribute_group *vfio_ap_mdev_type_groups[] = {
	&vfio_ap_mdev_hwvirt_type_group,
	NULL,
};

#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
			 "already assigned to %s"

static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
					 unsigned long *apm,
					 unsigned long *aqm)
{
	unsigned long apid, apqi;
	const struct device *dev = mdev_dev(matrix_mdev->mdev);
	const char *mdev_name = dev_name(dev);

	for_each_set_bit_inv(apid, apm, AP_DEVICES)
		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
			dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
}
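
/**
 * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
 *
 * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
 * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
 *
 * Verifies that each APQN derived from the Cartesian product of a bitmap of
 * AP adapter IDs and AP queue indexes is not configured for any matrix
 * mediated device. AP queue sharing is not allowed.
 *
 * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
 */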
static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
					  unsigned long *mdev_aqm)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		/*
		 * If the input apm and aqm are fields of the matrix_mdev
		 * object, then move on to the next matrix_mdev.
		 */
		if (mdev_apm == matrix_mdev->matrix.apm &&
		    mdev_aqm == matrix_mdev->matrix.aqm)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
				AP_DEVICES))
			continue;

		if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
				AP_DOMAINS))
			continue;

		vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);

		return -EADDRINUSE;
	}

	return 0;
}
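
/**
 * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
 *				 not reserved for the default zcrypt driver and
 *				 are not assigned to another mdev.
 *
 * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
 *
 * Return: 0 if the APQNs are valid; otherwise, -EADDRNOTAVAIL if any of the
 *	   APQNs are reserved for the default driver, or -EADDRINUSE if any of
 *	   them are assigned to another mediated device.
 */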
static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
{
	if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm))
		return -EADDRNOTAVAIL;

	return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
					      matrix_mdev->matrix.aqm);
}

static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
				      unsigned long apid)
{
	unsigned long apqi;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}
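
/**
 * assign_adapter_store - parses the APID from @buf and sets the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_adapter attribute
 * @buf:	a buffer containing the AP adapter number (APID) to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns one of the following errors:
 * - -EINVAL if the APID is not a number
 * - -ENODEV if the APID exceeds the maximum value configured for the system
 * - -EADDRNOTAVAIL if an APQN derived from the cross product of the APID being
 *   assigned and the APQIs assigned to the mdev is reserved for the default
 *   zcrypt driver
 * - -EADDRINUSE if an APQN derived from the cross product of the APID being
 *   assigned and the APQIs assigned to the mdev is assigned to another mdev
 */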
static ssize_t assign_adapter_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	DECLARE_BITMAP(apm_delta, AP_DEVICES);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apid, matrix_mdev->matrix.apm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apid, matrix_mdev->matrix.apm);
		goto done;
	}

	vfio_ap_mdev_link_adapter(matrix_mdev, apid);
	memset(apm_delta, 0, sizeof(apm_delta));
	set_bit_inv(apid, apm_delta);

	if (vfio_ap_mdev_filter_matrix(apm_delta,
				       matrix_mdev->matrix.aqm, matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_adapter);

static struct vfio_ap_queue
*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
			     unsigned long apid, unsigned long apqi)
{
	struct vfio_ap_queue *q = NULL;

	q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
	/* If the queue is assigned to the matrix mdev, unlink it. */
	if (q)
		vfio_ap_unlink_queue_fr_mdev(q);

	return q;
}
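
/**
 * vfio_ap_mdev_unlink_adapter - unlink all queues associated with an adapter
 *				 from the matrix mdev to which the adapter was
 *				 assigned.
 * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
 * @apid: the APID of the unassigned adapter.
 * @qtable: table for storing queues associated with the unassigned adapter
 *	    that need to be reset.
 */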
static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
					unsigned long apid,
					struct ap_queue_table *qtable)
{
	unsigned long apqi;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qtable) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				hash_add(qtable->queues, &q->mdev_qnode,
					 q->apqn);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
					    unsigned long apid)
{
	int loop_cursor;
	struct vfio_ap_queue *q;
	struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);

	hash_init(qtable->queues);
	vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, qtable);

	if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
		clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_queues(qtable);

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		hash_del(&q->mdev_qnode);
	}

	kfree(qtable);
}
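
/**
 * unassign_adapter_store - parses the APID from @buf and clears the
 * corresponding bit in the mediated matrix device's APM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_adapter attribute
 * @buf:	a buffer containing the adapter number (APID) to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APID is valid; otherwise,
 * returns -EINVAL if the APID is not a number or -ENODEV if the APID exceeds
 * the maximum value configured for the system.
 */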
static ssize_t unassign_adapter_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int ret;
	unsigned long apid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apid);
	if (ret)
		goto done;

	if (apid > matrix_mdev->matrix.apm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
	vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);

static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
				     unsigned long apqi)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
		vfio_ap_mdev_link_apqn(matrix_mdev,
				       AP_MKQID(apid, apqi));
}
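
/**
 * assign_domain_store - parses the APQI from @buf and sets the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 * returns one of the following errors:
 * - -EINVAL if the APQI is not a number
 * - -ENODEV if the APQI exceeds the maximum value configured for the system
 * - -EADDRNOTAVAIL if an APQN derived from the cross product of the APQI being
 *   assigned and the APIDs assigned to the mdev is reserved for the default
 *   zcrypt driver
 * - -EADDRINUSE if an APQN derived from the cross product of the APQI being
 *   assigned and the APIDs assigned to the mdev is assigned to another mdev
 */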
static ssize_t assign_domain_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	DECLARE_BITMAP(aqm_delta, AP_DOMAINS);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&ap_perms_mutex);
	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	set_bit_inv(apqi, matrix_mdev->matrix.aqm);

	ret = vfio_ap_mdev_validate_masks(matrix_mdev);
	if (ret) {
		clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
		goto done;
	}

	vfio_ap_mdev_link_domain(matrix_mdev, apqi);
	memset(aqm_delta, 0, sizeof(aqm_delta));
	set_bit_inv(apqi, aqm_delta);

	if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm, aqm_delta,
				       matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	mutex_unlock(&ap_perms_mutex);

	return ret;
}
static DEVICE_ATTR_WO(assign_domain);

static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
				       unsigned long apqi,
				       struct ap_queue_table *qtable)
{
	unsigned long apid;
	struct vfio_ap_queue *q;

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
		q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);

		if (q && qtable) {
			if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
			    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
				hash_add(qtable->queues, &q->mdev_qnode,
					 q->apqn);
		}
	}
}

static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
					   unsigned long apqi)
{
	int loop_cursor;
	struct vfio_ap_queue *q;
	struct ap_queue_table *qtable = kzalloc(sizeof(*qtable), GFP_KERNEL);

	hash_init(qtable->queues);
	vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, qtable);

	if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
		clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	vfio_ap_mdev_reset_queues(qtable);

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		vfio_ap_unlink_mdev_fr_queue(q);
		hash_del(&q->mdev_qnode);
	}

	kfree(qtable);
}
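
/**
 * unassign_domain_store - parses the APQI from @buf and clears the
 * corresponding bit in the mediated matrix device's AQM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_domain attribute
 * @buf:	a buffer containing the AP queue index (APQI) of the domain to
 *		be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the APQI is valid; otherwise,
 * returns -EINVAL if the APQI is not a number or -ENODEV if the APQI exceeds
 * the maximum value configured for the system.
 */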
static ssize_t unassign_domain_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int ret;
	unsigned long apqi;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &apqi);
	if (ret)
		goto done;

	if (apqi > matrix_mdev->matrix.aqm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
	vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
	ret = count;

done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
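
/**
 * assign_control_domain_store - parses the domain ID from @buf and sets
 * the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's assign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be assigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns -EINVAL if the ID is not a number or -ENODEV if the ID exceeds the
 * maximum value configured for the system.
 */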
static ssize_t assign_control_domain_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int ret;
	unsigned long id;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &id);
	if (ret)
		goto done;

	if (id > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	/* Set the bit in the ADM (bitmask) corresponding to the AP control
	 * domain number (id). The bits in the mask, from most significant to
	 * least significant, correspond to IDs 0 up to the one less than the
	 * number of control domains that can be assigned.
	 */
	set_bit_inv(id, matrix_mdev->matrix.adm);
	if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
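
/**
 * unassign_control_domain_store - parses the domain ID from @buf and clears
 * the corresponding bit in the mediated matrix device's ADM
 *
 * @dev:	the matrix device
 * @attr:	the mediated matrix device's unassign_control_domain attribute
 * @buf:	a buffer containing the domain ID to be unassigned
 * @count:	the number of bytes in @buf
 *
 * Return: the number of bytes processed if the domain ID is valid; otherwise,
 * returns -EINVAL if the ID is not a number or -ENODEV if the ID exceeds the
 * maximum value configured for the system.
 */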
static ssize_t unassign_control_domain_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int ret;
	unsigned long domid;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	get_update_locks_for_mdev(matrix_mdev);

	ret = kstrtoul(buf, 0, &domid);
	if (ret)
		goto done;

	if (domid > matrix_mdev->matrix.adm_max) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
		ret = count;
		goto done;
	}

	clear_bit_inv(domid, matrix_mdev->matrix.adm);

	if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
		clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}

	ret = count;
done:
	release_update_locks_for_mdev(matrix_mdev);
	return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);

static ssize_t control_domains_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	unsigned long id;
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->mdevs_lock);
	for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
		n = sprintf(bufpos, "%04lx\n", id);
		bufpos += n;
		nchars += n;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(control_domains);

static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
	unsigned long apid1;
	unsigned long apqi1;
	unsigned long napm_bits = matrix->apm_max + 1;
	unsigned long naqm_bits = matrix->aqm_max + 1;
	int nchars = 0;
	int n;

	apid1 = find_first_bit_inv(matrix->apm, napm_bits);
	apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);

	if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			for_each_set_bit_inv(apqi, matrix->aqm,
					     naqm_bits) {
				n = sprintf(bufpos, "%02lx.%04lx\n", apid,
					    apqi);
				bufpos += n;
				nchars += n;
			}
		}
	} else if (apid1 < napm_bits) {
		for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
			n = sprintf(bufpos, "%02lx.\n", apid);
			bufpos += n;
			nchars += n;
		}
	} else if (apqi1 < naqm_bits) {
		for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
			n = sprintf(bufpos, ".%04lx\n", apqi);
			bufpos += n;
			nchars += n;
		}
	}

	return nchars;
}

static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(matrix);

static ssize_t guest_matrix_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t nchars;
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}
static DEVICE_ATTR_RO(guest_matrix);

static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	&dev_attr_guest_matrix.attr,
	NULL,
};

static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};

static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};
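
/**
 * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
 *			  to manage AP resources for the guest whose state is
 *			  represented by @kvm
 *
 * @matrix_mdev: a mediated matrix device
 * @kvm: reference to KVM instance
 *
 * Return: 0 if no other mediated matrix device has a reference to @kvm;
 * otherwise, returns -EPERM.
 */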
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
				struct kvm *kvm)
{
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm) {
				release_update_locks_for_kvm(kvm);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm = kvm;
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

		release_update_locks_for_kvm(kvm);
	}

	return 0;
}

static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
				   u64 length)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	vfio_unpin_pages(&matrix_mdev->vdev, iova, 1);
}
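
/**
 * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
 *			    by @matrix_mdev.
 *
 * @matrix_mdev: a matrix mediated device
 */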
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
{
	struct kvm *kvm = matrix_mdev->kvm;

	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		get_update_locks_for_kvm(kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;

		release_update_locks_for_kvm(kvm);
	}
}

static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
{
	struct ap_queue *queue;
	struct vfio_ap_queue *q = NULL;

	queue = ap_get_qdev(apqn);
	if (!queue)
		return NULL;

	if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
		q = dev_get_drvdata(&queue->ap_dev.device);

	put_device(&queue->ap_dev.device);

	return q;
}

static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
				    unsigned int retry)
{
	struct ap_queue_status status;
	int ret;
	int retry2 = 2;

	if (!q)
		return 0;
retry_zapq:
	status = ap_zapq(q->apqn);
	q->reset_rc = status.response_code;
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		ret = 0;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		if (retry--) {
			msleep(20);
			goto retry_zapq;
		}
		ret = -EBUSY;
		break;
	case AP_RESPONSE_Q_NOT_AVAIL:
	case AP_RESPONSE_DECONFIGURED:
	case AP_RESPONSE_CHECKSTOPPED:
		WARN_ONCE(status.irq_enabled,
			  "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled",
			  AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
			  status.response_code);
		ret = -EBUSY;
		goto free_resources;
	default:
		/* things are really broken, give up */
		WARN(true,
		     "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
		     AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
		     status.response_code);
		return -EIO;
	}

	/* wait for the reset to take effect */
	while (retry2--) {
		if (status.queue_empty && !status.irq_enabled)
			break;
		msleep(20);
		status = ap_tapq(q->apqn, NULL);
	}
	WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x",
		  AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn));

free_resources:
	vfio_ap_free_aqic_resources(q);

	return ret;
}

static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
	int ret, loop_cursor, rc = 0;
	struct vfio_ap_queue *q;

	hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
		ret = vfio_ap_mdev_reset_queue(q, 1);
		/*
		 * Regardless whether a queue turns out to be busy, or
		 * is not operational, we need to continue resetting
		 * the remaining queues.
		 */
		if (ret)
			rc = ret;
	}

	return rc;
}

static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	if (!vdev->kvm)
		return -EINVAL;

	return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}

static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	vfio_ap_mdev_unset_kvm(matrix_mdev);
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
{
	unsigned long minsz;
	struct vfio_device_info info;

	minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, (void __user *)arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
	info.num_regions = 0;
	info.num_irqs = 0;

	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
				  unsigned int cmd, unsigned long arg)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);
	int ret;

	mutex_lock(&matrix_dev->mdevs_lock);
	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		ret = vfio_ap_mdev_reset_queues(&matrix_mdev->qtable);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&matrix_dev->mdevs_lock);

	return ret;
}

static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
{
	struct ap_matrix_mdev *matrix_mdev;
	unsigned long apid = AP_QID_CARD(q->apqn);
	unsigned long apqi = AP_QID_QUEUE(q->apqn);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
		    test_bit_inv(apqi, matrix_mdev->matrix.aqm))
			return matrix_mdev;
	}

	return NULL;
}

static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	ssize_t nchars = 0;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;
	struct ap_device *apdev = to_ap_dev(dev);

	mutex_lock(&matrix_dev->mdevs_lock);
	q = dev_get_drvdata(&apdev->device);
	matrix_mdev = vfio_ap_mdev_for_queue(q);

	if (matrix_mdev) {
		if (matrix_mdev->kvm)
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_IN_USE);
		else
			nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
					   AP_QUEUE_ASSIGNED);
	} else {
		nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
				   AP_QUEUE_UNASSIGNED);
	}

	mutex_unlock(&matrix_dev->mdevs_lock);

	return nchars;
}

static DEVICE_ATTR_RO(status);

static struct attribute *vfio_queue_attrs[] = {
	&dev_attr_status.attr,
	NULL,
};

static const struct attribute_group vfio_queue_attr_group = {
	.attrs = vfio_queue_attrs,
};

static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
	.open_device = vfio_ap_mdev_open_device,
	.close_device = vfio_ap_mdev_close_device,
	.ioctl = vfio_ap_mdev_ioctl,
	.dma_unmap = vfio_ap_mdev_dma_unmap,
};

static struct mdev_driver vfio_ap_matrix_driver = {
	.driver = {
		.name = "vfio_ap_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = vfio_ap_mdev_attr_groups,
	},
	.probe = vfio_ap_mdev_probe,
	.remove = vfio_ap_mdev_remove,
	.supported_type_groups = vfio_ap_mdev_type_groups,
};

int vfio_ap_mdev_register(void)
{
	int ret;

	atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);

	ret = mdev_register_driver(&vfio_ap_matrix_driver);
	if (ret)
		return ret;

	ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_driver);
	if (ret)
		goto err_driver;
	return 0;

err_driver:
	mdev_unregister_driver(&vfio_ap_matrix_driver);
	return ret;
}

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_device(&matrix_dev->device);
	mdev_unregister_driver(&vfio_ap_matrix_driver);
}

int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
{
	int ret;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;

	ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
	if (ret)
		return ret;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	q->apqn = to_ap_queue(&apdev->device)->qid;
	q->saved_isc = VFIO_AP_ISC_INVALID;
	matrix_mdev = get_update_locks_by_apqn(q->apqn);

	if (matrix_mdev) {
		vfio_ap_mdev_link_queue(matrix_mdev, q);

		if (vfio_ap_mdev_filter_matrix(matrix_mdev->matrix.apm,
					       matrix_mdev->matrix.aqm,
					       matrix_mdev))
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
	}
	dev_set_drvdata(&apdev->device, q);
	release_update_locks_for_mdev(matrix_mdev);

	return 0;
}

void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
{
	unsigned long apid, apqi;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev;

	sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
	q = dev_get_drvdata(&apdev->device);
	get_update_locks_for_queue(q);
	matrix_mdev = q->matrix_mdev;

	if (matrix_mdev) {
		vfio_ap_unlink_queue_fr_mdev(q);

		apid = AP_QID_CARD(q->apqn);
		apqi = AP_QID_QUEUE(q->apqn);

		/*
		 * If the queue is assigned to the guest's APCB, then remove
		 * the adapter's APID from the APCB and hot unplug it from the
		 * guest.
		 */
		if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
		    test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
			clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
			vfio_ap_mdev_update_guest_apcb(matrix_mdev);
		}
	}

	vfio_ap_mdev_reset_queue(q, 1);
	dev_set_drvdata(&apdev->device, NULL);
	kfree(q);
	release_update_locks_for_mdev(matrix_mdev);
}
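
/**
 * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
 *				 assigned to a mediated device under the
 *				 control of the vfio_ap device driver.
 *
 * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
 * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
 *
 * Return: -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
 *	   assigned to a mediated device under the control of the vfio_ap
 *	   device driver; otherwise, 0.
 */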
int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
{
	int ret;

	mutex_lock(&matrix_dev->guests_lock);
	mutex_lock(&matrix_dev->mdevs_lock);
	ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_dev->guests_lock);

	return ret;
}
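
/**
 * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
 *				 domains that have been removed from the host's
 *				 AP configuration from a guest.
 *
 * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
 * @aprem: the adapters that have been removed from the host's AP configuration
 * @aqrem: the domains that have been removed from the host's AP configuration
 * @cdrem: the control domains that have been removed from the host's AP
 *	   configuration.
 */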
static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
					unsigned long *aprem,
					unsigned long *aqrem,
					unsigned long *cdrem)
{
	int do_hotplug = 0;

	if (!bitmap_empty(aprem, AP_DEVICES)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
					    matrix_mdev->shadow_apcb.apm,
					    aprem, AP_DEVICES);
	}

	if (!bitmap_empty(aqrem, AP_DOMAINS)) {
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
					    matrix_mdev->shadow_apcb.aqm,
					    aqrem, AP_DOMAINS);
	}

	if (!bitmap_empty(cdrem, AP_DOMAINS))
		do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
					    matrix_mdev->shadow_apcb.adm,
					    cdrem, AP_DOMAINS);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);
}
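
/**
 * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
 *			     domains and control domains that have been removed
 *			     from the host AP configuration and unplugs them
 *			     from those guests.
 *
 * @ap_remove:	bitmap specifying which adapters have been removed from the
 *		host config.
 * @aq_remove:	bitmap specifying which domains have been removed from the
 *		host config.
 * @cd_remove:	bitmap specifying which control domains have been removed from
 *		the host config.
 */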
static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
				    unsigned long *aq_remove,
				    unsigned long *cd_remove)
{
	struct ap_matrix_mdev *matrix_mdev;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);
	int do_remove = 0;

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		mutex_lock(&matrix_mdev->kvm->lock);
		mutex_lock(&matrix_dev->mdevs_lock);

		do_remove |= bitmap_and(aprem, ap_remove,
					matrix_mdev->matrix.apm,
					AP_DEVICES);
		do_remove |= bitmap_and(aqrem, aq_remove,
					matrix_mdev->matrix.aqm,
					AP_DOMAINS);
		do_remove |= bitmap_andnot(cdrem, cd_remove,
					   matrix_mdev->matrix.adm,
					   AP_DOMAINS);

		if (do_remove)
			vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
						    cdrem);

		mutex_unlock(&matrix_dev->mdevs_lock);
		mutex_unlock(&matrix_mdev->kvm->lock);
	}
}
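
/**
 * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains
 *				and control domains from the host AP
 *				configuration by unplugging them from all
 *				guests that are using them.
 * @cur_config_info: the current host AP configuration information
 * @prev_config_info: the previous host AP configuration information
 */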
static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
				       struct ap_config_info *prev_config_info)
{
	int do_remove;
	DECLARE_BITMAP(aprem, AP_DEVICES);
	DECLARE_BITMAP(aqrem, AP_DOMAINS);
	DECLARE_BITMAP(cdrem, AP_DOMAINS);

	do_remove = bitmap_andnot(aprem,
				  (unsigned long *)prev_config_info->apm,
				  (unsigned long *)cur_config_info->apm,
				  AP_DEVICES);
	do_remove |= bitmap_andnot(aqrem,
				   (unsigned long *)prev_config_info->aqm,
				   (unsigned long *)cur_config_info->aqm,
				   AP_DOMAINS);
	do_remove |= bitmap_andnot(cdrem,
				   (unsigned long *)prev_config_info->adm,
				   (unsigned long *)cur_config_info->adm,
				   AP_DOMAINS);

	if (do_remove)
		vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
}
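
/**
 * vfio_ap_filter_apid_by_qtype: filter from an AP mask the APIDs of adapters
 *				 that are older than AP type 10 (CEX4).
 * @apm: a bitmap of the APIDs to examine
 * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
 */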
static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
{
	bool apid_cleared;
	struct ap_queue_status status;
	unsigned long apid, apqi, info;
	int qtype, qtype_mask = 0xff000000;

	for_each_set_bit_inv(apid, apm, AP_DEVICES) {
		apid_cleared = false;

		for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
			status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
			switch (status.response_code) {
			/*
			 * According to the architecture in each case
			 * below, the queue's info should be filled.
			 */
			case AP_RESPONSE_NORMAL:
			case AP_RESPONSE_RESET_IN_PROGRESS:
			case AP_RESPONSE_DECONFIGURED:
			case AP_RESPONSE_CHECKSTOPPED:
			case AP_RESPONSE_BUSY:
				qtype = info & qtype_mask;

				/*
				 * The vfio_ap device driver only
				 * supports CEX4 and newer adapters, so
				 * remove the APID if the adapter is
				 * older than a CEX4.
				 */
				if (qtype < AP_DEVICE_TYPE_CEX4) {
					clear_bit_inv(apid, apm);
					apid_cleared = true;
				}

				break;

			default:
				/*
				 * If we don't know the adapter type,
				 * clear its APID since it can't be
				 * determined whether the vfio_ap
				 * device driver supports it.
				 */
				clear_bit_inv(apid, apm);
				apid_cleared = true;
				break;
			}

			/*
			 * If we've already cleared the APID from the apm,
			 * there is no need to examine the remaining AP queues
			 * to determine the type of the adapter.
			 */
			if (apid_cleared)
				break;
		}
	}
}
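
/**
 * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
 *			  control domains that have been added to the host's
 *			  AP configuration for each matrix mdev to which they
 *			  are assigned.
 *
 * @apm_add: a bitmap specifying the adapters that have been added to the AP
 *	     configuration.
 * @aqm_add: a bitmap specifying the domains that have been added to the AP
 *	     configuration.
 * @adm_add: a bitmap specifying the control domains that have been added to
 *	     the AP configuration.
 */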
static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
				 unsigned long *adm_add)
{
	struct ap_matrix_mdev *matrix_mdev;

	if (list_empty(&matrix_dev->mdev_list))
		return;

	vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		bitmap_and(matrix_mdev->apm_add,
			   matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
		bitmap_and(matrix_mdev->aqm_add,
			   matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
		bitmap_and(matrix_mdev->adm_add,
			   matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
	}
}
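
/**
 * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
 *			     control domains to the host AP configuration by
 *			     updating the bitmaps that specify what has been
 *			     added for each matrix mdev to which they are
 *			     assigned.
 * @cur_config_info: the current AP configuration information
 * @prev_config_info: the previous AP configuration information
 */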
static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
				    struct ap_config_info *prev_config_info)
{
	bool do_add;
	DECLARE_BITMAP(apm_add, AP_DEVICES);
	DECLARE_BITMAP(aqm_add, AP_DOMAINS);
	DECLARE_BITMAP(adm_add, AP_DOMAINS);

	do_add = bitmap_andnot(apm_add,
			       (unsigned long *)cur_config_info->apm,
			       (unsigned long *)prev_config_info->apm,
			       AP_DEVICES);
	do_add |= bitmap_andnot(aqm_add,
				(unsigned long *)cur_config_info->aqm,
				(unsigned long *)prev_config_info->aqm,
				AP_DOMAINS);
	do_add |= bitmap_andnot(adm_add,
				(unsigned long *)cur_config_info->adm,
				(unsigned long *)prev_config_info->adm,
				AP_DOMAINS);

	if (do_add)
		vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
}
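
/**
 * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
 *			    configuration.
 *
 * @cur_cfg_info: the current host AP configuration
 * @prev_cfg_info: the previous host AP configuration
 */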
void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
			    struct ap_config_info *prev_cfg_info)
{
	if (!cur_cfg_info || !prev_cfg_info)
		return;

	mutex_lock(&matrix_dev->guests_lock);

	vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
	vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
	memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));

	mutex_unlock(&matrix_dev->guests_lock);
}

static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
{
	bool do_hotplug = false;
	int filter_domains = 0;
	int filter_adapters = 0;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	mutex_lock(&matrix_mdev->kvm->lock);
	mutex_lock(&matrix_dev->mdevs_lock);

	filter_adapters = bitmap_and(apm, matrix_mdev->matrix.apm,
				     matrix_mdev->apm_add, AP_DEVICES);
	filter_domains = bitmap_and(aqm, matrix_mdev->matrix.aqm,
				    matrix_mdev->aqm_add, AP_DOMAINS);

	if (filter_adapters && filter_domains)
		do_hotplug |= vfio_ap_mdev_filter_matrix(apm, aqm, matrix_mdev);
	else if (filter_adapters)
		do_hotplug |=
			vfio_ap_mdev_filter_matrix(apm,
						   matrix_mdev->shadow_apcb.aqm,
						   matrix_mdev);
	else
		do_hotplug |=
			vfio_ap_mdev_filter_matrix(matrix_mdev->shadow_apcb.apm,
						   aqm, matrix_mdev);

	if (bitmap_intersects(matrix_mdev->matrix.adm, matrix_mdev->adm_add,
			      AP_DOMAINS))
		do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);

	if (do_hotplug)
		vfio_ap_mdev_update_guest_apcb(matrix_mdev);

	mutex_unlock(&matrix_dev->mdevs_lock);
	mutex_unlock(&matrix_mdev->kvm->lock);
}

void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
			      struct ap_config_info *old_config_info)
{
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->guests_lock);

	list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
		if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
		    bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
		    bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
			continue;

		vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
		bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
		bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
		bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
	}

	mutex_unlock(&matrix_dev->guests_lock);
}