Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * VFIO based Physical Subchannel device driver
0004  *
0005  * Copyright IBM Corp. 2017
0006  * Copyright Red Hat, Inc. 2019
0007  *
0008  * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
0009  *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
0010  *            Cornelia Huck <cohuck@redhat.com>
0011  */
0012 
0013 #include <linux/module.h>
0014 #include <linux/init.h>
0015 #include <linux/device.h>
0016 #include <linux/slab.h>
0017 #include <linux/mdev.h>
0018 
0019 #include <asm/isc.h>
0020 
0021 #include "chp.h"
0022 #include "ioasm.h"
0023 #include "css.h"
0024 #include "vfio_ccw_private.h"
0025 
/* Shared workqueue for deferred I/O-interrupt and CRW notification work */
struct workqueue_struct *vfio_ccw_work_q;
/* Slab caches for the per-device region buffers (allocated GFP_DMA below) */
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

/* s390 debug-feature handles: sprintf-style messages and hex/ascii traces */
debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;
0034 
0035 /*
0036  * Helpers
0037  */
/*
 * vfio_ccw_sch_quiesce - terminate outstanding I/O and disable a subchannel
 * @sch: subchannel to quiesce
 *
 * Must be called with sch->lock held; the lock is temporarily dropped
 * while waiting for cancel/halt/clear to complete and for queued work
 * to be flushed.
 *
 * Returns 0 on success, or a negative errno from cio_cancel_halt_clear()
 * or cio_disable_subchannel().
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	/* retry budget, decremented by cio_cancel_halt_clear() per attempt */
	iretry = 255;
	do {

		ret = cio_cancel_halt_clear(sch, &iretry);

		/* -EIO: retries exhausted, give up */
		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		/* -EBUSY: cancel/halt/clear started; wait for its interrupt */
		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	return ret;
}
0073 
0074 static void vfio_ccw_sch_io_todo(struct work_struct *work)
0075 {
0076     struct vfio_ccw_private *private;
0077     struct irb *irb;
0078     bool is_final;
0079     bool cp_is_finished = false;
0080 
0081     private = container_of(work, struct vfio_ccw_private, io_work);
0082     irb = &private->irb;
0083 
0084     is_final = !(scsw_actl(&irb->scsw) &
0085              (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
0086     if (scsw_is_solicited(&irb->scsw)) {
0087         cp_update_scsw(&private->cp, &irb->scsw);
0088         if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
0089             cp_free(&private->cp);
0090             cp_is_finished = true;
0091         }
0092     }
0093     mutex_lock(&private->io_mutex);
0094     memcpy(private->io_region->irb_area, irb, sizeof(*irb));
0095     mutex_unlock(&private->io_mutex);
0096 
0097     /*
0098      * Reset to IDLE only if processing of a channel program
0099      * has finished. Do not overwrite a possible processing
0100      * state if the interrupt was unsolicited, or if the final
0101      * interrupt was for HSCH or CSCH.
0102      */
0103     if (cp_is_finished)
0104         private->state = VFIO_CCW_STATE_IDLE;
0105 
0106     if (private->io_trigger)
0107         eventfd_signal(private->io_trigger, 1);
0108 }
0109 
0110 static void vfio_ccw_crw_todo(struct work_struct *work)
0111 {
0112     struct vfio_ccw_private *private;
0113 
0114     private = container_of(work, struct vfio_ccw_private, crw_work);
0115 
0116     if (!list_empty(&private->crw) && private->crw_trigger)
0117         eventfd_signal(private->crw_trigger, 1);
0118 }
0119 
0120 /*
0121  * Css driver callbacks
0122  */
/* Css driver irq callback: account the interrupt and feed it to the FSM. */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
0130 
0131 static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
0132 {
0133     struct vfio_ccw_private *private;
0134 
0135     private = kzalloc(sizeof(*private), GFP_KERNEL);
0136     if (!private)
0137         return ERR_PTR(-ENOMEM);
0138 
0139     private->sch = sch;
0140     mutex_init(&private->io_mutex);
0141     private->state = VFIO_CCW_STATE_STANDBY;
0142     INIT_LIST_HEAD(&private->crw);
0143     INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
0144     INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
0145     atomic_set(&private->avail, 1);
0146 
0147     private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
0148                        GFP_KERNEL);
0149     if (!private->cp.guest_cp)
0150         goto out_free_private;
0151 
0152     private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
0153                            GFP_KERNEL | GFP_DMA);
0154     if (!private->io_region)
0155         goto out_free_cp;
0156 
0157     private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
0158                         GFP_KERNEL | GFP_DMA);
0159     if (!private->cmd_region)
0160         goto out_free_io;
0161 
0162     private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
0163                           GFP_KERNEL | GFP_DMA);
0164 
0165     if (!private->schib_region)
0166         goto out_free_cmd;
0167 
0168     private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
0169                         GFP_KERNEL | GFP_DMA);
0170 
0171     if (!private->crw_region)
0172         goto out_free_schib;
0173     return private;
0174 
0175 out_free_schib:
0176     kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
0177 out_free_cmd:
0178     kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
0179 out_free_io:
0180     kmem_cache_free(vfio_ccw_io_region, private->io_region);
0181 out_free_cp:
0182     kfree(private->cp.guest_cp);
0183 out_free_private:
0184     mutex_destroy(&private->io_mutex);
0185     kfree(private);
0186     return ERR_PTR(-ENOMEM);
0187 }
0188 
0189 static void vfio_ccw_free_private(struct vfio_ccw_private *private)
0190 {
0191     struct vfio_ccw_crw *crw, *temp;
0192 
0193     list_for_each_entry_safe(crw, temp, &private->crw, next) {
0194         list_del(&crw->next);
0195         kfree(crw);
0196     }
0197 
0198     kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
0199     kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
0200     kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
0201     kmem_cache_free(vfio_ccw_io_region, private->io_region);
0202     kfree(private->cp.guest_cp);
0203     mutex_destroy(&private->io_mutex);
0204     kfree(private);
0205 }
0206 
0207 static int vfio_ccw_sch_probe(struct subchannel *sch)
0208 {
0209     struct pmcw *pmcw = &sch->schib.pmcw;
0210     struct vfio_ccw_private *private;
0211     int ret = -ENOMEM;
0212 
0213     if (pmcw->qf) {
0214         dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
0215              dev_name(&sch->dev));
0216         return -ENODEV;
0217     }
0218 
0219     private = vfio_ccw_alloc_private(sch);
0220     if (IS_ERR(private))
0221         return PTR_ERR(private);
0222 
0223     dev_set_drvdata(&sch->dev, private);
0224 
0225     ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
0226     if (ret)
0227         goto out_free;
0228 
0229     VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
0230                sch->schid.cssid, sch->schid.ssid,
0231                sch->schid.sch_no);
0232     return 0;
0233 
0234 out_free:
0235     dev_set_drvdata(&sch->dev, NULL);
0236     vfio_ccw_free_private(private);
0237     return ret;
0238 }
0239 
0240 static void vfio_ccw_sch_remove(struct subchannel *sch)
0241 {
0242     struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
0243 
0244     mdev_unregister_device(&sch->dev);
0245 
0246     dev_set_drvdata(&sch->dev, NULL);
0247 
0248     vfio_ccw_free_private(private);
0249 
0250     VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
0251                sch->schid.cssid, sch->schid.ssid,
0252                sch->schid.sch_no);
0253 }
0254 
/* Css driver shutdown: close the device and mark it not operational. */
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
0262 
0263 /**
0264  * vfio_ccw_sch_event - process subchannel event
0265  * @sch: subchannel
0266  * @process: non-zero if function is called in process context
0267  *
0268  * An unspecified event occurred for this subchannel. Adjust data according
0269  * to the current operational state of the subchannel. Return zero when the
0270  * event has been handled sufficiently or -EAGAIN when this function should
0271  * be called again in process context.
0272  */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	/* Not fully registered yet: ask to be called again (-EAGAIN) */
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	/* Pending todo work will re-evaluate the state; retry later */
	if (work_pending(&sch->todo_work))
		goto out_unlock;

	rc = 0;

	/* Failure to refresh the schib means the device is gone */
	if (cio_update_schib(sch))
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}
0296 
0297 static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
0298                    unsigned int rsc,
0299                    unsigned int erc,
0300                    unsigned int rsid)
0301 {
0302     struct vfio_ccw_crw *crw;
0303 
0304     /*
0305      * If unable to allocate a CRW, just drop the event and
0306      * carry on.  The guest will either see a later one or
0307      * learn when it issues its own store subchannel.
0308      */
0309     crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
0310     if (!crw)
0311         return;
0312 
0313     /*
0314      * Build the CRW based on the inputs given to us.
0315      */
0316     crw->crw.rsc = rsc;
0317     crw->crw.erc = erc;
0318     crw->crw.rsid = rsid;
0319 
0320     list_add_tail(&crw->next, &private->crw);
0321     queue_work(vfio_ccw_work_q, &private->crw_work);
0322 }
0323 
/*
 * Css driver channel-path event callback: adjust the subchannel's path
 * masks and, for gone/available paths, queue a CRW for the guest.
 */
static int vfio_ccw_chp_event(struct subchannel *sch,
			      struct chp_link *link, int event)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
	int retry = 255;

	/* Nothing to do if the device is unbound or the path is unrelated */
	if (!private || !mask)
		return 0;

	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
			   sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

	if (cio_update_schib(sch))
		return -ENODEV;

	switch (event) {
	case CHP_VARY_OFF:
		/* Path logically turned off */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		/* Terminate I/O if it was using the affected path */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		break;
	case CHP_OFFLINE:
		/* Path is gone */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		/* Report the permanent path error to the guest */
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
				   link->chpid.id);
		break;
	case CHP_VARY_ON:
		/* Path logically turned on */
		sch->opm |= mask;
		sch->lpm |= mask;
		break;
	case CHP_ONLINE:
		/* Path became available */
		sch->lpm |= mask & sch->opm;
		/* Report path initialization to the guest */
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
				   link->chpid.id);
		break;
	}

	return 0;
}
0373 
/* Match any I/O subchannel; this driver binds explicitly via sysfs */
static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
0379 
/* Css driver registration: callbacks implemented above */
static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
	.chp_event = vfio_ccw_chp_event,
};
0393 
0394 static int __init vfio_ccw_debug_init(void)
0395 {
0396     vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
0397                            11 * sizeof(long));
0398     if (!vfio_ccw_debug_msg_id)
0399         goto out_unregister;
0400     debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
0401     debug_set_level(vfio_ccw_debug_msg_id, 2);
0402     vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
0403     if (!vfio_ccw_debug_trace_id)
0404         goto out_unregister;
0405     debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
0406     debug_set_level(vfio_ccw_debug_trace_id, 2);
0407     return 0;
0408 
0409 out_unregister:
0410     debug_unregister(vfio_ccw_debug_msg_id);
0411     debug_unregister(vfio_ccw_debug_trace_id);
0412     return -1;
0413 }
0414 
/* Unregister both debug areas set up by vfio_ccw_debug_init(). */
static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}
0420 
/*
 * Destroy the region slab caches, in reverse order of creation.
 * kmem_cache_destroy() ignores NULL, so this is safe on partial setup.
 */
static void vfio_ccw_destroy_regions(void)
{
	kmem_cache_destroy(vfio_ccw_crw_region);
	kmem_cache_destroy(vfio_ccw_schib_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
}
0428 
0429 static int __init vfio_ccw_sch_init(void)
0430 {
0431     int ret;
0432 
0433     ret = vfio_ccw_debug_init();
0434     if (ret)
0435         return ret;
0436 
0437     vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
0438     if (!vfio_ccw_work_q) {
0439         ret = -ENOMEM;
0440         goto out_regions;
0441     }
0442 
0443     vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
0444                     sizeof(struct ccw_io_region), 0,
0445                     SLAB_ACCOUNT, 0,
0446                     sizeof(struct ccw_io_region), NULL);
0447     if (!vfio_ccw_io_region) {
0448         ret = -ENOMEM;
0449         goto out_regions;
0450     }
0451 
0452     vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
0453                     sizeof(struct ccw_cmd_region), 0,
0454                     SLAB_ACCOUNT, 0,
0455                     sizeof(struct ccw_cmd_region), NULL);
0456     if (!vfio_ccw_cmd_region) {
0457         ret = -ENOMEM;
0458         goto out_regions;
0459     }
0460 
0461     vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
0462                     sizeof(struct ccw_schib_region), 0,
0463                     SLAB_ACCOUNT, 0,
0464                     sizeof(struct ccw_schib_region), NULL);
0465 
0466     if (!vfio_ccw_schib_region) {
0467         ret = -ENOMEM;
0468         goto out_regions;
0469     }
0470 
0471     vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
0472                     sizeof(struct ccw_crw_region), 0,
0473                     SLAB_ACCOUNT, 0,
0474                     sizeof(struct ccw_crw_region), NULL);
0475 
0476     if (!vfio_ccw_crw_region) {
0477         ret = -ENOMEM;
0478         goto out_regions;
0479     }
0480 
0481     ret = mdev_register_driver(&vfio_ccw_mdev_driver);
0482     if (ret)
0483         goto out_regions;
0484 
0485     isc_register(VFIO_CCW_ISC);
0486     ret = css_driver_register(&vfio_ccw_sch_driver);
0487     if (ret) {
0488         isc_unregister(VFIO_CCW_ISC);
0489         goto out_driver;
0490     }
0491 
0492     return ret;
0493 
0494 out_driver:
0495     mdev_unregister_driver(&vfio_ccw_mdev_driver);
0496 out_regions:
0497     vfio_ccw_destroy_regions();
0498     destroy_workqueue(vfio_ccw_work_q);
0499     vfio_ccw_debug_exit();
0500     return ret;
0501 }
0502 
/* Module exit: undo vfio_ccw_sch_init() in reverse order. */
static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
/* Standard module entry/exit hookup */
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");