Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * virtio_pmem.c: Virtio pmem Driver
0004  *
0005  * Discovers persistent memory range information
0006  * from host and provides a virtio based flushing
0007  * interface.
0008  */
0009 #include "virtio_pmem.h"
0010 #include "nd.h"
0011 
 /*
  * The interrupt handler: runs when the host completes one or more
  * flush requests on the request virtqueue.
  *
  * For each completed buffer, mark the originating request done and
  * wake its waiter.  Each completion frees one descriptor slot, so if
  * any requests are parked on req_list (virtqueue_add_sgs returned
  * -ENOSPC in virtio_pmem_flush), hand the freed slot to the oldest
  * one by waking it and removing it from the list.
  */
void virtio_pmem_host_ack(struct virtqueue *vq)
{
    struct virtio_pmem *vpmem = vq->vdev->priv;
    struct virtio_pmem_request *req_data, *req_buf;
    unsigned long flags;
    unsigned int len;

    /* pmem_lock serializes virtqueue access and req_list updates */
    spin_lock_irqsave(&vpmem->pmem_lock, flags);
    while ((req_data = virtqueue_get_buf(vq, &len)) != NULL) {
        /* Set done before waking so the waiter's condition is visible */
        req_data->done = true;
        wake_up(&req_data->host_acked);

        /* One descriptor freed: unpark at most one stalled submitter */
        if (!list_empty(&vpmem->req_list)) {
            req_buf = list_first_entry(&vpmem->req_list,
                    struct virtio_pmem_request, list);
            req_buf->wq_buf_avail = true;
            wake_up(&req_buf->wq_buf);
            list_del(&req_buf->list);
        }
    }
    spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
}
EXPORT_SYMBOL_GPL(virtio_pmem_host_ack);
0036 
 /*
  * The request submission function: send one FLUSH request to the host
  * over the request virtqueue and sleep until the host acknowledges it.
  *
  * Returns 0 on success, the host's returned status on a completed
  * request, -ENOMEM if the request could not be allocated, or -EIO if
  * the request could not be submitted/kicked.  May sleep; must be
  * called from process context.
  */
static int virtio_pmem_flush(struct nd_region *nd_region)
{
    struct virtio_device *vdev = nd_region->provider_data;
    struct virtio_pmem *vpmem  = vdev->priv;
    struct virtio_pmem_request *req_data;
    struct scatterlist *sgs[2], sg, ret;
    unsigned long flags;
    int err, err1;

    might_sleep();
    req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
    if (!req_data)
        return -ENOMEM;

    req_data->done = false;
    init_waitqueue_head(&req_data->host_acked);
    init_waitqueue_head(&req_data->wq_buf);
    INIT_LIST_HEAD(&req_data->list);
    req_data->req.type = cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH);
    /* sgs[0]: device-readable request; sgs[1]: device-writable reply */
    sg_init_one(&sg, &req_data->req, sizeof(req_data->req));
    sgs[0] = &sg;
    /*
     * NOTE(review): the sg starts at resp.ret but spans sizeof(resp);
     * this is only safe if ret is the first member of resp — confirm
     * against the struct layout in virtio_pmem.h.
     */
    sg_init_one(&ret, &req_data->resp.ret, sizeof(req_data->resp));
    sgs[1] = &ret;

    /* pmem_lock protects the virtqueue and the parked-request list */
    spin_lock_irqsave(&vpmem->pmem_lock, flags);
     /*
      * If virtqueue_add_sgs returns -ENOSPC then req_vq virtual
      * queue does not have free descriptor. We add the request
      * to req_list and wait for host_ack to wake us up when free
      * slots are available.
      */
    while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
                    GFP_ATOMIC)) == -ENOSPC) {

        dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
        req_data->wq_buf_avail = false;
        list_add_tail(&req_data->list, &vpmem->req_list);
        /* Drop the lock so virtio_pmem_host_ack() can run and wake us */
        spin_unlock_irqrestore(&vpmem->pmem_lock, flags);

        /* A host response results in "host_ack" getting called */
        wait_event(req_data->wq_buf, req_data->wq_buf_avail);
        /* Retake the lock and retry the descriptor we were woken for */
        spin_lock_irqsave(&vpmem->pmem_lock, flags);
    }
    /* Kick while still holding the lock, then release before sleeping */
    err1 = virtqueue_kick(vpmem->req_vq);
    spin_unlock_irqrestore(&vpmem->pmem_lock, flags);
    /*
     * virtqueue_add_sgs failed with error different than -ENOSPC, we can't
     * do anything about that.  virtqueue_kick() returning false (!err1)
     * likewise means the host could not be notified.
     */
    if (err || !err1) {
        dev_info(&vdev->dev, "failed to send command to virtio pmem device\n");
        err = -EIO;
    } else {
        /* A host response results in "host_ack" getting called */
        wait_event(req_data->host_acked, req_data->done);
        /* Propagate the host's status for the flush (0 on success) */
        err = le32_to_cpu(req_data->resp.ret);
    }

    kfree(req_data);
    return err;
};
0099 
0100 /* The asynchronous flush callback function */
0101 int async_pmem_flush(struct nd_region *nd_region, struct bio *bio)
0102 {
0103     /*
0104      * Create child bio for asynchronous flush and chain with
0105      * parent bio. Otherwise directly call nd_region flush.
0106      */
0107     if (bio && bio->bi_iter.bi_sector != -1) {
0108         struct bio *child = bio_alloc(bio->bi_bdev, 0, REQ_PREFLUSH,
0109                           GFP_ATOMIC);
0110 
0111         if (!child)
0112             return -ENOMEM;
0113         bio_clone_blkg_association(child, bio);
0114         child->bi_iter.bi_sector = -1;
0115         bio_chain(child, bio);
0116         submit_bio(child);
0117         return 0;
0118     }
0119     if (virtio_pmem_flush(nd_region))
0120         return -EIO;
0121 
0122     return 0;
0123 };
0124 EXPORT_SYMBOL_GPL(async_pmem_flush);
0125 MODULE_LICENSE("GPL");