// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
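/*
 * Both parameters are read-only at runtime (S_IRUGO) and can only be set
 * while loading the driver; for example (assuming the module is built as
 * scm_block, values chosen for illustration only):
 *
 *    modprobe scm_block nr_requests=128 nr_requests_per_io=16
 */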

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

static void __scm_free_rq(struct scm_request *scmrq)
{
    struct aob_rq_header *aobrq = to_aobrq(scmrq);

    free_page((unsigned long) scmrq->aob);
    kfree(scmrq->request);
    kfree(aobrq);
}

static void scm_free_rqs(void)
{
    struct list_head *iter, *safe;
    struct scm_request *scmrq;

    spin_lock_irq(&list_lock);
    list_for_each_safe(iter, safe, &inactive_requests) {
        scmrq = list_entry(iter, struct scm_request, list);
        list_del(&scmrq->list);
        __scm_free_rq(scmrq);
    }
    spin_unlock_irq(&list_lock);

    mempool_destroy(aidaw_pool);
}

static int __scm_alloc_rq(void)
{
    struct aob_rq_header *aobrq;
    struct scm_request *scmrq;

    aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
    if (!aobrq)
        return -ENOMEM;

    scmrq = (void *) aobrq->data;
    scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
    if (!scmrq->aob)
        goto free;

    scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
                 GFP_KERNEL);
    if (!scmrq->request)
        goto free;

    INIT_LIST_HEAD(&scmrq->list);
    spin_lock_irq(&list_lock);
    list_add(&scmrq->list, &inactive_requests);
    spin_unlock_irq(&list_lock);

    return 0;
free:
    __scm_free_rq(scmrq);
    return -ENOMEM;
}

/* Preallocate nrqs request structures and the shared aidaw page pool. */
static int scm_alloc_rqs(unsigned int nrqs)
{
    int ret = 0;

    aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
    if (!aidaw_pool)
        return -ENOMEM;

    while (nrqs-- && !ret)
        ret = __scm_alloc_rq();

    return ret;
}

/* Take an unused request from the inactive list, or NULL if none is left. */
static struct scm_request *scm_request_fetch(void)
{
    struct scm_request *scmrq = NULL;

    spin_lock_irq(&list_lock);
    if (list_empty(&inactive_requests))
        goto out;
    scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
    list_del(&scmrq->list);
out:
    spin_unlock_irq(&list_lock);
    return scmrq;
}

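/*
 * Return a finished request to the inactive list. Only page-aligned aidaws
 * can have come from aidaw_pool, so only those are handed back to it;
 * aidaws carved out of the aob page itself (see scm_request_init()) need
 * no separate freeing.
 */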
static void scm_request_done(struct scm_request *scmrq)
{
    unsigned long flags;
    struct msb *msb;
    u64 aidaw;
    int i;

    for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
        msb = &scmrq->aob->msb[i];
        aidaw = msb->data_addr;

        if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
            IS_ALIGNED(aidaw, PAGE_SIZE))
            mempool_free(virt_to_page(aidaw), aidaw_pool);
    }

    spin_lock_irqsave(&list_lock, flags);
    list_add(&scmrq->list, &inactive_requests);
    spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
    return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
    struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

    return page ? page_address(page) : NULL;
}

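/*
 * Number of data bytes addressable by the aidaw entries that still fit
 * between *aidaw and the end of its page (each entry maps one 4 KiB block).
 * A rough example, assuming a 16-byte struct aidaw: a pointer 256 bytes
 * below the page boundary leaves room for 16 entries, i.e. 64 KiB of data.
 */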
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
    unsigned long _aidaw = (unsigned long) aidaw;
    unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

    return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

/*
 * Reuse the aidaw space left over from the previous msb if it is big enough,
 * otherwise take a fresh zeroed page from aidaw_pool.
 */
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
    struct aidaw *aidaw;

    if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
        return scmrq->next_aidaw;

    aidaw = scm_aidaw_alloc();
    if (aidaw)
        memset(aidaw, 0, PAGE_SIZE);
    return aidaw;
}

/*
 * Describe the next queued request as an additional msb in the aob and build
 * its aidaw list from the request's segments. blk_rq_pos() counts 512-byte
 * sectors, while the msb addresses 4 KiB blocks.
 */
static int scm_request_prepare(struct scm_request *scmrq)
{
    struct scm_blk_dev *bdev = scmrq->bdev;
    struct scm_device *scmdev = bdev->gendisk->private_data;
    int pos = scmrq->aob->request.msb_count;
    struct msb *msb = &scmrq->aob->msb[pos];
    struct request *req = scmrq->request[pos];
    struct req_iterator iter;
    struct aidaw *aidaw;
    struct bio_vec bv;

    aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
    if (!aidaw)
        return -ENOMEM;

    msb->bs = MSB_BS_4K;
    scmrq->aob->request.msb_count++;
    msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
    msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
    msb->flags |= MSB_FLAG_IDA;
    msb->data_addr = (u64) aidaw;

    rq_for_each_segment(bv, req, iter) {
        WARN_ON(bv.bv_offset);
        msb->blk_count += bv.bv_len >> 12;
        aidaw->data_addr = (u64) page_address(bv.bv_page);
        aidaw++;
    }

    scmrq->next_aidaw = aidaw;
    return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
                   struct request *req)
{
    scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
                    struct scm_request *scmrq)
{
    struct aob_rq_header *aobrq = to_aobrq(scmrq);
    struct aob *aob = scmrq->aob;

    memset(scmrq->request, 0,
           nr_requests_per_io * sizeof(scmrq->request[0]));
    memset(aob, 0, sizeof(*aob));
    aobrq->scmdev = bdev->scmdev;
    aob->request.cmd_code = ARQB_CMD_MOVE;
    aob->request.data = (u64) aobrq;
    scmrq->bdev = bdev;
    scmrq->retries = 4;
    scmrq->error = BLK_STS_OK;
    /* We don't use all msbs - place aidaws at the end of the aob page. */
    scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}

static void scm_request_requeue(struct scm_request *scmrq)
{
    struct scm_blk_dev *bdev = scmrq->bdev;
    int i;

    for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
        blk_mq_requeue_request(scmrq->request[i], false);

    atomic_dec(&bdev->queued_reqs);
    scm_request_done(scmrq);
    blk_mq_kick_requeue_list(bdev->rq);
}

static void scm_request_finish(struct scm_request *scmrq)
{
    struct scm_blk_dev *bdev = scmrq->bdev;
    blk_status_t *error;
    int i;

    for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
        error = blk_mq_rq_to_pdu(scmrq->request[i]);
        *error = scmrq->error;
        if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
            blk_mq_complete_request(scmrq->request[i]);
    }

    atomic_dec(&bdev->queued_reqs);
    scm_request_done(scmrq);
}

static void scm_request_start(struct scm_request *scmrq)
{
    struct scm_blk_dev *bdev = scmrq->bdev;

    atomic_inc(&bdev->queued_reqs);
    if (eadm_start_aob(scmrq->aob)) {
        SCM_LOG(5, "no subchannel");
        scm_request_requeue(scmrq);
    }
}

struct scm_queue {
    struct scm_request *scmrq;
    spinlock_t lock;
};

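/*
 * queue_rq handler. Requests are gathered into the per-hw-queue aob under
 * sq->lock: each request becomes one msb, and the aob is only submitted via
 * scm_request_start() once the block layer signals the last request of the
 * batch (qd->last) or all nr_requests_per_io msb slots are used. Writes are
 * rejected with BLK_STS_RESOURCE while the device is in SCM_WR_PROHIBIT.
 */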
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
               const struct blk_mq_queue_data *qd)
{
    struct scm_device *scmdev = hctx->queue->queuedata;
    struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
    struct scm_queue *sq = hctx->driver_data;
    struct request *req = qd->rq;
    struct scm_request *scmrq;

    spin_lock(&sq->lock);
    if (!scm_permit_request(bdev, req)) {
        spin_unlock(&sq->lock);
        return BLK_STS_RESOURCE;
    }

    scmrq = sq->scmrq;
    if (!scmrq) {
        scmrq = scm_request_fetch();
        if (!scmrq) {
            SCM_LOG(5, "no request");
            spin_unlock(&sq->lock);
            return BLK_STS_RESOURCE;
        }
        scm_request_init(bdev, scmrq);
        sq->scmrq = scmrq;
    }
    scm_request_set(scmrq, req);

    if (scm_request_prepare(scmrq)) {
        SCM_LOG(5, "aidaw alloc failed");
        scm_request_set(scmrq, NULL);

        if (scmrq->aob->request.msb_count)
            scm_request_start(scmrq);

        sq->scmrq = NULL;
        spin_unlock(&sq->lock);
        return BLK_STS_RESOURCE;
    }
    blk_mq_start_request(req);

    if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
        scm_request_start(scmrq);
        sq->scmrq = NULL;
    }
    spin_unlock(&sq->lock);
    return BLK_STS_OK;
}

static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                 unsigned int idx)
{
    struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

    if (!qd)
        return -ENOMEM;

    spin_lock_init(&qd->lock);
    hctx->driver_data = qd;

    return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
    struct scm_queue *qd = hctx->driver_data;

    WARN_ON(qd->scmrq);
    kfree(hctx->driver_data);
    hctx->driver_data = NULL;
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
    struct aob *aob = scmrq->aob;

    if (scmrq->error == BLK_STS_TIMEOUT)
        SCM_LOG(1, "Request timeout");
    else {
        SCM_LOG(1, "Request error");
        SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
    }
    if (scmrq->retries)
        SCM_LOG(1, "Retry request");
    else
        pr_err("An I/O operation to SCM failed with rc=%d\n",
               scmrq->error);
}

static void scm_blk_handle_error(struct scm_request *scmrq)
{
    struct scm_blk_dev *bdev = scmrq->bdev;
    unsigned long flags;

    if (scmrq->error != BLK_STS_IOERR)
        goto restart;

    /* For -EIO the response block is valid. */
    switch (scmrq->aob->response.eqc) {
    case EQC_WR_PROHIBIT:
        spin_lock_irqsave(&bdev->lock, flags);
        if (bdev->state != SCM_WR_PROHIBIT)
            pr_info("%lx: Write access to the SCM increment is suspended\n",
                (unsigned long) bdev->scmdev->address);
        bdev->state = SCM_WR_PROHIBIT;
        spin_unlock_irqrestore(&bdev->lock, flags);
        goto requeue;
    default:
        break;
    }

restart:
    if (!eadm_start_aob(scmrq->aob))
        return;

requeue:
    scm_request_requeue(scmrq);
}

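/*
 * Completion callback for an aob started via eadm_start_aob() (the handler
 * is wired up outside this file). On error the request is retried, up to the
 * retry budget set in scm_request_init(), before being finished with the
 * error status.
 */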
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
    struct scm_request *scmrq = data;

    scmrq->error = error;
    if (error) {
        __scmrq_log_error(scmrq);
        if (scmrq->retries-- > 0) {
            scm_blk_handle_error(scmrq);
            return;
        }
    }

    scm_request_finish(scmrq);
}

static void scm_blk_request_done(struct request *req)
{
    blk_status_t *error = blk_mq_rq_to_pdu(req);

    blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
    .owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
    .queue_rq = scm_blk_request,
    .complete = scm_blk_request_done,
    .init_hctx = scm_blk_init_hctx,
    .exit_hctx = scm_blk_exit_hctx,
};

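/*
 * Set up the blk-mq tag set, gendisk and queue limits for one SCM increment.
 * The logical block size is 4 KiB, and disks are named scma..scmz followed
 * by scmaa..scmzz, which caps the driver at 702 devices.
 */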
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
    unsigned int devindex, nr_max_blk;
    struct request_queue *rq;
    int len, ret;

    devindex = atomic_inc_return(&nr_devices) - 1;
    /* scma..scmz + scmaa..scmzz */
    if (devindex > 701) {
        ret = -ENODEV;
        goto out;
    }

    bdev->scmdev = scmdev;
    bdev->state = SCM_OPER;
    spin_lock_init(&bdev->lock);
    atomic_set(&bdev->queued_reqs, 0);

    bdev->tag_set.ops = &scm_mq_ops;
    bdev->tag_set.cmd_size = sizeof(blk_status_t);
    bdev->tag_set.nr_hw_queues = nr_requests;
    bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
    bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
    bdev->tag_set.numa_node = NUMA_NO_NODE;

    ret = blk_mq_alloc_tag_set(&bdev->tag_set);
    if (ret)
        goto out;

    bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);
    if (IS_ERR(bdev->gendisk)) {
        ret = PTR_ERR(bdev->gendisk);
        goto out_tag;
    }
    rq = bdev->rq = bdev->gendisk->queue;
    nr_max_blk = min(scmdev->nr_max_block,
             (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

    blk_queue_logical_block_size(rq, 1 << 12);
    blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
    blk_queue_max_segments(rq, nr_max_blk);
    blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
    blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);

    bdev->gendisk->private_data = scmdev;
    bdev->gendisk->fops = &scm_blk_devops;
    bdev->gendisk->major = scm_major;
    bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
    bdev->gendisk->minors = SCM_NR_PARTS;

    len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
    if (devindex > 25) {
        len += snprintf(bdev->gendisk->disk_name + len,
                DISK_NAME_LEN - len, "%c",
                'a' + (devindex / 26) - 1);
        devindex = devindex % 26;
    }
    snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
         'a' + devindex);

    /* 512 byte sectors */
    set_capacity(bdev->gendisk, scmdev->size >> 9);
    ret = device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
    if (ret)
        goto out_cleanup_disk;

    return 0;

out_cleanup_disk:
    put_disk(bdev->gendisk);
out_tag:
    blk_mq_free_tag_set(&bdev->tag_set);
out:
    atomic_dec(&nr_devices);
    return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
    del_gendisk(bdev->gendisk);
    put_disk(bdev->gendisk);
    blk_mq_free_tag_set(&bdev->tag_set);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
    unsigned long flags;

    spin_lock_irqsave(&bdev->lock, flags);
    if (bdev->state == SCM_WR_PROHIBIT)
        pr_info("%lx: Write access to the SCM increment is restored\n",
            (unsigned long) bdev->scmdev->address);
    bdev->state = SCM_OPER;
    spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
    if (!nr_requests_per_io || nr_requests_per_io > 64)
        return false;

    return true;
}

static int __init scm_blk_init(void)
{
    int ret = -EINVAL;

    if (!scm_blk_params_valid())
        goto out;

    ret = register_blkdev(0, "scm");
    if (ret < 0)
        goto out;

    scm_major = ret;
    ret = scm_alloc_rqs(nr_requests);
    if (ret)
        goto out_free;

    scm_debug = debug_register("scm_log", 16, 1, 16);
    if (!scm_debug) {
        ret = -ENOMEM;
        goto out_free;
    }

    debug_register_view(scm_debug, &debug_hex_ascii_view);
    debug_set_level(scm_debug, 2);

    ret = scm_drv_init();
    if (ret)
        goto out_dbf;

    return ret;

out_dbf:
    debug_unregister(scm_debug);
out_free:
    scm_free_rqs();
    unregister_blkdev(scm_major, "scm");
out:
    return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
    scm_drv_cleanup();
    debug_unregister(scm_debug);
    scm_free_rqs();
    unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);