// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 * Based on.......: linux/drivers/s390/block/mdisk.c
 * ...............: by Hartmunt Penner <hpenner@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2000
 *
 */

#define KMSG_COMPONENT "dasd"

#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/asm-extable.h>
#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>

#include "dasd_int.h"
#include "dasd_diag.h"

#define PRINTK_HEADER "dasd(diag):"

MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
 * amount of storage that is available in the static I/O buffer for each
 * device. Currently each device gets 2 pages. We want to fit two requests
 * into the available memory so that we can immediately start the next if one
 * finishes. */
#define DIAG_MAX_BLOCKS	(((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
			   sizeof(struct dasd_diag_req)) / \
			   sizeof(struct dasd_diag_bio)) / 2)
#define DIAG_MAX_RETRIES	32
#define DIAG_TIMEOUT		50
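/*
 * Put differently: the two pages reserved per device hold one struct
 * dasd_ccw_req, one struct dasd_diag_req header and the flexible array of
 * struct dasd_diag_bio entries; DIAG_MAX_BLOCKS halves the number of bio
 * entries that fit so that a second request of the same size can be built
 * while the first one is still in flight.
 */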

static struct dasd_discipline dasd_diag_discipline;

struct dasd_diag_private {
	struct dasd_diag_characteristics rdc_data;
	struct dasd_diag_rw_io iob;
	struct dasd_diag_init_io iib;
	blocknum_t pt_block;
	struct ccw_dev_id dev_id;
};

struct dasd_diag_req {
	unsigned int block_count;
	struct dasd_diag_bio bio[];
};

static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };	/* EBCDIC CMS1 */

/* Perform DIAG250 call with block I/O parameter list iob (input and output)
 * and function code cmd.
 * In case of an exception return 3. Otherwise return result of bitwise OR of
 * resulting condition code and DIAG return code. */
static inline int __dia250(void *iob, int cmd)
{
	union register_pair rx = { .even = (unsigned long)iob, };
	typedef union {
		struct dasd_diag_init_io init_io;
		struct dasd_diag_rw_io rw_io;
	} addr_type;
	int cc;

	cc = 3;
	asm volatile(
		"	diag	%[rx],%[cmd],0x250\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
		: [cmd] "d" (cmd)
		: "cc");
	return cc | rx.odd;
}

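/*
 * Callers in this file interpret the combined dia250() return value as
 * follows: the low two bits hold the condition code (cc 3 indicates an
 * exception), and the DIAG return code is OR'ed on top of it. In particular,
 * 0 means synchronous completion, 4 after INIT_BIO means the minidisk is
 * read-only, and 8 after RW_BIO means the asynchronous I/O was started.
 */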
static inline int dia250(void *iob, int cmd)
{
	diag_stat_inc(DIAG_STAT_X250);
	return __dia250(iob, cmd);
}

/* Initialize block I/O to DIAG device using the specified blocksize and
 * block offset. On success, return zero and set end_block to contain the
 * number of blocks on the device minus the specified offset. Return non-zero
 * otherwise. */
static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
	     blocknum_t offset, blocknum_t *end_block)
{
	struct dasd_diag_private *private = device->private;
	struct dasd_diag_init_io *iib = &private->iib;
	int rc;

	memset(iib, 0, sizeof(struct dasd_diag_init_io));

	iib->dev_nr = private->dev_id.devno;
	iib->block_size = blocksize;
	iib->offset = offset;
	iib->flaga = DASD_DIAG_FLAGA_DEFAULT;

	rc = dia250(iib, INIT_BIO);

	if ((rc & 3) == 0 && end_block)
		*end_block = iib->end_block;

	return rc;
}

/* Remove block I/O environment for device. Return zero on success, non-zero
 * otherwise. */
static inline int
mdsk_term_io(struct dasd_device *device)
{
	struct dasd_diag_private *private = device->private;
	struct dasd_diag_init_io *iib = &private->iib;
	int rc;

	memset(iib, 0, sizeof(struct dasd_diag_init_io));
	iib->dev_nr = private->dev_id.devno;
	rc = dia250(iib, TERM_BIO);
	return rc;
}

/* Error recovery for failed DIAG requests - try to reestablish the DIAG
 * environment. */
static void
dasd_diag_erp(struct dasd_device *device)
{
	int rc;

	mdsk_term_io(device);
	rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
	if (rc == 4) {
		if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
			pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
				dev_name(&device->cdev->dev));
		rc = 0;
	}
	if (rc)
		pr_warn("%s: DIAG ERP failed with rc=%d\n",
			dev_name(&device->cdev->dev), rc);
}

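/*
 * Note on the return value convention used by dasd_start_diag(): a
 * dia250(RW_BIO) result of 0 means the request already completed
 * synchronously, which is signalled to the caller as -EACCES so that only a
 * dasd_schedule_bh() and no timer is set up; 8 means the asynchronous I/O
 * was started and the external interrupt handler will finish the request;
 * everything else triggers DIAG ERP.
 */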
/* Start a given request at the device. Return zero on success, non-zero
 * otherwise. */
static int
dasd_start_diag(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	struct dasd_diag_private *private;
	struct dasd_diag_req *dreq;
	int rc;

	device = cqr->startdev;
	if (cqr->retries < 0) {
		DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
			      "- no retry left", cqr);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	private = device->private;
	dreq = cqr->data;

	private->iob.dev_nr = private->dev_id.devno;
	private->iob.key = 0;
	private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
	private->iob.block_count = dreq->block_count;
	private->iob.interrupt_params = (addr_t) cqr;
	private->iob.bio_list = dreq->bio;
	private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;

	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;

	rc = dia250(&private->iob, RW_BIO);
	switch (rc) {
	case 0: /* Synchronous I/O finished successfully */
		cqr->stopclk = get_tod_clock();
		cqr->status = DASD_CQR_SUCCESS;
		/* Indicate to calling function that only a dasd_schedule_bh()
		   and no timer is needed */
		rc = -EACCES;
		break;
	case 8: /* Asynchronous I/O was started */
		cqr->status = DASD_CQR_IN_IO;
		rc = 0;
		break;
	default: /* Error condition */
		cqr->status = DASD_CQR_QUEUED;
		DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
		dasd_diag_erp(device);
		rc = -EIO;
		break;
	}
	cqr->intrc = rc;
	return rc;
}

/* Terminate given request at the device. */
static int
dasd_diag_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	device = cqr->startdev;
	mdsk_term_io(device);
	mdsk_init_io(device, device->block->bp_block, 0, NULL);
	cqr->status = DASD_CQR_CLEAR_PENDING;
	cqr->stopclk = get_tod_clock();
	dasd_schedule_device_bh(device);
	return 0;
}

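/*
 * Completion of asynchronous DIAG I/O is signalled by an external interrupt.
 * The interrupt parameter carries back the address of the dasd_ccw_req that
 * was stored in iob.interrupt_params by dasd_start_diag(), and the low byte
 * of the subcode contains the I/O status (0 on success).
 */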
/* Handle external interruption. */
static void dasd_ext_handler(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long expires;
	unsigned long flags;
	addr_t ip;
	int rc;

	switch (ext_code.subcode >> 8) {
	case DASD_DIAG_CODE_31BIT:
		ip = (addr_t) param32;
		break;
	case DASD_DIAG_CODE_64BIT:
		ip = (addr_t) param64;
		break;
	default:
		return;
	}
	inc_irq_stat(IRQEXT_DSD);
	if (!ip) {		/* no intparm: unsolicited interrupt */
		DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
			  "interrupt");
		return;
	}
	cqr = (struct dasd_ccw_req *) ip;
	device = (struct dasd_device *) cqr->startdev;
	if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " magic number of dasd_ccw_req 0x%08X doesn't"
			      " match discipline 0x%08X",
			      cqr->magic, *(int *) (&device->discipline->name));
		return;
	}

	/* get irq lock to modify request queue */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);

	/* Check for a pending clear operation */
	if (cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return;
	}

	cqr->stopclk = get_tod_clock();

	expires = 0;
	if ((ext_code.subcode & 0xff) == 0) {
		cqr->status = DASD_CQR_SUCCESS;
		/* Start first request on queue if possible -> fast_io. */
		if (!list_empty(&device->ccw_queue)) {
			next = list_entry(device->ccw_queue.next,
					  struct dasd_ccw_req, devlist);
			if (next->status == DASD_CQR_QUEUED) {
				rc = dasd_start_diag(next);
				if (rc == 0)
					expires = next->expires;
			}
		}
	} else {
		cqr->status = DASD_CQR_QUEUED;
		DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
			      "request %p was %d (%d retries left)", cqr,
			      ext_code.subcode & 0xff, cqr->retries);
		dasd_diag_erp(device);
	}

	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);

	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

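/*
 * Device recognition: allocate the private and block structures, read the
 * device characteristics via DIAG 0x210, derive the label block position
 * from the virtual device class, probe the block size by reading the label
 * block via DIAG 0x250, and finally take the geometry from the CMS1 label if
 * one is present (otherwise from the block count returned by INIT_BIO).
 */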
/* Check whether device can be controlled by DIAG discipline. Return zero on
 * success, non-zero otherwise. */
static int
dasd_diag_check_device(struct dasd_device *device)
{
	struct dasd_diag_private *private = device->private;
	struct dasd_diag_characteristics *rdc_data;
	struct vtoc_cms_label *label;
	struct dasd_block *block;
	struct dasd_diag_bio *bio;
	unsigned int sb, bsize;
	blocknum_t end_block;
	int rc;

	if (private == NULL) {
		private = kzalloc(sizeof(*private), GFP_KERNEL);
		if (private == NULL) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Allocating memory for private DASD data "
				      "failed\n");
			return -ENOMEM;
		}
		ccw_device_get_id(device->cdev, &private->dev_id);
		device->private = private;
	}
	block = dasd_alloc_block();
	if (IS_ERR(block)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "could not allocate dasd block structure");
		device->private = NULL;
		kfree(private);
		return PTR_ERR(block);
	}
	device->block = block;
	block->base = device;

	/* Read Device Characteristics */
	rdc_data = &private->rdc_data;
	rdc_data->dev_nr = private->dev_id.devno;
	rdc_data->rdc_len = sizeof(struct dasd_diag_characteristics);

	rc = diag210((struct diag210 *) rdc_data);
	if (rc) {
		DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
			      "information (rc=%d)", rc);
		rc = -EOPNOTSUPP;
		goto out;
	}

	device->default_expires = DIAG_TIMEOUT;
	device->default_retries = DIAG_MAX_RETRIES;

	/* Figure out position of label block */
	switch (private->rdc_data.vdev_class) {
	case DEV_CLASS_FBA:
		private->pt_block = 1;
		break;
	case DEV_CLASS_ECKD:
		private->pt_block = 2;
		break;
	default:
		pr_warn("%s: Device type %d is not supported in DIAG mode\n",
			dev_name(&device->cdev->dev),
			private->rdc_data.vdev_class);
		rc = -EOPNOTSUPP;
		goto out;
	}

	DBF_DEV_EVENT(DBF_INFO, device,
		      "%04X: %04X on real %04X/%02X",
		      rdc_data->dev_nr,
		      rdc_data->vdev_type,
		      rdc_data->rdev_type, rdc_data->rdev_model);

	/* terminate all outstanding operations */
	mdsk_term_io(device);

	/* figure out blocksize of device */
	label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
	if (label == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to allocate initialization request");
		rc = -ENOMEM;
		goto out;
	}
	bio = kzalloc(sizeof(*bio), GFP_KERNEL);
	if (bio == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to allocate initialization bio");
		rc = -ENOMEM;
		goto out_label;
	}
	rc = 0;
	end_block = 0;
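	/*
	 * Read the label block once for each candidate block size. DIAG
	 * block numbers are 1-based, hence pt_block + 1 below; a synchronous
	 * read completing with rc == 0 means the device is formatted with
	 * that block size.
	 */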
	/* try all sizes - needed for ECKD devices */
	for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
		mdsk_init_io(device, bsize, 0, &end_block);
		memset(bio, 0, sizeof(*bio));
		bio->type = MDSK_READ_REQ;
		bio->block_number = private->pt_block + 1;
		bio->buffer = label;
		memset(&private->iob, 0, sizeof(struct dasd_diag_rw_io));
		private->iob.dev_nr = rdc_data->dev_nr;
		private->iob.key = 0;
		private->iob.flags = 0;	/* do synchronous io */
		private->iob.block_count = 1;
		private->iob.interrupt_params = 0;
		private->iob.bio_list = bio;
		private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
		rc = dia250(&private->iob, RW_BIO);
		if (rc == 3) {
			pr_warn("%s: A 64-bit DIAG call failed\n",
				dev_name(&device->cdev->dev));
			rc = -EOPNOTSUPP;
			goto out_bio;
		}
		mdsk_term_io(device);
		if (rc == 0)
			break;
	}
	if (bsize > PAGE_SIZE) {
		pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
			dev_name(&device->cdev->dev), rc);
		rc = -EIO;
		goto out_bio;
	}
	/* check for label block */
	if (memcmp(label->label_id, DASD_DIAG_CMS1,
		   sizeof(DASD_DIAG_CMS1)) == 0) {
		/* get formatted blocksize from label block */
		bsize = (unsigned int) label->block_size;
		block->blocks = (unsigned long) label->block_count;
	} else
		block->blocks = end_block;
	block->bp_block = bsize;
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < bsize; sb = sb << 1)
		block->s2b_shift++;
	rc = mdsk_init_io(device, block->bp_block, 0, NULL);
	if (rc && (rc != 4)) {
		pr_warn("%s: DIAG initialization failed with rc=%d\n",
			dev_name(&device->cdev->dev), rc);
		rc = -EIO;
	} else {
		if (rc == 4)
			set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
		pr_info("%s: New DASD with %ld byte/block, total size %ld "
			"KB%s\n", dev_name(&device->cdev->dev),
			(unsigned long) block->bp_block,
			(unsigned long) (block->blocks <<
					 block->s2b_shift) >> 1,
			(rc == 4) ? ", read-only device" : "");
		rc = 0;
	}
out_bio:
	kfree(bio);
out_label:
	free_page((long) label);
out:
	if (rc) {
		device->block = NULL;
		dasd_free_block(block);
		device->private = NULL;
		kfree(private);
	}
	return rc;
}

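/*
 * The geometry reported for a DIAG device is synthetic: 16 heads,
 * (128 >> s2b_shift) sectors per track, with the cylinder count derived from
 * the total block count.
 */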
/* Fill in virtual disk geometry for device. Return zero on success, non-zero
 * otherwise. */
static int
dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	if (dasd_check_blocksize(block->bp_block) != 0)
		return -EINVAL;
	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
	geo->heads = 16;
	geo->sectors = 128 >> block->s2b_shift;
	return 0;
}

static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req *cqr)
{
	return dasd_default_erp_action;
}

static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req *cqr)
{
	return dasd_default_erp_postaction;
}

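/*
 * Each bio_vec segment of the block layer request is split into bp_block
 * sized pieces; every piece becomes one struct dasd_diag_bio entry with a
 * 1-based block number and a pointer into the segment buffer.
 */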
/* Create DASD request from block device request. Return pointer to new
 * request on success, ERR_PTR otherwise. */
static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
					       struct dasd_block *block,
					       struct request *req)
{
	struct dasd_ccw_req *cqr;
	struct dasd_diag_req *dreq;
	struct dasd_diag_bio *dbio;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int count;
	sector_t recid, first_rec, last_rec;
	unsigned int blksize, off;
	unsigned char rw_cmd;

	if (rq_data_dir(req) == READ)
		rw_cmd = MDSK_READ_REQ;
	else if (rq_data_dir(req) == WRITE)
		rw_cmd = MDSK_WRITE_REQ;
	else
		return ERR_PTR(-EINVAL);
	blksize = block->bp_block;
	/* Calculate record id of first and last block. */
	first_rec = blk_rq_pos(req) >> block->s2b_shift;
	last_rec =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Fba can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);
	/* Build the request */
	cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
				   memdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	dreq = (struct dasd_diag_req *) cqr->data;
	dreq->block_count = count;
	dbio = dreq->bio;
	recid = first_rec;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			memset(dbio, 0, sizeof(struct dasd_diag_bio));
			dbio->type = rw_cmd;
			dbio->block_number = recid + 1;
			dbio->buffer = dst;
			dbio++;
			dst += blksize;
			recid++;
		}
	}
	cqr->retries = memdev->default_retries;
	cqr->buildclk = get_tod_clock();
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = memdev;
	cqr->memdev = memdev;
	cqr->block = block;
	cqr->expires = memdev->default_expires * HZ;
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/* Release DASD request. Return non-zero if request was successful, zero
 * otherwise. */
static int
dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	int status;

	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}

static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	if (cqr->retries < 0)
		cqr->status = DASD_CQR_FAILED;
	else
		cqr->status = DASD_CQR_FILLED;
}

/* Fill in IOCTL data for device. */
static int
dasd_diag_fill_info(struct dasd_device *device,
		    struct dasd_information2_t *info)
{
	struct dasd_diag_private *private = device->private;

	info->label_block = (unsigned int) private->pt_block;
	info->FBA_layout = 1;
	info->format = DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(private->rdc_data);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(private->rdc_data));
	info->confdata_size = 0;
	return 0;
}

static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req *req,
		     struct irb *stat)
{
	DBF_DEV_EVENT(DBF_WARNING, device, "%s",
		      "dump sense not available for DIAG data");
}

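/*
 * DIAG_MAX_BLOCKS is in units of the device block size; shifting by
 * s2b_shift converts it into 512-byte sectors for the block layer limits set
 * below.
 */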
/*
 * Initialize block layer request queue.
 */
static void dasd_diag_setup_blk_queue(struct dasd_block *block)
{
	unsigned int logical_block_size = block->bp_block;
	struct request_queue *q = block->request_queue;
	int max;

	max = DIAG_MAX_BLOCKS << block->s2b_shift;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	q->limits.max_dev_sectors = max;
	blk_queue_logical_block_size(q, logical_block_size);
	blk_queue_max_hw_sectors(q, max);
	blk_queue_max_segments(q, USHRT_MAX);
	/* With page sized segments each segment can be translated into one idaw/tidaw */
	blk_queue_max_segment_size(q, PAGE_SIZE);
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}

static int dasd_diag_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	return dasd_generic_verify_path(device, tbvpm);
}

static struct dasd_discipline dasd_diag_discipline = {
	.owner = THIS_MODULE,
	.name = "DIAG",
	.ebcname = "DIAG",
	.check_device = dasd_diag_check_device,
	.pe_handler = dasd_diag_pe_handler,
	.fill_geometry = dasd_diag_fill_geometry,
	.setup_blk_queue = dasd_diag_setup_blk_queue,
	.start_IO = dasd_start_diag,
	.term_IO = dasd_diag_term_IO,
	.handle_terminated_request = dasd_diag_handle_terminated_request,
	.erp_action = dasd_diag_erp_action,
	.erp_postaction = dasd_diag_erp_postaction,
	.build_cp = dasd_diag_build_cp,
	.free_cp = dasd_diag_free_cp,
	.dump_sense = dasd_diag_dump_sense,
	.fill_info = dasd_diag_fill_info,
};

static int __init
dasd_diag_init(void)
{
	if (!MACHINE_IS_VM) {
		pr_info("Discipline %s cannot be used without z/VM\n",
			dasd_diag_discipline.name);
		return -ENODEV;
	}
	ASCEBC(dasd_diag_discipline.ebcname, 4);

	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
	dasd_diag_discipline_pointer = &dasd_diag_discipline;
	return 0;
}

static void __exit
dasd_diag_cleanup(void)
{
	unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
	dasd_diag_discipline_pointer = NULL;
}

module_init(dasd_diag_init);
module_exit(dasd_diag_cleanup);