// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *          Horst Hummel <Horst.Hummel@de.ibm.com>
 *          Carsten Otte <Cotte@de.ibm.com>
 *          Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD       "dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
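
/*
 * Both parameters are read-only at runtime (mode 0444) and serve as
 * defaults for devices set online afterwards; a sketch of setting them
 * at module load time (assuming the usual dasd_mod module name):
 *
 *	modprobe dasd_mod queue_depth=64 nr_hw_queues=8
 */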

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
           " Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
    struct dasd_device *device;

    device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
    if (!device)
        return ERR_PTR(-ENOMEM);

    /* Get two pages for normal block device operations. */
    device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
    if (!device->ccw_mem) {
        kfree(device);
        return ERR_PTR(-ENOMEM);
    }
    /* Get one page for error recovery. */
    device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
    if (!device->erp_mem) {
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
        return ERR_PTR(-ENOMEM);
    }
    /* Get two pages for ese format. */
    device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
    if (!device->ese_mem) {
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
        return ERR_PTR(-ENOMEM);
    }

    dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
    dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
    dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
    spin_lock_init(&device->mem_lock);
    atomic_set(&device->tasklet_scheduled, 0);
    tasklet_init(&device->tasklet, dasd_device_tasklet,
             (unsigned long) device);
    INIT_LIST_HEAD(&device->ccw_queue);
    timer_setup(&device->timer, dasd_device_timeout, 0);
    INIT_WORK(&device->kick_work, do_kick_device);
    INIT_WORK(&device->reload_device, do_reload_device);
    INIT_WORK(&device->requeue_requests, do_requeue_requests);
    device->state = DASD_STATE_NEW;
    device->target = DASD_STATE_NEW;
    mutex_init(&device->state_mutex);
    spin_lock_init(&device->profile.lock);
    return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
    kfree(device->private);
    free_pages((unsigned long) device->ese_mem, 1);
    free_page((unsigned long) device->erp_mem);
    free_pages((unsigned long) device->ccw_mem, 1);
    kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
    struct dasd_block *block;

    block = kzalloc(sizeof(*block), GFP_ATOMIC);
    if (!block)
        return ERR_PTR(-ENOMEM);
    /* open_count = 0 means device online but not in use */
    atomic_set(&block->open_count, -1);

    atomic_set(&block->tasklet_scheduled, 0);
    tasklet_init(&block->tasklet, dasd_block_tasklet,
             (unsigned long) block);
    INIT_LIST_HEAD(&block->ccw_queue);
    spin_lock_init(&block->queue_lock);
    INIT_LIST_HEAD(&block->format_list);
    spin_lock_init(&block->format_lock);
    timer_setup(&block->timer, dasd_block_timeout, 0);
    spin_lock_init(&block->profile.lock);

    return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
    kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
    int rc;

    /*
     * As long as the device is not in state DASD_STATE_NEW we want to
     * keep the reference count > 0.
     */
    dasd_get_device(device);

    if (device->block) {
        rc = dasd_alloc_queue(device->block);
        if (rc) {
            dasd_put_device(device);
            return rc;
        }
    }
    device->state = DASD_STATE_KNOWN;
    return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
    /* Disable extended error reporting for this device. */
    dasd_eer_disable(device);
    device->state = DASD_STATE_NEW;

    if (device->block)
        dasd_free_queue(device->block);

    /* Give up reference we took in dasd_state_new_to_known. */
    dasd_put_device(device);
    return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
                     struct dentry *base_dentry)
{
    struct dentry *pde;

    if (!base_dentry)
        return NULL;
    pde = debugfs_create_dir(name, base_dentry);
    if (!pde || IS_ERR(pde))
        return NULL;
    return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
    struct dasd_block *block = device->block;
    int rc = 0;

    /* Allocate and register gendisk structure. */
    if (block) {
        rc = dasd_gendisk_alloc(block);
        if (rc)
            return rc;
        block->debugfs_dentry =
            dasd_debugfs_setup(block->gdp->disk_name,
                       dasd_debugfs_root_entry);
        dasd_profile_init(&block->profile, block->debugfs_dentry);
        if (dasd_global_profile_level == DASD_PROFILE_ON)
            dasd_profile_on(&device->block->profile);
    }
    device->debugfs_dentry =
        dasd_debugfs_setup(dev_name(&device->cdev->dev),
                   dasd_debugfs_root_entry);
    dasd_profile_init(&device->profile, device->debugfs_dentry);
    dasd_hosts_init(device->debugfs_dentry, device);

    /* register 'device' debug area, used for all DBF_DEV_XXX calls */
    device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
                        8 * sizeof(long));
    debug_register_view(device->debug_area, &debug_sprintf_view);
    debug_set_level(device->debug_area, DBF_WARNING);
    DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

    device->state = DASD_STATE_BASIC;

    return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
    int rc;

    if (device->discipline->basic_to_known) {
        rc = device->discipline->basic_to_known(device);
        if (rc)
            return rc;
    }

    if (device->block) {
        dasd_profile_exit(&device->block->profile);
        debugfs_remove(device->block->debugfs_dentry);
        dasd_gendisk_free(device->block);
        dasd_block_clear_timer(device->block);
    }
    rc = dasd_flush_device_queue(device);
    if (rc)
        return rc;
    dasd_device_clear_timer(device);
    dasd_profile_exit(&device->profile);
    dasd_hosts_exit(device);
    debugfs_remove(device->debugfs_dentry);
    DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
    if (device->debug_area != NULL) {
        debug_unregister(device->debug_area);
        device->debug_area = NULL;
    }
    device->state = DASD_STATE_KNOWN;
    return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
    int rc;
    struct dasd_block *block;
    struct gendisk *disk;

    rc = 0;
    block = device->block;
    /* make disk known with correct capacity */
    if (block) {
        if (block->base->discipline->do_analysis != NULL)
            rc = block->base->discipline->do_analysis(block);
        if (rc) {
            if (rc != -EAGAIN) {
                device->state = DASD_STATE_UNFMT;
                disk = device->block->gdp;
                kobject_uevent(&disk_to_dev(disk)->kobj,
                           KOBJ_CHANGE);
                goto out;
            }
            return rc;
        }
        if (device->discipline->setup_blk_queue)
            device->discipline->setup_blk_queue(block);
        set_capacity(block->gdp,
                 block->blocks << block->s2b_shift);
        device->state = DASD_STATE_READY;
        rc = dasd_scan_partitions(block);
        if (rc) {
            device->state = DASD_STATE_BASIC;
            return rc;
        }
    } else {
        device->state = DASD_STATE_READY;
    }
out:
    if (device->discipline->basic_to_ready)
        rc = device->discipline->basic_to_ready(device);
    return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
    if (device->block)
        return list_empty(&device->ccw_queue) &&
            list_empty(&device->block->ccw_queue);
    else
        return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is, create a fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
    int rc;

    device->state = DASD_STATE_BASIC;
    if (device->block) {
        struct dasd_block *block = device->block;
        rc = dasd_flush_block_queue(block);
        if (rc) {
            device->state = DASD_STATE_READY;
            return rc;
        }
        dasd_destroy_partitions(block);
        block->blocks = 0;
        block->bp_block = 0;
        block->s2b_shift = 0;
    }
    return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
    device->state = DASD_STATE_BASIC;
    return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
    device->state = DASD_STATE_ONLINE;
    if (device->block) {
        dasd_schedule_block_bh(device->block);
        if ((device->features & DASD_FEATURE_USERAW)) {
            kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
                    KOBJ_CHANGE);
            return 0;
        }
        disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
    }
    return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
    int rc;

    if (device->discipline->online_to_ready) {
        rc = device->discipline->online_to_ready(device);
        if (rc)
            return rc;
    }

    device->state = DASD_STATE_READY;
    if (device->block && !(device->features & DASD_FEATURE_USERAW))
        disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
    return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
    int rc;

    rc = 0;
    if (device->state == DASD_STATE_NEW &&
        device->target >= DASD_STATE_KNOWN)
        rc = dasd_state_new_to_known(device);

    if (!rc &&
        device->state == DASD_STATE_KNOWN &&
        device->target >= DASD_STATE_BASIC)
        rc = dasd_state_known_to_basic(device);

    if (!rc &&
        device->state == DASD_STATE_BASIC &&
        device->target >= DASD_STATE_READY)
        rc = dasd_state_basic_to_ready(device);

    if (!rc &&
        device->state == DASD_STATE_UNFMT &&
        device->target > DASD_STATE_UNFMT)
        rc = -EPERM;

    if (!rc &&
        device->state == DASD_STATE_READY &&
        device->target >= DASD_STATE_ONLINE)
        rc = dasd_state_ready_to_online(device);

    return rc;
}
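
/*
 * Illustrative state ladder implied by the transitions above (a device
 * walks up towards device->target and back down again on shutdown):
 *
 *	NEW -> KNOWN -> BASIC -> READY -> ONLINE
 *	                  |
 *	                  +-> UNFMT (analysis failed; formatting only)
 */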

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
    int rc;

    rc = 0;
    if (device->state == DASD_STATE_ONLINE &&
        device->target <= DASD_STATE_READY)
        rc = dasd_state_online_to_ready(device);

    if (!rc &&
        device->state == DASD_STATE_READY &&
        device->target <= DASD_STATE_BASIC)
        rc = dasd_state_ready_to_basic(device);

    if (!rc &&
        device->state == DASD_STATE_UNFMT &&
        device->target <= DASD_STATE_BASIC)
        rc = dasd_state_unfmt_to_basic(device);

    if (!rc &&
        device->state == DASD_STATE_BASIC &&
        device->target <= DASD_STATE_KNOWN)
        rc = dasd_state_basic_to_known(device);

    if (!rc &&
        device->state == DASD_STATE_KNOWN &&
        device->target <= DASD_STATE_NEW)
        rc = dasd_state_known_to_new(device);

    return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
    int rc;

    if (device->state == device->target)
        /* Already where we want to go today... */
        return;
    if (device->state < device->target)
        rc = dasd_increase_state(device);
    else
        rc = dasd_decrease_state(device);
    if (rc == -EAGAIN)
        return;
    if (rc)
        device->target = device->state;

    /* let user-space know that the device status changed */
    kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

    if (device->state == device->target)
        wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
    struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
    mutex_lock(&device->state_mutex);
    dasd_change_state(device);
    mutex_unlock(&device->state_mutex);
    dasd_schedule_device_bh(device);
    dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
    dasd_get_device(device);
    /* queue call to do_kick_device to the kernel event daemon. */
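    /*
     * schedule_work() returns false if the work was already pending;
     * drop the extra reference in that case.
     */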
    if (!schedule_work(&device->kick_work))
        dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
    struct dasd_device *device = container_of(work, struct dasd_device,
                          reload_device);
    device->discipline->reload(device);
    dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
    dasd_get_device(device);
    /* queue call to do_reload_device to the kernel event daemon. */
    if (!schedule_work(&device->reload_device))
        dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
    dasd_get_device(device);
    mutex_lock(&device->state_mutex);
    /* If we are in probeonly mode stop at DASD_STATE_READY. */
    if (dasd_probeonly && target > DASD_STATE_READY)
        target = DASD_STATE_READY;
    if (device->target != target) {
        if (device->state == target)
            wake_up(&dasd_init_waitq);
        device->target = target;
    }
    if (device->state != device->target)
        dasd_change_state(device);
    mutex_unlock(&device->state_mutex);
    dasd_put_device(device);
}

/*
 * Wait until the device reaches its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
    return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
    dasd_set_target_state(device, DASD_STATE_ONLINE);
    if (device->state <= DASD_STATE_KNOWN)
        /* No discipline for device found. */
        dasd_set_target_state(device, DASD_STATE_NEW);
    /* Now wait for the devices to come up. */
    wait_event(dasd_init_waitq, _wait_for_device(device));

    dasd_reload_device(device);
    if (device->discipline->kick_validate)
        device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
    .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
                   struct dasd_ccw_req *cqr,
                   struct request *req)
{
    struct list_head *l;
    unsigned int counter;
    struct dasd_device *device;

    /* count the length of the chanq for statistics */
    counter = 0;
    if (dasd_global_profile_level || block->profile.data)
        list_for_each(l, &block->ccw_queue)
            if (++counter >= 31)
                break;

    spin_lock(&dasd_global_profile.lock);
    if (dasd_global_profile.data) {
        dasd_global_profile.data->dasd_io_nr_req[counter]++;
        if (rq_data_dir(req) == READ)
            dasd_global_profile.data->dasd_read_nr_req[counter]++;
    }
    spin_unlock(&dasd_global_profile.lock);

    spin_lock(&block->profile.lock);
    if (block->profile.data) {
        block->profile.data->dasd_io_nr_req[counter]++;
        if (rq_data_dir(req) == READ)
            block->profile.data->dasd_read_nr_req[counter]++;
    }
    spin_unlock(&block->profile.lock);

    /*
     * We count the request for the start device, even though it may run on
     * some other device due to error recovery. This way we make sure that
     * we count each request only once.
     */
    device = cqr->startdev;
    if (device->profile.data) {
        counter = 1; /* request is not yet queued on the start device */
        list_for_each(l, &device->ccw_queue)
            if (++counter >= 31)
                break;
    }
    spin_lock(&device->profile.lock);
    if (device->profile.data) {
        device->profile.data->dasd_io_nr_req[counter]++;
        if (rq_data_dir(req) == READ)
            device->profile.data->dasd_read_nr_req[counter]++;
    }
    spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)             \
{                                  \
    for (index = 0; index < 31 && value >> (2+index); index++) \
        ;                          \
}
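
/*
 * Example: value = 40 yields index = 4, since 40 >> 6 is the first
 * shift that becomes zero. Bucket 0 thus covers values 0-3, bucket i
 * (i >= 1) covers 2^(i+1) .. 2^(i+2)-1, and bucket 31 collects
 * everything from 2^32 up.
 */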

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
                      int is_alias,
                      int is_tpm,
                      int is_read,
                      long sectors,
                      int sectors_ind,
                      int tottime_ind,
                      int tottimeps_ind,
                      int strtime_ind,
                      int irqtime_ind,
                      int irqtimeps_ind,
                      int endtime_ind)
{
    /* in case of an overflow, reset the whole profile */
    if (data->dasd_io_reqs == UINT_MAX) {
            memset(data, 0, sizeof(*data));
            ktime_get_real_ts64(&data->starttod);
    }
    data->dasd_io_reqs++;
    data->dasd_io_sects += sectors;
    if (is_alias)
        data->dasd_io_alias++;
    if (is_tpm)
        data->dasd_io_tpm++;

    data->dasd_io_secs[sectors_ind]++;
    data->dasd_io_times[tottime_ind]++;
    data->dasd_io_timps[tottimeps_ind]++;
    data->dasd_io_time1[strtime_ind]++;
    data->dasd_io_time2[irqtime_ind]++;
    data->dasd_io_time2ps[irqtimeps_ind]++;
    data->dasd_io_time3[endtime_ind]++;

    if (is_read) {
        data->dasd_read_reqs++;
        data->dasd_read_sects += sectors;
        if (is_alias)
            data->dasd_read_alias++;
        if (is_tpm)
            data->dasd_read_tpm++;
        data->dasd_read_secs[sectors_ind]++;
        data->dasd_read_times[tottime_ind]++;
        data->dasd_read_time1[strtime_ind]++;
        data->dasd_read_time2[irqtime_ind]++;
        data->dasd_read_time3[endtime_ind]++;
    }
}

static void dasd_profile_end(struct dasd_block *block,
                 struct dasd_ccw_req *cqr,
                 struct request *req)
{
    unsigned long strtime, irqtime, endtime, tottime;
    unsigned long tottimeps, sectors;
    struct dasd_device *device;
    int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
    int irqtime_ind, irqtimeps_ind, endtime_ind;
    struct dasd_profile_info *data;

    device = cqr->startdev;
    if (!(dasd_global_profile_level ||
          block->profile.data ||
          device->profile.data))
        return;

    sectors = blk_rq_sectors(req);
    if (!cqr->buildclk || !cqr->startclk ||
        !cqr->stopclk || !cqr->endclk ||
        !sectors)
        return;

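    /* TOD clock deltas shifted right by 12 give microseconds. */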
    strtime = ((cqr->startclk - cqr->buildclk) >> 12);
    irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
    endtime = ((cqr->endclk - cqr->stopclk) >> 12);
    tottime = ((cqr->endclk - cqr->buildclk) >> 12);
    tottimeps = tottime / sectors;

    dasd_profile_counter(sectors, sectors_ind);
    dasd_profile_counter(tottime, tottime_ind);
    dasd_profile_counter(tottimeps, tottimeps_ind);
    dasd_profile_counter(strtime, strtime_ind);
    dasd_profile_counter(irqtime, irqtime_ind);
    dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
    dasd_profile_counter(endtime, endtime_ind);

    spin_lock(&dasd_global_profile.lock);
    if (dasd_global_profile.data) {
        data = dasd_global_profile.data;
        data->dasd_sum_times += tottime;
        data->dasd_sum_time_str += strtime;
        data->dasd_sum_time_irq += irqtime;
        data->dasd_sum_time_end += endtime;
        dasd_profile_end_add_data(dasd_global_profile.data,
                      cqr->startdev != block->base,
                      cqr->cpmode == 1,
                      rq_data_dir(req) == READ,
                      sectors, sectors_ind, tottime_ind,
                      tottimeps_ind, strtime_ind,
                      irqtime_ind, irqtimeps_ind,
                      endtime_ind);
    }
    spin_unlock(&dasd_global_profile.lock);

    spin_lock(&block->profile.lock);
    if (block->profile.data) {
        data = block->profile.data;
        data->dasd_sum_times += tottime;
        data->dasd_sum_time_str += strtime;
        data->dasd_sum_time_irq += irqtime;
        data->dasd_sum_time_end += endtime;
        dasd_profile_end_add_data(block->profile.data,
                      cqr->startdev != block->base,
                      cqr->cpmode == 1,
                      rq_data_dir(req) == READ,
                      sectors, sectors_ind, tottime_ind,
                      tottimeps_ind, strtime_ind,
                      irqtime_ind, irqtimeps_ind,
                      endtime_ind);
    }
    spin_unlock(&block->profile.lock);

    spin_lock(&device->profile.lock);
    if (device->profile.data) {
        data = device->profile.data;
        data->dasd_sum_times += tottime;
        data->dasd_sum_time_str += strtime;
        data->dasd_sum_time_irq += irqtime;
        data->dasd_sum_time_end += endtime;
        dasd_profile_end_add_data(device->profile.data,
                      cqr->startdev != block->base,
                      cqr->cpmode == 1,
                      rq_data_dir(req) == READ,
                      sectors, sectors_ind, tottime_ind,
                      tottimeps_ind, strtime_ind,
                      irqtime_ind, irqtimeps_ind,
                      endtime_ind);
    }
    spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
    struct dasd_profile_info *data;

    spin_lock_bh(&profile->lock);
    data = profile->data;
    if (!data) {
        spin_unlock_bh(&profile->lock);
        return;
    }
    memset(data, 0, sizeof(*data));
    ktime_get_real_ts64(&data->starttod);
    spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
    struct dasd_profile_info *data;

    data = kzalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return -ENOMEM;
    spin_lock_bh(&profile->lock);
    if (profile->data) {
        spin_unlock_bh(&profile->lock);
        kfree(data);
        return 0;
    }
    ktime_get_real_ts64(&data->starttod);
    profile->data = data;
    spin_unlock_bh(&profile->lock);
    return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
    spin_lock_bh(&profile->lock);
    kfree(profile->data);
    profile->data = NULL;
    spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
    char *buffer;

    buffer = vmalloc(user_len + 1);
    if (buffer == NULL)
        return ERR_PTR(-ENOMEM);
    if (copy_from_user(buffer, user_buf, user_len) != 0) {
        vfree(buffer);
        return ERR_PTR(-EFAULT);
    }
    /* got the string, now strip linefeed. */
    if (buffer[user_len - 1] == '\n')
        buffer[user_len - 1] = 0;
    else
        buffer[user_len] = 0;
    return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
                const char __user *user_buf,
                size_t user_len, loff_t *pos)
{
    char *buffer, *str;
    int rc;
    struct seq_file *m = (struct seq_file *)file->private_data;
    struct dasd_profile *prof = m->private;

    if (user_len > 65536)
        user_len = 65536;
    buffer = dasd_get_user_string(user_buf, user_len);
    if (IS_ERR(buffer))
        return PTR_ERR(buffer);

    str = skip_spaces(buffer);
    rc = user_len;
    if (strncmp(str, "reset", 5) == 0) {
        dasd_profile_reset(prof);
    } else if (strncmp(str, "on", 2) == 0) {
        rc = dasd_profile_on(prof);
        if (rc)
            goto out;
        rc = user_len;
        if (prof == &dasd_global_profile) {
            dasd_profile_reset(prof);
            dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
        }
    } else if (strncmp(str, "off", 3) == 0) {
        if (prof == &dasd_global_profile)
            dasd_global_profile_level = DASD_PROFILE_OFF;
        dasd_profile_off(prof);
    } else
        rc = -EINVAL;
out:
    vfree(buffer);
    return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
    int i;

    for (i = 0; i < 32; i++)
        seq_printf(m, "%u ", array[i]);
    seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
                 struct dasd_profile_info *data)
{
    seq_printf(m, "start_time %lld.%09ld\n",
           (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
    seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
    seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
    seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
    seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
    seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
           data->dasd_sum_times / data->dasd_io_reqs : 0UL);
    seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
           data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
    seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
           data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
    seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
           data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
    seq_puts(m, "histogram_sectors ");
    dasd_stats_array(m, data->dasd_io_secs);
    seq_puts(m, "histogram_io_times ");
    dasd_stats_array(m, data->dasd_io_times);
    seq_puts(m, "histogram_io_times_weighted ");
    dasd_stats_array(m, data->dasd_io_timps);
    seq_puts(m, "histogram_time_build_to_ssch ");
    dasd_stats_array(m, data->dasd_io_time1);
    seq_puts(m, "histogram_time_ssch_to_irq ");
    dasd_stats_array(m, data->dasd_io_time2);
    seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
    dasd_stats_array(m, data->dasd_io_time2ps);
    seq_puts(m, "histogram_time_irq_to_end ");
    dasd_stats_array(m, data->dasd_io_time3);
    seq_puts(m, "histogram_ccw_queue_length ");
    dasd_stats_array(m, data->dasd_io_nr_req);
    seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
    seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
    seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
    seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
    seq_puts(m, "histogram_read_sectors ");
    dasd_stats_array(m, data->dasd_read_secs);
    seq_puts(m, "histogram_read_times ");
    dasd_stats_array(m, data->dasd_read_times);
    seq_puts(m, "histogram_read_time_build_to_ssch ");
    dasd_stats_array(m, data->dasd_read_time1);
    seq_puts(m, "histogram_read_time_ssch_to_irq ");
    dasd_stats_array(m, data->dasd_read_time2);
    seq_puts(m, "histogram_read_time_irq_to_end ");
    dasd_stats_array(m, data->dasd_read_time3);
    seq_puts(m, "histogram_read_ccw_queue_length ");
    dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
    struct dasd_profile *profile;
    struct dasd_profile_info *data;

    profile = m->private;
    spin_lock_bh(&profile->lock);
    data = profile->data;
    if (!data) {
        spin_unlock_bh(&profile->lock);
        seq_puts(m, "disabled\n");
        return 0;
    }
    dasd_stats_seq_print(m, data);
    spin_unlock_bh(&profile->lock);
    return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
    struct dasd_profile *profile = inode->i_private;
    return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
    .owner      = THIS_MODULE,
    .open       = dasd_stats_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = single_release,
    .write      = dasd_stats_write,
};
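
/*
 * The statistics file accepts the keywords parsed in dasd_stats_write()
 * above; with debugfs mounted in the usual place, for example:
 *
 *	echo on > /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/global/statistics
 *	cat /sys/kernel/debug/dasd/global/statistics
 */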

static void dasd_profile_init(struct dasd_profile *profile,
                  struct dentry *base_dentry)
{
    umode_t mode;
    struct dentry *pde;

    if (!base_dentry)
        return;
    profile->dentry = NULL;
    profile->data = NULL;
    mode = (S_IRUSR | S_IWUSR | S_IFREG);
    pde = debugfs_create_file("statistics", mode, base_dentry,
                  profile, &dasd_stats_raw_fops);
    if (pde && !IS_ERR(pde))
        profile->dentry = pde;
    return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
    dasd_profile_off(profile);
    debugfs_remove(profile->dentry);
    profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
    dasd_global_profile_level = DASD_PROFILE_OFF;
    dasd_profile_exit(&dasd_global_profile);
    debugfs_remove(dasd_debugfs_global_entry);
    debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
    struct dentry *pde;

    dasd_debugfs_root_entry = NULL;
    pde = debugfs_create_dir("dasd", NULL);
    if (!pde || IS_ERR(pde))
        goto error;
    dasd_debugfs_root_entry = pde;
    pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
    if (!pde || IS_ERR(pde))
        goto error;
    dasd_debugfs_global_entry = pde;
    dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
    return;

error:
    DBF_EVENT(DBF_ERR, "%s",
          "Creation of the dasd debugfs interface failed");
    dasd_statistics_removeroot();
    return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
    return;
}

static void dasd_statistics_removeroot(void)
{
    return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
    seq_puts(m, "Statistics are not activated in this kernel\n");
    return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
                  struct dentry *base_dentry)
{
    return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
    return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
    return 0;
}

#endif              /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
    struct dasd_device *device;
    int rc = -EOPNOTSUPP;

    device = m->private;
    dasd_get_device(device);

    if (device->discipline->hosts_print)
        rc = device->discipline->hosts_print(device, m);

    dasd_put_device(device);
    return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
    debugfs_remove(device->hosts_dentry);
    device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
                struct dasd_device *device)
{
    struct dentry *pde;
    umode_t mode;

    if (!base_dentry)
        return;

    mode = S_IRUSR | S_IFREG;
    pde = debugfs_create_file("host_access_list", mode, base_dentry,
                  device, &dasd_hosts_fops);
    if (pde && !IS_ERR(pde))
        device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
                      struct dasd_device *device,
                      struct dasd_ccw_req *cqr)
{
    unsigned long flags;
    char *data, *chunk;
    int size = 0;

    if (cplength > 0)
        size += cplength * sizeof(struct ccw1);
    if (datasize > 0)
        size += datasize;
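    /* Round the embedded cqr up to the next 8 byte boundary. */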
    if (!cqr)
        size += (sizeof(*cqr) + 7L) & -8L;

    spin_lock_irqsave(&device->mem_lock, flags);
    data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
    spin_unlock_irqrestore(&device->mem_lock, flags);
    if (!chunk)
        return ERR_PTR(-ENOMEM);
    if (!cqr) {
        cqr = (void *) data;
        data += (sizeof(*cqr) + 7L) & -8L;
    }
    memset(cqr, 0, sizeof(*cqr));
    cqr->mem_chunk = chunk;
    if (cplength > 0) {
        cqr->cpaddr = data;
        data += cplength * sizeof(struct ccw1);
        memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
    }
    if (datasize > 0) {
        cqr->data = data;
        memset(cqr->data, 0, datasize);
    }
    cqr->magic = magic;
    set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
    dasd_get_device(device);
    return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);
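
/*
 * Usage sketch (illustrative only): a discipline allocates a request
 * from the device's ccw chunk pool, builds the channel program and
 * payload, and returns the request to the pool when it is done:
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device, NULL);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... set up cqr->cpaddr and cqr->data, start the I/O ...
 *	dasd_sfree_request(cqr, device);
 */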

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
                      int datasize,
                      struct dasd_device *device)
{
    struct dasd_ccw_req *cqr;
    unsigned long flags;
    int size, cqr_size;
    char *data;

    cqr_size = (sizeof(*cqr) + 7L) & -8L;
    size = cqr_size;
    if (cplength > 0)
        size += cplength * sizeof(struct ccw1);
    if (datasize > 0)
        size += datasize;

    spin_lock_irqsave(&device->mem_lock, flags);
    cqr = dasd_alloc_chunk(&device->ese_chunks, size);
    spin_unlock_irqrestore(&device->mem_lock, flags);
    if (!cqr)
        return ERR_PTR(-ENOMEM);
    memset(cqr, 0, sizeof(*cqr));
    data = (char *)cqr + cqr_size;
    cqr->cpaddr = NULL;
    if (cplength > 0) {
        cqr->cpaddr = data;
        data += cplength * sizeof(struct ccw1);
        memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
    }
    cqr->data = NULL;
    if (datasize > 0) {
        cqr->data = data;
        memset(cqr->data, 0, datasize);
    }

    cqr->magic = magic;
    set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
    dasd_get_device(device);

    return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
    unsigned long flags;

    spin_lock_irqsave(&device->mem_lock, flags);
    dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
    spin_unlock_irqrestore(&device->mem_lock, flags);
    dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
    unsigned long flags;

    spin_lock_irqsave(&device->mem_lock, flags);
    dasd_free_chunk(&device->ese_chunks, cqr);
    spin_unlock_irqrestore(&device->mem_lock, flags);
    dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
    struct dasd_device *device;

    if (cqr == NULL)
        return -EINVAL;
    device = cqr->startdev;
    if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
        DBF_DEV_EVENT(DBF_WARNING, device,
                " dasd_ccw_req 0x%08x magic doesn't match"
                " discipline 0x%08x",
                cqr->magic,
                *(unsigned int *) device->discipline->name);
        return -EINVAL;
    }
    return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
    struct dasd_device *device;
    int retries, rc;
    char errorstring[ERRORLENGTH];

    /* Check the cqr */
    rc = dasd_check_cqr(cqr);
    if (rc)
        return rc;
    retries = 0;
    device = (struct dasd_device *) cqr->startdev;
    while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
        rc = ccw_device_clear(device->cdev, (long) cqr);
        switch (rc) {
        case 0: /* termination successful */
            cqr->status = DASD_CQR_CLEAR_PENDING;
            cqr->stopclk = get_tod_clock();
            cqr->starttime = 0;
            DBF_DEV_EVENT(DBF_DEBUG, device,
                      "terminate cqr %p successful",
                      cqr);
            break;
        case -ENODEV:
            DBF_DEV_EVENT(DBF_ERR, device, "%s",
                      "device gone, retry");
            break;
        case -EINVAL:
            /*
             * device not valid so no I/O could be running
             * handle CQR as termination successful
             */
            cqr->status = DASD_CQR_CLEARED;
            cqr->stopclk = get_tod_clock();
            cqr->starttime = 0;
            /* no retries for invalid devices */
            cqr->retries = -1;
            DBF_DEV_EVENT(DBF_ERR, device, "%s",
                      "EINVAL, handle as terminated");
            /* fake rc to success */
            rc = 0;
            break;
        default:
            /* internal error 10 - unknown rc*/
            snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
            dev_err(&device->cdev->dev, "An error occurred in the "
                "DASD device driver, reason=%s\n", errorstring);
            BUG();
            break;
        }
        retries++;
    }
    dasd_schedule_device_bh(device);
    return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
    struct dasd_device *device;
    int rc;
    char errorstring[ERRORLENGTH];

    /* Check the cqr */
    rc = dasd_check_cqr(cqr);
    if (rc) {
        cqr->intrc = rc;
        return rc;
    }
    device = (struct dasd_device *) cqr->startdev;
    if (((cqr->block &&
          test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
         test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
        !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
        DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
                  "because of stolen lock", cqr);
        cqr->status = DASD_CQR_ERROR;
        cqr->intrc = -EPERM;
        return -EPERM;
    }
    if (cqr->retries < 0) {
        /* internal error 14 - start_IO run out of retries */
        sprintf(errorstring, "14 %p", cqr);
        dev_err(&device->cdev->dev, "An error occurred in the DASD "
            "device driver, reason=%s\n", errorstring);
        cqr->status = DASD_CQR_ERROR;
        return -EIO;
    }
    cqr->startclk = get_tod_clock();
    cqr->starttime = jiffies;
    cqr->retries--;
    if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
        cqr->lpm &= dasd_path_get_opm(device);
        if (!cqr->lpm)
            cqr->lpm = dasd_path_get_opm(device);
    }
    /*
     * remember the number of formatted tracks to prevent double format on
     * ESE devices
     */
    if (cqr->block)
        cqr->trkcount = atomic_read(&cqr->block->trkcount);

    if (cqr->cpmode == 1) {
        rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
                     (long) cqr, cqr->lpm);
    } else {
        rc = ccw_device_start(device->cdev, cqr->cpaddr,
                      (long) cqr, cqr->lpm, 0);
    }
    switch (rc) {
    case 0:
        cqr->status = DASD_CQR_IN_IO;
        break;
    case -EBUSY:
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "start_IO: device busy, retry later");
        break;
    case -EACCES:
        /* -EACCES indicates that the request used only a subset of the
         * available paths and all these paths are gone. If the lpm of
         * this request was only a subset of the opm (e.g. the ppm) then
         * we just do a retry with all available paths.
         * If we already use the full opm, something is amiss, and we
         * need a full path verification.
         */
        if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
            DBF_DEV_EVENT(DBF_WARNING, device,
                      "start_IO: selected paths gone (%x)",
                      cqr->lpm);
        } else if (cqr->lpm != dasd_path_get_opm(device)) {
            cqr->lpm = dasd_path_get_opm(device);
            DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
                      "start_IO: selected paths gone,"
                      " retry on all paths");
        } else {
            DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                      "start_IO: all paths in opm gone,"
                      " do path verification");
            dasd_generic_last_path_gone(device);
            dasd_path_no_path(device);
            dasd_path_set_tbvpm(device,
                      ccw_device_get_path_mask(
                          device->cdev));
        }
        break;
    case -ENODEV:
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "start_IO: -ENODEV device gone, retry");
        break;
    case -EIO:
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "start_IO: -EIO device gone, retry");
        break;
    case -EINVAL:
        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                  "start_IO: -EINVAL device currently "
                  "not accessible");
        break;
    default:
        /* internal error 11 - unknown rc */
        snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
        dev_err(&device->cdev->dev,
            "An error occurred in the DASD device driver, "
            "reason=%s\n", errorstring);
        BUG();
        break;
    }
    cqr->intrc = rc;
    return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
    unsigned long flags;
    struct dasd_device *device;

    device = from_timer(device, t, timer);
    spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
    /* re-activate request queue */
    dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
    spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
    dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
    if (expires == 0)
        del_timer(&device->timer);
    else
        mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
    del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
                       unsigned long intparm)
{
    struct dasd_ccw_req *cqr;
    struct dasd_device *device;

    if (!intparm)
        return;
    cqr = (struct dasd_ccw_req *) intparm;
    if (cqr->status != DASD_CQR_IN_IO) {
        DBF_EVENT_DEVID(DBF_DEBUG, cdev,
                "invalid status in handle_killed_request: "
                "%02x", cqr->status);
        return;
    }

    device = dasd_device_from_cdev_locked(cdev);
    if (IS_ERR(device)) {
        DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
                "unable to get device from cdev");
        return;
    }

    if (!cqr->startdev ||
        device != cqr->startdev ||
        strncmp(cqr->startdev->discipline->ebcname,
            (char *) &cqr->magic, 4)) {
        DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
                "invalid device in request");
        dasd_put_device(device);
        return;
    }

    /* Schedule request to be retried. */
    cqr->status = DASD_CQR_QUEUED;

    dasd_device_clear_timer(device);
    dasd_schedule_device_bh(device);
    dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
    /* First of all start sense subsystem status request. */
    dasd_eer_snss(device);

    dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
    dasd_schedule_device_bh(device);
    if (device->block) {
        dasd_schedule_block_bh(device->block);
        if (device->block->request_queue)
            blk_mq_run_hw_queues(device->block->request_queue,
                         true);
    }
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
    return (scsw_tm_is_valid_schxs(&irb->scsw) &&
        (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
         irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
    struct dasd_device *device = NULL;
    u8 *sense = NULL;

    if (!block)
        return 0;
    device = block->base;
    if (!device || !device->discipline->is_ese)
        return 0;
    if (!device->discipline->is_ese(device))
        return 0;

    sense = dasd_get_sense(irb);
    if (!sense)
        return 0;

    return !!(sense[1] & SNS1_NO_REC_FOUND) ||
        !!(sense[1] & SNS1_FILE_PROTECTED) ||
        scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

1630 static int dasd_ese_oos_cond(u8 *sense)
1631 {
1632     return sense[0] & SNS0_EQUIPMENT_CHECK &&
1633         sense[1] & SNS1_PERM_ERR &&
1634         sense[1] & SNS1_WRITE_INHIBITED &&
1635         sense[25] == 0x01;
1636 }
1637 
1638 /*
1639  * Interrupt handler for "normal" ssch-io based dasd devices.
1640  */
1641 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1642               struct irb *irb)
1643 {
1644     struct dasd_ccw_req *cqr, *next, *fcqr;
1645     struct dasd_device *device;
1646     unsigned long now;
1647     int nrf_suppressed = 0;
1648     int fp_suppressed = 0;
1649     struct request *req;
1650     u8 *sense = NULL;
1651     int expires;
1652 
1653     cqr = (struct dasd_ccw_req *) intparm;
1654     if (IS_ERR(irb)) {
1655         switch (PTR_ERR(irb)) {
1656         case -EIO:
1657             if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1658                 device = cqr->startdev;
1659                 cqr->status = DASD_CQR_CLEARED;
1660                 dasd_device_clear_timer(device);
1661                 wake_up(&dasd_flush_wq);
1662                 dasd_schedule_device_bh(device);
1663                 return;
1664             }
1665             break;
1666         case -ETIMEDOUT:
1667             DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1668                     "request timed out\n", __func__);
1669             break;
1670         default:
1671             DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1672                     "unknown error %ld\n", __func__,
1673                     PTR_ERR(irb));
1674         }
1675         dasd_handle_killed_request(cdev, intparm);
1676         return;
1677     }
1678 
1679     now = get_tod_clock();
1680     /* check for conditions that should be handled immediately */
1681     if (!cqr ||
1682         !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1683           scsw_cstat(&irb->scsw) == 0)) {
1684         if (cqr)
1685             memcpy(&cqr->irb, irb, sizeof(*irb));
1686         device = dasd_device_from_cdev_locked(cdev);
1687         if (IS_ERR(device))
1688             return;
1689         /* ignore unsolicited interrupts for DIAG discipline */
1690         if (device->discipline == dasd_diag_discipline_pointer) {
1691             dasd_put_device(device);
1692             return;
1693         }
1694 
1695         /*
1696          * In some cases 'File Protected' or 'No Record Found' errors
1697          * might be expected and debug log messages for the
1698          * corresponding interrupts shouldn't be written then.
1699      * Check if either of the corresponding suppress bits is set.
1700          */
1701         sense = dasd_get_sense(irb);
1702         if (sense) {
1703             fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
1704                 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
1705             nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
1706                 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1707 
1708             /*
1709              * Extent pool probably out-of-space.
1710              * Stop device and check exhaust level.
1711              */
1712             if (dasd_ese_oos_cond(sense)) {
1713                 dasd_generic_space_exhaust(device, cqr);
1714                 device->discipline->ext_pool_exhaust(device, cqr);
1715                 dasd_put_device(device);
1716                 return;
1717             }
1718         }
1719         if (!(fp_suppressed || nrf_suppressed))
1720             device->discipline->dump_sense_dbf(device, irb, "int");
1721 
1722         if (device->features & DASD_FEATURE_ERPLOG)
1723             device->discipline->dump_sense(device, cqr, irb);
1724         device->discipline->check_for_device_change(device, cqr, irb);
1725         dasd_put_device(device);
1726     }
1727 
1728     /* check for attention message */
1729     if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1730         device = dasd_device_from_cdev_locked(cdev);
1731         if (!IS_ERR(device)) {
1732             device->discipline->check_attention(device,
1733                                 irb->esw.esw1.lpum);
1734             dasd_put_device(device);
1735         }
1736     }
1737 
1738     if (!cqr)
1739         return;
1740 
1741     device = (struct dasd_device *) cqr->startdev;
1742     if (!device ||
1743         strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1744         DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1745                 "invalid device in request");
1746         return;
1747     }
1748 
1749     if (dasd_ese_needs_format(cqr->block, irb)) {
1750         req = dasd_get_callback_data(cqr);
1751         if (!req) {
1752             cqr->status = DASD_CQR_ERROR;
1753             return;
1754         }
1755         if (rq_data_dir(req) == READ) {
1756             device->discipline->ese_read(cqr, irb);
1757             cqr->status = DASD_CQR_SUCCESS;
1758             cqr->stopclk = now;
1759             dasd_device_clear_timer(device);
1760             dasd_schedule_device_bh(device);
1761             return;
1762         }
1763         fcqr = device->discipline->ese_format(device, cqr, irb);
1764         if (IS_ERR(fcqr)) {
1765             if (PTR_ERR(fcqr) == -EINVAL) {
1766                 cqr->status = DASD_CQR_ERROR;
1767                 return;
1768             }
1769             /*
1770              * If we can't format now, let the request go
1771              * one extra round. Maybe we can format later.
1772              */
1773             cqr->status = DASD_CQR_QUEUED;
1774             dasd_schedule_device_bh(device);
1775             return;
1776         } else {
1777             fcqr->status = DASD_CQR_QUEUED;
1778             cqr->status = DASD_CQR_QUEUED;
1779             list_add(&fcqr->devlist, &device->ccw_queue);
1780             dasd_schedule_device_bh(device);
1781             return;
1782         }
1783     }
1784 
1785     /* Check for clear pending */
1786     if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1787         scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1788         cqr->status = DASD_CQR_CLEARED;
1789         dasd_device_clear_timer(device);
1790         wake_up(&dasd_flush_wq);
1791         dasd_schedule_device_bh(device);
1792         return;
1793     }
1794 
1795     /* check status - the request might have been killed by dyn detach */
1796     if (cqr->status != DASD_CQR_IN_IO) {
1797         DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1798                   "status %02x", dev_name(&cdev->dev), cqr->status);
1799         return;
1800     }
1801 
1802     next = NULL;
1803     expires = 0;
1804     if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1805         scsw_cstat(&irb->scsw) == 0) {
1806         /* request was completed successfully */
1807         cqr->status = DASD_CQR_SUCCESS;
1808         cqr->stopclk = now;
1809         /* Start first request on queue if possible -> fast_io. */
1810         if (cqr->devlist.next != &device->ccw_queue) {
1811             next = list_entry(cqr->devlist.next,
1812                       struct dasd_ccw_req, devlist);
1813         }
1814     } else {  /* error */
1815         /* check for HPF error
1816          * call discipline function to requeue all requests
1817          * and disable HPF accordingly
1818          */
1819         if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1820             device->discipline->handle_hpf_error)
1821             device->discipline->handle_hpf_error(device, irb);
1822         /*
1823          * If we don't want complex ERP for this request, then just
1824          * reset this and retry it in the fastpath
1825          */
1826         if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1827             cqr->retries > 0) {
1828             if (cqr->lpm == dasd_path_get_opm(device))
1829                 DBF_DEV_EVENT(DBF_DEBUG, device,
1830                           "default ERP in fastpath "
1831                           "(%i retries left)",
1832                           cqr->retries);
1833             if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1834                 cqr->lpm = dasd_path_get_opm(device);
1835             cqr->status = DASD_CQR_QUEUED;
1836             next = cqr;
1837         } else
1838             cqr->status = DASD_CQR_ERROR;
1839     }
1840     if (next && (next->status == DASD_CQR_QUEUED) &&
1841         (!device->stopped)) {
1842         if (device->discipline->start_IO(next) == 0)
1843             expires = next->expires;
1844     }
1845     if (expires != 0)
1846         dasd_device_set_timer(device, expires);
1847     else
1848         dasd_device_clear_timer(device);
1849     dasd_schedule_device_bh(device);
1850 }
1851 EXPORT_SYMBOL(dasd_int_handler);
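
/*
 * Editorial sketch (not part of the driver): dasd_int_handler() is not
 * called directly; the common I/O layer invokes it through the ccw device.
 * Assuming a struct ccw_device *cdev, the device setup code would install
 * it roughly like this:
 */
static void example_install_int_handler(struct ccw_device *cdev)
{
    /* subsequent interrupts for cdev are routed to dasd_int_handler() */
    cdev->handler = &dasd_int_handler;
}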
1852 
1853 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1854 {
1855     struct dasd_device *device;
1856 
1857     device = dasd_device_from_cdev_locked(cdev);
1858 
1859     if (IS_ERR(device))
1860         goto out;
1861     if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1862        device->state != device->target ||
1863        !device->discipline->check_for_device_change) {
1864         dasd_put_device(device);
1865         goto out;
1866     }
1867     if (device->discipline->dump_sense_dbf)
1868         device->discipline->dump_sense_dbf(device, irb, "uc");
1869     device->discipline->check_for_device_change(device, NULL, irb);
1870     dasd_put_device(device);
1871 out:
1872     return UC_TODO_RETRY;
1873 }
1874 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1875 
1876 /*
1877  * If we have an error on a dasd_block layer request then we cancel
1878  * and return all further requests from the same dasd_block as well.
1879  */
1880 static void __dasd_device_recovery(struct dasd_device *device,
1881                    struct dasd_ccw_req *ref_cqr)
1882 {
1883     struct list_head *l, *n;
1884     struct dasd_ccw_req *cqr;
1885 
1886     /*
1887      * only requeue requests that came from the dasd_block layer
1888      */
1889     if (!ref_cqr->block)
1890         return;
1891 
1892     list_for_each_safe(l, n, &device->ccw_queue) {
1893         cqr = list_entry(l, struct dasd_ccw_req, devlist);
1894         if (cqr->status == DASD_CQR_QUEUED &&
1895             ref_cqr->block == cqr->block) {
1896             cqr->status = DASD_CQR_CLEARED;
1897         }
1898     }
1899 }
1900 
1901 /*
1902  * Remove those ccw requests from the queue that need to be returned
1903  * to the upper layer.
1904  */
1905 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1906                         struct list_head *final_queue)
1907 {
1908     struct list_head *l, *n;
1909     struct dasd_ccw_req *cqr;
1910 
1911     /* Process request with final status. */
1912     list_for_each_safe(l, n, &device->ccw_queue) {
1913         cqr = list_entry(l, struct dasd_ccw_req, devlist);
1914 
1915         /* Skip any non-final request. */
1916         if (cqr->status == DASD_CQR_QUEUED ||
1917             cqr->status == DASD_CQR_IN_IO ||
1918             cqr->status == DASD_CQR_CLEAR_PENDING)
1919             continue;
1920         if (cqr->status == DASD_CQR_ERROR) {
1921             __dasd_device_recovery(device, cqr);
1922         }
1923         /* Rechain finished requests to final queue */
1924         list_move_tail(&cqr->devlist, final_queue);
1925     }
1926 }
1927 
1928 static void __dasd_process_cqr(struct dasd_device *device,
1929                    struct dasd_ccw_req *cqr)
1930 {
1931     char errorstring[ERRORLENGTH];
1932 
1933     switch (cqr->status) {
1934     case DASD_CQR_SUCCESS:
1935         cqr->status = DASD_CQR_DONE;
1936         break;
1937     case DASD_CQR_ERROR:
1938         cqr->status = DASD_CQR_NEED_ERP;
1939         break;
1940     case DASD_CQR_CLEARED:
1941         cqr->status = DASD_CQR_TERMINATED;
1942         break;
1943     default:
1944         /* internal error 12 - wrong cqr status */
1945         snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
1946         dev_err(&device->cdev->dev,
1947             "An error occurred in the DASD device driver, "
1948             "reason=%s\n", errorstring);
1949         BUG();
1950     }
1951     if (cqr->callback)
1952         cqr->callback(cqr, cqr->callback_data);
1953 }
1954 
1955 /*
1956  * the cqrs from the final queue are returned to the upper layer
1957  * by setting a dasd_block state and calling the callback function
1958  */
1959 static void __dasd_device_process_final_queue(struct dasd_device *device,
1960                           struct list_head *final_queue)
1961 {
1962     struct list_head *l, *n;
1963     struct dasd_ccw_req *cqr;
1964     struct dasd_block *block;
1965 
1966     list_for_each_safe(l, n, final_queue) {
1967         cqr = list_entry(l, struct dasd_ccw_req, devlist);
1968         list_del_init(&cqr->devlist);
1969         block = cqr->block;
1970         if (!block) {
1971             __dasd_process_cqr(device, cqr);
1972         } else {
1973             spin_lock_bh(&block->queue_lock);
1974             __dasd_process_cqr(device, cqr);
1975             spin_unlock_bh(&block->queue_lock);
1976         }
1977     }
1978 }
1979 
1980 /*
1981  * Take a look at the first request on the ccw queue and check
1982  * if it reached its expire time. If so, terminate the IO.
1983  */
1984 static void __dasd_device_check_expire(struct dasd_device *device)
1985 {
1986     struct dasd_ccw_req *cqr;
1987 
1988     if (list_empty(&device->ccw_queue))
1989         return;
1990     cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1991     if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1992         (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1993         if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1994             /*
1995              * IO in safe offline processing should not
1996              * run out of retries
1997              */
1998             cqr->retries++;
1999         }
2000         if (device->discipline->term_IO(cqr) != 0) {
2001             /* Hmpf, try again in 5 sec */
2002             dev_err(&device->cdev->dev,
2003                 "cqr %p timed out (%lus) but cannot be "
2004                 "ended, retrying in 5 s\n",
2005                 cqr, (cqr->expires/HZ));
2006             cqr->expires += 5*HZ;
2007             dasd_device_set_timer(device, 5*HZ);
2008         } else {
2009             dev_err(&device->cdev->dev,
2010                 "cqr %p timed out (%lus), %i retries "
2011                 "remaining\n", cqr, (cqr->expires/HZ),
2012                 cqr->retries);
2013         }
2014     }
2015 }
2016 
2017 /*
2018  * return 1 when device is not eligible for IO
2019  */
2020 static int __dasd_device_is_unusable(struct dasd_device *device,
2021                      struct dasd_ccw_req *cqr)
2022 {
2023     int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);
2024 
2025     if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2026         !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
2027         /*
2028          * dasd is being set offline,
2029          * but it is not a safe offline, where we have to allow I/O
2030          */
2031         return 1;
2032     }
2033     if (device->stopped) {
2034         if (device->stopped & mask) {
2035             /* stopped and CQR will not change that. */
2036             return 1;
2037         }
2038         if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2039             /* CQR is not able to change device to
2040              * operational. */
2041             return 1;
2042         }
2043         /* CQR required to get device operational. */
2044     }
2045     return 0;
2046 }
2047 
2048 /*
2049  * Take a look at the first request on the ccw queue and check
2050  * if it needs to be started.
2051  */
2052 static void __dasd_device_start_head(struct dasd_device *device)
2053 {
2054     struct dasd_ccw_req *cqr;
2055     int rc;
2056 
2057     if (list_empty(&device->ccw_queue))
2058         return;
2059     cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2060     if (cqr->status != DASD_CQR_QUEUED)
2061         return;
2062     /* if device is not usable return request to upper layer */
2063     if (__dasd_device_is_unusable(device, cqr)) {
2064         cqr->intrc = -EAGAIN;
2065         cqr->status = DASD_CQR_CLEARED;
2066         dasd_schedule_device_bh(device);
2067         return;
2068     }
2069 
2070     rc = device->discipline->start_IO(cqr);
2071     if (rc == 0)
2072         dasd_device_set_timer(device, cqr->expires);
2073     else if (rc == -EACCES) {
2074         dasd_schedule_device_bh(device);
2075     } else
2076         /* Hmpf, try again in 1/2 sec */
2077         dasd_device_set_timer(device, 50);
2078 }
2079 
2080 static void __dasd_device_check_path_events(struct dasd_device *device)
2081 {
2082     __u8 tbvpm, fcsecpm;
2083     int rc;
2084 
2085     tbvpm = dasd_path_get_tbvpm(device);
2086     fcsecpm = dasd_path_get_fcsecpm(device);
2087 
2088     if (!tbvpm && !fcsecpm)
2089         return;
2090 
2091     if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
2092         return;
2093 
2094     dasd_path_clear_all_verify(device);
2095     dasd_path_clear_all_fcsec(device);
2096 
2097     rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
2098     if (rc) {
2099         dasd_path_add_tbvpm(device, tbvpm);
2100         dasd_path_add_fcsecpm(device, fcsecpm);
2101         dasd_device_set_timer(device, 50);
2102     }
2103 }
2104 
2105 /*
2106  * Go through all requests on the dasd_device request queue,
2107  * terminate them on the cdev if necessary, and return them to the
2108  * submitting layer via callback.
2109  * Note:
2110  * Make sure that all 'submitting layers' still exist when
2111  * this function is called! In other words, when 'device' is a base
2112  * device, all block layer requests must already have been removed
2113  * via dasd_flush_block_queue.
2114  */
2115 int dasd_flush_device_queue(struct dasd_device *device)
2116 {
2117     struct dasd_ccw_req *cqr, *n;
2118     int rc;
2119     struct list_head flush_queue;
2120 
2121     INIT_LIST_HEAD(&flush_queue);
2122     spin_lock_irq(get_ccwdev_lock(device->cdev));
2123     rc = 0;
2124     list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2125         /* Check status and move request to flush_queue */
2126         switch (cqr->status) {
2127         case DASD_CQR_IN_IO:
2128             rc = device->discipline->term_IO(cqr);
2129             if (rc) {
2130                 /* unable to terminate request */
2131                 dev_err(&device->cdev->dev,
2132                     "Flushing the DASD request queue "
2133                     "failed for request %p\n", cqr);
2134                 /* stop flush processing */
2135                 goto finished;
2136             }
2137             break;
2138         case DASD_CQR_QUEUED:
2139             cqr->stopclk = get_tod_clock();
2140             cqr->status = DASD_CQR_CLEARED;
2141             break;
2142         default: /* no need to modify the others */
2143             break;
2144         }
2145         list_move_tail(&cqr->devlist, &flush_queue);
2146     }
2147 finished:
2148     spin_unlock_irq(get_ccwdev_lock(device->cdev));
2149     /*
2150      * After this point all requests must be in state CLEAR_PENDING,
2151      * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2152      * one of the others.
2153      */
2154     list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2155         wait_event(dasd_flush_wq,
2156                (cqr->status != DASD_CQR_CLEAR_PENDING));
2157     /*
2158      * Now set each request back to TERMINATED, DONE or NEED_ERP
2159      * and call the callback function of flushed requests
2160      */
2161     __dasd_device_process_final_queue(device, &flush_queue);
2162     return rc;
2163 }
2164 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2165 
2166 /*
2167  * Acquire the device lock and process queues for the device.
2168  */
2169 static void dasd_device_tasklet(unsigned long data)
2170 {
2171     struct dasd_device *device = (struct dasd_device *) data;
2172     struct list_head final_queue;
2173 
2174     atomic_set(&device->tasklet_scheduled, 0);
2175     INIT_LIST_HEAD(&final_queue);
2176     spin_lock_irq(get_ccwdev_lock(device->cdev));
2177     /* Check expire time of first request on the ccw queue. */
2178     __dasd_device_check_expire(device);
2179     /* find final requests on ccw queue */
2180     __dasd_device_process_ccw_queue(device, &final_queue);
2181     __dasd_device_check_path_events(device);
2182     spin_unlock_irq(get_ccwdev_lock(device->cdev));
2183     /* Now call the callback function of requests with final status */
2184     __dasd_device_process_final_queue(device, &final_queue);
2185     spin_lock_irq(get_ccwdev_lock(device->cdev));
2186     /* Now check if the head of the ccw queue needs to be started. */
2187     __dasd_device_start_head(device);
2188     spin_unlock_irq(get_ccwdev_lock(device->cdev));
2189     if (waitqueue_active(&shutdown_waitq))
2190         wake_up(&shutdown_waitq);
2191     dasd_put_device(device);
2192 }
2193 
2194 /*
2195  * Schedules a call to dasd_device_tasklet over the device tasklet.
2196  */
2197 void dasd_schedule_device_bh(struct dasd_device *device)
2198 {
2199     /* Protect against rescheduling. */
2200     if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
2201         return;
2202     dasd_get_device(device);
2203     tasklet_hi_schedule(&device->tasklet);
2204 }
2205 EXPORT_SYMBOL(dasd_schedule_device_bh);
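
/*
 * Editorial sketch (not part of the driver): the schedule-once pattern used
 * by dasd_schedule_device_bh() above. Concurrent callers race for the
 * single 0->1 transition of the flag; only the winner queues the tasklet,
 * and the tasklet body clears the flag again before processing (compare
 * dasd_device_tasklet()).
 */
struct example_bh {
    atomic_t scheduled;
    struct tasklet_struct tasklet;
};

static void example_schedule_once(struct example_bh *bh)
{
    if (atomic_cmpxchg(&bh->scheduled, 0, 1) != 0)
        return;        /* a run is already pending */
    tasklet_hi_schedule(&bh->tasklet);
}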
2206 
2207 void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2208 {
2209     device->stopped |= bits;
2210 }
2211 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2212 
2213 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2214 {
2215     device->stopped &= ~bits;
2216     if (!device->stopped)
2217         wake_up(&generic_waitq);
2218 }
2219 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
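
/*
 * Editorial sketch (not part of the driver): stop bits are set and cleared
 * under the ccw device lock, mirroring dasd_device_timeout() above.
 * DASD_STOPPED_PENDING serves only as an example bit here.
 */
static void example_pause_and_resume(struct dasd_device *device)
{
    unsigned long flags;

    spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
    dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
    spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

    /* ... later, once the blocking condition has cleared ... */

    spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
    dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
    spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
    dasd_schedule_device_bh(device);    /* restart queue processing */
}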
2220 
2221 /*
2222  * Queue a request to the head of the device ccw_queue.
2223  * Start the I/O if possible.
2224  */
2225 void dasd_add_request_head(struct dasd_ccw_req *cqr)
2226 {
2227     struct dasd_device *device;
2228     unsigned long flags;
2229 
2230     device = cqr->startdev;
2231     spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2232     cqr->status = DASD_CQR_QUEUED;
2233     list_add(&cqr->devlist, &device->ccw_queue);
2234     /* let the bh start the requests to keep them in order */
2235     dasd_schedule_device_bh(device);
2236     spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2237 }
2238 EXPORT_SYMBOL(dasd_add_request_head);
2239 
2240 /*
2241  * Queue a request to the tail of the device ccw_queue.
2242  * Start the I/O if possible.
2243  */
2244 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2245 {
2246     struct dasd_device *device;
2247     unsigned long flags;
2248 
2249     device = cqr->startdev;
2250     spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2251     cqr->status = DASD_CQR_QUEUED;
2252     list_add_tail(&cqr->devlist, &device->ccw_queue);
2253     /* let the bh start the requests to keep them in order */
2254     dasd_schedule_device_bh(device);
2255     spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2256 }
2257 EXPORT_SYMBOL(dasd_add_request_tail);
2258 
2259 /*
2260  * Wakeup helper for the 'sleep_on' functions.
2261  */
2262 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2263 {
2264     spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2265     cqr->callback_data = DASD_SLEEPON_END_TAG;
2266     spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2267     wake_up(&generic_waitq);
2268 }
2269 EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
2270 
2271 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2272 {
2273     struct dasd_device *device;
2274     int rc;
2275 
2276     device = cqr->startdev;
2277     spin_lock_irq(get_ccwdev_lock(device->cdev));
2278     rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
2279     spin_unlock_irq(get_ccwdev_lock(device->cdev));
2280     return rc;
2281 }
2282 
2283 /*
2284  * checks whether error recovery is necessary; returns 1 if yes, 0 otherwise.
2285  */
2286 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2287 {
2288     struct dasd_device *device;
2289     dasd_erp_fn_t erp_fn;
2290 
2291     if (cqr->status == DASD_CQR_FILLED)
2292         return 0;
2293     device = cqr->startdev;
2294     if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2295         if (cqr->status == DASD_CQR_TERMINATED) {
2296             device->discipline->handle_terminated_request(cqr);
2297             return 1;
2298         }
2299         if (cqr->status == DASD_CQR_NEED_ERP) {
2300             erp_fn = device->discipline->erp_action(cqr);
2301             erp_fn(cqr);
2302             return 1;
2303         }
2304         if (cqr->status == DASD_CQR_FAILED)
2305             dasd_log_sense(cqr, &cqr->irb);
2306         if (cqr->refers) {
2307             __dasd_process_erp(device, cqr);
2308             return 1;
2309         }
2310     }
2311     return 0;
2312 }
2313 
2314 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2315 {
2316     if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2317         if (cqr->refers) /* erp is not done yet */
2318             return 1;
2319         return ((cqr->status != DASD_CQR_DONE) &&
2320             (cqr->status != DASD_CQR_FAILED));
2321     } else
2322         return (cqr->status == DASD_CQR_FILLED);
2323 }
2324 
2325 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2326 {
2327     struct dasd_device *device;
2328     int rc;
2329     struct list_head ccw_queue;
2330     struct dasd_ccw_req *cqr;
2331 
2332     INIT_LIST_HEAD(&ccw_queue);
2333     maincqr->status = DASD_CQR_FILLED;
2334     device = maincqr->startdev;
2335     list_add(&maincqr->blocklist, &ccw_queue);
2336     for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
2337          cqr = list_first_entry(&ccw_queue,
2338                     struct dasd_ccw_req, blocklist)) {
2339 
2340         if (__dasd_sleep_on_erp(cqr))
2341             continue;
2342         if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2343             continue;
2344         if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2345             !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2346             cqr->status = DASD_CQR_FAILED;
2347             cqr->intrc = -EPERM;
2348             continue;
2349         }
2350         /* Non-temporary stop condition will trigger fail fast */
2351         if (device->stopped & ~DASD_STOPPED_PENDING &&
2352             test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2353             (!dasd_eer_enabled(device))) {
2354             cqr->status = DASD_CQR_FAILED;
2355             cqr->intrc = -ENOLINK;
2356             continue;
2357         }
2358         /*
2359          * Don't try to start requests if the device is in
2360          * offline processing; they might wait forever
2361          */
2362         if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2363             cqr->status = DASD_CQR_FAILED;
2364             cqr->intrc = -ENODEV;
2365             continue;
2366         }
2367         /*
2368          * Don't try to start requests if device is stopped
2369          * except path verification requests
2370          */
2371         if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2372             if (interruptible) {
2373                 rc = wait_event_interruptible(
2374                     generic_waitq, !(device->stopped));
2375                 if (rc == -ERESTARTSYS) {
2376                     cqr->status = DASD_CQR_FAILED;
2377                     maincqr->intrc = rc;
2378                     continue;
2379                 }
2380             } else
2381                 wait_event(generic_waitq, !(device->stopped));
2382         }
2383         if (!cqr->callback)
2384             cqr->callback = dasd_wakeup_cb;
2385 
2386         cqr->callback_data = DASD_SLEEPON_START_TAG;
2387         dasd_add_request_tail(cqr);
2388         if (interruptible) {
2389             rc = wait_event_interruptible(
2390                 generic_waitq, _wait_for_wakeup(cqr));
2391             if (rc == -ERESTARTSYS) {
2392                 dasd_cancel_req(cqr);
2393                 /* wait (non-interruptible) for final status */
2394                 wait_event(generic_waitq,
2395                        _wait_for_wakeup(cqr));
2396                 cqr->status = DASD_CQR_FAILED;
2397                 maincqr->intrc = rc;
2398                 continue;
2399             }
2400         } else
2401             wait_event(generic_waitq, _wait_for_wakeup(cqr));
2402     }
2403 
2404     maincqr->endclk = get_tod_clock();
2405     if ((maincqr->status != DASD_CQR_DONE) &&
2406         (maincqr->intrc != -ERESTARTSYS))
2407         dasd_log_sense(maincqr, &maincqr->irb);
2408     if (maincqr->status == DASD_CQR_DONE)
2409         rc = 0;
2410     else if (maincqr->intrc)
2411         rc = maincqr->intrc;
2412     else
2413         rc = -EIO;
2414     return rc;
2415 }
2416 
2417 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2418 {
2419     struct dasd_ccw_req *cqr;
2420 
2421     list_for_each_entry(cqr, ccw_queue, blocklist) {
2422         if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2423             return 0;
2424     }
2425 
2426     return 1;
2427 }
2428 
2429 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2430 {
2431     struct dasd_device *device;
2432     struct dasd_ccw_req *cqr, *n;
2433     u8 *sense = NULL;
2434     int rc;
2435 
2436 retry:
2437     list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2438         device = cqr->startdev;
2439         if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2440             continue;
2441 
2442         if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2443             !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2444             cqr->status = DASD_CQR_FAILED;
2445             cqr->intrc = -EPERM;
2446             continue;
2447         }
2448         /* Non-temporary stop condition will trigger fail fast */
2449         if (device->stopped & ~DASD_STOPPED_PENDING &&
2450             test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2451             !dasd_eer_enabled(device)) {
2452             cqr->status = DASD_CQR_FAILED;
2453             cqr->intrc = -EAGAIN;
2454             continue;
2455         }
2456 
2457         /* Don't try to start requests if device is stopped */
2458         if (interruptible) {
2459             rc = wait_event_interruptible(
2460                 generic_waitq, !device->stopped);
2461             if (rc == -ERESTARTSYS) {
2462                 cqr->status = DASD_CQR_FAILED;
2463                 cqr->intrc = rc;
2464                 continue;
2465             }
2466         } else
2467             wait_event(generic_waitq, !(device->stopped));
2468 
2469         if (!cqr->callback)
2470             cqr->callback = dasd_wakeup_cb;
2471         cqr->callback_data = DASD_SLEEPON_START_TAG;
2472         dasd_add_request_tail(cqr);
2473     }
2474 
2475     wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
2476 
2477     rc = 0;
2478     list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2479         /*
2480          * In some cases the 'File Protected' or 'Incorrect Length'
2481          * error might be expected and error recovery would be
2482          * unnecessary.  Check if the corresponding suppress
2483          * bit is set.
2484          */
2485         sense = dasd_get_sense(&cqr->irb);
2486         if (sense && sense[1] & SNS1_FILE_PROTECTED &&
2487             test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
2488             continue;
2489         if (scsw_cstat(&cqr->irb.scsw) == SCHN_STAT_INCORR_LEN &&
2490             test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2491             continue;
2492 
2493         /*
2494          * for alias devices, simplify error recovery and
2495          * return to the upper layer;
2496          * do not skip ERP requests
2497          */
2498         if (cqr->startdev != cqr->basedev && !cqr->refers &&
2499             (cqr->status == DASD_CQR_TERMINATED ||
2500              cqr->status == DASD_CQR_NEED_ERP))
2501             return -EAGAIN;
2502 
2503         /* normal recovery for basedev IO */
2504         if (__dasd_sleep_on_erp(cqr))
2505             /* handle erp first */
2506             goto retry;
2507     }
2508 
2509     return 0;
2510 }
2511 
2512 /*
2513  * Queue a request to the tail of the device ccw_queue and wait for
2514  * its completion.
2515  */
2516 int dasd_sleep_on(struct dasd_ccw_req *cqr)
2517 {
2518     return _dasd_sleep_on(cqr, 0);
2519 }
2520 EXPORT_SYMBOL(dasd_sleep_on);
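
/*
 * Editorial sketch (not part of the driver): synchronous single-request
 * submission as the disciplines use it. example_build_cqr() stands in for
 * a discipline helper that returns a request in state DASD_CQR_FILLED; it
 * is hypothetical and only named for illustration.
 */
static struct dasd_ccw_req *example_build_cqr(struct dasd_device *device);

static int example_sync_io(struct dasd_device *device)
{
    struct dasd_ccw_req *cqr;
    int rc;

    cqr = example_build_cqr(device);    /* hypothetical helper */
    if (IS_ERR(cqr))
        return PTR_ERR(cqr);

    rc = dasd_sleep_on(cqr);    /* blocks until final status */

    dasd_sfree_request(cqr, device);    /* release cqr and ccw area */
    return rc;
}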
2521 
2522 /*
2523  * Start requests from a ccw_queue and wait for their completion.
2524  */
2525 int dasd_sleep_on_queue(struct list_head *ccw_queue)
2526 {
2527     return _dasd_sleep_on_queue(ccw_queue, 0);
2528 }
2529 EXPORT_SYMBOL(dasd_sleep_on_queue);
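
/*
 * Editorial sketch (not part of the driver): batched submission via the
 * blocklist chaining that dasd_sleep_on_queue() expects. It reuses the
 * hypothetical example_build_cqr() helper declared above.
 */
static int example_batch_io(struct dasd_device *device, int count)
{
    struct dasd_ccw_req *cqr, *n;
    LIST_HEAD(ccw_queue);
    int i, rc;

    for (i = 0; i < count; i++) {
        cqr = example_build_cqr(device);    /* hypothetical helper */
        if (IS_ERR(cqr))
            break;
        list_add_tail(&cqr->blocklist, &ccw_queue);
    }

    rc = dasd_sleep_on_queue(&ccw_queue);    /* waits for all requests */

    list_for_each_entry_safe(cqr, n, &ccw_queue, blocklist) {
        list_del_init(&cqr->blocklist);
        dasd_sfree_request(cqr, device);
    }
    return rc;
}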
2530 
2531 /*
2532  * Start requests from a ccw_queue and wait interruptible for their completion.
2533  */
2534 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
2535 {
2536     return _dasd_sleep_on_queue(ccw_queue, 1);
2537 }
2538 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
2539 
2540 /*
2541  * Queue a request to the tail of the device ccw_queue and wait
2542  * interruptible for its completion.
2543  */
2544 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2545 {
2546     return _dasd_sleep_on(cqr, 1);
2547 }
2548 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2549 
2550 /*
2551  * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
2552  * for eckd devices) the currently running request has to be terminated
2553  * and be put back to status queued, before the special request is added
2554  * to the head of the queue. Then the special request is waited on normally.
2555  */
2556 static inline int _dasd_term_running_cqr(struct dasd_device *device)
2557 {
2558     struct dasd_ccw_req *cqr;
2559     int rc;
2560 
2561     if (list_empty(&device->ccw_queue))
2562         return 0;
2563     cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2564     rc = device->discipline->term_IO(cqr);
2565     if (!rc)
2566         /*
2567          * CQR terminated because a more important request is pending.
2568          * Undo decreasing of retry counter because this is
2569          * not an error case.
2570          */
2571         cqr->retries++;
2572     return rc;
2573 }
2574 
2575 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2576 {
2577     struct dasd_device *device;
2578     int rc;
2579 
2580     device = cqr->startdev;
2581     if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2582         !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2583         cqr->status = DASD_CQR_FAILED;
2584         cqr->intrc = -EPERM;
2585         return -EIO;
2586     }
2587     spin_lock_irq(get_ccwdev_lock(device->cdev));
2588     rc = _dasd_term_running_cqr(device);
2589     if (rc) {
2590         spin_unlock_irq(get_ccwdev_lock(device->cdev));
2591         return rc;
2592     }
2593     cqr->callback = dasd_wakeup_cb;
2594     cqr->callback_data = DASD_SLEEPON_START_TAG;
2595     cqr->status = DASD_CQR_QUEUED;
2596     /*
2597      * add the new request as the second one;
2598      * the terminated cqr needs to be finished first
2599      */
2600     list_add(&cqr->devlist, device->ccw_queue.next);
2601 
2602     /* let the bh start the requests to keep them in order */
2603     dasd_schedule_device_bh(device);
2604 
2605     spin_unlock_irq(get_ccwdev_lock(device->cdev));
2606 
2607     wait_event(generic_waitq, _wait_for_wakeup(cqr));
2608 
2609     if (cqr->status == DASD_CQR_DONE)
2610         rc = 0;
2611     else if (cqr->intrc)
2612         rc = cqr->intrc;
2613     else
2614         rc = -EIO;
2615 
2616     /* kick tasklets */
2617     dasd_schedule_device_bh(device);
2618     if (device->block)
2619         dasd_schedule_block_bh(device->block);
2620 
2621     return rc;
2622 }
2623 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2624 
2625 /*
2626  * Cancels a request that was started with dasd_sleep_on.
2627  * This is useful for timing out requests. The request will be
2628  * terminated if it is currently in i/o.
2629  * Returns 0 if request termination was successful
2630  *     negative error code if termination failed
2631  * Cancellation of a request is an asynchronous operation! The calling
2632  * function has to wait until the request is properly returned via callback.
2633  */
2634 static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2635 {
2636     struct dasd_device *device = cqr->startdev;
2637     int rc = 0;
2638 
2639     switch (cqr->status) {
2640     case DASD_CQR_QUEUED:
2641         /* request was not started - just set to cleared */
2642         cqr->status = DASD_CQR_CLEARED;
2643         break;
2644     case DASD_CQR_IN_IO:
2645         /* request in IO - terminate IO and release again */
2646         rc = device->discipline->term_IO(cqr);
2647         if (rc) {
2648             dev_err(&device->cdev->dev,
2649                 "Cancelling request %p failed with rc=%d\n",
2650                 cqr, rc);
2651         } else {
2652             cqr->stopclk = get_tod_clock();
2653         }
2654         break;
2655     default: /* already finished or clear pending - do nothing */
2656         break;
2657     }
2658     dasd_schedule_device_bh(device);
2659     return rc;
2660 }
2661 
2662 int dasd_cancel_req(struct dasd_ccw_req *cqr)
2663 {
2664     struct dasd_device *device = cqr->startdev;
2665     unsigned long flags;
2666     int rc;
2667 
2668     spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2669     rc = __dasd_cancel_req(cqr);
2670     spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2671     return rc;
2672 }
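
/*
 * Editorial sketch (not part of the driver): cancellation is asynchronous,
 * as the comment above dasd_cancel_req() notes. After the cancel the caller
 * still has to wait for the final callback; with the sleep_on helpers that
 * means waiting until _wait_for_wakeup() sees DASD_SLEEPON_END_TAG, just as
 * _dasd_sleep_on() does on -ERESTARTSYS.
 */
static void example_cancel_and_wait(struct dasd_ccw_req *cqr)
{
    dasd_cancel_req(cqr);
    /* non-interruptible wait for the request to be returned */
    wait_event(generic_waitq, _wait_for_wakeup(cqr));
}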
2673 
2674 /*
2675  * SECTION: Operations of the dasd_block layer.
2676  */
2677 
2678 /*
2679  * Timeout function for dasd_block. This is used when the block layer
2680  * is waiting for something that may not come reliably (e.g. a state
2681  * change interrupt)
2682  */
2683 static void dasd_block_timeout(struct timer_list *t)
2684 {
2685     unsigned long flags;
2686     struct dasd_block *block;
2687 
2688     block = from_timer(block, t, timer);
2689     spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
2690     /* re-activate request queue */
2691     dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2692     spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
2693     dasd_schedule_block_bh(block);
2694     blk_mq_run_hw_queues(block->request_queue, true);
2695 }
2696 
2697 /*
2698  * Setup timeout for a dasd_block in jiffies.
2699  * Set up a timeout for a dasd_block, in jiffies.
2700 void dasd_block_set_timer(struct dasd_block *block, int expires)
2701 {
2702     if (expires == 0)
2703         del_timer(&block->timer);
2704     else
2705         mod_timer(&block->timer, jiffies + expires);
2706 }
2707 EXPORT_SYMBOL(dasd_block_set_timer);
2708 
2709 /*
2710  * Clear timeout for a dasd_block.
2711  */
2712 void dasd_block_clear_timer(struct dasd_block *block)
2713 {
2714     del_timer(&block->timer);
2715 }
2716 EXPORT_SYMBOL(dasd_block_clear_timer);
2717 
2718 /*
2719  * Process finished error recovery ccw.
2720  */
2721 static void __dasd_process_erp(struct dasd_device *device,
2722                    struct dasd_ccw_req *cqr)
2723 {
2724     dasd_erp_fn_t erp_fn;
2725 
2726     if (cqr->status == DASD_CQR_DONE)
2727         DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
2728     else
2729         dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2730     erp_fn = device->discipline->erp_postaction(cqr);
2731     erp_fn(cqr);
2732 }
2733 
2734 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2735 {
2736     struct request *req;
2737     blk_status_t error = BLK_STS_OK;
2738     unsigned int proc_bytes;
2739     int status;
2740 
2741     req = (struct request *) cqr->callback_data;
2742     dasd_profile_end(cqr->block, cqr, req);
2743 
2744     proc_bytes = cqr->proc_bytes;
2745     status = cqr->block->base->discipline->free_cp(cqr, req);
2746     if (status < 0)
2747         error = errno_to_blk_status(status);
2748     else if (status == 0) {
2749         switch (cqr->intrc) {
2750         case -EPERM:
2751             error = BLK_STS_NEXUS;
2752             break;
2753         case -ENOLINK:
2754             error = BLK_STS_TRANSPORT;
2755             break;
2756         case -ETIMEDOUT:
2757             error = BLK_STS_TIMEOUT;
2758             break;
2759         default:
2760             error = BLK_STS_IOERR;
2761             break;
2762         }
2763     }
2764 
2765     /*
2766      * We need to take care of ETIMEDOUT errors here since the
2767      * complete callback does not get called in this case.
2768      * Take care of all errors here and avoid additional code to
2769      * transfer the error value to the complete callback.
2770      */
2771     if (error) {
2772         blk_mq_end_request(req, error);
2773         blk_mq_run_hw_queues(req->q, true);
2774     } else {
2775         /*
2776          * Partially completed requests can happen with ESE devices.
2777          * During read we might have gotten an NRF error and have to
2778          * complete a request partially.
2779          */
2780         if (proc_bytes) {
2781             blk_update_request(req, BLK_STS_OK, proc_bytes);
2782             blk_mq_requeue_request(req, true);
2783         } else if (likely(!blk_should_fake_timeout(req->q))) {
2784             blk_mq_complete_request(req);
2785         }
2786     }
2787 }
2788 
2789 /*
2790  * Process ccw request queue.
2791  */
2792 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
2793                        struct list_head *final_queue)
2794 {
2795     struct list_head *l, *n;
2796     struct dasd_ccw_req *cqr;
2797     dasd_erp_fn_t erp_fn;
2798     unsigned long flags;
2799     struct dasd_device *base = block->base;
2800 
2801 restart:
2802     /* Process request with final status. */
2803     list_for_each_safe(l, n, &block->ccw_queue) {
2804         cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2805         if (cqr->status != DASD_CQR_DONE &&
2806             cqr->status != DASD_CQR_FAILED &&
2807             cqr->status != DASD_CQR_NEED_ERP &&
2808             cqr->status != DASD_CQR_TERMINATED)
2809             continue;
2810 
2811         if (cqr->status == DASD_CQR_TERMINATED) {
2812             base->discipline->handle_terminated_request(cqr);
2813             goto restart;
2814         }
2815 
2816         /*  Process requests that may be recovered */
2817         if (cqr->status == DASD_CQR_NEED_ERP) {
2818             erp_fn = base->discipline->erp_action(cqr);
2819             if (IS_ERR(erp_fn(cqr)))
2820                 continue;
2821             goto restart;
2822         }
2823 
2824         /* log sense for fatal error */
2825         if (cqr->status == DASD_CQR_FAILED) {
2826             dasd_log_sense(cqr, &cqr->irb);
2827         }
2828 
2829         /* First of all call extended error reporting. */
2830         if (dasd_eer_enabled(base) &&
2831             cqr->status == DASD_CQR_FAILED) {
2832             dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
2833 
2834             /* restart request  */
2835             cqr->status = DASD_CQR_FILLED;
2836             cqr->retries = 255;
2837             spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
2838             dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
2839             spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
2840                            flags);
2841             goto restart;
2842         }
2843 
2844         /* Process finished ERP request. */
2845         if (cqr->refers) {
2846             __dasd_process_erp(base, cqr);
2847             goto restart;
2848         }
2849 
2850         /* Rechain finished requests to final queue */
2851         cqr->endclk = get_tod_clock();
2852         list_move_tail(&cqr->blocklist, final_queue);
2853     }
2854 }
2855 
2856 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2857 {
2858     dasd_schedule_block_bh(cqr->block);
2859 }
2860 
2861 static void __dasd_block_start_head(struct dasd_block *block)
2862 {
2863     struct dasd_ccw_req *cqr;
2864 
2865     if (list_empty(&block->ccw_queue))
2866         return;
2867     /* We always begin with the first requests on the queue, as some
2868      * of the previously started requests have to be enqueued on a
2869      * dasd_device again for error recovery.
2870      */
2871     list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2872         if (cqr->status != DASD_CQR_FILLED)
2873             continue;
2874         if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2875             !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2876             cqr->status = DASD_CQR_FAILED;
2877             cqr->intrc = -EPERM;
2878             dasd_schedule_block_bh(block);
2879             continue;
2880         }
2881         /* Non-temporary stop condition will trigger fail fast */
2882         if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2883             test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2884             (!dasd_eer_enabled(block->base))) {
2885             cqr->status = DASD_CQR_FAILED;
2886             cqr->intrc = -ENOLINK;
2887             dasd_schedule_block_bh(block);
2888             continue;
2889         }
2890         /* Don't try to start requests if device is stopped */
2891         if (block->base->stopped)
2892             return;
2893 
2894         /* just a fail-safe check, should not happen */
2895         if (!cqr->startdev)
2896             cqr->startdev = block->base;
2897 
2898         /* make sure that the requests we submit find their way back */
2899         cqr->callback = dasd_return_cqr_cb;
2900 
2901         dasd_add_request_tail(cqr);
2902     }
2903 }
2904 
2905 /*
2906  * Central dasd_block layer routine. Takes requests from the generic
2907  * block layer request queue, creates ccw requests, enqueues them on
2908  * a dasd_device and processes ccw requests that have been returned.
2909  */
2910 static void dasd_block_tasklet(unsigned long data)
2911 {
2912     struct dasd_block *block = (struct dasd_block *) data;
2913     struct list_head final_queue;
2914     struct list_head *l, *n;
2915     struct dasd_ccw_req *cqr;
2916     struct dasd_queue *dq;
2917 
2918     atomic_set(&block->tasklet_scheduled, 0);
2919     INIT_LIST_HEAD(&final_queue);
2920     spin_lock_irq(&block->queue_lock);
2921     /* Finish off requests on ccw queue */
2922     __dasd_process_block_ccw_queue(block, &final_queue);
2923     spin_unlock_irq(&block->queue_lock);
2924 
2925     /* Now call the callback function of requests with final status */
2926     list_for_each_safe(l, n, &final_queue) {
2927         cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2928         dq = cqr->dq;
2929         spin_lock_irq(&dq->lock);
2930         list_del_init(&cqr->blocklist);
2931         __dasd_cleanup_cqr(cqr);
2932         spin_unlock_irq(&dq->lock);
2933     }
2934 
2935     spin_lock_irq(&block->queue_lock);
2936     /* Now check if the head of the ccw queue needs to be started. */
2937     __dasd_block_start_head(block);
2938     spin_unlock_irq(&block->queue_lock);
2939 
2940     if (waitqueue_active(&shutdown_waitq))
2941         wake_up(&shutdown_waitq);
2942     dasd_put_device(block->base);
2943 }
2944 
2945 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2946 {
2947     wake_up(&dasd_flush_wq);
2948 }
2949 
2950 /*
2951  * Requeue a request to the block request queue.
2952  * This only works for block layer requests.
2953  */
2954 static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
2955 {
2956     struct dasd_block *block = cqr->block;
2957     struct request *req;
2958 
2959     if (!block)
2960         return -EINVAL;
2961     /*
2962      * If the request is an ERP request there is nothing to requeue.
2963      * This will be done with the remaining original request.
2964      */
2965     if (cqr->refers)
2966         return 0;
2967     spin_lock_irq(&cqr->dq->lock);
2968     req = (struct request *) cqr->callback_data;
2969     blk_mq_requeue_request(req, false);
2970     spin_unlock_irq(&cqr->dq->lock);
2971 
2972     return 0;
2973 }
2974 
2975 /*
2976  * Go through all requests on the dasd_block request queue, cancel them
2977  * on the respective dasd_device, and return them to the generic
2978  * block layer.
2979  */
2980 static int dasd_flush_block_queue(struct dasd_block *block)
2981 {
2982     struct dasd_ccw_req *cqr, *n;
2983     int rc, i;
2984     struct list_head flush_queue;
2985     unsigned long flags;
2986 
2987     INIT_LIST_HEAD(&flush_queue);
2988     spin_lock_bh(&block->queue_lock);
2989     rc = 0;
2990 restart:
2991     list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2992         /* if this request is currently owned by a dasd_device, cancel it */
2993         if (cqr->status >= DASD_CQR_QUEUED)
2994             rc = dasd_cancel_req(cqr);
2995         if (rc < 0)
2996             break;
2997         /* Rechain request (including erp chain) so it won't be
2998          * touched by the dasd_block_tasklet anymore.
2999          * Replace the callback so we notice when the request
3000          * is returned from the dasd_device layer.
3001          */
3002         cqr->callback = _dasd_wake_block_flush_cb;
3003         for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
3004             list_move_tail(&cqr->blocklist, &flush_queue);
3005         if (i > 1)
3006             /* moved more than one request - need to restart */
3007             goto restart;
3008     }
3009     spin_unlock_bh(&block->queue_lock);
3010     /* Now call the callback function of flushed requests */
3011 restart_cb:
3012     list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
3013         wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3014         /* Process finished ERP request. */
3015         if (cqr->refers) {
3016             spin_lock_bh(&block->queue_lock);
3017             __dasd_process_erp(block->base, cqr);
3018             spin_unlock_bh(&block->queue_lock);
3019             /* restart list_for_xx loop since __dasd_process_erp
3020              * might remove multiple elements */
3021             goto restart_cb;
3022         }
3023         /* call the callback function */
3024         spin_lock_irqsave(&cqr->dq->lock, flags);
3025         cqr->endclk = get_tod_clock();
3026         list_del_init(&cqr->blocklist);
3027         __dasd_cleanup_cqr(cqr);
3028         spin_unlock_irqrestore(&cqr->dq->lock, flags);
3029     }
3030     return rc;
3031 }
3032 
3033 /*
3034  * Schedules a call to dasd_block_tasklet over the block tasklet.
3035  */
3036 void dasd_schedule_block_bh(struct dasd_block *block)
3037 {
3038     /* Protect against rescheduling. */
3039     if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
3040         return;
3041     /* life cycle of block is bound to its base device */
3042     dasd_get_device(block->base);
3043     tasklet_hi_schedule(&block->tasklet);
3044 }
3045 EXPORT_SYMBOL(dasd_schedule_block_bh);
3046 
3047 
3048 /*
3049  * SECTION: external block device operations
3050  * (request queue handling, open, release, etc.)
3051  */
3052 
3053 /*
3054  * DASD request queue function, called by the block layer (blk-mq).
3055  */
3056 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3057                     const struct blk_mq_queue_data *qd)
3058 {
3059     struct dasd_block *block = hctx->queue->queuedata;
3060     struct dasd_queue *dq = hctx->driver_data;
3061     struct request *req = qd->rq;
3062     struct dasd_device *basedev;
3063     struct dasd_ccw_req *cqr;
3064     blk_status_t rc = BLK_STS_OK;
3065 
3066     basedev = block->base;
3067     spin_lock_irq(&dq->lock);
3068     if (basedev->state < DASD_STATE_READY ||
3069         test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
3070         DBF_DEV_EVENT(DBF_ERR, basedev,
3071                   "device not ready for request %p", req);
3072         rc = BLK_STS_IOERR;
3073         goto out;
3074     }
3075 
3076     /*
3077      * if the device is stopped do not fetch new requests,
3078      * unless failfast is active, which will let requests fail
3079      * immediately in __dasd_block_start_head()
3080      */
3081     if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
3082         DBF_DEV_EVENT(DBF_ERR, basedev,
3083                   "device stopped request %p", req);
3084         rc = BLK_STS_RESOURCE;
3085         goto out;
3086     }
3087 
3088     if (basedev->features & DASD_FEATURE_READONLY &&
3089         rq_data_dir(req) == WRITE) {
3090         DBF_DEV_EVENT(DBF_ERR, basedev,
3091                   "Rejecting write request %p", req);
3092         rc = BLK_STS_IOERR;
3093         goto out;
3094     }
3095 
3096     if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
3097         (basedev->features & DASD_FEATURE_FAILFAST ||
3098          blk_noretry_request(req))) {
3099         DBF_DEV_EVENT(DBF_ERR, basedev,
3100                   "Rejecting failfast request %p", req);
3101         rc = BLK_STS_IOERR;
3102         goto out;
3103     }
3104 
3105     cqr = basedev->discipline->build_cp(basedev, block, req);
3106     if (IS_ERR(cqr)) {
3107         if (PTR_ERR(cqr) == -EBUSY ||
3108             PTR_ERR(cqr) == -ENOMEM ||
3109             PTR_ERR(cqr) == -EAGAIN) {
3110             rc = BLK_STS_RESOURCE;
3111             goto out;
3112         }
3113         DBF_DEV_EVENT(DBF_ERR, basedev,
3114                   "CCW creation failed (rc=%ld) on request %p",
3115                   PTR_ERR(cqr), req);
3116         rc = BLK_STS_IOERR;
3117         goto out;
3118     }
3119     /*
3120      *  Note: callback is set to dasd_return_cqr_cb in
3121      * __dasd_block_start_head to cover erp requests as well
3122      */
3123     cqr->callback_data = req;
3124     cqr->status = DASD_CQR_FILLED;
3125     cqr->dq = dq;
3126 
3127     blk_mq_start_request(req);
3128     spin_lock(&block->queue_lock);
3129     list_add_tail(&cqr->blocklist, &block->ccw_queue);
3130     INIT_LIST_HEAD(&cqr->devlist);
3131     dasd_profile_start(block, cqr, req);
3132     dasd_schedule_block_bh(block);
3133     spin_unlock(&block->queue_lock);
3134 
3135 out:
3136     spin_unlock_irq(&dq->lock);
3137     return rc;
3138 }
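
/*
 * Editorial sketch (not part of the driver): the blk-mq contract that
 * do_dasd_request() above follows. BLK_STS_RESOURCE asks the block layer
 * to re-dispatch the request later; BLK_STS_OK means the driver owns the
 * request and will finish it through its completion path. The busy
 * predicate below is hypothetical.
 */
static bool example_driver_busy(struct blk_mq_hw_ctx *hctx);    /* hypothetical */

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *qd)
{
    struct request *req = qd->rq;

    if (example_driver_busy(hctx))        /* hypothetical predicate */
        return BLK_STS_RESOURCE;    /* block layer retries later */

    blk_mq_start_request(req);    /* start timeout accounting */
    /* hand the request to the hardware; completion ends it later */
    return BLK_STS_OK;
}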
3139 
3140 /*
3141  * Block timeout callback, called from the block layer
3142  *
3143  * Return values:
3144  * BLK_EH_RESET_TIMER if the request should be left running
3145  * BLK_EH_DONE if the request is handled or terminated
3146  *            by the driver.
3147  */
3148 enum blk_eh_timer_return dasd_times_out(struct request *req)
3149 {
3150     struct dasd_block *block = req->q->queuedata;
3151     struct dasd_device *device;
3152     struct dasd_ccw_req *cqr;
3153     unsigned long flags;
3154     int rc = 0;
3155 
3156     cqr = blk_mq_rq_to_pdu(req);
3157     if (!cqr)
3158         return BLK_EH_DONE;
3159 
3160     spin_lock_irqsave(&cqr->dq->lock, flags);
3161     device = cqr->startdev ? cqr->startdev : block->base;
3162     if (!device->blk_timeout) {
3163         spin_unlock_irqrestore(&cqr->dq->lock, flags);
3164         return BLK_EH_RESET_TIMER;
3165     }
3166     DBF_DEV_EVENT(DBF_WARNING, device,
3167               " dasd_times_out cqr %p status %x",
3168               cqr, cqr->status);
3169 
3170     spin_lock(&block->queue_lock);
3171     spin_lock(get_ccwdev_lock(device->cdev));
3172     cqr->retries = -1;
3173     cqr->intrc = -ETIMEDOUT;
3174     if (cqr->status >= DASD_CQR_QUEUED) {
3175         rc = __dasd_cancel_req(cqr);
3176     } else if (cqr->status == DASD_CQR_FILLED ||
3177            cqr->status == DASD_CQR_NEED_ERP) {
3178         cqr->status = DASD_CQR_TERMINATED;
3179     } else if (cqr->status == DASD_CQR_IN_ERP) {
3180         struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3181 
3182         list_for_each_entry_safe(searchcqr, nextcqr,
3183                      &block->ccw_queue, blocklist) {
3184             tmpcqr = searchcqr;
3185             while (tmpcqr->refers)
3186                 tmpcqr = tmpcqr->refers;
3187             if (tmpcqr != cqr)
3188                 continue;
3189             /* searchcqr is an ERP request for cqr */
3190             searchcqr->retries = -1;
3191             searchcqr->intrc = -ETIMEDOUT;
3192             if (searchcqr->status >= DASD_CQR_QUEUED) {
3193                 rc = __dasd_cancel_req(searchcqr);
3194             } else if ((searchcqr->status == DASD_CQR_FILLED) ||
3195                    (searchcqr->status == DASD_CQR_NEED_ERP)) {
3196                 searchcqr->status = DASD_CQR_TERMINATED;
3197                 rc = 0;
3198             } else if (searchcqr->status == DASD_CQR_IN_ERP) {
3199                 /*
3200                  * Shouldn't happen; most recent ERP
3201                  * request is at the front of queue
3202                  */
3203                 continue;
3204             }
3205             break;
3206         }
3207     }
3208     spin_unlock(get_ccwdev_lock(device->cdev));
3209     dasd_schedule_block_bh(block);
3210     spin_unlock(&block->queue_lock);
3211     spin_unlock_irqrestore(&cqr->dq->lock, flags);
3212 
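         /*
          * A non-zero rc means __dasd_cancel_req() could not stop the
          * request yet; re-arm the block layer timer and let the
          * timeout handler run again later.
          */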
3213     return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
3214 }
3215 
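     /*
      * Each blk-mq hardware context gets its own struct dasd_queue;
      * its lock serializes request submission (do_dasd_request) and
      * timeout handling (dasd_times_out) for that queue.
      */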
3216 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
3217               unsigned int idx)
3218 {
3219     struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
3220 
3221     if (!dq)
3222         return -ENOMEM;
3223 
3224     spin_lock_init(&dq->lock);
3225     hctx->driver_data = dq;
3226 
3227     return 0;
3228 }
3229 
3230 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
3231 {
3232     kfree(hctx->driver_data);
3233     hctx->driver_data = NULL;
3234 }
3235 
3236 static void dasd_request_done(struct request *req)
3237 {
3238     blk_mq_end_request(req, 0);
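         /*
          * Re-run the hardware queues so that requests deferred with
          * BLK_STS_RESOURCE get another chance to be queued.
          */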
3239     blk_mq_run_hw_queues(req->q, true);
3240 }
3241 
3242 static struct blk_mq_ops dasd_mq_ops = {
3243     .queue_rq = do_dasd_request,
3244     .complete = dasd_request_done,
3245     .timeout = dasd_times_out,
3246     .init_hctx = dasd_init_hctx,
3247     .exit_hctx = dasd_exit_hctx,
3248 };
3249 
3250 /*
3251  * Allocate and initialize request queue and default I/O scheduler.
3252  */
3253 static int dasd_alloc_queue(struct dasd_block *block)
3254 {
3255     int rc;
3256 
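         /*
          * cmd_size lets blk-mq embed a struct dasd_ccw_req in each
          * request PDU (see blk_mq_rq_to_pdu() in dasd_times_out);
          * queue_depth and nr_hw_queues come from the module
          * parameters of the same names.
          */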
3257     block->tag_set.ops = &dasd_mq_ops;
3258     block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
3259     block->tag_set.nr_hw_queues = nr_hw_queues;
3260     block->tag_set.queue_depth = queue_depth;
3261     block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
3262     block->tag_set.numa_node = NUMA_NO_NODE;
3263 
3264     rc = blk_mq_alloc_tag_set(&block->tag_set);
3265     if (rc)
3266         return rc;
3267 
3268     block->request_queue = blk_mq_init_queue(&block->tag_set);
3269     if (IS_ERR(block->request_queue))
3270         return PTR_ERR(block->request_queue);
3271 
3272     block->request_queue->queuedata = block;
3273 
3274     return 0;
3275 }
3276 
3277 /*
3278  * Deactivate and free request queue.
3279  */
3280 static void dasd_free_queue(struct dasd_block *block)
3281 {
3282     if (block->request_queue) {
3283         blk_mq_destroy_queue(block->request_queue);
3284         blk_mq_free_tag_set(&block->tag_set);
3285         block->request_queue = NULL;
3286     }
3287 }
3288 
3289 static int dasd_open(struct block_device *bdev, fmode_t mode)
3290 {
3291     struct dasd_device *base;
3292     int rc;
3293 
3294     base = dasd_device_from_gendisk(bdev->bd_disk);
3295     if (!base)
3296         return -ENODEV;
3297 
3298     atomic_inc(&base->block->open_count);
3299     if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3300         rc = -ENODEV;
3301         goto unlock;
3302     }
3303 
3304     if (!try_module_get(base->discipline->owner)) {
3305         rc = -EINVAL;
3306         goto unlock;
3307     }
3308 
3309     if (dasd_probeonly) {
3310         dev_info(&base->cdev->dev,
3311              "Accessing the DASD failed because it is in "
3312              "probeonly mode\n");
3313         rc = -EPERM;
3314         goto out;
3315     }
3316 
3317     if (base->state <= DASD_STATE_BASIC) {
3318         DBF_DEV_EVENT(DBF_ERR, base, " %s",
3319                   " Cannot open unrecognized device");
3320         rc = -ENODEV;
3321         goto out;
3322     }
3323 
3324     if ((mode & FMODE_WRITE) &&
3325         (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3326          (base->features & DASD_FEATURE_READONLY))) {
3327         rc = -EROFS;
3328         goto out;
3329     }
3330 
3331     dasd_put_device(base);
3332     return 0;
3333 
3334 out:
3335     module_put(base->discipline->owner);
3336 unlock:
3337     atomic_dec(&base->block->open_count);
3338     dasd_put_device(base);
3339     return rc;
3340 }
3341 
3342 static void dasd_release(struct gendisk *disk, fmode_t mode)
3343 {
3344     struct dasd_device *base = dasd_device_from_gendisk(disk);
3345     if (base) {
3346         atomic_dec(&base->block->open_count);
3347         module_put(base->discipline->owner);
3348         dasd_put_device(base);
3349     }
3350 }
3351 
3352 /*
3353  * Return disk geometry.
3354  */
3355 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
3356 {
3357     struct dasd_device *base;
3358 
3359     base = dasd_device_from_gendisk(bdev->bd_disk);
3360     if (!base)
3361         return -ENODEV;
3362 
3363     if (!base->discipline ||
3364         !base->discipline->fill_geometry) {
3365         dasd_put_device(base);
3366         return -EINVAL;
3367     }
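         /*
          * fill_geometry() provides cylinders/heads/sectors; the
          * partition start is converted from 512-byte sectors to
          * device blocks via s2b_shift.
          */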
3368     base->discipline->fill_geometry(base->block, geo);
3369     geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
3370     dasd_put_device(base);
3371     return 0;
3372 }
3373 
3374 const struct block_device_operations
3375 dasd_device_operations = {
3376     .owner      = THIS_MODULE,
3377     .open       = dasd_open,
3378     .release    = dasd_release,
3379     .ioctl      = dasd_ioctl,
3380     .compat_ioctl   = dasd_ioctl,
3381     .getgeo     = dasd_getgeo,
3382     .set_read_only  = dasd_set_read_only,
3383 };
3384 
3385 /*******************************************************************************
3386  * end of block device operations
3387  */
3388 
3389 static void
3390 dasd_exit(void)
3391 {
3392 #ifdef CONFIG_PROC_FS
3393     dasd_proc_exit();
3394 #endif
3395     dasd_eer_exit();
3396     kmem_cache_destroy(dasd_page_cache);
3397     dasd_page_cache = NULL;
3398     dasd_gendisk_exit();
3399     dasd_devmap_exit();
3400     if (dasd_debug_area != NULL) {
3401         debug_unregister(dasd_debug_area);
3402         dasd_debug_area = NULL;
3403     }
3404     dasd_statistics_removeroot();
3405 }
3406 
3407 /*
3408  * SECTION: common functions for ccw_driver use
3409  */
3410 
3411 /*
3412  * Is the device read-only?
3413  * Note that this function does not report the setting of the
3414  * readonly device attribute, but how it is configured in z/VM.
3415  */
3416 int dasd_device_is_ro(struct dasd_device *device)
3417 {
3418     struct ccw_dev_id dev_id;
3419     struct diag210 diag_data;
3420     int rc;
3421 
3422     if (!MACHINE_IS_VM)
3423         return 0;
3424     ccw_device_get_id(device->cdev, &dev_id);
3425     memset(&diag_data, 0, sizeof(diag_data));
3426     diag_data.vrdcdvno = dev_id.devno;
3427     diag_data.vrdclen = sizeof(diag_data);
3428     rc = diag210(&diag_data);
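         /*
          * rc 0 and 2 both indicate that usable data was returned;
          * the 0x80 bit of the virtual device flags (vrdcvfla) marks
          * a device that z/VM presents read-only.
          */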
3429     if (rc == 0 || rc == 2) {
3430         return diag_data.vrdcvfla & 0x80;
3431     } else {
3432         DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
3433               dev_id.devno, rc);
3434         return 0;
3435     }
3436 }
3437 EXPORT_SYMBOL_GPL(dasd_device_is_ro);
3438 
3439 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3440 {
3441     struct ccw_device *cdev = data;
3442     int ret;
3443 
3444     ret = ccw_device_set_online(cdev);
3445     if (ret)
3446         pr_warn("%s: Setting the DASD online failed with rc=%d\n",
3447             dev_name(&cdev->dev), ret);
3448 }
3449 
3450 /*
3451  * Initial attempt at a probe function. This can be simplified once
3452  * the other detection code is gone.
3453  */
3454 int dasd_generic_probe(struct ccw_device *cdev)
3455 {
3456     cdev->handler = &dasd_int_handler;
3457 
3458     /*
3459      * Automatically online either all dasd devices (dasd_autodetect)
3460      * or all devices specified with dasd= parameters during
3461      * initial probe.
3462      */
3463     if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
3464         (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
3465         async_schedule(dasd_generic_auto_online, cdev);
3466     return 0;
3467 }
3468 EXPORT_SYMBOL_GPL(dasd_generic_probe);
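     /*
      * Hypothetical setup example: with "dasd=0.0.7000-0.0.70ff" on the
      * kernel command line, devices in that bus-ID range carry the
      * INITIAL_ONLINE feature and are set online asynchronously here;
      * "dasd=autodetect" onlines every detected DASD instead.
      */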
3469 
3470 void dasd_generic_free_discipline(struct dasd_device *device)
3471 {
3472     /* Forget the discipline information. */
3473     if (device->discipline) {
3474         if (device->discipline->uncheck_device)
3475             device->discipline->uncheck_device(device);
3476         module_put(device->discipline->owner);
3477         device->discipline = NULL;
3478     }
3479     if (device->base_discipline) {
3480         module_put(device->base_discipline->owner);
3481         device->base_discipline = NULL;
3482     }
3483 }
3484 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
3485 
3486 /*
3487  * This will one day be called from a global not_oper handler.
3488  * It is also used by driver_unregister during module unload.
3489  */
3490 void dasd_generic_remove(struct ccw_device *cdev)
3491 {
3492     struct dasd_device *device;
3493     struct dasd_block *block;
3494 
3495     device = dasd_device_from_cdev(cdev);
3496     if (IS_ERR(device))
3497         return;
3498 
3499     if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3500         !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3501         /* Already doing offline processing */
3502         dasd_put_device(device);
3503         return;
3504     }
3505     /*
3506      * This device is removed unconditionally. Set the offline
3507      * flag to prevent dasd_open from opening it while it is
3508      * not quite down yet.
3509      */
3510     dasd_set_target_state(device, DASD_STATE_NEW);
3511     cdev->handler = NULL;
3512     /* dasd_delete_device destroys the device reference. */
3513     block = device->block;
3514     dasd_delete_device(device);
3515     /*
3516      * The life cycle of the block device is bound to the device,
3517      * so delete it only after the device has been safely removed
3518      */
3519     if (block)
3520         dasd_free_block(block);
3521 }
3522 EXPORT_SYMBOL_GPL(dasd_generic_remove);
3523 
3524 /*
3525  * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3526  * the device is detected for the first time and is supposed to be used
3527  * or the user has started activation through sysfs.
3528  */
3529 int dasd_generic_set_online(struct ccw_device *cdev,
3530                 struct dasd_discipline *base_discipline)
3531 {
3532     struct dasd_discipline *discipline;
3533     struct dasd_device *device;
3534     int rc;
3535 
3536     /* first online clears initial online feature flag */
3537     dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
3538     device = dasd_create_device(cdev);
3539     if (IS_ERR(device))
3540         return PTR_ERR(device);
3541 
3542     discipline = base_discipline;
3543     if (device->features & DASD_FEATURE_USEDIAG) {
3544         if (!dasd_diag_discipline_pointer) {
3545             /* Try to load the required module. */
3546             rc = request_module(DASD_DIAG_MOD);
3547             if (rc) {
3548                 pr_warn("%s Setting the DASD online failed "
3549                     "because the required module %s "
3550                     "could not be loaded (rc=%d)\n",
3551                     dev_name(&cdev->dev), DASD_DIAG_MOD,
3552                     rc);
3553                 dasd_delete_device(device);
3554                 return -ENODEV;
3555             }
3556         }
3557         /* Module init could have failed, so check again here after
3558          * request_module(). */
3559         if (!dasd_diag_discipline_pointer) {
3560             pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
3561                 dev_name(&cdev->dev));
3562             dasd_delete_device(device);
3563             return -ENODEV;
3564         }
3565         discipline = dasd_diag_discipline_pointer;
3566     }
3567     if (!try_module_get(base_discipline->owner)) {
3568         dasd_delete_device(device);
3569         return -EINVAL;
3570     }
3571     if (!try_module_get(discipline->owner)) {
3572         module_put(base_discipline->owner);
3573         dasd_delete_device(device);
3574         return -EINVAL;
3575     }
3576     device->base_discipline = base_discipline;
3577     device->discipline = discipline;
3578 
3579     /* check_device will allocate block device if necessary */
3580     rc = discipline->check_device(device);
3581     if (rc) {
3582         pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
3583             dev_name(&cdev->dev), discipline->name, rc);
3584         module_put(discipline->owner);
3585         module_put(base_discipline->owner);
3586         dasd_delete_device(device);
3587         return rc;
3588     }
3589 
3590     dasd_set_target_state(device, DASD_STATE_ONLINE);
3591     if (device->state <= DASD_STATE_KNOWN) {
3592         pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
3593             dev_name(&cdev->dev));
3594         rc = -ENODEV;
3595         dasd_set_target_state(device, DASD_STATE_NEW);
3596         if (device->block)
3597             dasd_free_block(device->block);
3598         dasd_delete_device(device);
3599     } else
3600         pr_debug("dasd_generic device %s found\n",
3601                 dev_name(&cdev->dev));
3602 
3603     wait_event(dasd_init_waitq, _wait_for_device(device));
3604 
3605     dasd_put_device(device);
3606     return rc;
3607 }
3608 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
3609 
3610 int dasd_generic_set_offline(struct ccw_device *cdev)
3611 {
3612     struct dasd_device *device;
3613     struct dasd_block *block;
3614     int max_count, open_count, rc;
3615     unsigned long flags;
3616 
3617     rc = 0;
3618     spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3619     device = dasd_device_from_cdev_locked(cdev);
3620     if (IS_ERR(device)) {
3621         spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3622         return PTR_ERR(device);
3623     }
3624 
3625     /*
3626      * We must make sure that this device is currently not in use.
3627      * The open_count is increased for every opener; that includes
3628      * the blkdev_get in dasd_scan_partitions. We are only interested
3629      * in the other openers.
3630      */
3631     if (device->block) {
3632         max_count = device->block->bdev ? 0 : -1;
3633         open_count = atomic_read(&device->block->open_count);
3634         if (open_count > max_count) {
3635             if (open_count > 0)
3636                 pr_warn("%s: The DASD cannot be set offline with open count %i\n",
3637                     dev_name(&cdev->dev), open_count);
3638             else
3639                 pr_warn("%s: The DASD cannot be set offline while it is in use\n",
3640                     dev_name(&cdev->dev));
3641             rc = -EBUSY;
3642             goto out_err;
3643         }
3644     }
3645 
3646     /*
3647      * Test if the offline processing is already running and exit if so.
3648      * If a safe offline is being processed, this can only be a normal
3649      * offline that is allowed to overtake the safe offline and cancel
3650      * any I/O we no longer want to wait for.
3651      */
3652     if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3653         if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3654             clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3655                   &device->flags);
3656         } else {
3657             rc = -EBUSY;
3658             goto out_err;
3659         }
3660     }
3661     set_bit(DASD_FLAG_OFFLINE, &device->flags);
3662 
3663     /*
3664      * If safe_offline is called, set the safe_offline_running flag
3665      * and clear safe_offline so that a subsequent call to normal
3666      * offline can overtake the safe_offline processing.
3667      */
3668     if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3669         !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3670         /* need to unlock here to wait for outstanding I/O */
3671         spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3672         /*
3673          * If we want to set the device safe offline, all I/O
3674          * operations should be finished before continuing the
3675          * offline process, so sync the bdev first and then wait
3676          * for our queues to become empty.
3677          */
3678         if (device->block) {
3679             rc = fsync_bdev(device->block->bdev);
3680             if (rc != 0)
3681                 goto interrupted;
3682         }
3683         dasd_schedule_device_bh(device);
3684         rc = wait_event_interruptible(shutdown_waitq,
3685                           _wait_for_empty_queues(device));
3686         if (rc != 0)
3687             goto interrupted;
3688 
3689         /*
3690          * Check if a normal offline process overtook the offline
3691          * processing. In this case simply do nothing besides
3692          * returning that we got interrupted.
3693          * Otherwise mark safe offline as not running any longer
3694          * and continue with the normal offline.
3695          */
3696         spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3697         if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3698             rc = -ERESTARTSYS;
3699             goto out_err;
3700         }
3701         clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3702     }
3703     spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3704 
3705     dasd_set_target_state(device, DASD_STATE_NEW);
3706     /* dasd_delete_device destroys the device reference. */
3707     block = device->block;
3708     dasd_delete_device(device);
3709     /*
3710      * The life cycle of the block device is bound to the device,
3711      * so delete it only after the device has been safely removed
3712      */
3713     if (block)
3714         dasd_free_block(block);
3715 
3716     return 0;
3717 
3718 interrupted:
3719     /* interrupted by signal */
3720     spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3721     clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3722     clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3723 out_err:
3724     dasd_put_device(device);
3725     spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3726     return rc;
3727 }
3728 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3729 
3730 int dasd_generic_last_path_gone(struct dasd_device *device)
3731 {
3732     struct dasd_ccw_req *cqr;
3733 
3734     dev_warn(&device->cdev->dev, "No operational channel path is left "
3735          "for the device\n");
3736     DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
3737     /* First of all call extended error reporting. */
3738     dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3739 
3740     if (device->state < DASD_STATE_BASIC)
3741         return 0;
3742     /* Device is active. We want to keep it. */
3743     list_for_each_entry(cqr, &device->ccw_queue, devlist)
3744         if ((cqr->status == DASD_CQR_IN_IO) ||
3745             (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3746             cqr->status = DASD_CQR_QUEUED;
3747             cqr->retries++;
3748         }
3749     dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
3750     dasd_device_clear_timer(device);
3751     dasd_schedule_device_bh(device);
3752     return 1;
3753 }
3754 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
3755 
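     /*
      * Counterpart to dasd_generic_last_path_gone(): clear the
      * DASD_STOPPED_DC_WAIT stop bit set above and restart the queued
      * work once a channel path is available again.
      */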
3756 int dasd_generic_path_operational(struct dasd_device *device)
3757 {
3758     dev_info(&device->cdev->dev, "A channel path to the device has become "
3759          "operational\n");
3760     DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
3761     dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
3762     dasd_schedule_device_bh(device);
3763     if (device->block) {
3764         dasd_schedule_block_bh(device->block);
3765         if (device->block->request_queue)
3766             blk_mq_run_hw_queues(device->block->request_queue,
3767                          true);
3768     }
3769 
3770     if (!device->stopped)
3771         wake_up(&generic_waitq);
3772 
3773     return 1;
3774 }
3775 EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
3776 
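     /*
      * Common channel-subsystem notifier: CIO_GONE, CIO_BOXED and
      * CIO_NO_PATH all mean that the last path is gone, while CIO_OPER
      * signals that at least one path may be operational again.
      */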
3777 int dasd_generic_notify(struct ccw_device *cdev, int event)
3778 {
3779     struct dasd_device *device;
3780     int ret;
3781 
3782     device = dasd_device_from_cdev_locked(cdev);
3783     if (IS_ERR(device))
3784         return 0;
3785     ret = 0;
3786     switch (event) {
3787     case CIO_GONE:
3788     case CIO_BOXED:
3789     case CIO_NO_PATH:
3790         dasd_path_no_path(device);
3791         ret = dasd_generic_last_path_gone(device);
3792         break;
3793     case CIO_OPER:
3794         ret = 1;
3795         if (dasd_path_get_opm(device))
3796             ret = dasd_generic_path_operational(device);
3797         break;
3798     }
3799     dasd_put_device(device);
3800     return ret;
3801 }
3802 EXPORT_SYMBOL_GPL(dasd_generic_notify);
3803 
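     /*
      * Evaluate per-CHPID events reported by the channel subsystem;
      * path_event carries one event mask for each of the eight
      * possible channel paths of the device.
      */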
3804 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3805 {
3806     struct dasd_device *device;
3807     int chp, oldopm, hpfpm, ifccpm;
3808 
3809     device = dasd_device_from_cdev_locked(cdev);
3810     if (IS_ERR(device))
3811         return;
3812 
3813     oldopm = dasd_path_get_opm(device);
3814     for (chp = 0; chp < 8; chp++) {
3815         if (path_event[chp] & PE_PATH_GONE) {
3816             dasd_path_notoper(device, chp);
3817         }
3818         if (path_event[chp] & PE_PATH_AVAILABLE) {
3819             dasd_path_available(device, chp);
3820             dasd_schedule_device_bh(device);
3821         }
3822         if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3823             if (!dasd_path_is_operational(device, chp) &&
3824                 !dasd_path_need_verify(device, chp)) {
3825                 /*
3826                  * We cannot establish a pathgroup on an
3827                  * unavailable path, so trigger a path
3828                  * verification first.
3829                  */
3830                 dasd_path_available(device, chp);
3831                 dasd_schedule_device_bh(device);
3832             }
3833             DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3834                       "Pathgroup re-established\n");
3835             if (device->discipline->kick_validate)
3836                 device->discipline->kick_validate(device);
3837         }
3838         if (path_event[chp] & PE_PATH_FCES_EVENT) {
3839             dasd_path_fcsec_update(device, chp);
3840             dasd_schedule_device_bh(device);
3841         }
3842     }
3843     hpfpm = dasd_path_get_hpfpm(device);
3844     ifccpm = dasd_path_get_ifccpm(device);
3845     if (!dasd_path_get_opm(device) && hpfpm) {
3846         /*
3847          * The device has no operational paths, but at least one
3848          * path is disabled due to HPF errors.
3849          * Disable HPF entirely and use the path(s) again.
3850          */
3851         if (device->discipline->disable_hpf)
3852             device->discipline->disable_hpf(device);
3853         dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
3854         dasd_path_set_tbvpm(device, hpfpm);
3855         dasd_schedule_device_bh(device);
3856         dasd_schedule_requeue(device);
3857     } else if (!dasd_path_get_opm(device) && ifccpm) {
3858         /*
3859          * The device has no operational paths, but at least one
3860          * path is disabled due to IFCC errors.
3861          * Trigger path verification on the paths with IFCC errors.
3862          */
3863         dasd_path_set_tbvpm(device, ifccpm);
3864         dasd_schedule_device_bh(device);
3865     }
3866     if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3867         dev_warn(&device->cdev->dev,
3868              "No verified channel paths remain for the device\n");
3869         DBF_DEV_EVENT(DBF_WARNING, device,
3870                   "%s", "last verified path gone");
3871         dasd_eer_write(device, NULL, DASD_EER_NOPATH);
3872         dasd_device_set_stop_bits(device,
3873                       DASD_STOPPED_DC_WAIT);
3874     }
3875     dasd_put_device(device);
3876 }
3877 EXPORT_SYMBOL_GPL(dasd_generic_path_event);
3878 
3879 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
3880 {
3881     if (!dasd_path_get_opm(device) && lpm) {
3882         dasd_path_set_opm(device, lpm);
3883         dasd_generic_path_operational(device);
3884     } else
3885         dasd_path_add_opm(device, lpm);
3886     return 0;
3887 }
3888 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
3889 
3890 void dasd_generic_space_exhaust(struct dasd_device *device,
3891                 struct dasd_ccw_req *cqr)
3892 {
3893     dasd_eer_write(device, NULL, DASD_EER_NOSPC);
3894 
3895     if (device->state < DASD_STATE_BASIC)
3896         return;
3897 
3898     if (cqr->status == DASD_CQR_IN_IO ||
3899         cqr->status == DASD_CQR_CLEAR_PENDING) {
3900         cqr->status = DASD_CQR_QUEUED;
3901         cqr->retries++;
3902     }
3903     dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
3904     dasd_device_clear_timer(device);
3905     dasd_schedule_device_bh(device);
3906 }
3907 EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
3908 
3909 void dasd_generic_space_avail(struct dasd_device *device)
3910 {
3911     dev_info(&device->cdev->dev, "Extent pool space is available\n");
3912     DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
3913 
3914     dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
3915     dasd_schedule_device_bh(device);
3916 
3917     if (device->block) {
3918         dasd_schedule_block_bh(device->block);
3919         if (device->block->request_queue)
3920             blk_mq_run_hw_queues(device->block->request_queue, true);
3921     }
3922     if (!device->stopped)
3923         wake_up(&generic_waitq);
3924 }
3925 EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
3926 
3927 /*
3928  * Clear active requests and requeue them to the block layer if possible.
3929  */
3930 static int dasd_generic_requeue_all_requests(struct dasd_device *device)
3931 {
3932     struct list_head requeue_queue;
3933     struct dasd_ccw_req *cqr, *n;
3934     struct dasd_ccw_req *refers;
3935     int rc;
3936 
3937     INIT_LIST_HEAD(&requeue_queue);
3938     spin_lock_irq(get_ccwdev_lock(device->cdev));
3939     rc = 0;
3940     list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
3941         /* Check status and move request to requeue_queue */
3942         if (cqr->status == DASD_CQR_IN_IO) {
3943             rc = device->discipline->term_IO(cqr);
3944             if (rc) {
3945                 /* unable to terminate request */
3946                 dev_err(&device->cdev->dev,
3947                     "Unable to terminate request %p "
3948                     "on suspend\n", cqr);
3949                 spin_unlock_irq(get_ccwdev_lock(device->cdev));
3950                 dasd_put_device(device);
3951                 return rc;
3952             }
3953         }
3954         list_move_tail(&cqr->devlist, &requeue_queue);
3955     }
3956     spin_unlock_irq(get_ccwdev_lock(device->cdev));
3957 
3958     list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
3959         wait_event(dasd_flush_wq,
3960                (cqr->status != DASD_CQR_CLEAR_PENDING));
3961 
3962         /*
3963          * Requeueing requests to the block layer will only work
3964          * for block device requests
3965          */
3966         if (_dasd_requeue_request(cqr))
3967             continue;
3968 
3969         /* remove requests from device and block queue */
3970         list_del_init(&cqr->devlist);
3971         while (cqr->refers != NULL) {
3972             refers = cqr->refers;
3973             /* remove the request from the block queue */
3974             list_del(&cqr->blocklist);
3975             /* free the finished erp request */
3976             dasd_free_erp_request(cqr, cqr->memdev);
3977             cqr = refers;
3978         }
3979 
3980         /*
3981          * _dasd_requeue_request already checked for a valid
3982          * block device; no need to check again.
3983          * All ERP requests (cqr->refers) have a cqr->block
3984          * pointer copied from the original cqr.
3985          */
3986         list_del_init(&cqr->blocklist);
3987         cqr->block->base->discipline->free_cp(
3988             cqr, (struct request *) cqr->callback_data);
3989     }
3990 
3991     /*
3992      * If requests remain, they are internal requests
3993      * and go back to the device queue.
3994      */
3995     if (!list_empty(&requeue_queue)) {
3996         /* move requeue_queue to the end of the ccw_queue */
3997         spin_lock_irq(get_ccwdev_lock(device->cdev));
3998         list_splice_tail(&requeue_queue, &device->ccw_queue);
3999         spin_unlock_irq(get_ccwdev_lock(device->cdev));
4000     }
4001     dasd_schedule_device_bh(device);
4002     return rc;
4003 }
4004 
4005 static void do_requeue_requests(struct work_struct *work)
4006 {
4007     struct dasd_device *device = container_of(work, struct dasd_device,
4008                           requeue_requests);
4009     dasd_generic_requeue_all_requests(device);
4010     dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
4011     if (device->block)
4012         dasd_schedule_block_bh(device->block);
4013     dasd_put_device(device);
4014 }
4015 
4016 void dasd_schedule_requeue(struct dasd_device *device)
4017 {
4018     dasd_get_device(device);
4019     /* queue call to do_requeue_requests to the kernel event daemon. */
4020     if (!schedule_work(&device->requeue_requests))
4021         dasd_put_device(device);
4022 }
4023 EXPORT_SYMBOL(dasd_schedule_requeue);
4024 
4025 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
4026                            int rdc_buffer_size,
4027                            int magic)
4028 {
4029     struct dasd_ccw_req *cqr;
4030     struct ccw1 *ccw;
4031 
4032     cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
4033                    NULL);
4034 
4035     if (IS_ERR(cqr)) {
4036         /* internal error 13 - allocating the RDC request failed */
4037         dev_err(&device->cdev->dev,
4038              "An error occurred in the DASD device driver, "
4039              "reason=%s\n", "13");
4040         return cqr;
4041     }
4042 
4043     ccw = cqr->cpaddr;
4044     ccw->cmd_code = CCW_CMD_RDC;
4045     ccw->cda = (__u32)(addr_t) cqr->data;
4046     ccw->flags = 0;
4047     ccw->count = rdc_buffer_size;
4048     cqr->startdev = device;
4049     cqr->memdev = device;
4050     cqr->expires = 10*HZ;
4051     cqr->retries = 256;
4052     cqr->buildclk = get_tod_clock();
4053     cqr->status = DASD_CQR_FILLED;
4054     return cqr;
4055 }
4056 
4057 
4058 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
4059                 void *rdc_buffer, int rdc_buffer_size)
4060 {
4061     int ret;
4062     struct dasd_ccw_req *cqr;
4063 
4064     cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
4065     if (IS_ERR(cqr))
4066         return PTR_ERR(cqr);
4067 
4068     ret = dasd_sleep_on(cqr);
4069     if (ret == 0)
4070         memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
4071     dasd_sfree_request(cqr, cqr->memdev);
4072     return ret;
4073 }
4074 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
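     /*
      * A minimal caller sketch (hypothetical names): a discipline
      * reading its Read Device Characteristics data into a private
      * buffer. The magic number and buffer layout are discipline
      * specific.
      *
      *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
      *					 &private->rdc_data,
      *					 sizeof(private->rdc_data));
      */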
4075 
4076 /*
4077  *   In command mode and transport mode we need to look for sense
4078  *   data in different places. The sense data itself is always
4079  *   an array of 32 bytes, so we can unify the sense data access
4080  *   for both modes.
4081  */
4082 char *dasd_get_sense(struct irb *irb)
4083 {
4084     struct tsb *tsb = NULL;
4085     char *sense = NULL;
4086 
4087     if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
4088         if (irb->scsw.tm.tcw)
4089             tsb = tcw_get_tsb((struct tcw *)(unsigned long)
4090                       irb->scsw.tm.tcw);
4091         if (tsb && tsb->length == 64 && tsb->flags)
4092             switch (tsb->flags & 0x07) {
4093             case 1: /* tsa_iostat */
4094                 sense = tsb->tsa.iostat.sense;
4095                 break;
4096             case 2: /* tsa_ddpc */
4097                 sense = tsb->tsa.ddpc.sense;
4098                 break;
4099             default:
4100                 /* currently we don't use interrogate data */
4101                 break;
4102             }
4103     } else if (irb->esw.esw0.erw.cons) {
4104         sense = irb->ecw;
4105     }
4106     return sense;
4107 }
4108 EXPORT_SYMBOL_GPL(dasd_get_sense);
4109 
4110 void dasd_generic_shutdown(struct ccw_device *cdev)
4111 {
4112     struct dasd_device *device;
4113 
4114     device = dasd_device_from_cdev(cdev);
4115     if (IS_ERR(device))
4116         return;
4117 
4118     if (device->block)
4119         dasd_schedule_block_bh(device->block);
4120 
4121     dasd_schedule_device_bh(device);
4122 
4123     wait_event(shutdown_waitq, _wait_for_empty_queues(device));
4124 }
4125 EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
4126 
4127 static int __init dasd_init(void)
4128 {
4129     int rc;
4130 
4131     init_waitqueue_head(&dasd_init_waitq);
4132     init_waitqueue_head(&dasd_flush_wq);
4133     init_waitqueue_head(&generic_waitq);
4134     init_waitqueue_head(&shutdown_waitq);
4135 
4136     /* register 'common' DASD debug area, used for all DBF_XXX calls */
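         /* one page per area, one area, 8 * sizeof(long) bytes per entry */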
4137     dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
4138     if (dasd_debug_area == NULL) {
4139         rc = -ENOMEM;
4140         goto failed;
4141     }
4142     debug_register_view(dasd_debug_area, &debug_sprintf_view);
4143     debug_set_level(dasd_debug_area, DBF_WARNING);
4144 
4145     DBF_EVENT(DBF_EMERG, "%s", "debug area created");
4146 
4147     dasd_diag_discipline_pointer = NULL;
4148 
4149     dasd_statistics_createroot();
4150 
4151     rc = dasd_devmap_init();
4152     if (rc)
4153         goto failed;
4154     rc = dasd_gendisk_init();
4155     if (rc)
4156         goto failed;
4157     rc = dasd_parse();
4158     if (rc)
4159         goto failed;
4160     rc = dasd_eer_init();
4161     if (rc)
4162         goto failed;
4163 #ifdef CONFIG_PROC_FS
4164     rc = dasd_proc_init();
4165     if (rc)
4166         goto failed;
4167 #endif
4168 
4169     return 0;
4170 failed:
4171     pr_info("The DASD device driver could not be initialized\n");
4172     dasd_exit();
4173     return rc;
4174 }
4175 
4176 module_init(dasd_init);
4177 module_exit(dasd_exit);