0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #define KMSG_COMPONENT "dasd-eckd"
0014
0015 #include <linux/stddef.h>
0016 #include <linux/kernel.h>
0017 #include <linux/slab.h>
0018 #include <linux/hdreg.h> /* HDIO_GETGEO */
0019 #include <linux/bio.h>
0020 #include <linux/module.h>
0021 #include <linux/compat.h>
0022 #include <linux/init.h>
0023 #include <linux/seq_file.h>
0024
0025 #include <asm/css_chars.h>
0026 #include <asm/debug.h>
0027 #include <asm/idals.h>
0028 #include <asm/ebcdic.h>
0029 #include <asm/io.h>
0030 #include <linux/uaccess.h>
0031 #include <asm/cio.h>
0032 #include <asm/ccwdev.h>
0033 #include <asm/itcw.h>
0034 #include <asm/schid.h>
0035 #include <asm/chpid.h>
0036
0037 #include "dasd_int.h"
0038 #include "dasd_eckd.h"
0039
0040 #ifdef PRINTK_HEADER
0041 #undef PRINTK_HEADER
0042 #endif
0043 #define PRINTK_HEADER "dasd(eckd):"
0044
0045
0046
0047
0048
0049 #define DASD_RAW_BLOCK_PER_TRACK 16
0050 #define DASD_RAW_BLOCKSIZE 4096
0051
0052 #define DASD_RAW_SECTORS_PER_TRACK 128
0053
MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/*
 * CCW device IDs handled by this discipline: known control-unit /
 * device-type combinations (e.g. 3990 CU with 3390 volumes).
 * driver_info is a simple per-entry ordinal.
 */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver;

/* page used for padding; presumably for raw-track requests (see
 * DASD_RAW_* constants above) -- TODO confirm at the usage sites */
static void *rawpadpage;

/* result codes of the initialization request analysis */
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/*
 * Statically preallocated request (one CCW plus 32 data bytes),
 * serialized by dasd_reserve_mutex; presumably used for the
 * reserve/release path -- confirm at the usage sites.
 */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

/* preallocated volume-information request: two CCWs plus 40 data bytes,
 * serialized by dasd_vol_info_mutex */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);

/* work item queued when an extent pool is exhausted */
struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};

/*
 * Work item for path-event handling: embeds a complete RCD request
 * (CQR, CCW and data buffer) plus the path masks to be processed
 * (tbvpm: paths to verify, fcsecpm: paths to re-read FC security for).
 */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);

/* work item for handling an attention message on the path in lpum */
struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);
0130
0131
0132
0133 static int
0134 dasd_eckd_probe (struct ccw_device *cdev)
0135 {
0136 int ret;
0137
0138
0139 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
0140 CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
0141 if (ret) {
0142 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
0143 "dasd_eckd_probe: could not set "
0144 "ccw-device options");
0145 return ret;
0146 }
0147 ret = dasd_generic_probe(cdev);
0148 return ret;
0149 }
0150
/* Set-online callback: bring the device online with this discipline. */
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
0156
/* data lengths of the first three records on track 0 when the volume
 * uses a compatible disk layout (CDL); records past that use LABEL_SIZE
 * (see dasd_eckd_cdl_reclen()) */
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record numbers of the count areas read during device
 * analysis; NOTE(review): the fifth entry (head 1, record 1) presumably
 * probes the second track -- confirm at the usage site */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
0163
/*
 * Integer division of d1 by d2, rounded up.  d2 must be non-zero.
 *
 * Written as quotient plus remainder test instead of the classic
 * (d1 + d2 - 1) / d2 so the addition cannot wrap around for values of
 * d1 close to UINT_MAX (unsigned wraparound would silently yield a far
 * too small result).
 */
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return d1 / d2 + (d1 % d2 != 0);
}
0169
0170 static unsigned int
0171 recs_per_track(struct dasd_eckd_characteristics * rdc,
0172 unsigned int kl, unsigned int dl)
0173 {
0174 int dn, kn;
0175
0176 switch (rdc->dev_type) {
0177 case 0x3380:
0178 if (kl)
0179 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
0180 ceil_quot(dl + 12, 32));
0181 else
0182 return 1499 / (15 + ceil_quot(dl + 12, 32));
0183 case 0x3390:
0184 dn = ceil_quot(dl + 6, 232) + 1;
0185 if (kl) {
0186 kn = ceil_quot(kl + 6, 232) + 1;
0187 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
0188 9 + ceil_quot(dl + 6 * dn, 34));
0189 } else
0190 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
0191 case 0x9345:
0192 dn = ceil_quot(dl + 6, 232) + 1;
0193 if (kl) {
0194 kn = ceil_quot(kl + 6, 232) + 1;
0195 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
0196 ceil_quot(dl + 6 * dn, 34));
0197 } else
0198 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
0199 }
0200 return 0;
0201 }
0202
0203 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
0204 {
0205 geo->cyl = (__u16) cyl;
0206 geo->head = cyl >> 16;
0207 geo->head <<= 4;
0208 geo->head |= head;
0209 }
0210
0211
0212
0213
0214
/*
 * Extract the track number that caused an ESE (extent space efficient)
 * condition from the sense data of an interrupt.
 *
 * NOTE(review): the sense-byte layout (validity bits in byte 27,
 * cylinder number spread over bytes 28-31) follows the decoding below;
 * confirm against the ECKD sense-data specification.
 *
 * Returns 0 and stores the track in *track, or -EINVAL when no usable
 * sense data is available.
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing: 28-bit cylinder number */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		/* classic addressing: 16-bit cylinder number */
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	/* head number is in the low nibble of byte 31 */
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}
0249
/*
 * Store the current system time in the define-extent data and mark the
 * time stamp as valid via the ga_extended flags.
 *
 * Returns 0 when the clock could not be read but the stamp is not
 * required (device without XRC support, or clock unavailable /
 * inaccessible); otherwise the result of get_phys_clock().
 */
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Swallow the failure if the device does not support XRC anyway
	 * or the clock is simply not available/accessible; the time
	 * stamp is then not needed.
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* both bits set => time stamp valid (checked by prefix_LRE()) */
	data->ga_extended |= 0x08;
	data->ga_extended |= 0x02;

	if (ccw) {
		/* the DE data now includes the time stamp field */
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}
0276
/*
 * Initialize a Define Extent (DE) parameter block - and, when ccw is
 * non-NULL, the DE CCW itself - for the track range trk..totrk and the
 * given channel command.  Permission mask, cache attributes and time
 * stamp are chosen per command.  Returns 0, or the error returned by
 * set_timestamp() for write-type commands.
 */
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	/*
	 * NOTE(review): mask.perm values below (0x1 for reads, 0x02 for
	 * writes, 0x3 for erase/format-type commands) follow the DE
	 * specification's permission encoding -- confirm against the
	 * ECKD architecture documents.
	 */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* for sequential accesses grow the extent by nr_cyl cylinders
	 * (read-ahead window), capped at the last real cylinder */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
0383
0384
/*
 * Fill a Locate Record Extended (LRE) parameter block - and, when ccw
 * is non-NULL, the LRE CCW - for the given command, track, record and
 * count.  For 3390/3380 geometries the starting sector is estimated
 * from the record number and record length so the search can begin
 * close to the target record.
 */
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		/* full-track writes carry two extra parameter bytes */
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		/* geometry-dependent estimate of the record's sector */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/*
	 * note: the meaning of count depends on the operation - for
	 * record-based I/O it is a number of records, for track-based
	 * I/O a number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is transferred in addition */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		/*
		 * NOTE(review): the extended parameter appears to be a
		 * bitmap derived from count - confirm the exact
		 * semantics against the LRE specification.
		 */
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
0526
/*
 * Build a Prefix (PFX) CCW whose payload embeds a define-extent block
 * and, for format 1, a locate-record-extended block.  basedev provides
 * the geometry/identification data; startdev is the device the request
 * is actually started on (possibly a PAV alias, in which case the base
 * address must be verified by the storage server).
 *
 * Returns 0 or the error propagated from define_extent().
 */
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	/* full-track writes carry two extra parameter bytes */
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* only format 0 (DE only) and format 1 (DE + LRE) are known */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->conf.ned->unit_addr;
	pfxdata->base_lss = basepriv->conf.ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * set_timestamp() (called via define_extent()) sets both
	 * ga_extended bits when it stored a valid time stamp; mirror
	 * that in the PFX validity flags.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
0593
/* Convenience wrapper: build a format-0 PFX CCW (define extent only,
 * no embedded locate record). */
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
0601
/*
 * Fill a (classic) Locate Record CCW and its LO parameter block for
 * the given command, track, record and record count.  As in
 * locate_record_ext(), the starting sector is estimated from the
 * record number and length for 3390/3380 geometries.
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/* geometry-dependent estimate of the record's sector */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself is transferred in addition */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
/*
 * In CDL format, records 0-2 of the first track and all records of the
 * second track (blk_per_trk <= recid < 2 * blk_per_trk) have special
 * sizes.  Returns 1 for those records, 0 for regular blocks.
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	return recid < 3 ||
	       (recid >= blk_per_trk && recid < 2 * blk_per_trk);
}
0725
0726
0727
0728
0729
0730
0731 static inline int
0732 dasd_eckd_cdl_reclen(int recid)
0733 {
0734 if (recid < 3)
0735 return sizes_trk0[recid];
0736 return LABEL_SIZE;
0737 }
0738
/*
 * Build a dasd_uid from identification data in *conf: vendor and
 * serial (converted from EBCDIC to ASCII), subsystem ID and unit
 * address from the NED/GNEQ, the unit-address type from the SNEQ when
 * present (UA_BASE_DEVICE otherwise) and, when a virtual-device SNEQ
 * exists, its 16-byte token rendered as a hex string in vduit.
 */
static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
	int count;

	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, conf->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, &conf->ned->serial,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = conf->gneq->subsystemID;
	uid->real_unit_addr = conf->ned->unit_addr;
	if (conf->sneq) {
		uid->type = conf->sneq->sua_flags;
		/* aliases additionally record their base unit address */
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = conf->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (conf->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				conf->vdsneq->uit[count]);
		}
	}
}
0766
0767
0768
0769
0770 static int dasd_eckd_generate_uid(struct dasd_device *device)
0771 {
0772 struct dasd_eckd_private *private = device->private;
0773 unsigned long flags;
0774
0775 if (!private)
0776 return -ENODEV;
0777 if (!private->conf.ned || !private->conf.gneq)
0778 return -ENODEV;
0779 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
0780 create_uid(&private->conf, &private->uid);
0781 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
0782 return 0;
0783 }
0784
0785 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
0786 {
0787 struct dasd_eckd_private *private = device->private;
0788 unsigned long flags;
0789
0790 if (private) {
0791 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
0792 *uid = private->uid;
0793 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
0794 return 0;
0795 }
0796 return -EINVAL;
0797 }
0798
0799
0800
0801
0802
/*
 * Compare the UID derived from one path's configuration data with the
 * UID already stored for the device.  Returns non-zero when they
 * differ, i.e. the path leads to a different device (miscabling).
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_conf *path_conf)
{
	struct dasd_uid device_uid;
	struct dasd_uid path_uid;

	create_uid(path_conf, &path_uid);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
0814
/*
 * Initialize a single-CCW Read Configuration Data (RCD) request into
 * the caller-provided cqr/buffer, restricted to the path(s) in lpm and
 * flagged for path verification.
 */
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * seed the buffer with EBCDIC "V1.0" - the same marker that
	 * read_conf_cb() restores when the request fails; presumably a
	 * version signature expected in the RCD data (TODO confirm)
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
0847
0848
0849
0850
0851
0852
0853
0854 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
0855 {
0856 struct ccw1 *ccw;
0857 __u8 *rcd_buffer;
0858
0859 if (cqr->status != DASD_CQR_DONE) {
0860 ccw = cqr->cpaddr;
0861 rcd_buffer = (__u8 *)((addr_t) ccw->cda);
0862 memset(rcd_buffer, 0, sizeof(*rcd_buffer));
0863
0864 rcd_buffer[0] = 0xE5;
0865 rcd_buffer[1] = 0xF1;
0866 rcd_buffer[2] = 0x4B;
0867 rcd_buffer[3] = 0xF0;
0868 }
0869 dasd_wakeup_cb(cqr, data);
0870 }
0871
/*
 * Read configuration data on a single path, bypassing the normal
 * queueing (dasd_sleep_on_immediatly), e.g. during path verification.
 * Fails with -EOPNOTSUPP when the device does not announce an RCD
 * command in its CIW list.  The caller provides cqr and rcd_buffer.
 */
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: the device must advertise a Read Configuration
	 * Data command word matching our RCD opcode
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;	/* fewer retries than the queued variant */
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
0895
/*
 * Read configuration data for one path (lpm): allocate a DMA-able
 * buffer, build a single-CCW RCD request and sleep on it.  On success
 * the buffer and its size are handed to the caller via *rcd_buffer /
 * *rcd_buffer_size (caller must kfree the buffer); on any error both
 * are cleared and the buffer is freed here.
 */
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: the device must advertise a Read Configuration
	 * Data command word matching our RCD opcode
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data ara */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * the request is freed unconditionally; only the data buffer
	 * survives and is passed to the caller on success
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
0947
0948 static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
0949 {
0950
0951 struct dasd_sneq *sneq;
0952 int i, count;
0953
0954 conf->ned = NULL;
0955 conf->sneq = NULL;
0956 conf->vdsneq = NULL;
0957 conf->gneq = NULL;
0958 count = conf->len / sizeof(struct dasd_sneq);
0959 sneq = (struct dasd_sneq *)conf->data;
0960 for (i = 0; i < count; ++i) {
0961 if (sneq->flags.identifier == 1 && sneq->format == 1)
0962 conf->sneq = sneq;
0963 else if (sneq->flags.identifier == 1 && sneq->format == 4)
0964 conf->vdsneq = (struct vd_sneq *)sneq;
0965 else if (sneq->flags.identifier == 2)
0966 conf->gneq = (struct dasd_gneq *)sneq;
0967 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
0968 conf->ned = (struct dasd_ned *)sneq;
0969 sneq++;
0970 }
0971 if (!conf->ned || !conf->gneq) {
0972 conf->ned = NULL;
0973 conf->sneq = NULL;
0974 conf->vdsneq = NULL;
0975 conf->gneq = NULL;
0976 return -EINVAL;
0977 }
0978 return 0;
0979
0980 };
0981
0982 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
0983 {
0984 struct dasd_gneq *gneq;
0985 int i, count, found;
0986
0987 count = conf_len / sizeof(*gneq);
0988 gneq = (struct dasd_gneq *)conf_data;
0989 found = 0;
0990 for (i = 0; i < count; ++i) {
0991 if (gneq->flags.identifier == 2) {
0992 found = 1;
0993 break;
0994 }
0995 gneq++;
0996 }
0997 if (found)
0998 return ((char *)gneq)[18] & 0x07;
0999 else
1000 return 0;
1001 }
1002
/*
 * Store per-path configuration data for the path at index chp, along
 * with the css/subchannel/chpid identification of that path.  Frees
 * the previously stored data; if the device-wide conf pointer aliased
 * the old per-path data, it is re-pointed at the new data (and its
 * record pointers re-identified) before the old buffer is freed.
 */
static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct dasd_eckd_private *private = device->private;
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;
	void *cdp;

	/*
	 * path handling and read_conf allocate data: free the old
	 * per-path buffer, but first replace the device-wide
	 * private->conf.data pointer if it points at that same buffer
	 */
	cdp = device->path[chp].conf_data;
	if (private->conf.data == cdp) {
		private->conf.data = (void *)conf_data;
		dasd_eckd_identify_conf_parts(&private->conf);
	}
	ccw_device_get_schid(device->cdev, &sch_id);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);
	kfree(cdp);
}
1032
/*
 * Drop all stored configuration data: the device-wide pointer and the
 * per-path buffers/identification of all 8 possible paths; every path
 * is marked not operational.
 */
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf.data = NULL;
	private->conf.len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
		dasd_path_notoper(device, i);
	}
}
1049
1050 static void dasd_eckd_read_fc_security(struct dasd_device *device)
1051 {
1052 struct dasd_eckd_private *private = device->private;
1053 u8 esm_valid;
1054 u8 esm[8];
1055 int chp;
1056 int rc;
1057
1058 rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
1059 if (rc) {
1060 for (chp = 0; chp < 8; chp++)
1061 device->path[chp].fc_security = 0;
1062 return;
1063 }
1064
1065 for (chp = 0; chp < 8; chp++) {
1066 if (esm_valid & (0x80 >> chp))
1067 device->path[chp].fc_security = esm[chp];
1068 else
1069 device->path[chp].fc_security = 0;
1070 }
1071 }
1072
1073 static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
1074 char *print_uid)
1075 {
1076 struct dasd_uid uid;
1077
1078 create_uid(conf, &uid);
1079 if (strlen(uid.vduit) > 0)
1080 snprintf(print_uid, sizeof(*print_uid),
1081 "%s.%s.%04x.%02x.%s",
1082 uid.vendor, uid.serial, uid.ssid,
1083 uid.real_unit_addr, uid.vduit);
1084 else
1085 snprintf(print_uid, sizeof(*print_uid),
1086 "%s.%s.%04x.%02x",
1087 uid.vendor, uid.serial, uid.ssid,
1088 uid.real_unit_addr);
1089 }
1090
/*
 * Verify that the path whose raw configuration data is in conf_data
 * leads to the same physical device as the one already known to the
 * driver.  Returns 1 when the configuration data is unusable or the
 * UIDs differ (miscabling, reported via dev_err), 0 when the path
 * checks out.
 */
static int dasd_eckd_check_cabling(struct dasd_device *device,
				   void *conf_data, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	char print_path_uid[60], print_device_uid[60];
	struct dasd_conf path_conf;

	path_conf.data = conf_data;
	path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
	if (dasd_eckd_identify_conf_parts(&path_conf))
		return 1;

	if (dasd_eckd_compare_path_uid(device, &path_conf)) {
		dasd_eckd_get_uid_string(&path_conf, print_path_uid);
		dasd_eckd_get_uid_string(&private->conf, print_device_uid);
		dev_err(&device->cdev->dev,
			"Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
			lpm, print_path_uid, print_device_uid);
		return 1;
	}

	return 0;
}
1114
/*
 * Read configuration data on every available channel path.  The first
 * successfully read path becomes the device-wide configuration and UID
 * source; every further path is checked for miscabling against that
 * UID.  Per-path data is stored and the path masks (non-preferred /
 * preferred / operational) updated from the path-access byte.
 *
 * Returns 0, a read error, or -EINVAL when at least one path was found
 * to be miscabled.
 */
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf.data = conf_data;
			private->conf.len = conf_len;
			if (dasd_eckd_identify_conf_parts(&private->conf)) {
				private->conf.data = NULL;
				private->conf.len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
			/* miscabled path: remember it, keep going */
			dasd_path_add_cablepm(device, lpm);
			path_err = -EINVAL;
			kfree(conf_data);
			continue;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
1193
1194 static u32 get_fcx_max_data(struct dasd_device *device)
1195 {
1196 struct dasd_eckd_private *private = device->private;
1197 int fcx_in_css, fcx_in_gneq, fcx_in_features;
1198 unsigned int mdc;
1199 int tpm;
1200
1201 if (dasd_nofcx)
1202 return 0;
1203
1204 fcx_in_css = css_general_characteristics.fcx;
1205 fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
1206 fcx_in_features = private->features.feature[40] & 0x80;
1207 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1208
1209 if (!tpm)
1210 return 0;
1211
1212 mdc = ccw_device_get_mdc(device->cdev, 0);
1213 if (mdc == 0) {
1214 dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1215 return 0;
1216 } else {
1217 return (u32)mdc * FCX_MAX_DATA_FACTOR;
1218 }
1219 }
1220
/*
 * Verify that a newly available path supports at least the zHPF maximum
 * data size that is already active for the device.
 *
 * Returns 0 if the path is acceptable (or zHPF is not in use),
 * -EACCES if the new path supports less than the active maximum.
 */
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			/*
			 * NOTE(review): ccw_device_get_mdc() signals failure
			 * with 0, so this returns 0 (success) after the
			 * warning — confirm that accepting the path when
			 * detection fails is intended.
			 */
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
1248
/*
 * Re-read the configuration data on any operational path and rebuild the
 * device UID from it.  Used when a newly appearing path suggests that the
 * stored UID may be stale (e.g. after a storage-server change).
 *
 * Returns 0 on success or a negative error code (-ENODEV if no path
 * delivered identifiable configuration data).
 */
static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf.data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(&private->conf)) {
			/* unusable data on this path; try the next one */
			rc = -ENODEV;
		} else /* first valid path wins */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
1287
/*
 * Handle newly available channel paths: read and verify their
 * configuration data, classify each path and merge the result into the
 * device's path masks under the ccw device lock.
 */
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	struct dasd_conf path_conf;
	unsigned long flags;
	char print_uid[60];
	int rc, pos;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			/* 0x02: non-preferred path, 0x03: preferred path */
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			/* path is operational, just no conf data support */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			/* retry the path later via the tbvpm mask */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			/* path would degrade zHPF; keep it out of opm */
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_conf.data = (void *)&path_rcd_buf;
		path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_conf)) {
			path_conf.data = NULL;
			path_conf.len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one path is active
		 * in other case the first path UID becomes the device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_conf)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_conf)) {
				dasd_eckd_get_uid_string(&path_conf, print_uid);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		} else {
			/*
			 * path is operational but path config data could not
			 * be stored due to low mem condition
			 * add it to the error path mask and schedule a path
			 * verification later that this could be added again
			 */
			epm |= lpm;
		}
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		if (epm) {
			/* re-trigger verification for the error paths */
			dasd_path_add_tbvpm(device, epm);
			dasd_device_set_timer(device, 50);
		}
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
1445
/*
 * Worker function for path event handling.  Requeues itself while the
 * device is suspended or another verification is already running, then
 * performs path verification and/or FC security re-read.
 */
static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	/* drop the reference taken in dasd_eckd_pe_handler() */
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}
1477
/*
 * Schedule asynchronous path event handling for the given "to be
 * verified" and "FC security" path masks.  Falls back to a single
 * statically allocated work item (serialized by dasd_pe_handler_mutex)
 * when no memory is available.
 *
 * Returns 0 on success or -ENOMEM if neither allocation nor the global
 * worker is available.
 */
static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	/* reference is dropped in do_pe_handler_work() */
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}
1500
/*
 * Mark the given path mask (or, if none given, all currently
 * non-operational paths) for re-verification and kick the device
 * bottom half.  Also (re)establishes fcx_max_data if not yet known.
 */
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
1513
/*
 * Read the device feature codes via a PSF (Perform Subsystem Function) /
 * RSSD (Read Subsystem Data) CCW pair and cache them in
 * private->features.
 *
 * Returns 0 on success or a negative error code.
 */
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 + 1 /* PSF + RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
1575
1576
/*
 * Read Volume Storage Query (VSQ) data via PSF/RSSD and cache it in
 * private->vsq.  Skipped for PAV alias devices.  Falls back to a
 * statically allocated request (serialized by dasd_vol_info_mutex) when
 * memory is short.
 *
 * Returns 0 on success or a negative error code.
 */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1656
1657 static int dasd_eckd_is_ese(struct dasd_device *device)
1658 {
1659 struct dasd_eckd_private *private = device->private;
1660
1661 return private->vsq.vol_info.ese;
1662 }
1663
1664 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1665 {
1666 struct dasd_eckd_private *private = device->private;
1667
1668 return private->vsq.extent_pool_id;
1669 }
1670
1671
1672
1673
1674
1675
1676 static int dasd_eckd_space_configured(struct dasd_device *device)
1677 {
1678 struct dasd_eckd_private *private = device->private;
1679 int rc;
1680
1681 rc = dasd_eckd_read_vol_info(device);
1682
1683 return rc ? : private->vsq.space_configured;
1684 }
1685
1686
1687
1688
1689
1690 static int dasd_eckd_space_allocated(struct dasd_device *device)
1691 {
1692 struct dasd_eckd_private *private = device->private;
1693 int rc;
1694
1695 rc = dasd_eckd_read_vol_info(device);
1696
1697 return rc ? : private->vsq.space_allocated;
1698 }
1699
1700 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1701 {
1702 struct dasd_eckd_private *private = device->private;
1703
1704 return private->vsq.logical_capacity;
1705 }
1706
/*
 * Worker for extent pool exhaustion handling: re-check the configured
 * space on the base device and either signal "space available" or warn
 * that the pool is still full.
 */
static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	/* drop the reference taken in dasd_eckd_ext_pool_exhaust() */
	dasd_put_device(device);
	kfree(data);
}
1729
/*
 * Schedule asynchronous handling of an extent pool exhaustion condition
 * reported for the given request.
 *
 * Returns 0 on success or -ENOMEM if the work item cannot be allocated.
 */
static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	/* reference is dropped in dasd_eckd_ext_pool_exhaust_work() */
	dasd_get_device(device);
	data->device = device;

	/* prefer the block's base device as query target, if known */
	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}
1753
1754 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1755 struct dasd_rssd_lcq *lcq)
1756 {
1757 struct dasd_eckd_private *private = device->private;
1758 int pool_id = dasd_eckd_ext_pool_id(device);
1759 struct dasd_ext_pool_sum eps;
1760 int i;
1761
1762 for (i = 0; i < lcq->pool_count; i++) {
1763 eps = lcq->ext_pool_sum[i];
1764 if (eps.pool_id == pool_id) {
1765 memcpy(&private->eps, &eps,
1766 sizeof(struct dasd_ext_pool_sum));
1767 }
1768 }
1769 }
1770
1771
/*
 * Read the Logical Configuration Query (extent pool information) via
 * PSF/RSSD and cache the summary for this device's pool.  Skipped for
 * PAV alias devices.
 *
 * Returns 0 on success or a negative error code.
 */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1837
1838
1839
1840
1841
1842
1843 static int dasd_eckd_ext_size(struct dasd_device *device)
1844 {
1845 struct dasd_eckd_private *private = device->private;
1846 struct dasd_ext_pool_sum eps = private->eps;
1847
1848 if (!eps.flags.extent_size_valid)
1849 return 0;
1850 if (eps.extent_size.size_1G)
1851 return 1113;
1852 if (eps.extent_size.size_16M)
1853 return 21;
1854
1855 return 0;
1856 }
1857
1858 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1859 {
1860 struct dasd_eckd_private *private = device->private;
1861
1862 return private->eps.warn_thrshld;
1863 }
1864
1865 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1866 {
1867 struct dasd_eckd_private *private = device->private;
1868
1869 return private->eps.flags.capacity_at_warnlevel;
1870 }
1871
1872
1873
1874
1875 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1876 {
1877 struct dasd_eckd_private *private = device->private;
1878
1879 return private->eps.flags.pool_oos;
1880 }
1881
1882
1883
1884
/*
 * Build a PSF-SSC (Set Subsystem Characteristics) request, optionally
 * asking the storage server to enable PAV.
 *
 * Returns the prepared request or an ERR_PTR on allocation failure.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		/* request PAV support from the storage server */
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
1922
1923
1924
1925
1926
1927
/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags e.g. turn on failfast, to prevent blocking
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
1955
1956
1957
1958
1959 static int dasd_eckd_validate_server(struct dasd_device *device,
1960 unsigned long flags)
1961 {
1962 struct dasd_eckd_private *private = device->private;
1963 int enable_pav, rc;
1964
1965 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1966 private->uid.type == UA_HYPER_PAV_ALIAS)
1967 return 0;
1968 if (dasd_nopav || MACHINE_IS_VM)
1969 enable_pav = 0;
1970 else
1971 enable_pav = 1;
1972 rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1973
1974
1975
1976 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1977 "returned rc=%d", private->uid.ssid, rc);
1978 return rc;
1979 }
1980
1981
1982
1983
/*
 * Worker for server validation: runs dasd_eckd_validate_server() with
 * failfast set and reschedules itself while the device is busy
 * (-EAGAIN), keeping the device reference until done.
 */
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_validate);
	unsigned long flags = 0;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
	if (dasd_eckd_validate_server(device, flags)
	    == -EAGAIN) {
		/* schedule worker again if failed */
		schedule_work(&device->kick_validate);
		return;
	}

	/* drop the reference taken in dasd_eckd_kick_validate_server() */
	dasd_put_device(device);
}
2000
/*
 * Schedule asynchronous server validation for an online device; the
 * device reference is kept across the scheduled work and dropped again
 * if the device is offline/not-online or the work was already queued.
 */
static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
	dasd_get_device(device);
	/* exit if device not online or in offline processing */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state < DASD_STATE_ONLINE) {
		dasd_put_device(device);
		return;
	}
	/* queue call to do_validate_server to the kernel event daemon. */
	if (!schedule_work(&device->kick_validate))
		/* work was already queued; reference not needed */
		dasd_put_device(device);
}
2014
2015
2016
2017
2018
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	if (private->conf.gneq) {
		/* expiry timeout = number * 10^value seconds */
		value = 1;
		for (i = 0; i < private->conf.gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->conf.gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	dasd_eckd_read_fc_security(device);
	dasd_path_create_kobjects(device);

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Volume Information */
	dasd_eckd_read_vol_info(device);

	/* Read Extent Pool Information */
	dasd_eckd_read_ext_pool_info(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	dasd_eckd_clear_conf_data(device);
	dasd_path_remove_kobjects(device);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
2174
/*
 * Undo dasd_eckd_check_characteristics(): disconnect from the LCU and
 * release all cached configuration data and path kobjects.
 */
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	/* pointers into conf data become invalid with the data itself */
	private->conf.ned = NULL;
	private->conf.sneq = NULL;
	private->conf.vdsneq = NULL;
	private->conf.gneq = NULL;
	dasd_eckd_clear_conf_data(device);
	dasd_path_remove_kobjects(device);
}
2190
/*
 * Build a CCW program that reads count records used to analyze the disk
 * layout: records 0-3 of track 0 plus record 1 of track 1 (the
 * count_area[] entries evaluated in dasd_eckd_end_analysis()).
 *
 * Returns the prepared request or an ERR_PTR on allocation failure.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 2 tracks. */
	define_extent(ccw++, cqr->data, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 1. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 1, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
2251
2252
2253 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2254 {
2255 char *sense;
2256 if (init_cqr->status == DASD_CQR_DONE)
2257 return INIT_CQR_OK;
2258 else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2259 init_cqr->status == DASD_CQR_FAILED) {
2260 sense = dasd_get_sense(&init_cqr->irb);
2261 if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2262 return INIT_CQR_UNFORMATTED;
2263 else
2264 return INIT_CQR_ERROR;
2265 } else
2266 return INIT_CQR_ERROR;
2267 }
2268
2269
2270
2271
2272
2273
2274
2275
/*
 * Completion callback for the asynchronous analysis request: record the
 * evaluation result, free the request and kick the device to continue
 * state processing.
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_device *device = init_cqr->startdev;
	struct dasd_eckd_private *private = device->private;

	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}
2286
/*
 * Start the asynchronous read-count analysis of the volume layout.
 *
 * Returns -EAGAIN (analysis in progress, result delivered via callback)
 * or a negative error code if the request could not be built.
 */
static int dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_ccw_req *init_cqr;

	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	/*
	 * first try without ERP, as we do not need online memory
	 * in case of HPF error
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
	init_cqr->retries = 0;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}
2305
/*
 * Evaluate the count records read by the analysis request, detect the
 * disk layout (compatible disk layout vs. linux disk layout vs. raw)
 * and set block size, shift and total block count accordingly.
 *
 * Returns 0 on success, -EMEDIUMTYPE for unformatted/unsupported
 * layouts or -EIO on I/O errors during analysis.
 */
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device = block->base;
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	if (device->features & DASD_FEATURE_USERAW) {
		block->bp_block = DASD_RAW_BLOCKSIZE;
		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
		block->s2b_shift = 3;
		goto raw;
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[3];

	if (private->uses_cdl == 0) {
		/* linux disk layout: all records look alike */
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl !=  0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found notthing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	block->blocks = ((unsigned long) private->real_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 (((unsigned long) private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
2409
2410 static int dasd_eckd_do_analysis(struct dasd_block *block)
2411 {
2412 struct dasd_eckd_private *private = block->base->private;
2413
2414 if (private->init_cqr_status < 0)
2415 return dasd_eckd_start_analysis(block);
2416 else
2417 return dasd_eckd_end_analysis(block);
2418 }
2419
/*
 * BASIC -> READY transition: register the device with the alias/PAV
 * management.  (Also drops the spurious semicolon that followed the
 * function body.)
 */
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}
2424
2425 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2426 {
2427 if (cancel_work_sync(&device->reload_device))
2428 dasd_put_device(device);
2429 if (cancel_work_sync(&device->kick_validate))
2430 dasd_put_device(device);
2431
2432 return 0;
2433 };
2434
/*
 * BASIC -> KNOWN transition: deregister the device from the alias/PAV
 * management.  (Also drops the spurious semicolon that followed the
 * function body.)
 */
static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
2439
2440 static int
2441 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2442 {
2443 struct dasd_eckd_private *private = block->base->private;
2444
2445 if (dasd_check_blocksize(block->bp_block) == 0) {
2446 geo->sectors = recs_per_track(&private->rdc_data,
2447 0, block->bp_block);
2448 }
2449 geo->cylinders = private->rdc_data.no_cyl;
2450 geo->heads = private->rdc_data.trk_per_cyl;
2451 return 0;
2452 }
2453
2454
2455
2456
/*
 * Build a transport-mode (tcw/itcw) request that reads the count field
 * of every record in the track range fdata->start_unit ..
 * fdata->stop_unit into fmt_buffer; rpt is the number of records
 * expected per track.  Used to verify the format of the tracks.
 */
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
			  int enable_pav, struct eckd_count *fmt_buffer,
			  int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct tidaw *last_tidaw = NULL;
	struct dasd_ccw_req *cqr;
	struct itcw *itcw;
	int itcw_size;
	int count;
	int rc;
	int i;

	/* With PAV enabled, try to run the request on an alias device. */
	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;

	/* One count entry per record over the whole track range. */
	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/*
	 * The transfer is described by one tidaw per count entry, so the
	 * itcw needs room for 'count' tidaws (no intermediate tcw, no
	 * output data).
	 */
	itcw_size = itcw_calc_size(0, count, 0);

	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;	/* one more outstanding format request */

	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
	if (IS_ERR(itcw)) {
		rc = -EINVAL;
		goto out_err;
	}

	cqr->cpaddr = itcw_get_tcw(itcw);
	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
			  sizeof(struct eckd_count),
			  count * sizeof(struct eckd_count), 0, rpt);
	if (rc)
		goto out_err;

	/* One tidaw per expected count field, targeting fmt_buffer. */
	for (i = 0; i < count; i++) {
		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
					    sizeof(struct eckd_count));
		if (IS_ERR(last_tidaw)) {
			rc = -EINVAL;
			goto out_err;
		}
	}

	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	itcw_finalize(itcw);

	cqr->cpmode = 1;	/* transport mode request */
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = startdev->default_retries;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/*
	 * Reading unformatted tracks is expected during a format check,
	 * so suppress the file-protected and incorrect-length conditions.
	 */
	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);

	return cqr;

out_err:
	dasd_sfree_request(cqr, startdev);

	return ERR_PTR(rc);
}
2539
2540
2541
2542
/*
 * Build a command-mode CCW request that reads the count field of every
 * record in the track range fdata->start_unit .. fdata->stop_unit into
 * fmt_buffer (one struct eckd_count per record, rpt records expected
 * per track).  Used to verify the format of the tracks.
 */
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *startdev = NULL;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	void *data;
	int cplength, datasize;
	int use_prefix;
	int count;
	int i;

	/* With PAV enabled, try to run the request on an alias device. */
	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/* Use the PFX command when the storage server supports it. */
	use_prefix = base_priv->features.feature[8] & 0x01;

	if (use_prefix) {
		cplength = 1;	/* PFX combines define extent + locate record */
		datasize = sizeof(struct PFX_eckd_data);
	} else {
		cplength = 2;	/* separate DE + LO CCWs */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data);
	}
	cplength += count;	/* plus one READ COUNT CCW per record */

	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;	/* one more outstanding format request */
	data = cqr->data;
	ccw = cqr->cpaddr;

	if (use_prefix) {
		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
			   count, 0, 0);
	} else {
		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);

		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;	/* chain DE to LO */

		locate_record(ccw++, data, fdata->start_unit, 0, count,
			      DASD_ECKD_CCW_READ_COUNT, base, 0);
	}

	/* One READ COUNT CCW per expected record, chained together. */
	for (i = 0; i < count; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) fmt_buffer;
		ccw++;
		fmt_buffer++;
	}

	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = DASD_RETRIES;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Unformatted tracks legitimately yield "no record found". */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
2626
2627 static struct dasd_ccw_req *
2628 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
2629 struct format_data_t *fdata, int enable_pav)
2630 {
2631 struct dasd_eckd_private *base_priv;
2632 struct dasd_eckd_private *start_priv;
2633 struct dasd_ccw_req *fcp;
2634 struct eckd_count *ect;
2635 struct ch_t address;
2636 struct ccw1 *ccw;
2637 void *data;
2638 int rpt;
2639 int cplength, datasize;
2640 int i, j;
2641 int intensity = 0;
2642 int r0_perm;
2643 int nr_tracks;
2644 int use_prefix;
2645
2646 if (enable_pav)
2647 startdev = dasd_alias_get_start_dev(base);
2648
2649 if (!startdev)
2650 startdev = base;
2651
2652 start_priv = startdev->private;
2653 base_priv = base->private;
2654
2655 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
2656
2657 nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668 if (fdata->intensity & 0x10) {
2669 r0_perm = 0;
2670 intensity = fdata->intensity & ~0x10;
2671 } else {
2672 r0_perm = 1;
2673 intensity = fdata->intensity;
2674 }
2675
2676 use_prefix = base_priv->features.feature[8] & 0x01;
2677
2678 switch (intensity) {
2679 case 0x00:
2680 case 0x08:
2681 cplength = 2 + (rpt*nr_tracks);
2682 if (use_prefix)
2683 datasize = sizeof(struct PFX_eckd_data) +
2684 sizeof(struct LO_eckd_data) +
2685 rpt * nr_tracks * sizeof(struct eckd_count);
2686 else
2687 datasize = sizeof(struct DE_eckd_data) +
2688 sizeof(struct LO_eckd_data) +
2689 rpt * nr_tracks * sizeof(struct eckd_count);
2690 break;
2691 case 0x01:
2692 case 0x09:
2693 cplength = 2 + rpt * nr_tracks;
2694 if (use_prefix)
2695 datasize = sizeof(struct PFX_eckd_data) +
2696 sizeof(struct LO_eckd_data) +
2697 sizeof(struct eckd_count) +
2698 rpt * nr_tracks * sizeof(struct eckd_count);
2699 else
2700 datasize = sizeof(struct DE_eckd_data) +
2701 sizeof(struct LO_eckd_data) +
2702 sizeof(struct eckd_count) +
2703 rpt * nr_tracks * sizeof(struct eckd_count);
2704 break;
2705 case 0x04:
2706 case 0x0c:
2707 cplength = 3;
2708 if (use_prefix)
2709 datasize = sizeof(struct PFX_eckd_data) +
2710 sizeof(struct LO_eckd_data) +
2711 sizeof(struct eckd_count);
2712 else
2713 datasize = sizeof(struct DE_eckd_data) +
2714 sizeof(struct LO_eckd_data) +
2715 sizeof(struct eckd_count);
2716 break;
2717 default:
2718 dev_warn(&startdev->cdev->dev,
2719 "An I/O control call used incorrect flags 0x%x\n",
2720 fdata->intensity);
2721 return ERR_PTR(-EINVAL);
2722 }
2723
2724 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2725 if (IS_ERR(fcp))
2726 return fcp;
2727
2728 start_priv->count++;
2729 data = fcp->data;
2730 ccw = fcp->cpaddr;
2731
2732 switch (intensity & ~0x08) {
2733 case 0x00:
2734 if (use_prefix) {
2735 prefix(ccw++, (struct PFX_eckd_data *) data,
2736 fdata->start_unit, fdata->stop_unit,
2737 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2738
2739 if (r0_perm)
2740 ((struct PFX_eckd_data *)data)
2741 ->define_extent.ga_extended |= 0x04;
2742 data += sizeof(struct PFX_eckd_data);
2743 } else {
2744 define_extent(ccw++, (struct DE_eckd_data *) data,
2745 fdata->start_unit, fdata->stop_unit,
2746 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2747
2748 if (r0_perm)
2749 ((struct DE_eckd_data *) data)
2750 ->ga_extended |= 0x04;
2751 data += sizeof(struct DE_eckd_data);
2752 }
2753 ccw[-1].flags |= CCW_FLAG_CC;
2754 locate_record(ccw++, (struct LO_eckd_data *) data,
2755 fdata->start_unit, 0, rpt*nr_tracks,
2756 DASD_ECKD_CCW_WRITE_CKD, base,
2757 fdata->blksize);
2758 data += sizeof(struct LO_eckd_data);
2759 break;
2760 case 0x01:
2761 if (use_prefix) {
2762 prefix(ccw++, (struct PFX_eckd_data *) data,
2763 fdata->start_unit, fdata->stop_unit,
2764 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
2765 base, startdev);
2766 data += sizeof(struct PFX_eckd_data);
2767 } else {
2768 define_extent(ccw++, (struct DE_eckd_data *) data,
2769 fdata->start_unit, fdata->stop_unit,
2770 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
2771 data += sizeof(struct DE_eckd_data);
2772 }
2773 ccw[-1].flags |= CCW_FLAG_CC;
2774 locate_record(ccw++, (struct LO_eckd_data *) data,
2775 fdata->start_unit, 0, rpt * nr_tracks + 1,
2776 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
2777 base->block->bp_block);
2778 data += sizeof(struct LO_eckd_data);
2779 break;
2780 case 0x04:
2781 if (use_prefix) {
2782 prefix(ccw++, (struct PFX_eckd_data *) data,
2783 fdata->start_unit, fdata->stop_unit,
2784 DASD_ECKD_CCW_WRITE_CKD, base, startdev);
2785 data += sizeof(struct PFX_eckd_data);
2786 } else {
2787 define_extent(ccw++, (struct DE_eckd_data *) data,
2788 fdata->start_unit, fdata->stop_unit,
2789 DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
2790 data += sizeof(struct DE_eckd_data);
2791 }
2792 ccw[-1].flags |= CCW_FLAG_CC;
2793 locate_record(ccw++, (struct LO_eckd_data *) data,
2794 fdata->start_unit, 0, 1,
2795 DASD_ECKD_CCW_WRITE_CKD, base, 8);
2796 data += sizeof(struct LO_eckd_data);
2797 break;
2798 }
2799
2800 for (j = 0; j < nr_tracks; j++) {
2801
2802 set_ch_t(&address,
2803 (fdata->start_unit + j) /
2804 base_priv->rdc_data.trk_per_cyl,
2805 (fdata->start_unit + j) %
2806 base_priv->rdc_data.trk_per_cyl);
2807 if (intensity & 0x01) {
2808 ect = (struct eckd_count *) data;
2809 data += sizeof(struct eckd_count);
2810 ect->cyl = address.cyl;
2811 ect->head = address.head;
2812 ect->record = 0;
2813 ect->kl = 0;
2814 ect->dl = 8;
2815 ccw[-1].flags |= CCW_FLAG_CC;
2816 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
2817 ccw->flags = CCW_FLAG_SLI;
2818 ccw->count = 8;
2819 ccw->cda = (__u32)(addr_t) ect;
2820 ccw++;
2821 }
2822 if ((intensity & ~0x08) & 0x04) {
2823 ect = (struct eckd_count *) data;
2824 data += sizeof(struct eckd_count);
2825 ect->cyl = address.cyl;
2826 ect->head = address.head;
2827 ect->record = 1;
2828 ect->kl = 0;
2829 ect->dl = 0;
2830 ccw[-1].flags |= CCW_FLAG_CC;
2831 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
2832 ccw->flags = CCW_FLAG_SLI;
2833 ccw->count = 8;
2834 ccw->cda = (__u32)(addr_t) ect;
2835 } else {
2836 for (i = 0; i < rpt; i++) {
2837 ect = (struct eckd_count *) data;
2838 data += sizeof(struct eckd_count);
2839 ect->cyl = address.cyl;
2840 ect->head = address.head;
2841 ect->record = i + 1;
2842 ect->kl = 0;
2843 ect->dl = fdata->blksize;
2844
2845
2846
2847
2848 if ((intensity & 0x08) &&
2849 address.cyl == 0 && address.head == 0) {
2850 if (i < 3) {
2851 ect->kl = 4;
2852 ect->dl = sizes_trk0[i] - 4;
2853 }
2854 }
2855 if ((intensity & 0x08) &&
2856 address.cyl == 0 && address.head == 1) {
2857 ect->kl = 44;
2858 ect->dl = LABEL_SIZE - 44;
2859 }
2860 ccw[-1].flags |= CCW_FLAG_CC;
2861 if (i != 0 || j == 0)
2862 ccw->cmd_code =
2863 DASD_ECKD_CCW_WRITE_CKD;
2864 else
2865 ccw->cmd_code =
2866 DASD_ECKD_CCW_WRITE_CKD_MT;
2867 ccw->flags = CCW_FLAG_SLI;
2868 ccw->count = 8;
2869 ccw->cda = (__u32)(addr_t) ect;
2870 ccw++;
2871 }
2872 }
2873 }
2874
2875 fcp->startdev = startdev;
2876 fcp->memdev = startdev;
2877 fcp->basedev = base;
2878 fcp->retries = 256;
2879 fcp->expires = startdev->default_expires * HZ;
2880 fcp->buildclk = get_tod_clock();
2881 fcp->status = DASD_CQR_FILLED;
2882
2883 return fcp;
2884 }
2885
2886
2887
2888
2889 static struct dasd_ccw_req *
2890 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2891 struct format_data_t *fdata, int enable_pav,
2892 int tpm, struct eckd_count *fmt_buffer, int rpt)
2893 {
2894 struct dasd_ccw_req *ccw_req;
2895
2896 if (!fmt_buffer) {
2897 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2898 } else {
2899 if (tpm)
2900 ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2901 enable_pav,
2902 fmt_buffer, rpt);
2903 else
2904 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2905 fmt_buffer, rpt);
2906 }
2907
2908 return ccw_req;
2909 }
2910
2911
2912
2913
2914 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2915 struct format_data_t *fdata)
2916 {
2917 struct dasd_eckd_private *private = base->private;
2918
2919 if (fdata->start_unit >=
2920 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2921 dev_warn(&base->cdev->dev,
2922 "Start track number %u used in formatting is too big\n",
2923 fdata->start_unit);
2924 return -EINVAL;
2925 }
2926 if (fdata->stop_unit >=
2927 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2928 dev_warn(&base->cdev->dev,
2929 "Stop track number %u used in formatting is too big\n",
2930 fdata->stop_unit);
2931 return -EINVAL;
2932 }
2933 if (fdata->start_unit > fdata->stop_unit) {
2934 dev_warn(&base->cdev->dev,
2935 "Start track %u used in formatting exceeds end track\n",
2936 fdata->start_unit);
2937 return -EINVAL;
2938 }
2939 if (dasd_check_blocksize(fdata->blksize) != 0) {
2940 dev_warn(&base->cdev->dev,
2941 "The DASD cannot be formatted with block size %u\n",
2942 fdata->blksize);
2943 return -EINVAL;
2944 }
2945 return 0;
2946 }
2947
2948
2949
2950
/*
 * Format or check the track range described by fdata.  The range is cut
 * into pieces that fit into a single CCW request; all pieces are queued
 * and executed, then the queue is evaluated.  When fmt_buffer is given
 * the requests read back count fields (format check) instead of
 * writing; tpm selects transport mode for the check, and a non-NULL irb
 * receives the interrupt response block of a failed check request.
 */
static int dasd_eckd_format_process_data(struct dasd_device *base,
					 struct format_data_t *fdata,
					 int enable_pav, int tpm,
					 struct eckd_count *fmt_buffer, int rpt,
					 struct irb *irb)
{
	struct dasd_eckd_private *private = base->private;
	struct dasd_ccw_req *cqr, *n;
	struct list_head format_queue;
	struct dasd_device *device;
	char *sense = NULL;
	int old_start, old_stop, format_step;
	int step, retry;
	int rc;

	rc = dasd_eckd_format_sanity_checks(base, fdata);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&format_queue);

	/* fdata is modified while splitting; remember the caller's range. */
	old_start = fdata->start_unit;
	old_stop = fdata->stop_unit;

	if (!tpm && fmt_buffer != NULL) {
		/* Command mode format check: one track per request. */
		format_step = 1;
	} else if (tpm && fmt_buffer != NULL) {
		/* Transport mode check: rpt count entries per track. */
		format_step = DASD_CQR_MAX_CCW / rpt;
	} else {
		/* Plain format: limited by write CCWs per track. */
		format_step = DASD_CQR_MAX_CCW /
			recs_per_track(&private->rdc_data, 0, fdata->blksize);
	}

	do {
		retry = 0;
		while (fdata->start_unit <= old_stop) {
			step = fdata->stop_unit - fdata->start_unit + 1;
			if (step > format_step) {
				fdata->stop_unit =
					fdata->start_unit + format_step - 1;
			}

			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
							     enable_pav, tpm,
							     fmt_buffer, rpt);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&format_queue))
						goto out;
					/*
					 * Out of memory for another
					 * request: run what is queued so
					 * far and retry the remainder.
					 */
					retry = 1;
					break;
				}
				goto out_err;
			}
			list_add_tail(&cqr->blocklist, &format_queue);

			if (fmt_buffer) {
				/* Advance past the counts read by this piece. */
				step = fdata->stop_unit - fdata->start_unit + 1;
				fmt_buffer += rpt * step;
			}
			fdata->start_unit = fdata->stop_unit + 1;
			fdata->stop_unit = old_stop;
		}

		rc = dasd_sleep_on_queue(&format_queue);

out_err:
		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			if (cqr->status == DASD_CQR_FAILED) {
				/*
				 * Keep the sense data and irb of a failed
				 * format check for the caller to evaluate.
				 */
				if (fmt_buffer && irb) {
					sense = dasd_get_sense(&cqr->irb);
					memcpy(irb, &cqr->irb, sizeof(*irb));
				}
				rc = -EIO;
			}
			list_del_init(&cqr->blocklist);
			dasd_ffree_request(cqr, device);
			private->count--;
		}

		if (rc && rc != -EIO)
			goto out;
		if (rc == -EIO) {
			/*
			 * "No record found" / "file protected" during a
			 * format check indicate tracks that do not match
			 * the expected format; retry the remaining range.
			 * Any other error terminates the operation.
			 */
			if (sense &&
			    (sense[1] & SNS1_NO_REC_FOUND ||
			     sense[1] & SNS1_FILE_PROTECTED))
				retry = 1;
			else
				goto out;
		}

	} while (retry);

out:
	/* Restore the caller's original track range. */
	fdata->start_unit = old_start;
	fdata->stop_unit = old_stop;

	return rc;
}
3074
3075 static int dasd_eckd_format_device(struct dasd_device *base,
3076 struct format_data_t *fdata, int enable_pav)
3077 {
3078 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
3079 0, NULL);
3080 }
3081
/*
 * Try to register the track in to_format as being formatted right now.
 * Returns true when the caller must not start a format for it — either
 * another format of this track is in flight, or the request was built
 * against a stale track count; returns false after adding the entry to
 * the block's format list.
 */
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
				      struct dasd_ccw_req *cqr)
{
	struct dasd_block *block = cqr->block;
	struct dasd_format_entry *format;
	unsigned long flags;
	bool rc = false;

	spin_lock_irqsave(&block->format_lock, flags);
	if (cqr->trkcount != atomic_read(&block->trkcount)) {
		/*
		 * The number of formatted tracks changed after this
		 * request was built; report "busy" so the request is
		 * re-examined rather than acted on with stale data.
		 */
		rc = true;
		goto out;
	}
	/* Track already queued for formatting by someone else? */
	list_for_each_entry(format, &block->format_list, list) {
		if (format->track == to_format->track) {
			rc = true;
			goto out;
		}
	}
	list_add_tail(&to_format->list, &block->format_list);

out:
	spin_unlock_irqrestore(&block->format_lock, flags);
	return rc;
}
3113
/*
 * Remove a format entry from the block's format list and bump the
 * block-wide track counter so requests built earlier notice that the
 * set of formatted tracks has changed (see test_and_set_format_track).
 */
static void clear_format_track(struct dasd_format_entry *format,
			       struct dasd_block *block)
{
	unsigned long flags;

	spin_lock_irqsave(&block->format_lock, flags);
	atomic_inc(&block->trkcount);
	list_del_init(&format->list);
	spin_unlock_irqrestore(&block->format_lock, flags);
}
3124
3125
3126
3127
/*
 * Completion callback for an on-demand ESE format request: release the
 * format-list entry, drop the per-device request count and free the
 * request.  'data' is the struct dasd_format_entry set up by
 * dasd_eckd_ese_format().
 */
static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	struct dasd_eckd_private *private = device->private;
	struct dasd_format_entry *format = data;

	clear_format_track(format, cqr->basedev->block);
	private->count--;
	dasd_ffree_request(cqr, device);
}
3138
/*
 * Build a format request for the track that made a request fail on an
 * ESE volume.  The failing track is taken from the sense data in irb
 * and validated against the track range of the original request.
 * Returns the format request, or an ERR_PTR: -EEXIST means the track
 * is already being formatted and the original request should simply be
 * retried.
 */
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
		     struct irb *irb)
{
	struct dasd_eckd_private *private;
	struct dasd_format_entry *format;
	struct format_data_t fdata;
	unsigned int recs_per_trk;
	struct dasd_ccw_req *fcqr;
	struct dasd_device *base;
	struct dasd_block *block;
	unsigned int blksize;
	struct request *req;
	sector_t first_trk;
	sector_t last_trk;
	sector_t curr_trk;
	int rc;

	req = dasd_get_callback_data(cqr);
	block = cqr->block;
	base = block->base;
	private = base->private;
	blksize = block->bp_block;
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	format = &startdev->format_entry;

	/* First and last track addressed by the original request. */
	first_trk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return ERR_PTR(rc);

	/* The track from the sense data must lie inside the request. */
	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, startdev,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return ERR_PTR(-EINVAL);
	}
	format->track = curr_trk;

	if (test_and_set_format_track(format, cqr)) {
		/*
		 * Track is already being handled; compensate the retry
		 * counter so the requeue does not consume an attempt.
		 */
		cqr->retries++;
		return ERR_PTR(-EEXIST);
	}

	/* Format exactly the failing track, matching the volume layout. */
	fdata.start_unit = curr_trk;
	fdata.stop_unit = curr_trk;
	fdata.blksize = blksize;
	fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;

	rc = dasd_eckd_format_sanity_checks(base, &fdata);
	if (rc)
		return ERR_PTR(-EINVAL);

	/*
	 * Build the request with PAV disabled (enable_pav = 0) so it is
	 * issued on the startdev passed in rather than a fresh alias.
	 */
	fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
	if (IS_ERR(fcqr))
		return fcqr;

	fcqr->callback = dasd_eckd_ese_format_cb;
	fcqr->callback_data = (void *) format;

	return fcqr;
}
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
/*
 * Emulate a read from an unformatted track on an ESE volume: zero-fill
 * the part of the request that lies on the track reported in the sense
 * data.  Blocks before that track are skipped (skip_block); blocks past
 * it are left untouched since nothing is known about them yet.  The
 * number of bytes handled is recorded in cqr->proc_bytes.  Returns 0
 * on success or a negative error code.
 */
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
	struct dasd_eckd_private *private;
	sector_t first_trk, last_trk;
	sector_t first_blk, last_blk;
	unsigned int blksize, off;
	unsigned int recs_per_trk;
	struct dasd_device *base;
	struct req_iterator iter;
	struct dasd_block *block;
	unsigned int skip_block;
	unsigned int blk_count;
	struct request *req;
	struct bio_vec bv;
	sector_t curr_trk;
	sector_t end_blk;
	char *dst;
	int rc;

	req = (struct request *) cqr->callback_data;
	base = cqr->block->base;
	blksize = base->block->bp_block;
	block = cqr->block;
	private = base->private;
	skip_block = 0;
	blk_count = 0;

	/* First/last track and block addressed by the request. */
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk = last_blk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);
	rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
	if (rc)
		return rc;

	/* Sanity check: the track from the sense data must be in range. */
	if (curr_trk < first_trk || curr_trk > last_trk) {
		DBF_DEV_EVENT(DBF_WARNING, base,
			      "ESE error track %llu not within range %llu - %llu\n",
			      curr_trk, first_trk, last_trk);
		return -EINVAL;
	}

	/*
	 * When the error track is not the first track of the request,
	 * skip the blocks that belong to the preceding tracks.
	 */
	if (curr_trk != first_trk)
		skip_block = curr_trk * recs_per_trk - first_blk;

	/* No information is available beyond the current track. */
	end_blk = (curr_trk + 1) * recs_per_trk;

	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			if (first_blk + blk_count >= end_blk) {
				/* Record how much of the request was handled. */
				cqr->proc_bytes = blk_count * blksize;
				return 0;
			}
			if (dst && !skip_block)
				memset(dst, 0, blksize);	/* unformatted reads as zero */
			else
				skip_block--;
			dst += blksize;
			blk_count++;
		}
	}
	return 0;
}
3295
3296
3297
3298
3299 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3300 int max)
3301 {
3302 int head;
3303 int i;
3304
3305 head = fmt_buffer[start].head;
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316 for (i = start; i < max; i++) {
3317 if (i > start) {
3318 if ((fmt_buffer[i].head == head &&
3319 fmt_buffer[i].record == 1) ||
3320 fmt_buffer[i].head != head ||
3321 fmt_buffer[i].record == 0)
3322 break;
3323 }
3324 }
3325
3326 return i - start;
3327 }
3328
3329
3330
3331
3332
3333
3334
3335
/*
 * Evaluate a format check: compare the count fields read back into
 * fmt_buffer with the layout expected by cdata->expect and store the
 * verdict (cdata->result plus the offending track/record details) in
 * cdata.  rpt_max is the per-track buffer stride, rpt_exp the expected
 * number of records per track, and tpm indicates the data was gathered
 * in transport mode (packed buffer layout).
 */
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
					     struct format_check_t *cdata,
					     int rpt_max, int rpt_exp,
					     int trk_per_cyl, int tpm)
{
	struct ch_t geo;
	int max_entries;
	int count = 0;
	int trkcount;
	int blksize;
	int pos = 0;
	int i, j;
	int kl;

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	max_entries = trkcount * rpt_max;

	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
		/* Locate where this track's entries start in the buffer. */
		if (tpm) {
			/*
			 * Transport mode: skip zeroed padding entries
			 * (record and data length both 0) between tracks.
			 * NOTE(review): the bound check uses pos++ >
			 * max_entries, so fmt_buffer[pos] may be read
			 * once at pos == max_entries before the break —
			 * confirm the buffer is sized for this or
			 * tighten the check.
			 */
			while (fmt_buffer[pos].record == 0 &&
			       fmt_buffer[pos].dl == 0) {
				if (pos++ > max_entries)
					break;
			}
		} else {
			/* Command mode reserves rpt_max entries per track. */
			if (i != cdata->expect.start_unit)
				pos += rpt_max - count;
		}

		/* Cylinder/head address of track i. */
		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);

		/* Count the records belonging to this track. */
		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);

		if (count < rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
			break;
		}
		if (count > rpt_exp) {
			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
			break;
		}

		for (j = 0; j < count; j++, pos++) {
			blksize = cdata->expect.blksize;
			kl = 0;

			/*
			 * Tracks 0 and 1 of a compatible-disk-layout
			 * (intensity bit 0x08) volume carry special
			 * records; adjust the expected key/data lengths.
			 */
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 0) {
				if (j < 3) {
					blksize = sizes_trk0[j] - 4;
					kl = 4;
				}
			}
			if ((cdata->expect.intensity & 0x08) &&
			    geo.cyl == 0 && geo.head == 1) {
				blksize = LABEL_SIZE - 44;
				kl = 44;
			}

			/* Check the data length (block size) of the record. */
			if (fmt_buffer[pos].dl != blksize) {
				cdata->result = DASD_FMT_ERR_BLKSIZE;
				goto out;
			}
			/* Check the key length of the record. */
			if (fmt_buffer[pos].kl != kl) {
				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
				goto out;
			}
			/* Check cylinder, head and record number. */
			if (fmt_buffer[pos].cyl != geo.cyl ||
			    fmt_buffer[pos].head != geo.head ||
			    fmt_buffer[pos].record != (j + 1)) {
				cdata->result = DASD_FMT_ERR_RECORD_ID;
				goto out;
			}
		}
	}

out:
	/*
	 * When no error was found, i and pos point one past the last
	 * checked track/entry; step back to report the correct position.
	 */
	if (!cdata->result) {
		i--;
		pos--;
	}

	cdata->unit = i;
	cdata->num_records = count;
	cdata->rec = fmt_buffer[pos].record;
	cdata->blksize = fmt_buffer[pos].dl;
	cdata->key_length = fmt_buffer[pos].kl;
}
3438
3439
3440
3441
/*
 * Check whether the tracks described by cdata->expect are formatted as
 * expected: read back all count fields and evaluate them.  Returns 0
 * on success (detailed verdict in cdata) or a negative error code.
 */
static int dasd_eckd_check_device_format(struct dasd_device *base,
					 struct format_check_t *cdata,
					 int enable_pav)
{
	struct dasd_eckd_private *private = base->private;
	struct eckd_count *fmt_buffer;
	struct irb irb;
	int rpt_max, rpt_exp;
	int fmt_buffer_size;
	int trk_per_cyl;
	int trkcount;
	int tpm = 0;
	int rc;

	trk_per_cyl = private->rdc_data.trk_per_cyl;

	/* Maximum (512-byte records + 1) and expected records per track. */
	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);

	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!fmt_buffer)
		return -ENOMEM;

	/*
	 * Use transport mode only when the feature bit below is set and
	 * the whole buffer fits into a single transfer.
	 * NOTE(review): the meaning of features.feature[40] & 0x04 is
	 * taken from this check alone — confirm against the storage
	 * server documentation.
	 */
	if ((private->features.feature[40] & 0x04) &&
	    fmt_buffer_size <= private->fcx_max_data)
		tpm = 1;

	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
					   tpm, fmt_buffer, rpt_max, &irb);
	if (rc && rc != -EIO)
		goto out;
	if (rc == -EIO) {
		/*
		 * A transport-mode attempt that failed with channel
		 * status 0x40 is retried once in command mode; any
		 * other failure terminates the check.
		 */
		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
			tpm = 0;
			rc = dasd_eckd_format_process_data(base, &cdata->expect,
							   enable_pav, tpm,
							   fmt_buffer, rpt_max,
							   &irb);
			if (rc)
				goto out;
		} else {
			goto out;
		}
	}

	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
					 trk_per_cyl, tpm);

out:
	kfree(fmt_buffer);

	return rc;
}
3512
3513 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3514 {
3515 if (cqr->retries < 0) {
3516 cqr->status = DASD_CQR_FAILED;
3517 return;
3518 }
3519 cqr->status = DASD_CQR_FILLED;
3520 if (cqr->block && (cqr->startdev != cqr->block->base)) {
3521 dasd_eckd_reset_ccw_to_base_io(cqr);
3522 cqr->startdev = cqr->block->base;
3523 cqr->lpm = dasd_path_get_opm(cqr->block->base);
3524 }
3525 };
3526
3527 static dasd_erp_fn_t
3528 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3529 {
3530 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3531 struct ccw_device *cdev = device->cdev;
3532
3533 switch (cdev->id.cu_type) {
3534 case 0x3990:
3535 case 0x2105:
3536 case 0x2107:
3537 case 0x1750:
3538 return dasd_3990_erp_action;
3539 case 0x9343:
3540 case 0x3880:
3541 default:
3542 return dasd_default_erp_action;
3543 }
3544 }
3545
3546 static dasd_erp_fn_t
3547 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
3548 {
3549 return dasd_default_erp_postaction;
3550 }
3551
3552 static void dasd_eckd_check_for_device_change(struct dasd_device *device,
3553 struct dasd_ccw_req *cqr,
3554 struct irb *irb)
3555 {
3556 char mask;
3557 char *sense = NULL;
3558 struct dasd_eckd_private *private = device->private;
3559
3560
3561 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
3562 if ((scsw_dstat(&irb->scsw) & mask) == mask) {
3563
3564
3565
3566
3567 if (!device->block && private->lcu &&
3568 device->state == DASD_STATE_ONLINE &&
3569 !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3570 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
3571
3572 dasd_reload_device(device);
3573 }
3574 dasd_generic_handle_state_change(device);
3575 return;
3576 }
3577
3578 sense = dasd_get_sense(irb);
3579 if (!sense)
3580 return;
3581
3582
3583 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
3584 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
3585 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
3586 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3587 "eckd suc: device already notified");
3588 return;
3589 }
3590 sense = dasd_get_sense(irb);
3591 if (!sense) {
3592 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3593 "eckd suc: no reason code available");
3594 clear_bit(DASD_FLAG_SUC, &device->flags);
3595 return;
3596
3597 }
3598 private->suc_reason = sense[8];
3599 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
3600 "eckd handle summary unit check: reason",
3601 private->suc_reason);
3602 dasd_get_device(device);
3603 if (!schedule_work(&device->suc_work))
3604 dasd_put_device(device);
3605
3606 return;
3607 }
3608
3609
3610 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
3611 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
3612 dasd_3990_erp_handle_sim(device, sense);
3613 return;
3614 }
3615
3616
3617
3618
3619 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3620 (sense[7] == 0x3F) &&
3621 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3622 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3623 if (device->features & DASD_FEATURE_FAILONSLCK)
3624 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3625 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3626 dev_err(&device->cdev->dev,
3627 "The device reservation was lost\n");
3628 }
3629 }
3630
3631 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3632 unsigned int first_trk,
3633 unsigned int last_trk)
3634 {
3635 struct dasd_eckd_private *private = device->private;
3636 unsigned int trks_per_vol;
3637 int rc = 0;
3638
3639 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3640
3641 if (first_trk >= trks_per_vol) {
3642 dev_warn(&device->cdev->dev,
3643 "Start track number %u used in the space release command is too big\n",
3644 first_trk);
3645 rc = -EINVAL;
3646 } else if (last_trk >= trks_per_vol) {
3647 dev_warn(&device->cdev->dev,
3648 "Stop track number %u used in the space release command is too big\n",
3649 last_trk);
3650 rc = -EINVAL;
3651 } else if (first_trk > last_trk) {
3652 dev_warn(&device->cdev->dev,
3653 "Start track %u used in the space release command exceeds the end track\n",
3654 first_trk);
3655 rc = -EINVAL;
3656 }
3657 return rc;
3658 }
3659
3660
3661
3662
3663
/*
 * Count the number of extents the track range [from, to] touches.
 * Extents are trks_per_ext tracks wide and start at track numbers that
 * are multiples of trks_per_ext.
 *
 * Fix: the previous implementation compared a track *count* (set while
 * handling the first partial extent) against the absolute track number
 * "to" in its final check.  As a result it returned one extent too many
 * whenever the whole range fit inside a single unaligned extent (e.g.
 * from=5, to=7, trks_per_ext=10 yielded 2 instead of 1), which made
 * dasd_eckd_dso_ras() build a trailing range with begin > end.
 */
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
	unsigned int cur = from;
	unsigned int full;
	int count = 0;
	unsigned int tmp;

	if (from == to)
		return 1;

	/* Count first partial extent */
	if (cur % trks_per_ext != 0) {
		/* last track of the extent containing "from" */
		tmp = cur + trks_per_ext - (cur % trks_per_ext) - 1;
		count++;
		if (tmp >= to)
			/* the whole range lies within this one extent */
			return count;
		cur = tmp + 1;
	}

	/* Count full extents */
	full = (to - cur + 1) / trks_per_ext;
	count += full;
	cur += full * trks_per_ext;

	/* Count last partial extent */
	if (cur <= to)
		count++;

	return count;
}
3693
3694
3695
3696
/*
 * Build a DSO (perform subsystem function) Release Allocated Space
 * request for the track range [first_trk, last_trk] on an ESE volume.
 *
 * When by_extent is set, the range is translated into one sub-range per
 * storage extent and those ranges are appended to the RAS data area;
 * otherwise a single request without extent ranges is built (used to
 * release the complete volume).
 *
 * @device:  device the request is built and started on
 * @block:   associated block device (may be NULL for internal requests)
 * @req:     originating blk-mq request (NULL for internal requests)
 * Returns the prepared ccw request or an ERR_PTR().
 */
static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
		  struct request *req, unsigned int first_trk,
		  unsigned int last_trk, int by_extent)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_dso_ras_ext_range *ras_range;
	struct dasd_rssd_features *features;
	struct dasd_dso_ras_data *ras_data;
	u16 heads, beg_head, end_head;
	int cur_to_trk, cur_from_trk;
	struct dasd_ccw_req *cqr;
	u32 beg_cyl, end_cyl;
	struct ccw1 *ccw;
	int trks_per_ext;
	size_t ras_size;
	size_t size;
	int nr_exts;
	void *rq;
	int i;

	if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
		return ERR_PTR(-EINVAL);

	/* internal requests have no blk-mq pdu to anchor the cqr in */
	rq = req ? blk_mq_rq_to_pdu(req) : NULL;

	features = &private->features;

	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
	nr_exts = 0;
	if (by_extent)
		nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
	ras_size = sizeof(*ras_data);
	/* fixed RAS data followed by one range entry per extent */
	size = ras_size + (nr_exts * sizeof(*ras_range));

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate RAS request");
		return cqr;
	}

	ras_data = cqr->data;
	memset(ras_data, 0, size);

	ras_data->order = DSO_ORDER_RAS;
	ras_data->flags.vol_type = 0;
	/* release specified extents or the whole volume */
	ras_data->op_flags.by_extent = by_extent;
	/*
	 * This bit guarantees initialisation of tracks within an extent
	 * that is not fully specified, but is only supported with a certain
	 * feature subset (feature byte 56, bit 0x01).
	 */
	ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
	ras_data->lss = private->conf.ned->ID;
	ras_data->dev_addr = private->conf.ned->unit_addr;
	ras_data->nr_exts = nr_exts;

	if (by_extent) {
		heads = private->rdc_data.trk_per_cyl;
		cur_from_trk = first_trk;
		/* first sub-range ends at the extent boundary (or last_trk) */
		cur_to_trk = first_trk + trks_per_ext -
			(first_trk % trks_per_ext) - 1;
		if (cur_to_trk > last_trk)
			cur_to_trk = last_trk;
		ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);

		/* translate each sub-range to cylinder/head addresses */
		for (i = 0; i < nr_exts; i++) {
			beg_cyl = cur_from_trk / heads;
			beg_head = cur_from_trk % heads;
			end_cyl = cur_to_trk / heads;
			end_head = cur_to_trk % heads;

			set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
			set_ch_t(&ras_range->end_ext, end_cyl, end_head);

			/* subsequent sub-ranges are extent aligned */
			cur_from_trk = cur_to_trk + 1;
			cur_to_trk = cur_from_trk + trks_per_ext - 1;
			if (cur_to_trk > last_trk)
				cur_to_trk = last_trk;
			ras_range++;
		}
	}

	/* single DSO ccw transferring the complete data area */
	ccw = cqr->cpaddr;
	ccw->cda = (__u32)(addr_t)cqr->data;
	ccw->cmd_code = DASD_ECKD_CCW_DSO;
	ccw->count = size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = block;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
3797
3798 static int dasd_eckd_release_space_full(struct dasd_device *device)
3799 {
3800 struct dasd_ccw_req *cqr;
3801 int rc;
3802
3803 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
3804 if (IS_ERR(cqr))
3805 return PTR_ERR(cqr);
3806
3807 rc = dasd_sleep_on_interruptible(cqr);
3808
3809 dasd_sfree_request(cqr, cqr->memdev);
3810
3811 return rc;
3812 }
3813
/*
 * Release the space of the track range [from, to] on an ESE volume.
 *
 * The range is split into chunks of at most DASD_ECKD_RAS_EXTS_MAX
 * extents per DSO RAS request; the requests are queued and processed
 * together.  If request allocation fails with -ENOMEM, the requests
 * queued so far are processed first and the remaining range is retried
 * afterwards.
 */
static int dasd_eckd_release_space_trks(struct dasd_device *device,
					unsigned int from, unsigned int to)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block = device->block;
	struct dasd_ccw_req *cqr, *n;
	struct list_head ras_queue;
	unsigned int device_exts;
	int trks_per_ext;
	int stop, step;
	int cur_pos;
	int rc = 0;
	int retry;

	INIT_LIST_HEAD(&ras_queue);

	device_exts = private->real_cyl / dasd_eckd_ext_size(device);
	trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;

	/* cap chunk size so device limits are not exceeded */
	step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
	cur_pos = from;

	do {
		retry = 0;
		while (cur_pos < to) {
			/* chunk end: aligned to extent boundary, capped at "to" */
			stop = cur_pos + step -
				((cur_pos + step) % trks_per_ext) - 1;
			if (stop > to)
				stop = to;

			cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&ras_queue))
						goto out;
					/* process queued requests, then retry */
					retry = 1;
					break;
				}
				goto err_out;
			}

			spin_lock_irq(&block->queue_lock);
			list_add_tail(&cqr->blocklist, &ras_queue);
			spin_unlock_irq(&block->queue_lock);
			cur_pos = stop + 1;
		}

		rc = dasd_sleep_on_queue_interruptible(&ras_queue);

 err_out:
		/* free all queued requests, processed or not */
		list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;

			spin_lock_irq(&block->queue_lock);
			list_del_init(&cqr->blocklist);
			spin_unlock_irq(&block->queue_lock);
			dasd_sfree_request(cqr, device);
			private->count--;
		}
	} while (retry);

out:
	return rc;
}
3881
3882 static int dasd_eckd_release_space(struct dasd_device *device,
3883 struct format_data_t *rdata)
3884 {
3885 if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
3886 return dasd_eckd_release_space_full(device);
3887 else if (rdata->intensity == 0)
3888 return dasd_eckd_release_space_trks(device, rdata->start_unit,
3889 rdata->stop_unit);
3890 else
3891 return -EINVAL;
3892 }
3893
/*
 * Build a command-mode channel program with one read/write CCW per
 * block (Define Extent/Prefix + Locate Record(s) + data CCWs).  This is
 * the fall-back path used when neither transport mode nor track-based
 * command mode can be used, and the only path that handles CDL special
 * blocks and the dasd_page_cache copy buffers.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia: segment count must match the record range. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/*
			 * Prefix data could not be completed yet (e.g. clock
			 * not in sync); caller should try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, basedev, 0) == -EAGAIN) {
			/*
			 * Define extent data could not be completed yet;
			 * caller should try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record + read/write ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		if (dasd_page_cache) {
			/* optionally copy data through a GFP_DMA bounce buffer */
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					/* pad short cdl records on read */
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
}
4085
/*
 * Build a command-mode channel program using track-based read/write
 * CCWs (one data CCW per track, data addressed through IDAWs).
 * Requires the Read/Write Track Data commands and the prefix feature.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Upper bound on the number of IDAWs: one per block of the
	 * request (the actual number created below may be smaller).
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: bytes to transfer on the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/*
		 * Prefix data could not be completed yet (e.g. clock not
		 * in sync); caller should try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * Walk the request segments and build one IDAL per track:
	 * - a new data CCW is started at each track boundary
	 * - an idaw is closed at each IDA_BLOCK_SIZE boundary and at
	 *   each track end
	 * - all idaws of a CCW must describe a contiguous memory area
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		while (seg_len) {
			if (new_track) {
				/* start data ccw for the new track */
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/*
			 * If we start a new idaw, it must start on an
			 * IDA_BLOCK_SIZE boundary.  If we continue an idaw,
			 * the current segment must begin where the so far
			 * accumulated idaw ends.
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/*
			 * collected memory area ends on an IDA_BLOCK border
			 * -> create an idaw; idal_create_words handles the
			 * case where idaw_len spans several IDA blocks
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* the idaw must also end at the track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev))
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
4261
/*
 * Fill a PFX data area (define extent + locate record extended) and add
 * it as the first DCW of a transport-mode (ITCW) channel program.
 *
 * Returns 0 on success, -EAGAIN when timestamping could not be done yet
 * (propagated from set_timestamp()), or the error of itcw_add_dcw().
 */
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* format 1: PFX with LRE */
	pfxdata.base_address = basepriv->conf.ned->unit_addr;
	pfxdata.base_lss = basepriv->conf.ned->ID;
	pfxdata.validity.define_extent = 1;

	/* alias devices must verify against the base device */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	/* set DE/LRE fields according to the requested operation */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		/*
		 * Estimate the rotational sector position of the record;
		 * formulas depend on the device type geometry.
		 */
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
		lredata->auxiliary.length_valid = 0;
		lredata->auxiliary.length_scope = 0;
		lredata->sector = 0xff;
	} else {
		lredata->auxiliary.length_valid = 1;
		lredata->auxiliary.length_scope = 1;
		lredata->sector = sector;
	}
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);
	return PTR_ERR_OR_ZERO(dcw);
}
4417
/*
 * Build a transport-mode (TCW/ITCW) channel program using track-based
 * read/write commands, with data described by a TIDAW list.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;
	u8 tidaw_flags;
	unsigned int seg_len, part_len, len_to_track_end;
	unsigned char new_track;
	sector_t recid, trkid;
	unsigned int offs;
	unsigned int count, count_to_trk_end;
	int ret;

	basedev = block->base;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/*
	 * Maximum number of tidaws needed: one per request segment;
	 * for writes a segment may additionally be split at each track
	 * boundary (to insert the CBC flag), hence the extra tidaws.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}
	if (rq_data_dir(req) == WRITE)
		ctidaw += (last_trk - first_trk);

	/* Allocate the ccw request; the itcw lives in cqr->data. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* transfer length factor: bytes to transfer on the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	if (IS_ERR(itcw)) {
		ret = -EINVAL;
		goto out_error;
	}
	cqr->cpaddr = itcw_get_tcw(itcw);
	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/*
		 * Prefix data could not be completed yet (e.g. clock not
		 * in sync); caller should try again later.
		 */
		ret = -EAGAIN;
		goto out_error;
	}
	len_to_track_end = 0;
	/*
	 * A tidaw may not cross a track boundary for write requests,
	 * because the CBC flag has to be inserted at each track end.
	 * For reads a single tidaw per segment is sufficient.
	 */
	if (rq_data_dir(req) == WRITE) {
		new_track = 1;
		recid = first_rec;
		rq_for_each_segment(bv, req, iter) {
			dst = bvec_virt(&bv);
			seg_len = bv.bv_len;
			while (seg_len) {
				if (new_track) {
					/* bytes remaining on this track */
					trkid = recid;
					offs = sector_div(trkid, blk_per_trk);
					count_to_trk_end = blk_per_trk - offs;
					count = min((last_rec - recid + 1),
						    (sector_t)count_to_trk_end);
					len_to_track_end = count * blksize;
					recid += count;
					new_track = 0;
				}
				part_len = min(seg_len, len_to_track_end);
				seg_len -= part_len;
				len_to_track_end -= part_len;
				/* insert CBC at every track end */
				if (!len_to_track_end) {
					new_track = 1;
					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
				} else
					tidaw_flags = 0;
				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
							    dst, part_len);
				if (IS_ERR(last_tidaw)) {
					ret = -EINVAL;
					goto out_error;
				}
				dst += part_len;
			}
		}
	} else {
		rq_for_each_segment(bv, req, iter) {
			dst = bvec_virt(&bv);
			last_tidaw = itcw_add_tidaw(itcw, 0x00,
						    dst, bv.bv_len);
			if (IS_ERR(last_tidaw)) {
				ret = -EINVAL;
				goto out_error;
			}
		}
	}
	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->cpmode = 1;
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
out_error:
	dasd_sfree_request(cqr, startdev);
	return ERR_PTR(ret);
}
4591
4592 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
4593 struct dasd_block *block,
4594 struct request *req)
4595 {
4596 int cmdrtd, cmdwtd;
4597 int use_prefix;
4598 int fcx_multitrack;
4599 struct dasd_eckd_private *private;
4600 struct dasd_device *basedev;
4601 sector_t first_rec, last_rec;
4602 sector_t first_trk, last_trk;
4603 unsigned int first_offs, last_offs;
4604 unsigned int blk_per_trk, blksize;
4605 int cdlspecial;
4606 unsigned int data_size;
4607 struct dasd_ccw_req *cqr;
4608
4609 basedev = block->base;
4610 private = basedev->private;
4611
4612
4613 blksize = block->bp_block;
4614 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
4615 if (blk_per_trk == 0)
4616 return ERR_PTR(-EINVAL);
4617
4618 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4619 first_offs = sector_div(first_trk, blk_per_trk);
4620 last_rec = last_trk =
4621 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4622 last_offs = sector_div(last_trk, blk_per_trk);
4623 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4624
4625 fcx_multitrack = private->features.feature[40] & 0x20;
4626 data_size = blk_rq_bytes(req);
4627 if (data_size % blksize)
4628 return ERR_PTR(-EINVAL);
4629
4630 if (rq_data_dir(req) == WRITE)
4631 data_size += (last_trk - first_trk) * 4;
4632
4633
4634 cmdrtd = private->features.feature[9] & 0x20;
4635 cmdwtd = private->features.feature[12] & 0x40;
4636 use_prefix = private->features.feature[8] & 0x01;
4637
4638 cqr = NULL;
4639 if (cdlspecial || dasd_page_cache) {
4640
4641 } else if ((data_size <= private->fcx_max_data)
4642 && (fcx_multitrack || (first_trk == last_trk))) {
4643 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4644 first_rec, last_rec,
4645 first_trk, last_trk,
4646 first_offs, last_offs,
4647 blk_per_trk, blksize);
4648 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4649 (PTR_ERR(cqr) != -ENOMEM))
4650 cqr = NULL;
4651 } else if (use_prefix &&
4652 (((rq_data_dir(req) == READ) && cmdrtd) ||
4653 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4654 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4655 first_rec, last_rec,
4656 first_trk, last_trk,
4657 first_offs, last_offs,
4658 blk_per_trk, blksize);
4659 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4660 (PTR_ERR(cqr) != -ENOMEM))
4661 cqr = NULL;
4662 }
4663 if (!cqr)
4664 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4665 first_rec, last_rec,
4666 first_trk, last_trk,
4667 first_offs, last_offs,
4668 blk_per_trk, blksize);
4669 return cqr;
4670 }
4671
/*
 * Build a channel program for raw (full-track) access.
 *
 * Raw track access works on whole tracks; read requests that are not
 * track aligned are padded at the head and/or tail with dummy pages
 * (rawpadpage), while unaligned writes are rejected.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int first_offs;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * Work out how far the request start and end are off the track
	 * boundaries (DASD_RAW_SECTORS_PER_TRACK sectors per track).
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
	basedev = block->base;
	/* writes must be track aligned; only reads can be padded */
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%llu,%llu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;
	first_offs = 0;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, first_offs + 1, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	/* pad the unaligned head of a read with dummy pages */
	if (start_padding_sectors) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/*
		 * maximum bytes of a raw ECKD track image; with SLI set the
		 * channel tolerates the shorter actual transfer length
		 * NOTE(review): 57326 is assumed to be the hardware track
		 * image size - confirm against the ECKD specification
		 */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)(addr_t)idaws;
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		seg_len = bv.bv_len;
		/* pre-clear read buffers; short tracks leave the rest zeroed */
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum bytes of a raw ECKD track image (see above) */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)(addr_t)idaws;
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	/* pad the unaligned tail of a read with dummy pages */
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
4824
4825
/*
 * Free a channel program that was built by dasd_eckd_build_cp. If the
 * global dasd_page_cache is in use, the CP's data buffers were taken from
 * that cache instead of pointing directly into the request's bio pages;
 * in that case, copy READ data back into the bio segments and release the
 * cache pages before freeing the request.
 *
 * Returns 1 if the request finished successfully (DASD_CQR_DONE),
 * 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = bvec_virt(&bv);
		for (off = 0; off < bv.bv_len; off += blksize) {
			/*
			 * Within the cdl area each data CCW is preceded by
			 * its own locate record CCW — skip it to stay in
			 * sync with how the CP was built.
			 */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				/* resolve the data address, possibly via IDAL */
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				/*
				 * dst != cda means a cache page was used;
				 * copy read data back and free the page.
				 */
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
4877
4878
4879
4880
4881
4882
4883
4884
4885 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4886 {
4887 struct ccw1 *ccw;
4888 struct PFX_eckd_data *pfxdata;
4889 struct tcw *tcw;
4890 struct tccb *tccb;
4891 struct dcw *dcw;
4892
4893 if (cqr->cpmode == 1) {
4894 tcw = cqr->cpaddr;
4895 tccb = tcw_get_tccb(tcw);
4896 dcw = (struct dcw *)&tccb->tca[0];
4897 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4898 pfxdata->validity.verify_base = 0;
4899 pfxdata->validity.hyper_pav = 0;
4900 } else {
4901 ccw = cqr->cpaddr;
4902 pfxdata = cqr->data;
4903 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4904 pfxdata->validity.verify_base = 0;
4905 pfxdata->validity.hyper_pav = 0;
4906 }
4907 }
4908 }
4909
4910 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4911
static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	/* Prefer an alias device as start device, fall back to the base. */
	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = startdev->private;
	/*
	 * NOTE(review): private->count is read here without the ccwdev lock;
	 * it looks like a best-effort throttle on the per-device channel
	 * queue depth (the increment/decrement below are locked) — confirm
	 * this is intentional.
	 */
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	if ((base->features & DASD_FEATURE_USERAW))
		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
	else
		cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;	/* build failed: undo the accounting */
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}
4939
4940 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4941 struct request *req)
4942 {
4943 struct dasd_eckd_private *private;
4944 unsigned long flags;
4945
4946 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4947 private = cqr->memdev->private;
4948 private->count--;
4949 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4950 return dasd_eckd_free_cp(cqr, req);
4951 }
4952
4953 static int
4954 dasd_eckd_fill_info(struct dasd_device * device,
4955 struct dasd_information2_t * info)
4956 {
4957 struct dasd_eckd_private *private = device->private;
4958
4959 info->label_block = 2;
4960 info->FBA_layout = private->uses_cdl ? 0 : 1;
4961 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4962 info->characteristics_size = sizeof(private->rdc_data);
4963 memcpy(info->characteristics, &private->rdc_data,
4964 sizeof(private->rdc_data));
4965 info->confdata_size = min_t(unsigned long, private->conf.len,
4966 sizeof(info->configuration_data));
4967 memcpy(info->configuration_data, private->conf.data,
4968 info->confdata_size);
4969 return 0;
4970 }
4971
4972
4973
4974
4975
4976
4977
4978
4979
4980
/*
 * Release device ioctl: build and start a channel program to release a
 * previously reserved device. If the request cannot be allocated, fall
 * back to the statically allocated dasd_reserve_req, serialized by
 * dasd_reserve_mutex.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/* out of memory: use the global static request instead */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retries to 2, so we get an answer fast */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5029
5030
5031
5032
5033
5034
5035
/*
 * Reserve device ioctl: build and start a channel program to reserve the
 * device exclusively. The 'timeout' path of the channel program also
 * releases the device, so a subsequent release is not strictly required.
 * Falls back to the static dasd_reserve_req (under dasd_reserve_mutex)
 * when allocation fails.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/* out of memory: use the global static request instead */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retries to 2, so we get an answer fast */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5084
5085
5086
5087
5088
5089
/*
 * Steal lock ioctl: build and start an unconditional reserve channel
 * program that breaks a reservation held by another system (use with
 * care). Same global-fallback allocation scheme as release/reserve.
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/* out of memory: use the global static request instead */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retries to 2, so we get an answer fast */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5138
5139
5140
5141
5142
5143
5144
/*
 * SNID ioctl: issue a Sense Path Group ID command on the path mask given
 * by userspace and copy the resulting SNID data back. Uses the same
 * static-request fallback as the reserve/release ioctls.
 */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		/* out of memory: use the global static request instead */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	/* NOTE(review): only 12 bytes are transferred — presumably the
	 * architected SNID data length; confirm against the channel spec. */
	ccw->count = 12;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5207
5208
5209
5210
/*
 * Read performance statistics ioctl: issue a PSF (Perform Subsystem
 * Function, Prepare for Read Subsystem Data) followed by a RSSD (Read
 * Subsystem Data) CCW and copy the statistics back to userspace.
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5270
5271
5272
5273
5274
5275 static int
5276 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5277 {
5278 struct dasd_eckd_private *private = device->private;
5279 struct attrib_data_t attrib = private->attrib;
5280 int rc;
5281
5282 if (!capable(CAP_SYS_ADMIN))
5283 return -EACCES;
5284 if (!argp)
5285 return -EINVAL;
5286
5287 rc = 0;
5288 if (copy_to_user(argp, (long *) &attrib,
5289 sizeof(struct attrib_data_t)))
5290 rc = -EFAULT;
5291
5292 return rc;
5293 }
5294
5295
5296
5297
5298
5299 static int
5300 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5301 {
5302 struct dasd_eckd_private *private = device->private;
5303 struct attrib_data_t attrib;
5304
5305 if (!capable(CAP_SYS_ADMIN))
5306 return -EACCES;
5307 if (!argp)
5308 return -EINVAL;
5309
5310 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5311 return -EFAULT;
5312 private->attrib = attrib;
5313
5314 dev_info(&device->cdev->dev,
5315 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5316 private->attrib.operation, private->attrib.nr_cyl);
5317 return 0;
5318 }
5319
5320
5321
5322
5323
/*
 * Issue a syscall I/O to an EMC Symmetrix storage subsystem
 * (BIODASDSYMMIO): run a user-supplied PSF command followed by an RSSD
 * and copy the result back. Buffers are bounce-copied through DMA-capable
 * kernel memory; cleanup uses the standard goto-ladder pattern.
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed as psf0/psf1 below */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area (DMA-capable, user-controlled length) */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			"Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI ;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			   rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
5430
5431 static int
5432 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
5433 {
5434 struct dasd_device *device = block->base;
5435
5436 switch (cmd) {
5437 case BIODASDGATTR:
5438 return dasd_eckd_get_attrib(device, argp);
5439 case BIODASDSATTR:
5440 return dasd_eckd_set_attrib(device, argp);
5441 case BIODASDPSRD:
5442 return dasd_eckd_performance(device, argp);
5443 case BIODASDRLSE:
5444 return dasd_eckd_release(device);
5445 case BIODASDRSRV:
5446 return dasd_eckd_reserve(device);
5447 case BIODASDSLCK:
5448 return dasd_eckd_steal_lock(device);
5449 case BIODASDSNID:
5450 return dasd_eckd_snid(device, argp);
5451 case BIODASDSYMMIO:
5452 return dasd_symm_io(device, argp);
5453 default:
5454 return -ENOTTY;
5455 }
5456 }
5457
5458
5459
5460
5461
5462 static int
5463 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
5464 {
5465 int len, count;
5466 char *datap;
5467
5468 len = 0;
5469 while (from <= to) {
5470 len += sprintf(page + len, PRINTK_HEADER
5471 " CCW %p: %08X %08X DAT:",
5472 from, ((int *) from)[0], ((int *) from)[1]);
5473
5474
5475 if (from->flags & CCW_FLAG_IDA)
5476 datap = (char *) *((addr_t *) (addr_t) from->cda);
5477 else
5478 datap = (char *) ((addr_t) from->cda);
5479
5480
5481 for (count = 0; count < from->count && count < 32; count++) {
5482 if (count % 8 == 0) len += sprintf(page + len, " ");
5483 if (count % 4 == 0) len += sprintf(page + len, " ");
5484 len += sprintf(page + len, "%02x", datap[count]);
5485 }
5486 len += sprintf(page + len, "\n");
5487 from++;
5488 }
5489 return len;
5490 }
5491
/*
 * Log the subchannel status word and (if present) the first 32 sense
 * bytes to the debug feature, prefixed by 'reason'.
 */
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
			 char *reason)
{
	u64 *sense;
	u64 *stat;

	sense = (u64 *) dasd_get_sense(irb);
	/* the scsw is dumped as two raw words via the u64/u32 casts below */
	stat = (u64 *) &irb->scsw;
	if (sense) {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
			      "%016llx %016llx %016llx %016llx",
			      reason, *stat, *((u32 *) (stat + 1)),
			      sense[0], sense[1], sense[2], sense[3]);
	} else {
		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
			      reason, *stat, *((u32 *) (stat + 1)),
			      "NO VALID SENSE");
	}
}
5512
5513
5514
5515
5516
/*
 * Print sense data and related channel program for a command mode
 * (CCW) request to the console. Dumps the I/O status, the sense bytes
 * and three excerpts of the channel program: its start, the environment
 * of the failing CCW, and its end.
 */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 7) */
		first = req->cpaddr;
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk(KERN_ERR "%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero  */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from <  fail - 2) {
			from = fail - 2;     /* there is a gap - print header */
			len += sprintf(page, PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;     /* there is a gap - print header */
			len += sprintf(page + len, PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk(KERN_ERR "%s", page);
	}
	free_page((unsigned long) page);
}
5615
5616
5617
5618
5619
/*
 * Print sense data for a transport mode (TCW/ITCW) request to the
 * console: I/O status, the transport status block (TSB) fields and,
 * depending on the TSB format, I/O statistics or DDPC data plus the
 * sense bytes.
 */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	tsb = NULL;
	sense = NULL;
	/* the TSB is only valid if the fcxs bit 0 is set */
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, PRINTK_HEADER
			       " residual %d\n", residual);

		/* low three flag bits select the tsa format */
		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, PRINTK_HEADER
				      " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}
5749
/*
 * Dump sense data for a failed request, choosing the transport mode or
 * command mode dump routine. Certain expected error conditions are
 * suppressed when the corresponding DASD_CQR_SUPPRESS_* flag is set on
 * the request, to avoid flooding the log.
 */
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	u8 *sense = dasd_get_sense(irb);

	if (scsw_is_tm(&irb->scsw)) {
		/*
		 * In some cases certain errors might be expected and
		 * log messages shouldn't be written then.
		 * Check if the according suppress bit is set.
		 */
		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
			return;
		if (scsw_cstat(&irb->scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
			return;

		dasd_eckd_dump_sense_tcw(device, req, irb);
	} else {
		/*
		 * In some cases certain errors might be expected and
		 * log messages shouldn't be written then.
		 * Check if the according suppress bit is set.
		 */
		if (sense && sense[0] & SNS0_CMD_REJECT &&
		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
			return;

		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
			return;

		dasd_eckd_dump_sense_ccw(device, req, irb);
	}
}
5786
/*
 * Re-read the configuration data of a device and regenerate its UID —
 * used when an alias device may have been reassigned to a different
 * base. The device must be removed from its alias LCU before the
 * configuration is re-read and re-added afterwards.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc, old_base;
	char print_uid[60];
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	dasd_eckd_read_fc_security(device);

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		dasd_eckd_get_uid_string(&private->conf, print_uid);
		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}
5834
/*
 * Read the subsystem message buffer via a PSF/RSSD channel program.
 * The first attempt is restricted to the path given by 'lpum'; if that
 * fails, one retry is made on all operational paths (cqr->lpm == 0).
 */
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5916
/*
 * Query which hosts access this volume via a PSF Query Host Access
 * (QHA) + RSSD channel program and copy the result into 'data'.
 * Returns -EOPNOTSUPP when the feature is unavailable or the query
 * fails, -ENOMEM on allocation failure, 0 on success.
 */
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	/* the result buffer is allocated separately (DMA-capable) because
	 * it is too large for the cqr data area */
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->conf.ned->ID;
	prssdp->volume = private->conf.ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error reporting */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
5998
5999
6000
6001 static int dasd_eckd_host_access_count(struct dasd_device *device)
6002 {
6003 struct dasd_psf_query_host_access *access;
6004 struct dasd_ckd_path_group_entry *entry;
6005 struct dasd_ckd_host_information *info;
6006 int count = 0;
6007 int rc, i;
6008
6009 access = kzalloc(sizeof(*access), GFP_NOIO);
6010 if (!access) {
6011 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6012 "Could not allocate access buffer");
6013 return -ENOMEM;
6014 }
6015 rc = dasd_eckd_query_host_access(device, access);
6016 if (rc) {
6017 kfree(access);
6018 return rc;
6019 }
6020
6021 info = (struct dasd_ckd_host_information *)
6022 access->host_access_information;
6023 for (i = 0; i < info->entry_count; i++) {
6024 entry = (struct dasd_ckd_path_group_entry *)
6025 (info->entry + i * info->entry_size);
6026 if (entry->status_flags & DASD_ECKD_PG_GROUPED)
6027 count++;
6028 }
6029
6030 kfree(access);
6031 return count;
6032 }
6033
6034
6035
6036
6037 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
6038 {
6039 struct dasd_psf_query_host_access *access;
6040 struct dasd_ckd_path_group_entry *entry;
6041 struct dasd_ckd_host_information *info;
6042 char sysplex[9] = "";
6043 int rc, i;
6044
6045 access = kzalloc(sizeof(*access), GFP_NOIO);
6046 if (!access) {
6047 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
6048 "Could not allocate access buffer");
6049 return -ENOMEM;
6050 }
6051 rc = dasd_eckd_query_host_access(device, access);
6052 if (rc) {
6053 kfree(access);
6054 return rc;
6055 }
6056
6057 info = (struct dasd_ckd_host_information *)
6058 access->host_access_information;
6059 for (i = 0; i < info->entry_count; i++) {
6060 entry = (struct dasd_ckd_path_group_entry *)
6061 (info->entry + i * info->entry_size);
6062
6063 seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
6064
6065 seq_printf(m, "status_flags %02x\n", entry->status_flags);
6066
6067 memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
6068 EBCASC(sysplex, sizeof(sysplex));
6069 seq_printf(m, "sysplex_name %8s\n", sysplex);
6070
6071 seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
6072
6073 seq_printf(m, "timestamp %lu\n", (unsigned long)
6074 entry->timestamp);
6075 }
6076 kfree(access);
6077
6078 return 0;
6079 }
6080
6081
6082
6083
/*
 * Perform Subsystem Function - CUIR response
 *
 * Send the CUIR response code @response for message @message_id back
 * to the storage server, addressed via the path position derived
 * from @lpum.
 *
 * Returns 0 on success or a negative errno from request allocation
 * or I/O.
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	/* Prepare PSF cuir response payload */
	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	/* single PSF CCW carrying the response */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/*
	 * NOTE(review): VERIFY_PATH presumably allows this request to be
	 * started on a path that is still under verification (CUIR has
	 * just changed path state) -- confirm against dasd_start_IO.
	 */
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
6131
6132
6133
6134
6135
6136
6137 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
6138 __u8 lpum,
6139 struct dasd_cuir_message *cuir)
6140 {
6141 struct dasd_conf_data *conf_data;
6142 int path, pos;
6143
6144 if (cuir->record_selector == 0)
6145 goto out;
6146 for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
6147 conf_data = device->path[pos].conf_data;
6148 if (conf_data->gneq.record_selector ==
6149 cuir->record_selector)
6150 return conf_data;
6151 }
6152 out:
6153 return device->path[pathmask_to_pos(lpum)].conf_data;
6154 }
6155
6156
6157
6158
6159
6160
6161
6162
6163
6164
/*
 * Determine the scope of a CUIR message as a path mask (tbcpm): all
 * paths whose configuration data matches the reference path in the
 * NED selected by ned_map and in the gneq bytes selected by neq_map.
 * If the message specifies no scope, the message's own path mask
 * (lpum) is returned.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if CUIR request does not specify a scope use 'all' */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/*
		 * compare reference ned and per path ned
		 * NOTE(review): ned is a char *, so sizeof(*ned) is 1 --
		 * only the first byte of the NED is compared here.
		 * Confirm whether the full NED was intended.
		 */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/*
		 * compare reference gneq and per path gneq under the
		 * 24 bit mask, where mask bit 0 corresponds to byte 31
		 * of the gneq (bytes are addressed from the end)
		 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}
6221
6222 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
6223 unsigned long paths, int action)
6224 {
6225 int pos;
6226
6227 while (paths) {
6228
6229 pos = 8 - ffs(paths);
6230
6231 if (action == CUIR_QUIESCE)
6232 pr_warn("Service on the storage server caused path %x.%02x to go offline",
6233 device->path[pos].cssid,
6234 device->path[pos].chpid);
6235 else if (action == CUIR_RESUME)
6236 pr_info("Path %x.%02x is back online after service on the storage server",
6237 device->path[pos].cssid,
6238 device->path[pos].chpid);
6239 clear_bit(7 - pos, &paths);
6240 }
6241 }
6242
6243 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
6244 struct dasd_cuir_message *cuir)
6245 {
6246 unsigned long tbcpm;
6247
6248 tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
6249
6250 if (!(dasd_path_get_opm(device) & tbcpm))
6251 return 0;
6252 if (!(dasd_path_get_opm(device) & ~tbcpm)) {
6253
6254
6255 return -EINVAL;
6256 }
6257
6258 dasd_path_remove_opm(device, tbcpm);
6259 dasd_path_add_cuirpm(device, tbcpm);
6260 return tbcpm;
6261 }
6262
6263
6264
6265
6266
6267
6268
6269
6270
6271 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
6272 struct dasd_cuir_message *cuir)
6273 {
6274 struct dasd_eckd_private *private = device->private;
6275 struct alias_pav_group *pavgroup, *tempgroup;
6276 struct dasd_device *dev, *n;
6277 unsigned long paths = 0;
6278 unsigned long flags;
6279 int tbcpm;
6280
6281
6282 list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6283 alias_list) {
6284 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6285 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6286 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6287 if (tbcpm < 0)
6288 goto out_err;
6289 paths |= tbcpm;
6290 }
6291
6292 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6293 alias_list) {
6294 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6295 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6296 spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
6297 if (tbcpm < 0)
6298 goto out_err;
6299 paths |= tbcpm;
6300 }
6301
6302 list_for_each_entry_safe(pavgroup, tempgroup,
6303 &private->lcu->grouplist, group) {
6304 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6305 alias_list) {
6306 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6307 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6308 spin_unlock_irqrestore(
6309 get_ccwdev_lock(dev->cdev), flags);
6310 if (tbcpm < 0)
6311 goto out_err;
6312 paths |= tbcpm;
6313 }
6314 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6315 alias_list) {
6316 spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
6317 tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
6318 spin_unlock_irqrestore(
6319 get_ccwdev_lock(dev->cdev), flags);
6320 if (tbcpm < 0)
6321 goto out_err;
6322 paths |= tbcpm;
6323 }
6324 }
6325
6326 dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
6327 return 0;
6328 out_err:
6329 return tbcpm;
6330 }
6331
6332 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
6333 struct dasd_cuir_message *cuir)
6334 {
6335 struct dasd_eckd_private *private = device->private;
6336 struct alias_pav_group *pavgroup, *tempgroup;
6337 struct dasd_device *dev, *n;
6338 unsigned long paths = 0;
6339 int tbcpm;
6340
6341
6342
6343
6344
6345 list_for_each_entry_safe(dev, n,
6346 &private->lcu->active_devices,
6347 alias_list) {
6348 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6349 paths |= tbcpm;
6350 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6351 dasd_path_add_tbvpm(dev, tbcpm);
6352 dasd_schedule_device_bh(dev);
6353 }
6354 }
6355 list_for_each_entry_safe(dev, n,
6356 &private->lcu->inactive_devices,
6357 alias_list) {
6358 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6359 paths |= tbcpm;
6360 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6361 dasd_path_add_tbvpm(dev, tbcpm);
6362 dasd_schedule_device_bh(dev);
6363 }
6364 }
6365
6366 list_for_each_entry_safe(pavgroup, tempgroup,
6367 &private->lcu->grouplist,
6368 group) {
6369 list_for_each_entry_safe(dev, n,
6370 &pavgroup->baselist,
6371 alias_list) {
6372 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6373 paths |= tbcpm;
6374 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6375 dasd_path_add_tbvpm(dev, tbcpm);
6376 dasd_schedule_device_bh(dev);
6377 }
6378 }
6379 list_for_each_entry_safe(dev, n,
6380 &pavgroup->aliaslist,
6381 alias_list) {
6382 tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
6383 paths |= tbcpm;
6384 if (!(dasd_path_get_opm(dev) & tbcpm)) {
6385 dasd_path_add_tbvpm(dev, tbcpm);
6386 dasd_schedule_device_bh(dev);
6387 }
6388 }
6389 }
6390
6391 dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
6392 return 0;
6393 }
6394
6395 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
6396 __u8 lpum)
6397 {
6398 struct dasd_cuir_message *cuir = messages;
6399 int response;
6400
6401 DBF_DEV_EVENT(DBF_WARNING, device,
6402 "CUIR request: %016llx %016llx %016llx %08x",
6403 ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
6404 ((u32 *)cuir)[3]);
6405
6406 if (cuir->code == CUIR_QUIESCE) {
6407
6408 if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
6409 response = PSF_CUIR_LAST_PATH;
6410 else
6411 response = PSF_CUIR_COMPLETED;
6412 } else if (cuir->code == CUIR_RESUME) {
6413
6414 dasd_eckd_cuir_resume(device, lpum, cuir);
6415 response = PSF_CUIR_COMPLETED;
6416 } else
6417 response = PSF_CUIR_NOT_SUPPORTED;
6418
6419 dasd_eckd_psf_cuir_response(device, response,
6420 cuir->message_id, lpum);
6421 DBF_DEV_EVENT(DBF_WARNING, device,
6422 "CUIR response: %d on message ID %08x", response,
6423 cuir->message_id);
6424
6425 device->discipline->check_attention(device, lpum);
6426 }
6427
6428 static void dasd_eckd_oos_resume(struct dasd_device *device)
6429 {
6430 struct dasd_eckd_private *private = device->private;
6431 struct alias_pav_group *pavgroup, *tempgroup;
6432 struct dasd_device *dev, *n;
6433 unsigned long flags;
6434
6435 spin_lock_irqsave(&private->lcu->lock, flags);
6436 list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
6437 alias_list) {
6438 if (dev->stopped & DASD_STOPPED_NOSPC)
6439 dasd_generic_space_avail(dev);
6440 }
6441 list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
6442 alias_list) {
6443 if (dev->stopped & DASD_STOPPED_NOSPC)
6444 dasd_generic_space_avail(dev);
6445 }
6446
6447 list_for_each_entry_safe(pavgroup, tempgroup,
6448 &private->lcu->grouplist,
6449 group) {
6450 list_for_each_entry_safe(dev, n, &pavgroup->baselist,
6451 alias_list) {
6452 if (dev->stopped & DASD_STOPPED_NOSPC)
6453 dasd_generic_space_avail(dev);
6454 }
6455 list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
6456 alias_list) {
6457 if (dev->stopped & DASD_STOPPED_NOSPC)
6458 dasd_generic_space_avail(dev);
6459 }
6460 }
6461 spin_unlock_irqrestore(&private->lcu->lock, flags);
6462 }
6463
/*
 * Handle an out-of-space attention message for a thin-provisioned
 * extent pool: log the state change, resume stopped devices when the
 * pool is merely at warn level, and refresh the cached pool info.
 */
static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		/* warn level only -- space is still available, resume */
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left schedule work again */
	device->discipline->check_attention(device, lpum);
}
6494
/*
 * Worker function queued by dasd_eckd_check_attention(): read the
 * attention message buffer for the recorded path and dispatch CUIR
 * or out-of-space handling depending on the message format/length.
 * Frees the work item and drops the device reference taken when the
 * work was scheduled.
 */
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	/* drop the reference taken when the work was queued */
	dasd_put_device(device);
	/* kfree(NULL) is a no-op, so this is safe on the error paths */
	kfree(messages);
	kfree(data);
}
6526
6527 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6528 {
6529 struct check_attention_work_data *data;
6530
6531 data = kzalloc(sizeof(*data), GFP_ATOMIC);
6532 if (!data)
6533 return -ENOMEM;
6534 INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6535 dasd_get_device(device);
6536 data->device = device;
6537 data->lpum = lpum;
6538 schedule_work(&data->worker);
6539 return 0;
6540 }
6541
6542 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6543 {
6544 if (~lpum & dasd_path_get_opm(device)) {
6545 dasd_path_add_nohpfpm(device, lpum);
6546 dasd_path_remove_opm(device, lpum);
6547 dev_err(&device->cdev->dev,
6548 "Channel path %02X lost HPF functionality and is disabled\n",
6549 lpum);
6550 return 1;
6551 }
6552 return 0;
6553 }
6554
6555 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6556 {
6557 struct dasd_eckd_private *private = device->private;
6558
6559 dev_err(&device->cdev->dev,
6560 "High Performance FICON disabled\n");
6561 private->fcx_max_data = 0;
6562 }
6563
6564 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6565 {
6566 struct dasd_eckd_private *private = device->private;
6567
6568 return private->fcx_max_data ? 1 : 0;
6569 }
6570
/*
 * Handle an HPF (transport mode) error reported in @irb: disable HPF
 * either for the failing path or for the whole device, then stop the
 * device and requeue its requests so they are rebuilt without HPF.
 */
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check: the device has no HPF, the error makes
		 * no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		/* the whole device lost HPF */
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		/* only the failing path lost HPF; if other paths remain
		 * we are done, otherwise fall back to disabling the
		 * device and re-verify the remaining HPF paths */
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				  dasd_path_get_hpfpm(device));
	}
	/*
	 * stop the device and requeue all requests so they are rebuilt
	 * without HPF before the device is started again
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
6598
6599
6600
6601
/*
 * Configure the block layer request queue limits for an ECKD device
 * (logical block size, max transfer size, segment restrictions).
 */
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
	unsigned int logical_block_size = block->bp_block;
	struct request_queue *q = block->request_queue;
	struct dasd_device *device = block->base;
	int max;

	if (device->features & DASD_FEATURE_USERAW) {
		/*
		 * raw-track access needs a larger max-blocks value
		 * than native ECKD because only one CCW per track is
		 * required
		 */
		max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
	} else {
		max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
	}
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	q->limits.max_dev_sectors = max;
	blk_queue_logical_block_size(q, logical_block_size);
	blk_queue_max_hw_sectors(q, max);
	blk_queue_max_segments(q, USHRT_MAX);
	/* with page sized segments each segment can be translated into
	 * one idaw/tidaw */
	blk_queue_max_segment_size(q, PAGE_SIZE);
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
6631
/*
 * CCW driver glue: binds the ECKD device types (see dasd_eckd_ids)
 * to the generic DASD driver entry points.
 */
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name = "dasd-eckd",
		.owner = THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids = dasd_eckd_ids,
	.probe = dasd_eckd_probe,
	.remove = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online = dasd_eckd_set_online,
	.notify = dasd_generic_notify,
	.path_event = dasd_generic_path_event,
	.shutdown = dasd_generic_shutdown,
	.uc_handler = dasd_generic_uc_handler,
	.int_class = IRQIO_DAS,
};
6649
/*
 * ECKD discipline: the full set of operations the generic DASD core
 * uses to drive ECKD devices (device setup, request building, error
 * recovery, ioctls, thin-provisioning/ESE support).
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.setup_blk_queue = dasd_eckd_setup_blk_queue,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	/* extent space efficient (thin provisioned) volume support */
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
};
6701
6702 static int __init
6703 dasd_eckd_init(void)
6704 {
6705 int ret;
6706
6707 ASCEBC(dasd_eckd_discipline.ebcname, 4);
6708 dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6709 GFP_KERNEL | GFP_DMA);
6710 if (!dasd_reserve_req)
6711 return -ENOMEM;
6712 dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
6713 GFP_KERNEL | GFP_DMA);
6714 if (!dasd_vol_info_req)
6715 return -ENOMEM;
6716 pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
6717 GFP_KERNEL | GFP_DMA);
6718 if (!pe_handler_worker) {
6719 kfree(dasd_reserve_req);
6720 kfree(dasd_vol_info_req);
6721 return -ENOMEM;
6722 }
6723 rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6724 if (!rawpadpage) {
6725 kfree(pe_handler_worker);
6726 kfree(dasd_reserve_req);
6727 kfree(dasd_vol_info_req);
6728 return -ENOMEM;
6729 }
6730 ret = ccw_driver_register(&dasd_eckd_driver);
6731 if (!ret)
6732 wait_for_device_probe();
6733 else {
6734 kfree(pe_handler_worker);
6735 kfree(dasd_reserve_req);
6736 kfree(dasd_vol_info_req);
6737 free_page((unsigned long)rawpadpage);
6738 }
6739 return ret;
6740 }
6741
6742 static void __exit
6743 dasd_eckd_cleanup(void)
6744 {
6745 ccw_driver_unregister(&dasd_eckd_driver);
6746 kfree(pe_handler_worker);
6747 kfree(dasd_reserve_req);
6748 free_page((unsigned long)rawpadpage);
6749 }
6750
/* module entry and exit points */
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);