// SPDX-License-Identifier: GPL-2.0-only
/*
 * ms_block.c - Sony MemoryStick (legacy) storage support
 *
 * Copyright (C) 2013 Maxim Levitsky <maximlevitsky@gmail.com>
 *
 * Minor portions of the driver were copied from mspro_block.c which is
 * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
 */

#define DRIVER_NAME "ms_block"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/memstick.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/bitmap.h>
#include <linux/scatterlist.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include "ms_block.h"

static int debug;
static int cache_flush_timeout = 1000;
static bool verify_writes;

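/*
 * Copy a section of 'sg_from', starting at byte 'offset' and spanning 'len'
 * bytes, into the 'sg_to' table, which has at most 'to_nents' entries.
 * Returns the number of bytes described by the resulting 'sg_to'.
 */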
static size_t msb_sg_copy(struct scatterlist *sg_from,
	struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
{
	size_t copied = 0;

	/* Skip the entries that lie fully inside 'offset' */
	while (offset > 0) {
		if (offset >= sg_from->length) {
			if (sg_is_last(sg_from))
				return 0;

			offset -= sg_from->length;
			sg_from = sg_next(sg_from);
			continue;
		}

		copied = min(len, sg_from->length - offset);
		sg_set_page(sg_to, sg_page(sg_from),
			copied, sg_from->offset + offset);

		len -= copied;
		offset = 0;

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_to = sg_next(sg_to);
		to_nents--;
		sg_from = sg_next(sg_from);
	}

	while (len > sg_from->length && to_nents--) {
		len -= sg_from->length;
		copied += sg_from->length;

		sg_set_page(sg_to, sg_page(sg_from),
			sg_from->length, sg_from->offset);

		if (sg_is_last(sg_from) || !len)
			goto out;

		sg_from = sg_next(sg_from);
		sg_to = sg_next(sg_to);
	}

	if (len && to_nents) {
		sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
		copied += len;
	}
out:
	sg_mark_end(sg_to);
	return copied;
}

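/*
 * Compare a section of 'sg', starting at byte 'offset' and spanning 'len'
 * bytes, with 'buffer'. Returns 0 if equal, -1 otherwise.
 */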
static int msb_sg_compare_to_buffer(struct scatterlist *sg,
					size_t offset, u8 *buffer, size_t len)
{
	int retval = 0, cmplen;
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_nents(sg),
					SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	while (sg_miter_next(&miter) && len > 0) {
		if (offset >= miter.length) {
			offset -= miter.length;
			continue;
		}

		cmplen = min(miter.length - offset, len);
		retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
		if (retval)
			break;

		buffer += cmplen;
		len -= cmplen;
		offset = 0;
	}

	if (!retval && len)
		retval = -1;

	sg_miter_stop(&miter);
	return retval;
}

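/*
 * Get the zone in which a block with logical address 'lba' lives.
 * The flash is divided into zones of 512 physical blocks; the first zone
 * holds 494 logical blocks and every following zone holds 496, so zone 0
 * covers lba 0-493, zone 1 covers lba 494-989, and so on.
 */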
static int msb_get_zone_from_lba(int lba)
{
	if (lba < 494)
		return 0;
	return ((lba - 494) / 496) + 1;
}

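/* Get the zone of a physical block. Trivial. */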
static int msb_get_zone_from_pba(int pba)
{
	return pba / MS_BLOCKS_IN_ZONE;
}

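/* Debug test to validate the free block counts against the used-blocks bitmap */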
static int msb_validate_used_block_bitmap(struct msb_data *msb)
{
	int total_free_blocks = 0;
	int i;

	if (!debug)
		return 0;

	for (i = 0; i < msb->zone_count; i++)
		total_free_blocks += msb->free_block_count[i];

	if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
					msb->block_count) == total_free_blocks)
		return 0;

	pr_err("BUG: free block counts don't match the bitmap");
	msb->read_only = true;
	return -EINVAL;
}

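/* Mark a physical block as used */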
static void msb_mark_block_used(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already used pba %d as used",
			pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__set_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]--;
}

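/* Mark a physical block as unused */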
static void msb_mark_block_unused(struct msb_data *msb, int pba)
{
	int zone = msb_get_zone_from_pba(pba);

	if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused",
			pba);
		msb->read_only = true;
		return;
	}

	if (msb_validate_used_block_bitmap(msb))
		return;

	/* No races because all IO is single threaded */
	__clear_bit(pba, msb->used_blocks_bitmap);
	msb->free_block_count[zone]++;
}

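/* Invalidate the register window; the next register access will re-set it */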
static void msb_invalidate_reg_window(struct msb_data *msb)
{
	msb->reg_addr.w_offset = offsetof(struct ms_register, id);
	msb->reg_addr.w_length = sizeof(struct ms_id_register);
	msb->reg_addr.r_offset = offsetof(struct ms_register, id);
	msb->reg_addr.r_length = sizeof(struct ms_id_register);
	msb->addr_valid = false;
}

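/* Run a request state machine to completion and return its exit code */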
static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
		(struct memstick_dev *card, struct memstick_request **req))
{
	struct memstick_dev *card = msb->card;

	WARN_ON(msb->state != -1);
	msb->int_polling = false;
	msb->state = 0;
	msb->exit_error = 0;

	memset(&card->current_mrq, 0, sizeof(card->current_mrq));

	card->next_request = state_func;
	memstick_new_req(card->host);
	wait_for_completion(&card->mrq_complete);

	WARN_ON(msb->state != -1);
	return msb->exit_error;
}

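/* Forward declaration; the default (failing) handler is defined below */
static int h_msb_default_bad(struct memstick_dev *card,
		struct memstick_request **mrq);

/* State machines call this to exit and to report the result */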
static int msb_exit_state_machine(struct msb_data *msb, int error)
{
	WARN_ON(msb->state == -1);

	msb->state = -1;
	msb->exit_error = error;
	msb->card->next_request = h_msb_default_bad;

	/* Invalidate reg window on errors */
	if (error)
		msb_invalidate_reg_window(msb);

	complete(&msb->card->mrq_complete);
	return -ENXIO;
}

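/*
 * Read the INT register. In serial mode this polls the card with
 * MS_TPC_GET_INT until 'timeout' ms (-1 selects the 500 ms default) have
 * passed, after which a CMDNAK is faked. Returns 1 when a GET_INT TPC was
 * queued, 0 when mrq->data[0] already holds the INT value.
 */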
static int msb_read_int_reg(struct msb_data *msb, long timeout)
{
	struct memstick_request *mrq = &msb->card->current_mrq;

	WARN_ON(msb->state == -1);

	if (!msb->int_polling) {
		msb->int_timeout = jiffies +
			msecs_to_jiffies(timeout == -1 ? 500 : timeout);
		msb->int_polling = true;
	} else if (time_after(jiffies, msb->int_timeout)) {
		mrq->data[0] = MEMSTICK_INT_CMDNAK;
		return 0;
	}

	if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
				mrq->need_card_int && !mrq->error) {
		mrq->data[0] = mrq->int_reg;
		mrq->need_card_int = false;
		return 0;
	} else {
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		return 1;
	}
}

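/*
 * Queue a TPC that reads 'len' bytes of card registers at 'offset'.
 * Returns 0 when the register window had to be updated first (the caller
 * must stay in the same state and retry), 1 when MS_TPC_READ_REG was queued.
 */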
static int msb_read_regs(struct msb_data *msb, int offset, int len)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.r_offset != offset ||
	    msb->reg_addr.r_length != len || !msb->addr_valid) {

		msb->reg_addr.r_offset = offset;
		msb->reg_addr.r_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
	return 1;
}

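/* Same as msb_read_regs, but queues MS_TPC_WRITE_REG with data from 'buf' */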
static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
{
	struct memstick_request *req = &msb->card->current_mrq;

	if (msb->reg_addr.w_offset != offset ||
	    msb->reg_addr.w_length != len || !msb->addr_valid) {

		msb->reg_addr.w_offset = offset;
		msb->reg_addr.w_length = len;
		msb->addr_valid = true;

		memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
			&msb->reg_addr, sizeof(msb->reg_addr));
		return 0;
	}

	memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
	return 1;
}

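/* Default handler: fails any request issued outside a state machine */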
static int h_msb_default_bad(struct memstick_dev *card,
		struct memstick_request **mrq)
{
	return -ENXIO;
}

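/*
 * This handler reads one page from the device.
 * The address is taken from msb->regs.param and the data is written to
 * msb->current_sg. It can also read only the extra data, when
 * msb->regs.param.cp is set to MEMSTICK_CP_EXTRA.
 */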
static int h_msb_read_page(struct memstick_dev *card,
		struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 command, intreg;

	if (mrq->error) {
		dbg("read_page, unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		 * the register window first; it queues that update and we stay
		 * in this state to retry.
		 */
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_RP_SEND_READ_COMMAND;
		return 0;

	case MSB_RP_SEND_READ_COMMAND:
		command = MS_CMD_BLOCK_READ;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_RP_SEND_INT_REQ;
		return 0;

	case MSB_RP_SEND_INT_REQ:
		msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If we don't actually need to send the INT read request
		 * (only needed in serial mode), just fake it.
		 */
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_RP_RECEIVE_INT_REQ_RESULT:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_RP_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = (intreg & MEMSTICK_INT_ERR) ?
			MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
		goto again;

	case MSB_RP_SEND_READ_STATUS_REG:
		/* Read the status register to understand the source of the
		 * INT_ERR.
		 */
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, status),
			sizeof(struct ms_status_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_STATUS_REG;
		return 0;

	case MSB_RP_RECEIVE_STATUS_REG:
		msb->regs.status = *(struct ms_status_register *)mrq->data;
		msb->state = MSB_RP_SEND_OOB_READ;
		fallthrough;

	case MSB_RP_SEND_OOB_READ:
		if (!msb_read_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register)))
			return 0;

		msb->state = MSB_RP_RECEIVE_OOB_READ;
		return 0;

	case MSB_RP_RECEIVE_OOB_READ:
		msb->regs.extra_data =
			*(struct ms_extra_data_register *) mrq->data;
		msb->state = MSB_RP_SEND_READ_DATA;
		fallthrough;

	case MSB_RP_SEND_READ_DATA:
		/* Skip this state if we only read the oob */
		if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
			msb->state = MSB_RP_RECEIVE_READ_DATA;
			goto again;
		}

		sg_init_table(sg, ARRAY_SIZE(sg));
		msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size);

		memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
		msb->state = MSB_RP_RECEIVE_READ_DATA;
		return 0;

	case MSB_RP_RECEIVE_READ_DATA:
		if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, 0);
		}

		if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
			dbg("read_page: uncorrectable error");
			return msb_exit_state_machine(msb, -EBADMSG);
		}

		if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
			dbg("read_page: correctable error");
			msb->current_sg_offset += msb->page_size;
			return msb_exit_state_machine(msb, -EUCLEAN);
		} else {
			dbg("read_page: INT error, but no status error bits");
			return msb_exit_state_machine(msb, -EIO);
		}
	}

	BUG();
}

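/*
 * Handler for writes of exactly one block.
 * The block address is taken from msb->regs.param and the extra data from
 * msb->regs.extra_data. Returns -EBADMSG if the write fails due to an
 * uncorrectable error, -EIO if the device refuses the command.
 */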
static int h_msb_write_block(struct memstick_dev *card,
		struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct scatterlist sg[2];
	u8 intreg, command;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

again:
	switch (msb->state) {

	/* HACK: some hosts mishandle TPCs between 8 bytes and
	 * sizeof(mrq->data) in the PIO mode that is used for them,
	 * therefore the register write is split in two.
	 */
	case MSB_WB_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;

		msb->state = MSB_WB_SEND_WRITE_OOB;
		return 0;

	case MSB_WB_SEND_WRITE_OOB:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;
		msb->state = MSB_WB_SEND_WRITE_COMMAND;
		return 0;

	case MSB_WB_SEND_WRITE_COMMAND:
		command = MS_CMD_BLOCK_WRITE;
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		msb->state = MSB_WB_SEND_INT_REQ;
		return 0;

	case MSB_WB_SEND_INT_REQ:
		msb->state = MSB_WB_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_WB_RECEIVE_INT_REQ:
		intreg = mrq->data[0];
		msb->regs.status.interrupt = intreg;

		/* errors mean out of here, and fast... */
		if (intreg & (MEMSTICK_INT_CMDNAK))
			return msb_exit_state_machine(msb, -EIO);

		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		/* for the last page we need to poll CED */
		if (msb->current_page == msb->pages_in_block) {
			if (intreg & MEMSTICK_INT_CED)
				return msb_exit_state_machine(msb, 0);
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		/* for a non-last page we need BREQ before writing next chunk */
		if (!(intreg & MEMSTICK_INT_BREQ)) {
			msb->state = MSB_WB_SEND_INT_REQ;
			goto again;
		}

		msb->int_polling = false;
		msb->state = MSB_WB_SEND_WRITE_DATA;
		fallthrough;

	case MSB_WB_SEND_WRITE_DATA:
		sg_init_table(sg, ARRAY_SIZE(sg));

		if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
			msb->current_sg_offset,
			msb->page_size) < msb->page_size)
			return msb_exit_state_machine(msb, -EIO);

		memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
		mrq->need_card_int = 1;
		msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
		return 0;

	case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
		msb->current_page++;
		msb->current_sg_offset += msb->page_size;
		msb->state = MSB_WB_SEND_INT_REQ;
		goto again;
	default:
		BUG();
	}

	return 0;
}

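/*
 * Handler for simple IO requests that consist of a register write plus
 * a command, optionally preceded by an extra-data (OOB) write.
 */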
static int h_msb_send_command(struct memstick_dev *card,
		struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	u8 intreg;

	if (mrq->error) {
		dbg("send_command: unknown error");
		return msb_exit_state_machine(msb, mrq->error);
	}
again:
	switch (msb->state) {
	case MSB_SC_SEND_WRITE_PARAMS:
		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			sizeof(struct ms_param_register),
			&msb->regs.param))
			return 0;
		msb->state = MSB_SC_SEND_WRITE_OOB;
		return 0;

	case MSB_SC_SEND_WRITE_OOB:
		if (!msb->command_need_oob) {
			msb->state = MSB_SC_SEND_COMMAND;
			goto again;
		}

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, extra_data),
			sizeof(struct ms_extra_data_register),
			&msb->regs.extra_data))
			return 0;

		msb->state = MSB_SC_SEND_COMMAND;
		return 0;

	case MSB_SC_SEND_COMMAND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
		msb->state = MSB_SC_SEND_INT_REQ;
		return 0;

	case MSB_SC_SEND_INT_REQ:
		msb->state = MSB_SC_RECEIVE_INT_REQ;
		if (msb_read_int_reg(msb, -1))
			return 0;
		fallthrough;

	case MSB_SC_RECEIVE_INT_REQ:
		intreg = mrq->data[0];

		if (intreg & MEMSTICK_INT_CMDNAK)
			return msb_exit_state_machine(msb, -EIO);
		if (intreg & MEMSTICK_INT_ERR)
			return msb_exit_state_machine(msb, -EBADMSG);

		if (!(intreg & MEMSTICK_INT_CED)) {
			msb->state = MSB_SC_SEND_INT_REQ;
			goto again;
		}

		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

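/* Small handler for card reset */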
static int h_msb_reset(struct memstick_dev *card,
		struct memstick_request **out_mrq)
{
	u8 command = MS_CMD_RESET;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;

	if (mrq->error)
		return msb_exit_state_machine(msb, mrq->error);

	switch (msb->state) {
	case MSB_RS_SEND:
		memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
		mrq->need_card_int = 0;
		msb->state = MSB_RS_CONFIRM;
		return 0;
	case MSB_RS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}
	BUG();
}

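/* This handler performs the switch from serial to 4-bit parallel mode */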
static int h_msb_parallel_switch(struct memstick_dev *card,
		struct memstick_request **out_mrq)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_request *mrq = *out_mrq = &card->current_mrq;
	struct memstick_host *host = card->host;

	if (mrq->error) {
		dbg("parallel_switch: error");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		return msb_exit_state_machine(msb, mrq->error);
	}

	switch (msb->state) {
	case MSB_PS_SEND_SWITCH_COMMAND:
		/* Set the parallel interface on the memstick side */
		msb->regs.param.system |= MEMSTICK_SYS_PAM;

		if (!msb_write_regs(msb,
			offsetof(struct ms_register, param),
			1,
			(unsigned char *)&msb->regs.param))
			return 0;

		msb->state = MSB_PS_SWICH_HOST;
		return 0;

	case MSB_PS_SWICH_HOST:
		/* Set the parallel interface on our side plus send a dummy
		 * request to see if the card responds.
		 */
		host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
		memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
		msb->state = MSB_PS_CONFIRM;
		return 0;

	case MSB_PS_CONFIRM:
		return msb_exit_state_machine(msb, 0);
	}

	BUG();
}

static int msb_switch_to_parallel(struct msb_data *msb);

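/* Reset the card; if 'full', power-cycle the host interface as well */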
static int msb_reset(struct msb_data *msb, bool full)
{
	bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
	struct memstick_dev *card = msb->card;
	struct memstick_host *host = card->host;
	int error;

	/* Reset the card */
	msb->regs.param.system = MEMSTICK_SYS_BAMD;

	if (full) {
		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_OFF);
		if (error)
			goto out_error;

		msb_invalidate_reg_window(msb);

		error = host->set_param(host,
					MEMSTICK_POWER, MEMSTICK_POWER_ON);
		if (error)
			goto out_error;

		error = host->set_param(host,
					MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
		if (error) {
out_error:
			dbg("Failed to reset the host controller");
			msb->read_only = true;
			return -EFAULT;
		}
	}

	error = msb_run_state_machine(msb, h_msb_reset);
	if (error) {
		dbg("Failed to reset the card");
		msb->read_only = true;
		return -ENODEV;
	}

	/* Set parallel mode */
	if (was_parallel)
		msb_switch_to_parallel(msb);
	return 0;
}

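/* Attempt to switch the interface to 4-bit parallel mode */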
static int msb_switch_to_parallel(struct msb_data *msb)
{
	int error;

	error = msb_run_state_machine(msb, h_msb_parallel_switch);
	if (error) {
		pr_err("Switch to parallel failed");
		msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
		msb_reset(msb, true);
		return -EFAULT;
	}

	msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
	return 0;
}

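/* Change the overwrite flag of one page */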
static int msb_set_overwrite_flag(struct msb_data *msb,
	u16 pba, u8 page, u8 flag)
{
	if (msb->read_only)
		return -EROFS;

	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
	msb->regs.extra_data.overwrite_flag = flag;
	msb->command_value = MS_CMD_BLOCK_WRITE;
	msb->command_need_oob = true;

	dbg_verbose("changing overwrite flag to %02x for sector %d, page %d",
		flag, pba, page);
	return msb_run_state_machine(msb, h_msb_send_command);
}

static int msb_mark_bad(struct msb_data *msb, int pba)
{
	pr_notice("marking pba %d as bad", pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(
			msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
}

static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
{
	dbg("marking page %d of pba %d as bad", page, pba);
	msb_reset(msb, true);
	return msb_set_overwrite_flag(msb,
		pba, page, ~MEMSTICK_OVERWRITE_PGST0);
}

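/* Erase one physical block; on failure the block is marked as bad */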
static int msb_erase_block(struct msb_data *msb, u16 pba)
{
	int error, try;

	if (msb->read_only)
		return -EROFS;

	dbg_verbose("erasing pba %d", pba);

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = 0;
		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->command_value = MS_CMD_BLOCK_ERASE;
		msb->command_need_oob = false;

		error = msb_run_state_machine(msb, h_msb_send_command);
		if (!error || msb_reset(msb, true))
			break;
	}

	if (error) {
		pr_err("erase failed, marking pba %d as bad", pba);
		msb_mark_bad(msb, pba);
	} else {
		dbg_verbose("erase success, marking pba %d as unused", pba);
	}

	msb_mark_block_unused(msb, pba);
	__set_bit(pba, msb->erased_blocks_bitmap);
	return error;
}

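/*
 * Read one page from the card into 'sg' at byte 'offset'.
 * If 'extra' is not NULL, the page's extra data (OOB) is stored there.
 * A pba of MS_BLOCK_INVALID reads back as all 0xFF.
 */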
static int msb_read_page(struct msb_data *msb,
	u16 pba, u8 page, struct ms_extra_data_register *extra,
	struct scatterlist *sg, int offset)
{
	int try, error;

	if (pba == MS_BLOCK_INVALID) {
		unsigned long flags;
		struct sg_mapping_iter miter;
		size_t len = msb->page_size;

		dbg_verbose("read unmapped sector. returning 0xFF");

		local_irq_save(flags);
		sg_miter_start(&miter, sg, sg_nents(sg),
			SG_MITER_ATOMIC | SG_MITER_TO_SG);

		while (sg_miter_next(&miter) && len > 0) {

			int chunklen;

			if (offset && offset >= miter.length) {
				offset -= miter.length;
				continue;
			}

			chunklen = min(miter.length - offset, len);
			memset(miter.addr + offset, 0xFF, chunklen);
			len -= chunklen;
			offset = 0;
		}

		sg_miter_stop(&miter);
		local_irq_restore(flags);

		if (offset)
			return -EFAULT;

		if (extra)
			memset(extra, 0xFF, sizeof(*extra));
		return 0;
	}

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
		return -EINVAL;
	}

	for (try = 1; try < 3; try++) {
		msb->regs.param.block_address = cpu_to_be16(pba);
		msb->regs.param.page_address = page;
		msb->regs.param.cp = MEMSTICK_CP_PAGE;

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		error = msb_run_state_machine(msb, h_msb_read_page);

		if (error == -EUCLEAN) {
			pr_notice("correctable error on pba %d, page %d",
				pba, page);
			error = 0;
		}

		if (!error && extra)
			*extra = msb->regs.extra_data;

		if (!error || msb_reset(msb, true))
			break;
	}

	if (error == -EBADMSG) {
		pr_err("uncorrectable error on read of pba %d, page %d",
			pba, page);

		if (msb->regs.extra_data.overwrite_flag &
					MEMSTICK_OVERWRITE_PGST0)
			msb_mark_page_bad(msb, pba, page);
		return -EBADMSG;
	}

	if (error)
		pr_err("read of pba %d, page %d failed with error %d",
			pba, page, error);
	return error;
}

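/* Read only the extra data (OOB) of one page */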
static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
	struct ms_extra_data_register *extra)
{
	int error;

	BUG_ON(!extra);
	msb->regs.param.block_address = cpu_to_be16(pba);
	msb->regs.param.page_address = page;
	msb->regs.param.cp = MEMSTICK_CP_EXTRA;

	if (pba >= msb->block_count) {
		pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
		return -EINVAL;
	}

	error = msb_run_state_machine(msb, h_msb_read_page);
	*extra = msb->regs.extra_data;

	if (error == -EUCLEAN) {
		pr_notice("correctable error on pba %d, page %d",
			pba, page);
		return 0;
	}

	return error;
}

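/* Read a block and compare its contents with 'orig_sg' at 'offset' */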
static int msb_verify_block(struct msb_data *msb, u16 pba,
				struct scatterlist *orig_sg, int offset)
{
	struct scatterlist sg;
	int page = 0, error;

	sg_init_one(&sg, msb->block_buffer, msb->block_size);

	while (page < msb->pages_in_block) {

		error = msb_read_page(msb, pba, page,
				NULL, &sg, page * msb->page_size);
		if (error)
			return error;
		page++;
	}

	if (msb_sg_compare_to_buffer(orig_sg, offset,
				msb->block_buffer, msb->block_size))
		return -EIO;
	return 0;
}

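/* Write exactly one block plus its extra data, verifying and retrying once */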
static int msb_write_block(struct msb_data *msb,
	u16 pba, u32 lba, struct scatterlist *sg, int offset)
{
	int error, current_try = 1;

	BUG_ON(sg->length < msb->page_size);

	if (msb->read_only)
		return -EROFS;

	if (pba == MS_BLOCK_INVALID) {
		pr_err("BUG: write: attempt to write MS_BLOCK_INVALID block");
		return -EINVAL;
	}

	if (pba >= msb->block_count || lba >= msb->logical_block_count) {
		pr_err("BUG: write: attempt to write beyond the end of device");
		return -EINVAL;
	}

	if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
		pr_err("BUG: write: lba zone mismatch");
		return -EINVAL;
	}

	if (pba == msb->boot_block_locations[0] ||
	    pba == msb->boot_block_locations[1]) {
		pr_err("BUG: write: attempt to write to boot blocks!");
		return -EINVAL;
	}

	while (1) {

		if (msb->read_only)
			return -EROFS;

		msb->regs.param.cp = MEMSTICK_CP_BLOCK;
		msb->regs.param.page_address = 0;
		msb->regs.param.block_address = cpu_to_be16(pba);

		msb->regs.extra_data.management_flag = 0xFF;
		msb->regs.extra_data.overwrite_flag = 0xF8;
		msb->regs.extra_data.logical_address = cpu_to_be16(lba);

		msb->current_sg = sg;
		msb->current_sg_offset = offset;
		msb->current_page = 0;

		error = msb_run_state_machine(msb, h_msb_write_block);

		/* The block we just wrote to is assumed erased, since its pba
		 * was erased. If it wasn't erased, the write would only
		 * clear the bits that were set in the block, so verify
		 * that what we have written matches what we expect.
		 * We do trust the blocks that we erased ourselves.
		 */
		if (!error && (verify_writes ||
				!test_bit(pba, msb->erased_blocks_bitmap)))
			error = msb_verify_block(msb, pba, sg, offset);

		if (!error)
			break;

		if (current_try > 1 || msb_reset(msb, true))
			break;

		pr_err("write failed, trying to erase the pba %d", pba);
		error = msb_erase_block(msb, pba);
		if (error)
			break;

		current_try++;
	}
	return error;
}

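/* Find a random free block inside 'zone' and mark it as used */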
static u16 msb_get_free_block(struct msb_data *msb, int zone)
{
	u16 pos;
	int pba = zone * MS_BLOCKS_IN_ZONE;
	int i;

	get_random_bytes(&pos, sizeof(pos));

	if (!msb->free_block_count[zone]) {
		pr_err("NO free blocks in zone %d to use for a write (media is worn out), switching to RO mode", zone);
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	pos %= msb->free_block_count[zone];

	dbg_verbose("have %d choices for a free block, selected randomly: %d",
		msb->free_block_count[zone], pos);

	pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba);
	for (i = 0; i < pos; ++i)
		pba = find_next_zero_bit(msb->used_blocks_bitmap,
						msb->block_count, pba + 1);

	dbg_verbose("result of the free blocks scan: pba %d", pba);

	if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
		pr_err("BUG: can't get a free block");
		msb->read_only = true;
		return MS_BLOCK_INVALID;
	}

	msb_mark_block_used(msb, pba);
	return pba;
}

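/* Write an updated copy of logical block 'lba' to a fresh pba and erase the old one */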
static int msb_update_block(struct msb_data *msb, u16 lba,
	struct scatterlist *sg, int offset)
{
	u16 pba, new_pba;
	int error, try;

	pba = msb->lba_to_pba_table[lba];
	dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);

	if (pba != MS_BLOCK_INVALID) {
		dbg_verbose("setting the update flag on the block");
		msb_set_overwrite_flag(msb, pba, 0,
				0xFF & ~MEMSTICK_OVERWRITE_UDST);
	}

	for (try = 0; try < 3; try++) {
		new_pba = msb_get_free_block(msb,
			msb_get_zone_from_lba(lba));

		if (new_pba == MS_BLOCK_INVALID) {
			error = -EIO;
			goto out;
		}

		dbg_verbose("block update: writing updated block to the pba %d",
							new_pba);
		error = msb_write_block(msb, new_pba, lba, sg, offset);
		if (error == -EBADMSG) {
			msb_mark_bad(msb, new_pba);
			continue;
		}

		if (error)
			goto out;

		dbg_verbose("block update: erasing the old block");
		msb_erase_block(msb, pba);
		msb->lba_to_pba_table[lba] = new_pba;
		return 0;
	}
out:
	if (error) {
		pr_err("block update error after %d tries, switching to r/o mode", try);
		msb->read_only = true;
	}
	return error;
}

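/* Convert the endianness of the boot block fields for easy use */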
static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
{
	p->header.block_id = be16_to_cpu(p->header.block_id);
	p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
	p->entry.disabled_block.start_addr
		= be32_to_cpu(p->entry.disabled_block.start_addr);
	p->entry.disabled_block.data_size
		= be32_to_cpu(p->entry.disabled_block.data_size);
	p->entry.cis_idi.start_addr
		= be32_to_cpu(p->entry.cis_idi.start_addr);
	p->entry.cis_idi.data_size
		= be32_to_cpu(p->entry.cis_idi.data_size);
	p->attr.block_size = be16_to_cpu(p->attr.block_size);
	p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
	p->attr.number_of_effective_blocks
		= be16_to_cpu(p->attr.number_of_effective_blocks);
	p->attr.page_size = be16_to_cpu(p->attr.page_size);
	p->attr.memory_manufacturer_code
		= be16_to_cpu(p->attr.memory_manufacturer_code);
	p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
	p->attr.implemented_capacity
		= be16_to_cpu(p->attr.implemented_capacity);
	p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
	p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
}

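/* Locate up to two boot blocks at the start of the card */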
static int msb_read_boot_blocks(struct msb_data *msb)
{
	int pba = 0;
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	struct ms_boot_page *page;

	msb->boot_block_locations[0] = MS_BLOCK_INVALID;
	msb->boot_block_locations[1] = MS_BLOCK_INVALID;
	msb->boot_block_count = 0;

	dbg_verbose("Start of a scan for the boot blocks");

	if (!msb->boot_page) {
		page = kmalloc_array(2, sizeof(struct ms_boot_page),
				     GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		msb->boot_page = page;
	} else
		page = msb->boot_page;

	msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;

	for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {

		sg_init_one(&sg, page, sizeof(*page));
		if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
			dbg("boot scan: can't read pba %d", pba);
			continue;
		}

		if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
			dbg("management flag doesn't indicate boot block %d",
				pba);
			continue;
		}

		if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
			dbg("the pba at %d doesn't contain boot block ID", pba);
			continue;
		}

		msb_fix_boot_page_endianness(page);
		msb->boot_block_locations[msb->boot_block_count] = pba;

		page++;
		msb->boot_block_count++;

		if (msb->boot_block_count == 2)
			break;
	}

	if (!msb->boot_block_count) {
		pr_err("media doesn't contain master page, aborting");
		return -EIO;
	}

	dbg_verbose("End of scan for boot blocks");
	return 0;
}

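/* Parse the factory bad-block table stored in boot block 'block_nr' */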
static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
{
	struct ms_boot_page *boot_block;
	struct scatterlist sg;
	u16 *buffer = NULL;
	int offset = 0;
	int i, error = 0;
	int data_size, data_offset, page, page_offset, size_to_read;
	u16 pba;

	BUG_ON(block_nr > 1);
	boot_block = &msb->boot_page[block_nr];
	pba = msb->boot_block_locations[block_nr];

	if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
		return -EINVAL;

	data_size = boot_block->entry.disabled_block.data_size;
	data_offset = sizeof(struct ms_boot_page) +
			boot_block->entry.disabled_block.start_addr;
	if (!data_size)
		return 0;

	page = data_offset / msb->page_size;
	page_offset = data_offset % msb->page_size;
	size_to_read =
		DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
			msb->page_size;

	dbg("reading bad block of boot block at pba %d, offset %d len %d",
		pba, data_offset, data_size);

	buffer = kzalloc(size_to_read, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Read the table */
	sg_init_one(&sg, buffer, size_to_read);

	while (offset < size_to_read) {
		error = msb_read_page(msb, pba, page, NULL, &sg, offset);
		if (error)
			goto out;

		page++;
		offset += msb->page_size;

		if (page == msb->pages_in_block) {
			pr_err("bad block table extends beyond the boot block");
			break;
		}
	}

	/* Process the bad blocks */
	for (i = page_offset; i < data_size / sizeof(u16); i++) {

		u16 bad_block = be16_to_cpu(buffer[i]);

		if (bad_block >= msb->block_count) {
			dbg("bad block table contains invalid block %d",
				bad_block);
			continue;
		}

		if (test_bit(bad_block, msb->used_blocks_bitmap)) {
			dbg("duplicate bad block %d in the table",
				bad_block);
			continue;
		}

		dbg("block %d is marked as factory bad", bad_block);
		msb_mark_block_used(msb, bad_block);
	}
out:
	kfree(buffer);
	return error;
}

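/* Allocate and initialize the FTL (logical to physical) tables */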
static int msb_ftl_initialize(struct msb_data *msb)
{
	int i;

	if (msb->ftl_initialized)
		return 0;

	msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
	msb->logical_block_count = msb->zone_count * 496 - 2;

	msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
	msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
	msb->lba_to_pba_table =
		kmalloc_array(msb->logical_block_count, sizeof(u16),
			      GFP_KERNEL);

	if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
	    !msb->erased_blocks_bitmap) {
		bitmap_free(msb->used_blocks_bitmap);
		bitmap_free(msb->erased_blocks_bitmap);
		kfree(msb->lba_to_pba_table);
		return -ENOMEM;
	}

	for (i = 0; i < msb->zone_count; i++)
		msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;

	memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
	       msb->logical_block_count * sizeof(u16));

	dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
	    msb->zone_count, msb->logical_block_count);

	msb->ftl_initialized = true;
	return 0;
}

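/*
 * Scan all blocks on the card: mark boot, bad and system blocks as used,
 * build the lba -> pba table, and resolve collisions between updated copies
 * of the same logical block.
 */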
static int msb_ftl_scan(struct msb_data *msb)
{
	u16 pba, lba, other_block;
	u8 overwrite_flag, management_flag, other_overwrite_flag;
	int error;
	struct ms_extra_data_register extra;
	u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);

	if (!overwrite_flags)
		return -ENOMEM;

	dbg("Start of media scanning");
	for (pba = 0; pba < msb->block_count; pba++) {

		if (pba == msb->boot_block_locations[0] ||
		    pba == msb->boot_block_locations[1]) {
			dbg_verbose("pba %05d -> [boot block]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		if (test_bit(pba, msb->used_blocks_bitmap)) {
			dbg_verbose("pba %05d -> [factory bad]", pba);
			continue;
		}

		memset(&extra, 0, sizeof(extra));
		error = msb_read_oob(msb, pba, 0, &extra);

		/* can't trust the block if we can't read the oob */
		if (error == -EBADMSG) {
			pr_notice("oob of pba %d damaged, will try to erase it", pba);
			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		} else if (error) {
			pr_err("unknown error %d on read of oob of pba %d - aborting",
				error, pba);

			kfree(overwrite_flags);
			return error;
		}

		lba = be16_to_cpu(extra.logical_address);
		management_flag = extra.management_flag;
		overwrite_flag = extra.overwrite_flag;
		overwrite_flags[pba] = overwrite_flag;

		/* Skip bad blocks */
		if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
			dbg("pba %05d -> [BAD]", pba);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Skip system/drm blocks */
		if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
			MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
			dbg("pba %05d -> [reserved management flag %02x]",
				pba, management_flag);
			msb_mark_block_used(msb, pba);
			continue;
		}

		/* Erase temporary tables */
		if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
			dbg("pba %05d -> [temp table] - will erase", pba);

			msb_mark_block_used(msb, pba);
			msb_erase_block(msb, pba);
			continue;
		}

		if (lba == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [free]", pba);
			continue;
		}

		msb_mark_block_used(msb, pba);

		/* Block has an lba that contradicts the zoning - erase it */
		if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
			pr_notice("pba %05d -> [bad lba %05d] - will erase",
				pba, lba);
			msb_erase_block(msb, pba);
			continue;
		}

		/* No collisions - great */
		if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
			dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		other_block = msb->lba_to_pba_table[lba];
		other_overwrite_flag = overwrite_flags[other_block];

		pr_notice("Collision between pba %d and pba %d",
			pba, other_block);

		if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it", pba);
			msb_erase_block(msb, other_block);
			msb->lba_to_pba_table[lba] = pba;
			continue;
		}

		if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
			pr_notice("pba %d is marked as stable, use it",
				other_block);
			msb_erase_block(msb, pba);
			continue;
		}

		pr_notice("collision between blocks %d and %d with no stable flag set on either, erasing pba %d",
			pba, other_block, other_block);

		msb_erase_block(msb, other_block);
		msb->lba_to_pba_table[lba] = pba;
	}

	dbg("End of media scanning");
	kfree(overwrite_flags);
	return 0;
}

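/* The timer that triggers a deferred cache flush */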
static void msb_cache_flush_timer(struct timer_list *t)
{
	struct msb_data *msb = from_timer(msb, t, cache_flush_timer);

	msb->need_flush_cache = true;
	queue_work(msb->io_queue, &msb->io_work);
}

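/* Drop the contents of the write cache without flushing them */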
static void msb_cache_discard(struct msb_data *msb)
{
	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return;

	del_timer_sync(&msb->cache_flush_timer);

	dbg_verbose("Discarding the write cache");
	msb->cache_block_lba = MS_BLOCK_INVALID;
	bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
}

static int msb_cache_init(struct msb_data *msb)
{
	timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);

	if (!msb->cache)
		msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->cache)
		return -ENOMEM;

	msb_cache_discard(msb);
	return 0;
}

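/*
 * Flush the write cache: read the pages that were never written into the
 * cache from the old block, then write the whole block to a fresh pba.
 */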
static int msb_cache_flush(struct msb_data *msb)
{
	struct scatterlist sg;
	struct ms_extra_data_register extra;
	int page, offset, error;
	u16 pba, lba;

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID)
		return 0;

	lba = msb->cache_block_lba;
	pba = msb->lba_to_pba_table[lba];

	dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
		pba, msb->cache_block_lba);

	sg_init_one(&sg, msb->cache, msb->block_size);

	/* Read all missing pages in cache */
	for (page = 0; page < msb->pages_in_block; page++) {

		if (test_bit(page, &msb->valid_cache_bitmap))
			continue;

		offset = page * msb->page_size;

		dbg_verbose("reading non-present sector %d of cache block %d",
			page, lba);
		error = msb_read_page(msb, pba, page, &extra, &sg, offset);

		/* Bad pages are copied with 00 page status */
		if (error == -EBADMSG) {
			pr_err("read error on sector %d, contents probably damaged", page);
			continue;
		}

		if (error)
			return error;

		if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
					MEMSTICK_OV_PG_NORMAL) {
			dbg("page %d is marked as bad", page);
			continue;
		}

		set_bit(page, &msb->valid_cache_bitmap);
	}

	/* Write the cache now */
	error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
	pba = msb->lba_to_pba_table[msb->cache_block_lba];

	/* Mark invalid pages */
	if (!error) {
		for (page = 0; page < msb->pages_in_block; page++) {

			if (test_bit(page, &msb->valid_cache_bitmap))
				continue;

			dbg("marking page %d as containing damaged data",
				page);
			msb_set_overwrite_flag(msb,
				pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
		}
	}

	msb_cache_discard(msb);
	return error;
}

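/*
 * Write one page into the cache. If the cache holds a different block it is
 * flushed first; with 'add_to_cache_only' the page is only stored when its
 * block is already cached.
 */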
static int msb_cache_write(struct msb_data *msb, int lba,
	int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
{
	int error;
	struct scatterlist sg_tmp[10];

	if (msb->read_only)
		return -EROFS;

	if (msb->cache_block_lba == MS_BLOCK_INVALID ||
						lba != msb->cache_block_lba)
		if (add_to_cache_only)
			return 0;

	/* If we need to write to a different block, flush the cache first */
	if (msb->cache_block_lba != MS_BLOCK_INVALID &&
						lba != msb->cache_block_lba) {
		dbg_verbose("first flush the cache");
		error = msb_cache_flush(msb);
		if (error)
			return error;
	}

	if (msb->cache_block_lba == MS_BLOCK_INVALID) {
		msb->cache_block_lba = lba;
		mod_timer(&msb->cache_flush_timer,
			jiffies + msecs_to_jiffies(cache_flush_timeout));
	}

	dbg_verbose("Write of LBA %d page %d to cache ", lba, page);

	sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
	msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);

	sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
		msb->cache + page * msb->page_size, msb->page_size);

	set_bit(page, &msb->valid_cache_bitmap);
	return 0;
}

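/* Read one page, preferring the cache when it holds a valid copy */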
static int msb_cache_read(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, int offset)
{
	int pba = msb->lba_to_pba_table[lba];
	struct scatterlist sg_tmp[10];
	int error = 0;

	if (lba == msb->cache_block_lba &&
	    test_bit(page, &msb->valid_cache_bitmap)) {

		dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
			lba, pba, page);

		sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
		msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
			offset, msb->page_size);
		sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
			msb->cache + msb->page_size * page,
			msb->page_size);
	} else {
		dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
			lba, pba, page);

		error = msb_read_page(msb, pba, page, NULL, sg, offset);
		if (error)
			return error;

		msb_cache_write(msb, lba, page, true, sg, offset);
	}
	return error;
}

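/*
 * Emulated CHS geometry table.
 * The exact values aren't important as long as they cover the whole card.
 */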
static const struct chs_entry chs_table[] = {
/*	 size	sec	cyl	head */
	{ 4,	16,	247,	2 },
	{ 8,	16,	495,	2 },
	{ 16,	16,	495,	4 },
	{ 32,	16,	991,	4 },
	{ 64,	16,	991,	8 },
	{ 128,	16,	991,	16 },
	{ 0 }
};

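/*
 * Load the card: reset it, read the boot blocks and card attributes,
 * then load the bad block table and scan the whole card to build the FTL.
 */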
static int msb_init_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct memstick_host *host = card->host;
	struct ms_boot_page *boot_block;
	int error = 0, i, raw_size_in_megs;

	msb->caps = 0;

	if (card->id.class >= MEMSTICK_CLASS_ROM &&
				card->id.class <= MEMSTICK_CLASS_WP)
		msb->read_only = true;

	msb->state = -1;
	error = msb_reset(msb, false);
	if (error)
		return error;

	/* Some hosts barely work in serial mode, so if the host supports
	 * 4-bit parallel mode, switch to it right away.
	 */
	if (host->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);

	msb->page_size = sizeof(struct ms_boot_page);

	/* Read the boot page */
	error = msb_read_boot_blocks(msb);
	if (error)
		return -EIO;

	boot_block = &msb->boot_page[0];

	/* Save the interesting attributes from the boot page */
	msb->block_count = boot_block->attr.number_of_blocks;
	msb->page_size = boot_block->attr.page_size;

	msb->pages_in_block = boot_block->attr.block_size * 2;
	msb->block_size = msb->page_size * msb->pages_in_block;

	if ((size_t)msb->page_size > PAGE_SIZE) {
		/* this isn't supported by linux at all, anyway */
		dbg("device page %d size isn't supported", msb->page_size);
		return -EINVAL;
	}

	msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
	if (!msb->block_buffer)
		return -ENOMEM;

	raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;

	for (i = 0; chs_table[i].size; i++) {

		if (chs_table[i].size != raw_size_in_megs)
			continue;

		msb->geometry.cylinders = chs_table[i].cyl;
		msb->geometry.heads = chs_table[i].head;
		msb->geometry.sectors = chs_table[i].sec;
		break;
	}

	if (boot_block->attr.transfer_supporting == 1)
		msb->caps |= MEMSTICK_CAP_PAR4;

	if (boot_block->attr.device_type & 0x03)
		msb->read_only = true;

	dbg("Total block count = %d", msb->block_count);
	dbg("Each block consists of %d pages", msb->pages_in_block);
	dbg("Page size = %d bytes", msb->page_size);
	dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
	dbg("Read only: %d", msb->read_only);

#if 0
	/* Now we can switch the interface */
	if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
		msb_switch_to_parallel(msb);
#endif

	error = msb_cache_init(msb);
	if (error)
		return error;

	error = msb_ftl_initialize(msb);
	if (error)
		return error;

	/* Read the bad block table */
	error = msb_read_bad_block_table(msb, 0);

	if (error && error != -ENOMEM) {
		dbg("failed to read bad block table from primary boot block, trying from backup");
		error = msb_read_bad_block_table(msb, 1);
	}

	if (error)
		return error;

	/* Scan the media to build the FTL tables */
	error = msb_ftl_scan(msb);
	if (error) {
		pr_err("Scan of media failed");
		return error;
	}

	return 0;
}

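/*
 * Service a write request. Aligned whole-block chunks bypass the cache and
 * go straight to the FTL; everything else goes page by page through the
 * write cache.
 */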
static int msb_do_write_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, size_t len, int *successfully_written)
{
	int error = 0;
	off_t offset = 0;
	*successfully_written = 0;

	while (offset < len) {
		if (page == 0 && len - offset >= msb->block_size) {

			if (msb->cache_block_lba == lba)
				msb_cache_discard(msb);

			dbg_verbose("Writing whole lba %d", lba);
			error = msb_update_block(msb, lba, sg, offset);
			if (error)
				return error;

			offset += msb->block_size;
			*successfully_written += msb->block_size;
			lba++;
			continue;
		}

		error = msb_cache_write(msb, lba, page, false, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_written += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

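/* Service a read request page by page through the cache */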
static int msb_do_read_request(struct msb_data *msb, int lba,
	int page, struct scatterlist *sg, int len, int *successfully_read)
{
	int error = 0;
	int offset = 0;
	*successfully_read = 0;

	while (offset < len) {

		error = msb_cache_read(msb, lba, page, sg, offset);
		if (error)
			return error;

		offset += msb->page_size;
		*successfully_read += msb->page_size;

		page++;
		if (page == msb->pages_in_block) {
			page = 0;
			lba++;
		}
	}
	return 0;
}

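/* The IO worker: handles deferred cache flushes and the current block request */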
static void msb_io_work(struct work_struct *work)
{
	struct msb_data *msb = container_of(work, struct msb_data, io_work);
	int page, error, len;
	sector_t lba;
	struct scatterlist *sg = msb->prealloc_sg;
	struct request *req;

	dbg_verbose("IO: work started");

	while (1) {
		spin_lock_irq(&msb->q_lock);

		if (msb->need_flush_cache) {
			msb->need_flush_cache = false;
			spin_unlock_irq(&msb->q_lock);
			msb_cache_flush(msb);
			continue;
		}

		req = msb->req;
		if (!req) {
			dbg_verbose("IO: no more requests exiting");
			spin_unlock_irq(&msb->q_lock);
			return;
		}

		spin_unlock_irq(&msb->q_lock);

		dbg_verbose("IO: processing new request");
		blk_rq_map_sg(msb->queue, req, sg);

		/* Convert the 512-byte sector address into (lba, page) */
		lba = blk_rq_pos(req);

		sector_div(lba, msb->page_size / 512);
		page = sector_div(lba, msb->pages_in_block);

		if (rq_data_dir(msb->req) == READ)
			error = msb_do_read_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);
		else
			error = msb_do_write_request(msb, lba, page, sg,
				blk_rq_bytes(req), &len);

		if (len && !blk_update_request(req, BLK_STS_OK, len)) {
			__blk_mq_end_request(req, BLK_STS_OK);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (error && msb->req) {
			blk_status_t ret = errno_to_blk_status(error);

			dbg_verbose("IO: ending one sector of the request with error");
			blk_mq_end_request(req, ret);
			spin_lock_irq(&msb->q_lock);
			msb->req = NULL;
			spin_unlock_irq(&msb->q_lock);
		}

		if (msb->req)
			dbg_verbose("IO: request still pending");
	}
}

static DEFINE_IDR(msb_disk_idr);
static DEFINE_MUTEX(msb_disk_lock);

static void msb_data_clear(struct msb_data *msb)
{
	kfree(msb->boot_page);
	bitmap_free(msb->used_blocks_bitmap);
	bitmap_free(msb->erased_blocks_bitmap);
	kfree(msb->lba_to_pba_table);
	kfree(msb->cache);
	msb->card = NULL;
}

static int msb_bd_getgeo(struct block_device *bdev,
			 struct hd_geometry *geo)
{
	struct msb_data *msb = bdev->bd_disk->private_data;
	*geo = msb->geometry;
	return 0;
}

static void msb_bd_free_disk(struct gendisk *disk)
{
	struct msb_data *msb = disk->private_data;

	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);

	kfree(msb);
}

static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct memstick_dev *card = hctx->queue->queuedata;
	struct msb_data *msb = memstick_get_drvdata(card);
	struct request *req = bd->rq;

	dbg_verbose("Submit request");

	spin_lock_irq(&msb->q_lock);

	if (msb->card_dead) {
		dbg("Refusing requests on removed card");

		WARN_ON(!msb->io_queue_stopped);

		spin_unlock_irq(&msb->q_lock);
		blk_mq_start_request(req);
		return BLK_STS_IOERR;
	}

	if (msb->req) {
		spin_unlock_irq(&msb->q_lock);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(req);
	msb->req = req;

	if (!msb->io_queue_stopped)
		queue_work(msb->io_queue, &msb->io_work);

	spin_unlock_irq(&msb->q_lock);
	return BLK_STS_OK;
}

static int msb_check_card(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);

	return (msb->card_dead == 0);
}

static void msb_stop(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Stopping all msblock IO");

	blk_mq_stop_hw_queues(msb->queue);
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->io_queue_stopped = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);

	del_timer_sync(&msb->cache_flush_timer);
	flush_workqueue(msb->io_queue);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (msb->req) {
		blk_mq_requeue_request(msb->req, false);
		msb->req = NULL;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);
}

static void msb_start(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	dbg("Resuming IO from msblock");

	msb_invalidate_reg_window(msb);

	spin_lock_irqsave(&msb->q_lock, flags);
	if (!msb->io_queue_stopped || msb->card_dead) {
		spin_unlock_irqrestore(&msb->q_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&msb->q_lock, flags);

	/* Kick a cache flush anyway, it's harmless */
	msb->need_flush_cache = true;
	msb->io_queue_stopped = false;

	blk_mq_start_hw_queues(msb->queue);

	queue_work(msb->io_queue, &msb->io_work);
}

static const struct block_device_operations msb_bdops = {
	.owner		= THIS_MODULE,
	.getgeo		= msb_bd_getgeo,
	.free_disk	= msb_bd_free_disk,
};

static const struct blk_mq_ops msb_mq_ops = {
	.queue_rq	= msb_queue_rq,
};

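/* Allocate and register the gendisk and request queue for the card */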
static int msb_init_disk(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	int rc;
	unsigned long capacity;

	mutex_lock(&msb_disk_lock);
	msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
	mutex_unlock(&msb_disk_lock);

	if (msb->disk_id < 0)
		return msb->disk_id;

	rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
				     BLK_MQ_F_SHOULD_MERGE);
	if (rc)
		goto out_release_id;

	msb->disk = blk_mq_alloc_disk(&msb->tag_set, card);
	if (IS_ERR(msb->disk)) {
		rc = PTR_ERR(msb->disk);
		goto out_free_tag_set;
	}
	msb->queue = msb->disk->queue;

	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
	blk_queue_max_segments(msb->queue, MS_BLOCK_MAX_SEGS);
	blk_queue_max_segment_size(msb->queue,
				   MS_BLOCK_MAX_PAGES * msb->page_size);
	blk_queue_logical_block_size(msb->queue, msb->page_size);

	sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
	msb->disk->fops = &msb_bdops;
	msb->disk->private_data = msb;

	capacity = msb->pages_in_block * msb->logical_block_count;
	capacity *= (msb->page_size / 512);
	set_capacity(msb->disk, capacity);
	dbg("Set total disk size to %lu sectors", capacity);

	msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
	INIT_WORK(&msb->io_work, msb_io_work);
	sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb->read_only)
		set_disk_ro(msb->disk, 1);

	msb_start(card);
	rc = device_add_disk(&card->dev, msb->disk, NULL);
	if (rc)
		goto out_cleanup_disk;
	dbg("Disk added");
	return 0;

out_cleanup_disk:
	put_disk(msb->disk);
out_free_tag_set:
	blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
	mutex_lock(&msb_disk_lock);
	idr_remove(&msb_disk_idr, msb->disk_id);
	mutex_unlock(&msb_disk_lock);
	return rc;
}

static int msb_probe(struct memstick_dev *card)
{
	struct msb_data *msb;
	int rc = 0;

	msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!msb)
		return -ENOMEM;
	memstick_set_drvdata(card, msb);
	msb->card = card;
	spin_lock_init(&msb->q_lock);

	rc = msb_init_card(card);
	if (rc)
		goto out_free;

	rc = msb_init_disk(card);
	if (!rc) {
		card->check = msb_check_card;
		card->stop = msb_stop;
		card->start = msb_start;
		return 0;
	}
out_free:
	memstick_set_drvdata(card, NULL);
	msb_data_clear(msb);
	kfree(msb);
	return rc;
}

static void msb_remove(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	unsigned long flags;

	if (!msb->io_queue_stopped)
		msb_stop(card);

	dbg("Removing the disk device");

	/* Take care of unhandled + new requests from now on */
	spin_lock_irqsave(&msb->q_lock, flags);
	msb->card_dead = true;
	spin_unlock_irqrestore(&msb->q_lock, flags);
	blk_mq_start_hw_queues(msb->queue);

	/* Remove the disk */
	del_gendisk(msb->disk);
	blk_mq_free_tag_set(&msb->tag_set);
	msb->queue = NULL;

	mutex_lock(&msb_disk_lock);
	msb_data_clear(msb);
	mutex_unlock(&msb_disk_lock);

	put_disk(msb->disk);
	memstick_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM

static int msb_suspend(struct memstick_dev *card, pm_message_t state)
{
	msb_stop(card);
	return 0;
}

static int msb_resume(struct memstick_dev *card)
{
	struct msb_data *msb = memstick_get_drvdata(card);
	struct msb_data *new_msb = NULL;
	bool card_dead = true;

#ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
	msb->card_dead = true;
	return 0;
#endif
	mutex_lock(&card->host->lock);

	new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
	if (!new_msb)
		goto out;

	new_msb->card = card;
	memstick_set_drvdata(card, new_msb);
	spin_lock_init(&new_msb->q_lock);
	sg_init_table(new_msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);

	if (msb_init_card(card))
		goto out;

	/* Check that the same card is still inserted */
	if (msb->block_size != new_msb->block_size)
		goto out;

	if (memcmp(msb->boot_page, new_msb->boot_page,
					sizeof(struct ms_boot_page)))
		goto out;

	if (msb->logical_block_count != new_msb->logical_block_count ||
		memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
						msb->logical_block_count))
		goto out;

	if (msb->block_count != new_msb->block_count ||
		!bitmap_equal(msb->used_blocks_bitmap,
					new_msb->used_blocks_bitmap,
					msb->block_count))
		goto out;

	card_dead = false;
out:
	if (card_dead)
		dbg("Card was removed/replaced during suspend");

	msb->card_dead = card_dead;
	memstick_set_drvdata(card, msb);

	if (new_msb) {
		msb_data_clear(new_msb);
		kfree(new_msb);
	}

	msb_start(card);
	mutex_unlock(&card->host->lock);
	return 0;
}
#else

#define msb_suspend NULL
#define msb_resume NULL

#endif /* CONFIG_PM */

static struct memstick_device_id msb_id_tbl[] = {
	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_FLASH},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_ROM},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_RO},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
	 MEMSTICK_CLASS_WP},

	{MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
	 MEMSTICK_CLASS_DUO},
	{}
};
MODULE_DEVICE_TABLE(memstick, msb_id_tbl);

static struct memstick_driver msb_driver = {
	.driver = {
		.name  = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = msb_id_tbl,
	.probe    = msb_probe,
	.remove   = msb_remove,
	.suspend  = msb_suspend,
	.resume   = msb_resume
};

static int __init msb_init(void)
{
	int rc = memstick_register_driver(&msb_driver);

	if (rc)
		pr_err("failed to register memstick driver (error %d)\n", rc);

	return rc;
}

static void __exit msb_exit(void)
{
	memstick_unregister_driver(&msb_driver);
	idr_destroy(&msb_disk_idr);
}

module_init(msb_init);
module_exit(msb_exit);

module_param(cache_flush_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_flush_timeout,
	"Cache flush timeout in msec (1000 default)");
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)");

module_param(verify_writes, bool, S_IRUGO);
MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky");
MODULE_DESCRIPTION("Sony MemoryStick block device driver");