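/*
 * esas2r_disc.c - device discovery routines for the ATTO ExpressSAS
 * R6xx SAS/SATA RAID controller driver (esas2r).
 */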
#include "esas2r.h"

/* Miscellaneous internal discovery routines. */
static void esas2r_disc_abort(struct esas2r_adapter *a,
                              struct esas2r_request *rq);
static bool esas2r_disc_continue(struct esas2r_adapter *a,
                                 struct esas2r_request *rq);
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
                                      struct esas2r_request *rq);

/* Internal discovery routines that process the individual states. */
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
                                       struct esas2r_request *rq);
static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
                                struct esas2r_request *rq);
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
                                   struct esas2r_request *rq);
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
                                  struct esas2r_request *rq);
static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
                                     struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq);
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
                                          struct esas2r_request *rq);
static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq);
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
                                      struct esas2r_request *rq);
static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
                                         struct esas2r_request *rq);

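/*
 * Reset the discovery state at driver init, chip reset, or power
 * management resume, and compute how long to wait for devices to
 * arrive before declaring discovery complete.
 */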
void esas2r_disc_initialize(struct esas2r_adapter *a)
{
        struct esas2r_sas_nvram *nvr = a->nvram;

        esas2r_trace_enter();

        clear_bit(AF_DISC_IN_PROG, &a->flags);
        clear_bit(AF2_DEV_SCAN, &a->flags2);
        clear_bit(AF2_DEV_CNT_OK, &a->flags2);

        a->disc_start_time = jiffies_to_msecs(jiffies);
        a->disc_wait_time = nvr->dev_wait_time * 1000;
        a->disc_wait_cnt = nvr->dev_wait_count;

        if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
                a->disc_wait_cnt = ESAS2R_MAX_TARGETS;

        /*
         * If we are doing chip reset or power management processing, always
         * wait for devices.  Use the previously discovered device count
         * since we know exactly how many devices to expect.
         */

        esas2r_hdebug("starting discovery...");

        a->general_req.interrupt_cx = NULL;

        if (test_bit(AF_CHPRST_DETECTED, &a->flags) ||
            test_bit(AF_POWER_MGT, &a->flags)) {
                if (a->prev_dev_cnt == 0) {
                        /* Don't bother waiting if there is nothing to wait
                         * for.
                         */
                        a->disc_wait_time = 0;
                } else {
                        /*
                         * Set the device wait count to what was previously
                         * found.  We don't care if the user only configured
                         * a time because we know the exact count to wait for.
                         * There is no need to honor the user's wishes to
                         * always wait the full time.
                         */
                        a->disc_wait_cnt = a->prev_dev_cnt;

                        /*
                         * Bump the minimum wait time to 15 seconds since the
                         * default is 3 (system boot or the boot driver usually
                         * buys us more time).
                         */
                        if (a->disc_wait_time < 15000)
                                a->disc_wait_time = 15000;
                }
        }

        esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
        esas2r_trace("disc wait time: %d", a->disc_wait_time);

        if (a->disc_wait_time == 0)
                esas2r_disc_check_complete(a);

        esas2r_trace_exit();
}

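/*
 * Start a queued discovery event, if any.  The memory lock serializes
 * access to the discovery context.
 */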
void esas2r_disc_start_waiting(struct esas2r_adapter *a)
{
        unsigned long flags;

        spin_lock_irqsave(&a->mem_lock, flags);

        if (a->disc_ctx.disc_evt)
                esas2r_disc_start_port(a);

        spin_unlock_irqrestore(&a->mem_lock, flags);
}

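/*
 * Polled discovery worker: service any pending interrupts, start a
 * queued discovery if one is waiting, and drive the active discovery
 * request to completion.
 */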
void esas2r_disc_check_for_work(struct esas2r_adapter *a)
{
        struct esas2r_request *rq = &a->general_req;

        /* Service any pending interrupts first. */

        esas2r_polled_interrupt(a);

        /*
         * Interrupt processing may have queued up a discovery event; go see
         * if we have one to start.  We couldn't start it in the ISR since
         * polled discovery would cause a deadlock.
         */

        esas2r_disc_start_waiting(a);

        if (rq->interrupt_cx == NULL)
                return;

        if (rq->req_stat == RS_STARTED
            && rq->timeout <= RQ_MAX_TIMEOUT) {
                /* Wait for the current discovery request to complete. */
                esas2r_wait_request(a, rq);

                if (rq->req_stat == RS_TIMEOUT) {
                        esas2r_disc_abort(a, rq);
                        esas2r_local_reset_adapter(a);
                        return;
                }
        }

        if (rq->req_stat == RS_PENDING
            || rq->req_stat == RS_STARTED)
                return;

        esas2r_disc_continue(a, rq);
}

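/*
 * Decide whether the device wait period is over.  Schedules device
 * scans while waiting and, once the wait expires, releases deferred
 * requests and reports the port change.
 */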
void esas2r_disc_check_complete(struct esas2r_adapter *a)
{
        unsigned long flags;

        esas2r_trace_enter();

        /* Check to see if we should be waiting for devices. */
        if (a->disc_wait_time) {
                u32 currtime = jiffies_to_msecs(jiffies);
                u32 time = currtime - a->disc_start_time;

                /*
                 * Wait until the device wait time is exhausted or the device
                 * wait count is satisfied.
                 */
                if (time < a->disc_wait_time
                    && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
                        || a->disc_wait_cnt == 0)) {
                        /* After three seconds of waiting, schedule a scan. */
                        if (time >= 3000
                            && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                                spin_lock_irqsave(&a->mem_lock, flags);
                                esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                                spin_unlock_irqrestore(&a->mem_lock, flags);
                        }

                        esas2r_trace_exit();
                        return;
                }

                /*
                 * We are done waiting...we think.  Adjust the wait time to
                 * consume events after the count is met.
                 */
                if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))
                        a->disc_wait_time = time + 3000;

                /* If we haven't done a full scan yet, do it now. */
                if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);
                        esas2r_trace_exit();
                        return;
                }

                /*
                 * Now, if there is still time left to consume events, continue
                 * waiting.
                 */
                if (time < a->disc_wait_time) {
                        esas2r_trace_exit();
                        return;
                }
        } else {
                if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {
                        spin_lock_irqsave(&a->mem_lock, flags);
                        esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
                        spin_unlock_irqrestore(&a->mem_lock, flags);
                }
        }

        /* We want to stop waiting for devices. */
        a->disc_wait_time = 0;

        if (test_bit(AF_DISC_POLLED, &a->flags) &&
            test_bit(AF_DISC_IN_PROG, &a->flags)) {
                /*
                 * Polled discovery is still pending so continue the active
                 * discovery until it is done.  At that point, we will stop
                 * polled discovery and transition to interrupt driven
                 * discovery.
                 */
        } else {
                /*
                 * Done waiting for devices.  Fix up any deferred requests and
                 * clear the pending flag.
                 */
                esas2r_disc_fix_curr_requests(a);
                clear_bit(AF_DISC_PENDING, &a->flags);

                /*
                 * We have deferred target state changes until now because we
                 * don't want to report any removals (due to the first arrival)
                 * until the device wait time expires.
                 */
                set_bit(AF_PORT_CHANGE, &a->flags);
        }

        esas2r_trace_exit();
}

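/*
 * Record a discovery event and start processing it immediately unless
 * a chip reset is pending or discovery is running polled.
 */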
void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
{
        struct esas2r_disc_context *dc = &a->disc_ctx;

        esas2r_trace_enter();

        esas2r_trace("disc_event: %d", disc_evt);

        /* Remember that we need to act on this event. */
        dc->disc_evt |= disc_evt;

        /*
         * Don't start discovery before or during polled discovery.  If we
         * did, we would have a deadlock if we are in the ISR already.
         */
        if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
            !test_bit(AF_DISC_POLLED, &a->flags))
                esas2r_disc_start_port(a);

        esas2r_trace_exit();
}

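/*
 * Begin processing the next queued discovery event.  Returns false if
 * discovery is already in progress, must remain polled, or there is
 * nothing left to do.
 */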
bool esas2r_disc_start_port(struct esas2r_adapter *a)
{
        struct esas2r_request *rq = &a->general_req;
        struct esas2r_disc_context *dc = &a->disc_ctx;
        bool ret;

        esas2r_trace_enter();

        if (test_bit(AF_DISC_IN_PROG, &a->flags)) {
                esas2r_trace_exit();

                return false;
        }

        /* If there is a discovery waiting, process it. */
        if (dc->disc_evt) {
                if (test_bit(AF_DISC_POLLED, &a->flags)
                    && a->disc_wait_time == 0) {
                        /*
                         * We are doing polled discovery, but we no longer want
                         * to wait for devices.  Stop polled discovery and
                         * transition to interrupt driven discovery.
                         */
                        esas2r_trace_exit();

                        return false;
                }
        } else {
                /* Discovery is complete. */

                esas2r_hdebug("disc done");

                set_bit(AF_PORT_CHANGE, &a->flags);

                esas2r_trace_exit();

                return false;
        }

        /* Handle the discovery context. */
        esas2r_trace("disc_evt: %d", dc->disc_evt);
        set_bit(AF_DISC_IN_PROG, &a->flags);
        dc->flags = 0;

        if (test_bit(AF_DISC_POLLED, &a->flags))
                dc->flags |= DCF_POLLED;

        rq->interrupt_cx = dc;
        rq->req_stat = RS_SUCCESS;

        /* Decode the event code. */
        if (dc->disc_evt & DCDE_DEV_SCAN) {
                dc->disc_evt &= ~DCDE_DEV_SCAN;

                dc->flags |= DCF_DEV_SCAN;
                dc->state = DCS_BLOCK_DEV_SCAN;
        } else if (dc->disc_evt & DCDE_DEV_CHANGE) {
                dc->disc_evt &= ~DCDE_DEV_CHANGE;

                dc->flags |= DCF_DEV_CHANGE;
                dc->state = DCS_DEV_RMV;
        }

        /* Continue interrupt driven discovery. */
        if (!test_bit(AF_DISC_POLLED, &a->flags))
                ret = esas2r_disc_continue(a, rq);
        else
                ret = true;

        esas2r_trace_exit();

        return ret;
}

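/*
 * Discovery state machine dispatcher: run states until one issues a
 * firmware request (its callback re-enters here) or discovery is done.
 */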
static bool esas2r_disc_continue(struct esas2r_adapter *a,
                                 struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;

        /* Process the current discovery state. */
        while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
                rslt = false;

                switch (dc->state) {
                case DCS_DEV_RMV:

                        rslt = esas2r_disc_dev_remove(a, rq);
                        break;

                case DCS_DEV_ADD:

                        rslt = esas2r_disc_dev_add(a, rq);
                        break;

                case DCS_BLOCK_DEV_SCAN:

                        rslt = esas2r_disc_block_dev_scan(a, rq);
                        break;

                case DCS_RAID_GRP_INFO:

                        rslt = esas2r_disc_raid_grp_info(a, rq);
                        break;

                case DCS_PART_INFO:

                        rslt = esas2r_disc_part_info(a, rq);
                        break;

                case DCS_PT_DEV_INFO:

                        rslt = esas2r_disc_passthru_dev_info(a, rq);
                        break;
                case DCS_PT_DEV_ADDR:

                        rslt = esas2r_disc_passthru_dev_addr(a, rq);
                        break;
                case DCS_DISC_DONE:

                        dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
                        break;

                default:

                        esas2r_bugon();
                        dc->state = DCS_DISC_DONE;
                        break;
                }

                if (rslt)
                        return true;
        }

        /* Discovery is done...for now. */
        rq->interrupt_cx = NULL;

        if (!test_bit(AF_DISC_PENDING, &a->flags))
                esas2r_disc_fix_curr_requests(a);

        clear_bit(AF_DISC_IN_PROG, &a->flags);

        /* Start the next discovery. */
        return esas2r_disc_start_port(a);
}

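/*
 * Issue a discovery firmware request, deferring it if a chip reset is
 * pending or the firmware is being flashed.
 */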
static bool esas2r_disc_start_request(struct esas2r_adapter *a,
                                      struct esas2r_request *rq)
{
        unsigned long flags;

        /* Set the timeout to a minimum value. */
        if (rq->timeout < ESAS2R_DEFAULT_TMO)
                rq->timeout = ESAS2R_DEFAULT_TMO;

        /*
         * Override the request type to distinguish discovery requests.  If
         * we end up deferring the request, esas2r_disc_local_start_request()
         * will be called to restart it.
         */
        rq->req_type = RT_DISC_REQ;

        spin_lock_irqsave(&a->queue_lock, flags);

        if (!test_bit(AF_CHPRST_PENDING, &a->flags) &&
            !test_bit(AF_FLASHING, &a->flags))
                esas2r_disc_local_start_request(a, rq);
        else
                list_add_tail(&rq->req_list, &a->defer_list);

        spin_unlock_irqrestore(&a->queue_lock, flags);

        return true;
}

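/*
 * Place a discovery request on the active list and hand it to the VDA
 * layer.  Called with queue_lock held.
 */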
void esas2r_disc_local_start_request(struct esas2r_adapter *a,
                                     struct esas2r_request *rq)
{
        esas2r_trace_enter();

        list_add_tail(&rq->req_list, &a->active_list);

        esas2r_start_vda_request(a, rq);

        esas2r_trace_exit();
}

static void esas2r_disc_abort(struct esas2r_adapter *a,
                              struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;

        esas2r_trace_enter();

        /* Abort the discovery by going straight to the done state. */
        dc->state = DCS_DISC_DONE;

        esas2r_trace_exit();
}

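/*
 * DCS_BLOCK_DEV_SCAN: ask the firmware to scan for block devices and
 * return the scan generation code used by the remaining states.
 */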
static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
                                       struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;

        esas2r_trace_enter();

        esas2r_rq_init_request(rq, a);

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_DEV_SCAN,
                             0,
                             0,
                             0,
                             NULL);

        rq->comp_cb = esas2r_disc_block_dev_scan_cb;

        rq->timeout = 30000;
        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SUCCESS)
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;

        dc->state = DCS_RAID_GRP_INFO;
        dc->raid_grp_ix = 0;

        esas2r_rq_destroy_request(rq, a);

        /* Continue discovery if it's interrupt driven. */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}

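/*
 * DCS_RAID_GRP_INFO: query the next RAID group.  Online or degraded
 * groups move on to partition enumeration.
 */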
static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
                                      struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vda_grp_info *grpinfo;

        esas2r_trace_enter();

        esas2r_trace("raid_grp_ix: %d", dc->raid_grp_ix);

        if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
                dc->state = DCS_DISC_DONE;

                esas2r_trace_exit();

                return false;
        }

        esas2r_rq_init_request(rq, a);

        grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

        memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_GRP_INFO,
                             dc->scan_gen,
                             0,
                             sizeof(struct atto_vda_grp_info),
                             NULL);

        grpinfo->grp_index = dc->raid_grp_ix;

        rq->comp_cb = esas2r_disc_raid_grp_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
                                         struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vda_grp_info *grpinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->raid_grp_ix = 0;
                goto done;
        }

        if (rq->req_stat == RS_SUCCESS) {
                grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;

                if (grpinfo->status != VDA_GRP_STAT_ONLINE
                    && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
                        /* The group is not usable; try the next one. */
                        dc->raid_grp_ix++;
                } else {
                        memcpy(&dc->raid_grp_name[0],
                               &grpinfo->grp_name[0],
                               sizeof(grpinfo->grp_name));

                        dc->interleave = le32_to_cpu(grpinfo->interleave);
                        dc->block_size = le32_to_cpu(grpinfo->block_size);

                        dc->state = DCS_PART_INFO;
                        dc->part_num = 0;
                }
        } else {
                if (rq->req_stat != RS_GRP_INVALID) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for RAID group info failed - "
                                   "returned with %x",
                                   rq->req_stat);
                }

                dc->dev_ix = 0;
                dc->state = DCS_PT_DEV_INFO;
        }

done:

        esas2r_rq_destroy_request(rq, a);

        /* Continue discovery if it's interrupt driven. */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}

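/*
 * DCS_PART_INFO: enumerate the partitions of the current RAID group
 * and add each one to the target database.
 */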
static bool esas2r_disc_part_info(struct esas2r_adapter *a,
                                  struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vdapart_info *partinfo;

        esas2r_trace_enter();

        esas2r_trace("part_num: %d", dc->part_num);

        if (dc->part_num >= VDA_MAX_PARTITIONS) {
                dc->state = DCS_RAID_GRP_INFO;
                dc->raid_grp_ix++;

                esas2r_trace_exit();

                return false;
        }

        esas2r_rq_init_request(rq, a);

        partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

        memset(partinfo, 0, sizeof(struct atto_vdapart_info));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_PART_INFO,
                             dc->scan_gen,
                             0,
                             sizeof(struct atto_vdapart_info),
                             NULL);

        partinfo->part_no = dc->part_num;

        memcpy(&partinfo->grp_name[0],
               &dc->raid_grp_name[0],
               sizeof(partinfo->grp_name));

        rq->comp_cb = esas2r_disc_part_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
                                     struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vdapart_info *partinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->raid_grp_ix = 0;
                dc->state = DCS_RAID_GRP_INFO;
        } else if (rq->req_stat == RS_SUCCESS) {
                partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;

                dc->part_num = partinfo->part_no;

                dc->curr_virt_id = le16_to_cpu(partinfo->target_id);

                esas2r_targ_db_add_raid(a, dc);

                dc->part_num++;
        } else {
                if (rq->req_stat != RS_PART_LAST) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for RAID group partition info "
                                   "failed - status:%d", rq->req_stat);
                }

                dc->state = DCS_RAID_GRP_INFO;
                dc->raid_grp_ix++;
        }

        esas2r_rq_destroy_request(rq, a);

        /* Continue discovery if it's interrupt driven. */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}

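/*
 * DCS_PT_DEV_INFO: query the next pass through device.  Devices that
 * report a physical target ID proceed to address lookup.
 */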
static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_vda_devinfo *devinfo;

        esas2r_trace_enter();

        esas2r_trace("dev_ix: %d", dc->dev_ix);

        esas2r_rq_init_request(rq, a);

        devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

        memset(devinfo, 0, sizeof(struct atto_vda_devinfo));

        esas2r_build_mgt_req(a,
                             rq,
                             VDAMGT_DEV_PT_INFO,
                             dc->scan_gen,
                             dc->dev_ix,
                             sizeof(struct atto_vda_devinfo),
                             NULL);

        rq->comp_cb = esas2r_disc_passthru_dev_info_cb;

        rq->interrupt_cx = dc;

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        unsigned long flags;
        struct atto_vda_devinfo *devinfo;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        if (rq->req_stat == RS_SCAN_GEN) {
                dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
                dc->dev_ix = 0;
                dc->state = DCS_PT_DEV_INFO;
        } else if (rq->req_stat == RS_SUCCESS) {
                devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;

                dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);

                dc->curr_virt_id = le16_to_cpu(devinfo->target_id);

                if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
                        dc->curr_phys_id =
                                le16_to_cpu(devinfo->phys_target_id);
                        dc->dev_addr_type = ATTO_GDA_AT_PORT;
                        dc->state = DCS_PT_DEV_ADDR;

                        esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
                        esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
                } else {
                        /* No physical ID; skip to the next device. */
                        dc->dev_ix++;
                }
        } else {
                if (rq->req_stat != RS_DEV_INVALID) {
                        esas2r_log(ESAS2R_LOG_WARN,
                                   "A request for device information failed - "
                                   "status:%d", rq->req_stat);
                }

                dc->state = DCS_DISC_DONE;
        }

        esas2r_rq_destroy_request(rq, a);

        /* Continue discovery if it's interrupt driven. */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}

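/*
 * DCS_PT_DEV_ADDR: issue a tunneled GET_DEV_ADDR IOCTL to fetch the
 * SAS address and then the unique identifier of the current device.
 */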
static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
                                          struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        bool rslt;
        struct atto_ioctl *hi;
        struct esas2r_sg_context sgc;

        esas2r_trace_enter();

        esas2r_rq_init_request(rq, a);

        /* Format the request. */

        sgc.cur_offset = NULL;
        sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
        sgc.length = offsetof(struct atto_ioctl, data)
                     + sizeof(struct atto_hba_get_device_address);

        esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

        esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);

        if (!esas2r_build_sg_list(a, rq, &sgc)) {
                esas2r_rq_destroy_request(rq, a);

                esas2r_trace_exit();

                return false;
        }

        rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;

        rq->interrupt_cx = dc;

        /* Format the IOCTL data. */

        hi = (struct atto_ioctl *)a->disc_buffer;

        memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);

        hi->version = ATTO_VER_GET_DEV_ADDR0;
        hi->function = ATTO_FUNC_GET_DEV_ADDR;
        hi->flags = HBAF_TUNNEL;

        hi->data.get_dev_addr.target_id = cpu_to_le32(dc->curr_phys_id);
        hi->data.get_dev_addr.addr_type = dc->dev_addr_type;

        /* Start it up. */

        rslt = esas2r_disc_start_request(a, rq);

        esas2r_trace_exit();

        return rslt;
}

static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
                                             struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t = NULL;
        unsigned long flags;
        struct atto_ioctl *hi;
        u16 addrlen;

        esas2r_trace_enter();

        spin_lock_irqsave(&a->mem_lock, flags);

        hi = (struct atto_ioctl *)a->disc_buffer;

        if (rq->req_stat == RS_SUCCESS
            && hi->status == ATTO_STS_SUCCESS) {
                addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);

                if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
                        if (addrlen == sizeof(u64))
                                memcpy(&dc->sas_addr,
                                       &hi->data.get_dev_addr.address[0],
                                       addrlen);
                        else
                                memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));

                        /* Get the unique identifier next. */
                        dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;

                        goto next_dev_addr;
                } else {
                        /* Add the pass through target. */
                        if (HIBYTE(addrlen) == 0) {
                                t = esas2r_targ_db_add_pthru(a,
                                                             dc,
                                                             &hi->data.
                                                             get_dev_addr.
                                                             address[0],
                                                             (u8)hi->data.
                                                             get_dev_addr.
                                                             addr_len);

                                if (t)
                                        memcpy(&t->sas_addr, &dc->sas_addr,
                                               sizeof(t->sas_addr));
                        } else {
                                /* Getting the back end data failed. */

                                esas2r_log(ESAS2R_LOG_WARN,
                                           "an error occurred retrieving the "
                                           "back end data (%s:%d)",
                                           __func__,
                                           __LINE__);
                        }
                }
        } else {
                /* Getting the back end data failed. */

                esas2r_log(ESAS2R_LOG_WARN,
                           "an error occurred retrieving the back end data - "
                           "rq->req_stat:%d hi->status:%d",
                           rq->req_stat, hi->status);
        }

        /* Proceed to the next device. */

        if (dc->flags & DCF_DEV_SCAN) {
                dc->dev_ix++;
                dc->state = DCS_PT_DEV_INFO;
        } else if (dc->flags & DCF_DEV_CHANGE) {
                dc->curr_targ++;
                dc->state = DCS_DEV_ADD;
        } else {
                esas2r_bugon();
        }

next_dev_addr:
        esas2r_rq_destroy_request(rq, a);

        /* Continue discovery if it's interrupt driven. */
        if (!(dc->flags & DCF_POLLED))
                esas2r_disc_continue(a, rq);

        spin_unlock_irqrestore(&a->mem_lock, flags);

        esas2r_trace_exit();
}

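/* SG list callback: all IOCTL data lives in the uncached discovery buffer. */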
static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
{
        struct esas2r_adapter *a = sgc->adapter;

        if (sgc->length > ESAS2R_DISC_BUF_LEN)
                esas2r_bugon();

        *addr = a->uncached_phys
                + (u64)((u8 *)a->disc_buffer - a->uncached);

        return sgc->length;
}

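/*
 * DCS_DEV_RMV: remove targets that have gone away, then restart the
 * walk of the target database to process arrivals.
 */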
static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
                                   struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t;
        struct esas2r_target *t2;

        esas2r_trace_enter();

        /* Process removals. */

        for (t = a->targetdb; t < a->targetdb_end; t++) {
                if (t->new_target_state != TS_NOT_PRESENT)
                        continue;

                t->new_target_state = TS_INVALID;

                /* Remove the right target! */

                t2 = esas2r_targ_db_find_by_virt_id(a,
                                                    esas2r_targ_get_id(t, a));

                if (t2)
                        esas2r_targ_db_remove(a, t2);
        }

        /* Removals complete.  Process arrivals. */

        dc->state = DCS_DEV_ADD;
        dc->curr_targ = a->targetdb;

        esas2r_trace_exit();

        return false;
}

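/*
 * DCS_DEV_ADD: process one arrived target per call, using the saved
 * LU event to decide between RAID and pass through handling.
 */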
static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
                                struct esas2r_request *rq)
{
        struct esas2r_disc_context *dc =
                (struct esas2r_disc_context *)rq->interrupt_cx;
        struct esas2r_target *t = dc->curr_targ;

        if (t >= a->targetdb_end) {
                /* Done processing state changes. */

                dc->state = DCS_DISC_DONE;
        } else if (t->new_target_state == TS_PRESENT) {
                struct atto_vda_ae_lu *luevt = &t->lu_event;

                esas2r_trace_enter();

                /* Clear this now in case more events come in. */

                t->new_target_state = TS_INVALID;

                /* Setup the discovery context for adding this device. */

                dc->curr_virt_id = esas2r_targ_get_id(t, a);

                if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
                     + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
                    && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
                        dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
                        dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
                } else {
                        dc->block_size = 0;
                        dc->interleave = 0;
                }

                /* Determine the device type being added. */

                if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
                        if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
                                dc->state = DCS_PT_DEV_ADDR;
                                dc->dev_addr_type = ATTO_GDA_AT_PORT;
                                dc->curr_phys_id = luevt->wphys_target_id;
                        } else {
                                esas2r_log(ESAS2R_LOG_WARN,
                                           "luevt->dwevent does not have the "
                                           "VDAAE_LU_PHYS_ID bit set (%s:%d)",
                                           __func__, __LINE__);
                        }
                } else {
                        dc->raid_grp_name[0] = 0;

                        esas2r_targ_db_add_raid(a, dc);
                }

                esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
                esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
                esas2r_trace("dwevent: %d", luevt->dwevent);

                esas2r_trace_exit();
        }

        if (dc->state == DCS_DEV_ADD) {
                /* Go to the next device. */

                dc->curr_targ++;
        }

        return false;
}

/*
 * When discovery is done, walk the defer queue and fix up any requests
 * that were queued while it ran: complete requests for targets that are
 * no longer present with RS_SEL, and refresh the VDA target ID for
 * those that are, since passthrough target IDs are not persistent.
 */
static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
{
        unsigned long flags;
        struct esas2r_target *t;
        struct esas2r_request *rq;
        struct list_head *element;

        /* Update virt_targ_id in any outstanding esas2r_requests. */

        spin_lock_irqsave(&a->queue_lock, flags);

        list_for_each(element, &a->defer_list) {
                rq = list_entry(element, struct esas2r_request, req_list);
                if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
                        t = a->targetdb + rq->target_id;

                        if (t->target_state == TS_PRESENT)
                                rq->vrq->scsi.target_id = cpu_to_le16(
                                        t->virt_targ_id);
                        else
                                rq->req_stat = RS_SEL;
                }
        }

        spin_unlock_irqrestore(&a->queue_lock, flags);
}