0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045 #include "esas2r.h"
0046
0047
0048 static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
0049 static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
0050 static void esas2r_process_bus_reset(struct esas2r_adapter *a);
0051
0052
0053
0054
0055
/*
 * Poll the adapter for interrupt conditions and service them inline.
 * Used when the driver must make forward progress without relying on the
 * interrupt/tasklet path.  Chip interrupts are disabled for the duration
 * of the poll and re-enabled before returning.
 */
void esas2r_polled_interrupt(struct esas2r_adapter *a)
{
	u32 intstat;
	u32 doorbell;

	esas2r_disable_chip_interrupts(a);

	intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);

	if (intstat & MU_INTSTAT_POST_OUT) {
		/* Acknowledge the outbound-list interrupt (and flush the
		 * write) before draining the responses. */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);

		esas2r_get_outbound_responses(a);
	}

	if (intstat & MU_INTSTAT_DRBL) {
		/* Service any outstanding doorbell bits. */
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	esas2r_enable_chip_interrupts(a);

	/* Run deferred work only if no caller has disabled it. */
	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
0086
0087
0088
0089
0090
0091
0092 irqreturn_t esas2r_interrupt(int irq, void *dev_id)
0093 {
0094 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
0095
0096 if (!esas2r_adapter_interrupt_pending(a))
0097 return IRQ_NONE;
0098
0099 set_bit(AF2_INT_PENDING, &a->flags2);
0100 esas2r_schedule_tasklet(a);
0101
0102 return IRQ_HANDLED;
0103 }
0104
/*
 * Bottom-half (tasklet-context) interrupt service for legacy interrupts.
 * Uses the cached a->int_stat rather than re-reading MU_INT_STATUS_OUT;
 * presumably int_stat was latched earlier by
 * esas2r_adapter_interrupt_pending() — TODO confirm against that helper.
 */
void esas2r_adapter_interrupt(struct esas2r_adapter *a)
{
	u32 doorbell;

	if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
		/* Ack the outbound-list interrupt, flush, then drain. */
		esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
					    MU_OLIS_INT);
		esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
		esas2r_get_outbound_responses(a);
	}

	if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell != 0)
			esas2r_doorbell_interrupt(a, doorbell);
	}

	/* Restore the full interrupt mask before re-enabling. */
	a->int_mask = ESAS2R_INT_STS_MASK;

	esas2r_enable_chip_interrupts(a);

	if (likely(atomic_read(&a->disable_cnt) == 0))
		esas2r_do_deferred_processes(a);
}
0130
0131 irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
0132 {
0133 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
0134 u32 intstat;
0135 u32 doorbell;
0136
0137 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
0138
0139 if (likely(intstat & MU_INTSTAT_POST_OUT)) {
0140
0141
0142 esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
0143 MU_OLIS_INT);
0144 esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
0145
0146 esas2r_get_outbound_responses(a);
0147 }
0148
0149 if (unlikely(intstat & MU_INTSTAT_DRBL)) {
0150 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
0151 if (doorbell != 0)
0152 esas2r_doorbell_interrupt(a, doorbell);
0153 }
0154
0155
0156
0157
0158
0159 esas2r_disable_chip_interrupts(a);
0160 esas2r_enable_chip_interrupts(a);
0161
0162 if (likely(atomic_read(&a->disable_cnt) == 0))
0163 esas2r_do_deferred_processes(a);
0164
0165 esas2r_do_tasklet_tasks(a);
0166
0167 return 1;
0168 }
0169
0170
0171
0172 static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
0173 struct esas2r_request *rq,
0174 struct atto_vda_ob_rsp *rsp)
0175 {
0176
0177
0178
0179
0180
0181 if (unlikely(rq->req_stat != RS_SUCCESS)) {
0182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
0183
0184 if (rq->req_stat == RS_ABORTED) {
0185 if (rq->timeout > RQ_MAX_TIMEOUT)
0186 rq->req_stat = RS_TIMEOUT;
0187 } else if (rq->req_stat == RS_SCSI_ERROR) {
0188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
0189
0190 esas2r_trace("scsistatus: %x", scsistatus);
0191
0192
0193 if (scsistatus == SAM_STAT_GOOD || scsistatus ==
0194 SAM_STAT_CONDITION_MET || scsistatus ==
0195 SAM_STAT_INTERMEDIATE || scsistatus ==
0196 SAM_STAT_INTERMEDIATE_CONDITION_MET) {
0197 rq->req_stat = RS_SUCCESS;
0198 rq->func_rsp.scsi_rsp.scsi_stat =
0199 SAM_STAT_GOOD;
0200 }
0201 }
0202 }
0203 }
0204
/*
 * Drain the adapter's outbound response ring.  Walks from the driver's
 * last read pointer up to the firmware's write pointer (mirrored in
 * *a->outbound_copy), matches each response to its request via the
 * handle, and queues completed requests on a local list that is drained
 * after the queue lock is dropped.
 */
static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
{
	struct atto_vda_ob_rsp *rsp;
	u32 rspput_ptr;
	u32 rspget_ptr;
	struct esas2r_request *rq;
	u32 handle;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Firmware's write pointer (masked) and our last read position. */
	rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
	rspget_ptr = a->last_read;

	esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);

	/* Nothing new posted. */
	if (unlikely(rspget_ptr == rspput_ptr)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_trace_exit();
		return;
	}

	/* A write pointer beyond the list means the firmware handed us
	 * garbage; reset the adapter rather than walk off the ring. */
	if (unlikely(rspput_ptr >= a->list_size)) {
		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_bugon();
		esas2r_local_reset_adapter(a);
		esas2r_trace_exit();
		return;
	}

	do {
		/* Advance with wraparound. */
		rspget_ptr++;

		if (rspget_ptr >= a->list_size)
			rspget_ptr = 0;

		rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
		      + rspget_ptr;

		handle = rsp->handle;

		/* Validate the handle's low word against the valid request
		 * index range before using it as a table index. */
		if (unlikely(LOWORD(handle) == 0
			     || LOWORD(handle) > num_requests +
			     num_ae_requests + 1)) {
			esas2r_bugon();
			continue;
		}

		/* Look up the request and cross-check the full handle. */
		rq = a->req_table[LOWORD(handle)];

		if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
			esas2r_bugon();
			continue;
		}

		list_del(&rq->req_list);

		/* Record the firmware's completion status. */
		rq->req_stat = rsp->req_stat;

		esas2r_trace("handle: %x", handle);
		esas2r_trace("rq: %p", rq);
		esas2r_trace("req_status: %x", rq->req_stat);

		if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
			esas2r_handle_outbound_rsp_err(a, rq, rsp);
		} else {
			/* Non-SCSI functions: just capture the function
			 * response as-is. */
			memcpy(&rq->func_rsp, &rsp->func_rsp,
			       sizeof(rsp->func_rsp));
		}

		/* Defer completion callbacks until the lock is released. */
		list_add_tail(&rq->comp_list, &comp_list);

	} while (rspget_ptr != rspput_ptr);

	a->last_read = rspget_ptr;
	spin_unlock_irqrestore(&a->queue_lock, flags);

	esas2r_comp_list_drain(a, &comp_list);
	esas2r_trace_exit();
}
0300
0301
0302
0303
0304
0305
/*
 * Process the deferred request list.  Depending on adapter state, this
 * either completes dead requests only, also starts discovery requests, or
 * starts everything:
 *
 *   startreqs == 0: chip reset pending or flash in progress — complete
 *                   non-pending requests but start nothing.
 *   startreqs == 1: discovery pending — additionally start discovery
 *                   (RT_DISC_REQ) requests.
 *   startreqs == 2: normal operation — start all deferred requests.
 */
void esas2r_do_deferred_processes(struct esas2r_adapter *a)
{
	int startreqs = 2;
	struct esas2r_request *rq;
	unsigned long flags;

	if (test_bit(AF_CHPRST_PENDING, &a->flags) ||
	    test_bit(AF_FLASHING, &a->flags))
		startreqs = 0;
	else if (test_bit(AF_DISC_PENDING, &a->flags))
		startreqs = 1;

	/* Guard against re-entering deferred processing from callbacks. */
	atomic_inc(&a->disable_cnt);

	/* If the tasklet still has work queued, let it run first and do
	 * not start any requests now. */
	if (esas2r_is_tasklet_pending(a)) {
		esas2r_schedule_tasklet(a);

		startreqs = 0;
	}

	if (startreqs && !list_empty(&a->defer_list)) {
		LIST_HEAD(comp_list);
		struct list_head *element, *next;

		spin_lock_irqsave(&a->queue_lock, flags);

		list_for_each_safe(element, next, &a->defer_list) {
			rq = list_entry(element, struct esas2r_request,
					req_list);

			if (rq->req_stat != RS_PENDING) {
				/* Already resolved (e.g. aborted): move to
				 * the completion list. */
				list_del(element);
				list_add_tail(&rq->comp_list, &comp_list);
			}
			/* Discovery requests are started even when only
			 * discovery is allowed (startreqs >= 1). */
			else if (rq->req_type == RT_DISC_REQ) {
				list_del(element);
				esas2r_disc_local_start_request(a, rq);
			} else if (startreqs == 2) {
				list_del(element);
				esas2r_local_start_request(a, rq);

				/* A request may have initiated a flash
				 * commit; if so, stop starting requests. */
				if (test_bit(AF_FLASHING, &a->flags))
					break;
			}
		}

		spin_unlock_irqrestore(&a->queue_lock, flags);
		esas2r_comp_list_drain(a, &comp_list);
	}

	atomic_dec(&a->disable_cnt);
}
0382
0383
0384
0385
0386
0387
/*
 * Handle the driver-side consequences of an adapter (chip) reset: cancel
 * any in-flight discovery, reset the communication list pointers, abort
 * every started request on the deferred list, and finish with bus-reset
 * processing.
 */
void esas2r_process_adapter_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = &a->general_req;
	unsigned long flags;
	struct esas2r_disc_context *dc;

	LIST_HEAD(comp_list);
	struct list_head *element;

	esas2r_trace_enter();

	spin_lock_irqsave(&a->queue_lock, flags);

	/* If discovery was in progress on the general request, clear its
	 * pending event and the in-progress flag. */
	if (rq->interrupt_cx) {
		dc = (struct esas2r_disc_context *)rq->interrupt_cx;

		dc->disc_evt = 0;

		clear_bit(AF_DISC_IN_PROG, &a->flags);
	}

	/* Detach the general request from any callbacks and give it a
	 * no-op completion so a late completion is harmless. */
	rq->interrupt_cx = NULL;
	rq->interrupt_cb = NULL;

	rq->comp_cb = esas2r_dummy_complete;

	/* Reset the communication list pointers to the last slot and note
	 * the toggle so the next pass starts cleanly. */
	*a->outbound_copy =
		a->last_write =
			a->last_read = a->list_size - 1;

	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);

	/* Abort every request that had already been started. */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);

		if (rq->req_stat == RS_STARTED)
			if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
				list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);
	esas2r_comp_list_drain(a, &comp_list);
	esas2r_process_bus_reset(a);
	esas2r_trace_exit();
}
0445
/*
 * Handle a detected bus reset: abort everything on the deferred list,
 * complete the aborted requests outside the lock, then kick deferred
 * processing and clear the OS-reset flag.
 */
static void esas2r_process_bus_reset(struct esas2r_adapter *a)
{
	struct esas2r_request *rq;
	struct list_head *element;
	unsigned long flags;

	LIST_HEAD(comp_list);

	esas2r_trace_enter();

	esas2r_hdebug("reset detected");

	spin_lock_irqsave(&a->queue_lock, flags);

	/* Abort all deferred requests; collect those ready to complete. */
	list_for_each(element, &a->defer_list) {
		rq = list_entry(element, struct esas2r_request, req_list);
		if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
			list_add_tail(&rq->comp_list, &comp_list);
	}

	spin_unlock_irqrestore(&a->queue_lock, flags);

	/* Completion callbacks run without the queue lock held. */
	esas2r_comp_list_drain(a, &comp_list);

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);

	clear_bit(AF_OS_RESET, &a->flags);

	esas2r_trace_exit();
}
0478
/*
 * Perform a chip reset from tasklet context.  If the adapter is gone or
 * has been resetting too often (chip_uptime exhausted), permanently
 * disable it instead of resetting again.
 */
static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
{
	/* We are handling the reset now; clear the request/detect flags. */
	clear_bit(AF_CHPRST_NEEDED, &a->flags);
	clear_bit(AF_BUSRST_NEEDED, &a->flags);
	clear_bit(AF_BUSRST_DETECTED, &a->flags);
	clear_bit(AF_BUSRST_PENDING, &a->flags);

	if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
					      ESAS2R_CHP_UPTIME_MAX)) {
		esas2r_hdebug("*** adapter disabled ***");

		/* Adapter missing or failing repeatedly: degrade and
		 * disable it, and fail everything outstanding. */
		set_bit(AF_DEGRADED_MODE, &a->flags);
		set_bit(AF_DISABLED, &a->flags);
		clear_bit(AF_CHPRST_PENDING, &a->flags);
		clear_bit(AF_DISC_PENDING, &a->flags);

		esas2r_disable_chip_interrupts(a);
		a->int_mask = 0;
		esas2r_process_adapter_reset(a);

		esas2r_log(ESAS2R_LOG_CRIT,
			   "Adapter disabled because of hardware failure");
	} else {
		bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags);

		if (!alrdyrst)
			/* First pass through the reset: quiesce interrupts.
			 * Note this if covers only the next statement. */
			esas2r_disable_chip_interrupts(a);

		if ((test_bit(AF_POWER_MGT, &a->flags)) &&
		    !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {
			/* Intentionally empty: on a power-management resume
			 * (not first init, not already resetting) the chip
			 * is not reset here — presumably it was already
			 * reset by the power transition; TODO confirm. */
		} else {
			esas2r_hdebug("*** resetting chip ***");
			esas2r_reset_chip(a);
		}

		/* Charge this reset against the uptime budget and record
		 * when initialization started. */
		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
		a->chip_init_time = jiffies_to_msecs(jiffies);
		if (!test_bit(AF_POWER_MGT, &a->flags)) {
			esas2r_process_adapter_reset(a);

			if (!alrdyrst) {
				/* Remember how many devices we had so the
				 * target DB can be rebuilt after reset. */
				a->prev_dev_cnt =
					esas2r_targ_db_get_tgt_cnt(a);
				esas2r_targ_db_remove_all(a, false);
			}
		}

		a->int_mask = 0;
	}
}
0560
/*
 * Re-initialize the adapter after a chip reset has been detected.  Loops
 * because the chip may reset again while we are bringing it back up
 * (AF_CHPRST_DETECTED can be re-set during re-init).
 */
static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
{
	while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {
		/* Quiesce interrupts unless we are degraded or in a
		 * power-management transition. */
		if (!test_bit(AF_DEGRADED_MODE, &a->flags) &&
		    !test_bit(AF_POWER_MGT, &a->flags))
			esas2r_disable_chip_interrupts(a);

		/* Bring the hardware back up. */
		esas2r_check_adapter(a);
		esas2r_init_adapter_hw(a, 0);

		/* If another reset was requested during re-init, give up
		 * on this pass. */
		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			break;

		if (test_bit(AF_POWER_MGT, &a->flags)) {
			if (test_bit(AF_FIRST_INIT, &a->flags)) {
				/* Unexpected: firmware reset during normal
				 * power-up; just log it. */
				esas2r_log(ESAS2R_LOG_CRIT,
					   "The firmware was reset during a normal power-up sequence");
			} else {
				/* Power-management resume complete; notify
				 * upper layers with a power-mgmt AE. */
				clear_bit(AF_POWER_MGT, &a->flags);
				esas2r_send_reset_ae(a, true);
			}
		} else {
			if (test_bit(AF_FIRST_INIT, &a->flags)) {
				/* During first init there is nothing to
				 * notify. */
			} else {
				/* Chip was online: send a reset AE. */
				esas2r_send_reset_ae(a, false);
			}

			esas2r_log(ESAS2R_LOG_CRIT,
				   "Recovering from a chip reset while the chip was online");
		}

		clear_bit(AF_CHPRST_STARTED, &a->flags);
		esas2r_enable_chip_interrupts(a);

		/* Clear last: enabling interrupts may itself trigger
		 * another reset detection, which re-sets this flag and
		 * keeps us in the loop. */
		clear_bit(AF_CHPRST_DETECTED, &a->flags);
	}
}
0614
0615
0616
/*
 * Main tasklet-context work dispatcher: handles chip resets, bus resets,
 * port changes, and finally deferred request processing, in that order.
 */
void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
{
	/* Chip reset handling first; a needed reset is initiated before
	 * the detected-reset recovery runs. */
	if (test_bit(AF_CHPRST_NEEDED, &a->flags) ||
	    test_bit(AF_CHPRST_DETECTED, &a->flags)) {
		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
			esas2r_chip_rst_needed_during_tasklet(a);

		esas2r_handle_chip_rst_during_tasklet(a);
	}

	if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {
		esas2r_hdebug("hard resetting bus");

		clear_bit(AF_BUSRST_NEEDED, &a->flags);

		/* While flashing we must not touch the doorbell; mark the
		 * reset as detected so it is processed below instead. */
		if (test_bit(AF_FLASHING, &a->flags))
			set_bit(AF_BUSRST_DETECTED, &a->flags);
		else
			esas2r_write_register_dword(a, MU_DOORBELL_IN,
						    DRBL_RESET_BUS);
	}

	if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {
		esas2r_process_bus_reset(a);

		esas2r_log_dev(ESAS2R_LOG_WARN,
			       &(a->host->shost_gendev),
			       "scsi_report_bus_reset() called");

		/* Tell the SCSI midlayer the bus was reset. */
		scsi_report_bus_reset(a->host, 0);

		clear_bit(AF_BUSRST_DETECTED, &a->flags);
		clear_bit(AF_BUSRST_PENDING, &a->flags);

		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
	}

	if (test_bit(AF_PORT_CHANGE, &a->flags)) {
		clear_bit(AF_PORT_CHANGE, &a->flags);

		esas2r_targ_db_report_changes(a);
	}

	if (atomic_read(&a->disable_cnt) == 0)
		esas2r_do_deferred_processes(a);
}
0664
/*
 * Service doorbell interrupt bits.  The doorbell is acknowledged first,
 * then each set bit is processed.  Tracing is suppressed for forced
 * interrupts (heartbeat) to avoid noise.
 */
static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
{
	if (!(doorbell & DRBL_FORCE_INT)) {
		esas2r_trace_enter();
		esas2r_trace("doorbell: %x", doorbell);
	}

	/* Write back the same bits to acknowledge the doorbell before
	 * acting on it. */
	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);

	if (doorbell & DRBL_RESET_BUS)
		set_bit(AF_BUSRST_DETECTED, &a->flags);

	/* A forced interrupt is the heartbeat reply. */
	if (doorbell & DRBL_FORCE_INT)
		clear_bit(AF_HEARTBEAT, &a->flags);

	if (doorbell & DRBL_PANIC_REASON_MASK) {
		esas2r_hdebug("*** Firmware Panic ***");
		esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
	}

	if (doorbell & DRBL_FW_RESET) {
		/* Firmware reset implies a coredump may be available. */
		set_bit(AF2_COREDUMP_AVAIL, &a->flags2);
		esas2r_local_reset_adapter(a);
	}

	if (!(doorbell & DRBL_FORCE_INT)) {
		esas2r_trace_exit();
	}
}
0695
0696 void esas2r_force_interrupt(struct esas2r_adapter *a)
0697 {
0698 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
0699 DRBL_DRV_VER);
0700 }
0701
0702
0703 static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
0704 u16 target, u32 length)
0705 {
0706 struct esas2r_target *t = a->targetdb + target;
0707 u32 cplen = length;
0708 unsigned long flags;
0709
0710 if (cplen > sizeof(t->lu_event))
0711 cplen = sizeof(t->lu_event);
0712
0713 esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
0714 esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
0715
0716 spin_lock_irqsave(&a->mem_lock, flags);
0717
0718 t->new_target_state = TS_INVALID;
0719
0720 if (ae->lu.dwevent & VDAAE_LU_LOST) {
0721 t->new_target_state = TS_NOT_PRESENT;
0722 } else {
0723 switch (ae->lu.bystate) {
0724 case VDAAE_LU_NOT_PRESENT:
0725 case VDAAE_LU_OFFLINE:
0726 case VDAAE_LU_DELETED:
0727 case VDAAE_LU_FACTORY_DISABLED:
0728 t->new_target_state = TS_NOT_PRESENT;
0729 break;
0730
0731 case VDAAE_LU_ONLINE:
0732 case VDAAE_LU_DEGRADED:
0733 t->new_target_state = TS_PRESENT;
0734 break;
0735 }
0736 }
0737
0738 if (t->new_target_state != TS_INVALID) {
0739 memcpy(&t->lu_event, &ae->lu, cplen);
0740
0741 esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
0742 }
0743
0744 spin_unlock_irqrestore(&a->mem_lock, flags);
0745 }
0746
0747
0748
/*
 * Completion handler for an async-event (AE) request.  The response data
 * is a packed sequence of variable-length AE records; walk them, validate
 * each length, dispatch per event type, then re-arm the AE request.
 */
void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{
	union atto_vda_ae *ae =
		(union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
	u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
	union atto_vda_ae *last =
		(union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
				      + length);

	esas2r_trace_enter();
	esas2r_trace("length: %d", length);

	/* Reject a total length that is zero, unaligned, or larger than
	 * the response buffer; setting last = ae skips the walk. */
	if (length > sizeof(struct atto_vda_ae_data)
	    || (length & 3) != 0
	    || length == 0) {
		esas2r_log(ESAS2R_LOG_WARN,
			   "The AE request response length (%p) is too long: %d",
			   rq, length);

		esas2r_hdebug("aereq->length (0x%x) too long", length);
		esas2r_bugon();

		last = ae;
	}

	while (ae < last) {
		u16 target;

		esas2r_trace("ae: %p", ae);
		esas2r_trace("ae->hdr: %p", &(ae->hdr));

		length = ae->hdr.bylength;

		/* Each record's length must fit in the remaining buffer,
		 * be 4-byte aligned, and be non-zero. */
		if (length > (u32)((u8 *)last - (u8 *)ae)
		    || (length & 3) != 0
		    || length == 0) {
			esas2r_log(ESAS2R_LOG_CRIT,
				   "the async event length is invalid (%p): %d",
				   ae, length);

			esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
			esas2r_bugon();

			break;
		}

		/* Fix the record's endianness in place. */
		esas2r_nuxi_ae_data(ae);

		/* Forward the raw event to the firmware-event queue. */
		esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
				      sizeof(union atto_vda_ae));

		switch (ae->hdr.bytype) {
		case VDAAE_HDR_TYPE_RAID:

			if (ae->raid.dwflags & (VDAAE_GROUP_STATE
						| VDAAE_RBLD_STATE
						| VDAAE_MEMBER_CHG
						| VDAAE_PART_CHG)) {
				esas2r_log(ESAS2R_LOG_INFO,
					   "RAID event received - name:%s rebuild_state:%d group_state:%d",
					   ae->raid.acname,
					   ae->raid.byrebuild_state,
					   ae->raid.bygroup_state);
			}

			break;

		case VDAAE_HDR_TYPE_LU:
			esas2r_log(ESAS2R_LOG_INFO,
				   "LUN event received: event:%d target_id:%d LUN:%d state:%d",
				   ae->lu.dwevent,
				   ae->lu.id.tgtlun.wtarget_id,
				   ae->lu.id.tgtlun.bylun,
				   ae->lu.bystate);

			target = ae->lu.id.tgtlun.wtarget_id;

			/* Only act on targets within our table. */
			if (target < ESAS2R_MAX_TARGETS)
				esas2r_lun_event(a, ae, target, length);

			break;

		case VDAAE_HDR_TYPE_DISK:
			esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
			break;

		default:
			/* Other event types are ignored here. */
			break;
		}

		/* Advance to the next packed record. */
		ae = (union atto_vda_ae *)((u8 *)ae + length);
	}

	/* Re-arm: keep an AE request outstanding at all times. */
	esas2r_start_ae_request(a, rq);
	esas2r_trace_exit();
}
0851
0852
0853 void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
0854 {
0855 struct atto_vda_ae_hdr ae;
0856
0857 if (pwr_mgt)
0858 ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
0859 else
0860 ae.bytype = VDAAE_HDR_TYPE_RESET;
0861
0862 ae.byversion = VDAAE_HDR_VER_0;
0863 ae.byflags = 0;
0864 ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
0865
0866 if (pwr_mgt) {
0867 esas2r_hdebug("*** sending power management AE ***");
0868 } else {
0869 esas2r_hdebug("*** sending reset AE ***");
0870 }
0871
0872 esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
0873 sizeof(union atto_vda_ae));
0874 }
0875
/* No-op completion callback, installed where a request's completion must
 * be safely ignored (e.g. the general request during adapter reset). */
void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
{}
0878
/*
 * Copy sense data for a failed SCSI request from the data buffer into the
 * caller-supplied sense buffer (truncating to the caller's sense_len),
 * and watch for a "reported LUNs data has changed" sense code to trigger
 * a target state change.
 */
static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
				       struct esas2r_request *rq)
{
	u8 snslen, snslen2;

	/* snslen2 keeps the firmware's full sense length; snslen is
	 * clamped to what the caller's buffer can hold. */
	snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;

	if (snslen > rq->sense_len)
		snslen = rq->sense_len;

	if (snslen) {
		if (rq->sense_buf)
			memcpy(rq->sense_buf, rq->data_buf, snslen);
		else
			/* No caller buffer: point sense_buf at the data
			 * buffer where the sense already lives. */
			rq->sense_buf = (u8 *)rq->data_buf;

		/* Offsets 0x0c/0x0d are the additional sense code (ASC)
		 * and qualifier (ASCQ) in fixed-format sense data. */
		if (snslen2 > 0x0c) {
			u8 *s = (u8 *)rq->data_buf;

			esas2r_trace_enter();

			/* ASC 0x3F / ASCQ 0x0E: reported LUNs data has
			 * changed — notify the target database. */
			if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
				esas2r_trace("rq->target_id: %d",
					     rq->target_id);
				esas2r_target_state_changed(a, rq->target_id,
							    TS_LUN_CHANGE);
			}

			esas2r_trace("add_sense_key=%x", s[0x0c]);
			esas2r_trace("add_sense_qual=%x", s[0x0d]);
			esas2r_trace_exit();
		}
	}

	rq->sense_len = snslen;
}
0917
0918
/*
 * Final completion path for a request: clear the flashing flag on a
 * completed flash commit, give the interrupt callback a chance to restart
 * the request, process sense data for failed SCSI requests, then invoke
 * the completion callback.
 */
void esas2r_complete_request(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	/* A completed flash-commit ends the flashing state. */
	if (rq->vrq->scsi.function == VDA_FUNC_FLASH
	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
		clear_bit(AF_FLASHING, &a->flags);

	/* The interrupt callback may decide to re-issue the request; if it
	 * sets the status back to RS_PENDING, restart instead of
	 * completing. */
	if (rq->interrupt_cb) {
		(*rq->interrupt_cb)(a, rq);

		if (rq->req_stat == RS_PENDING) {
			esas2r_start_request(a, rq);
			return;
		}
	}

	if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
	    && unlikely(rq->req_stat != RS_SUCCESS)) {
		/* Capture sense data and log the failure. */
		esas2r_check_req_rsp_sense(a, rq);
		esas2r_log_request_failure(a, rq);
	}

	(*rq->comp_cb)(a, rq);
}