0055 #include <linux/circ_buf.h>
0056 #include <linux/device.h>
0057 #include <scsi/sas.h>
0058 #include "host.h"
0059 #include "isci.h"
0060 #include "port.h"
0061 #include "probe_roms.h"
0062 #include "remote_device.h"
0063 #include "request.h"
0064 #include "scu_completion_codes.h"
0065 #include "scu_event_codes.h"
0066 #include "registers.h"
0067 #include "scu_remote_node_context.h"
0068 #include "scu_task_context.h"
0069
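/* Stall time, in microseconds, between polls while waiting for the SCU context RAM initialization to complete. */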
0070 #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
0071
0072 #define smu_max_ports(dcc_value) \
0073 (\
0074 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
0075 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
0076 )
0077
0078 #define smu_max_task_contexts(dcc_value) \
0079 (\
0080 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
0081 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
0082 )
0083
0084 #define smu_max_rncs(dcc_value) \
0085 (\
0086 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
0087 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
0088 )
0089
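/* Per-phy start timeout, in milliseconds, used with the phy startup timer. */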
0090 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
0091
0092
0093
0094
0095
0096
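/* Interval, in milliseconds, at which the power control timer grants spin-up permission to waiting phys. */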
0097 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
0098
0099
0100
0101
0102
0103
0104
0105 #define NORMALIZE_PUT_POINTER(x) \
0106 ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
0107
0108
0109
0110
0111
0112
0113
0114
0115 #define NORMALIZE_EVENT_POINTER(x) \
0116 (\
0117 ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
0118 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
0119 )
0120
0121
0122
0123
0124
0125
0126
0127 #define NORMALIZE_GET_POINTER(x) \
0128 ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
0129
0130
0131
0132
0133
0134
0135
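/* Shift the get pointer's cycle bit up to bit 31 so it can be compared directly with the cycle bit stored in a completion queue entry. */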
0136 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
0137 ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
0138
0139
0140
0141
0142
0143
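/* Cycle bit (bit 31) written by hardware into each completion queue entry. */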
0144 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
0145
0146
0147 void sci_init_sm(struct sci_base_state_machine *sm,
0148 const struct sci_base_state *state_table, u32 initial_state)
0149 {
0150 sci_state_transition_t handler;
0151
0152 sm->initial_state_id = initial_state;
0153 sm->previous_state_id = initial_state;
0154 sm->current_state_id = initial_state;
0155 sm->state_table = state_table;
0156
0157 handler = sm->state_table[initial_state].enter_state;
0158 if (handler)
0159 handler(sm);
0160 }
0161
0162
0163 void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
0164 {
0165 sci_state_transition_t handler;
0166
0167 handler = sm->state_table[sm->current_state_id].exit_state;
0168 if (handler)
0169 handler(sm);
0170
0171 sm->previous_state_id = sm->current_state_id;
0172 sm->current_state_id = next_state;
0173
0174 handler = sm->state_table[sm->current_state_id].enter_state;
0175 if (handler)
0176 handler(sm);
0177 }
0178
0179 static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
0180 {
0181 u32 get_value = ihost->completion_queue_get;
0182 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
0183
0184 if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
0185 COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
0186 return true;
0187
0188 return false;
0189 }
0190
0191 static bool sci_controller_isr(struct isci_host *ihost)
0192 {
0193 if (sci_controller_completion_queue_has_entries(ihost))
0194 return true;
0195
0196
0197
0198
0199
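/*
 * Spurious interrupt: the completion queue may already have been drained
 * by a previous pass, so just acknowledge the completion interrupt source.
 */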
0200 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
0201
0202
0203
0204
0205
0206
0207
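/*
 * Toggle the interrupt mask so that a completion which raced with the
 * status clear above still generates a new interrupt message.
 */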
0208 spin_lock(&ihost->scic_lock);
0209 if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
0210 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
0211 writel(0, &ihost->smu_registers->interrupt_mask);
0212 }
0213 spin_unlock(&ihost->scic_lock);
0214
0215 return false;
0216 }
0217
0218 irqreturn_t isci_msix_isr(int vec, void *data)
0219 {
0220 struct isci_host *ihost = data;
0221
0222 if (sci_controller_isr(ihost))
0223 tasklet_schedule(&ihost->completion_tasklet);
0224
0225 return IRQ_HANDLED;
0226 }
0227
0228 static bool sci_controller_error_isr(struct isci_host *ihost)
0229 {
0230 u32 interrupt_status;
0231
0232 interrupt_status =
0233 readl(&ihost->smu_registers->interrupt_status);
0234 interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
0235
0236 if (interrupt_status != 0) {
0237
0238
0239
0240 return true;
0241 }
0242
0243
0244
0245
0246
0247
0248
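/*
 * No error pending: toggle the interrupt mask so any notification that
 * raced with the status read above is re-delivered.
 */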
0249 writel(0xff, &ihost->smu_registers->interrupt_mask);
0250 writel(0, &ihost->smu_registers->interrupt_mask);
0251
0252 return false;
0253 }
0254
0255 static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
0256 {
0257 u32 index = SCU_GET_COMPLETION_INDEX(ent);
0258 struct isci_request *ireq = ihost->reqs[index];
0259
0260
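/* Only process the completion if the tag is still active and its sequence number matches the current sequence for this slot. */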
0261 if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
0262 ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
0263 ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
0264
0265
0266
0267 sci_io_request_tc_completion(ireq, ent);
0268 }
0269
0270 static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
0271 {
0272 u32 index;
0273 struct isci_request *ireq;
0274 struct isci_remote_device *idev;
0275
0276 index = SCU_GET_COMPLETION_INDEX(ent);
0277
0278 switch (scu_get_command_request_type(ent)) {
0279 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
0280 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
0281 ireq = ihost->reqs[index];
0282 dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
0283 __func__, ent, ireq);
0284
0285
0286
0287 break;
0288 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
0289 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
0290 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
0291 idev = ihost->device_table[index];
0292 dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
0293 __func__, ent, idev);
0294
0295
0296
0297 break;
0298 default:
0299 dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
0300 __func__, ent);
0301 break;
0302 }
0303 }
0304
0305 static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
0306 {
0307 u32 index;
0308 u32 frame_index;
0309
0310 struct scu_unsolicited_frame_header *frame_header;
0311 struct isci_phy *iphy;
0312 struct isci_remote_device *idev;
0313
0314 enum sci_status result = SCI_FAILURE;
0315
0316 frame_index = SCU_GET_FRAME_INDEX(ent);
0317
0318 frame_header = ihost->uf_control.buffers.array[frame_index].header;
0319 ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
0320
0321 if (SCU_GET_FRAME_ERROR(ent)) {
0322
0323
0324
0325
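/* The hardware flagged an error for this frame; drop it and return the buffer to the free pool. */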
0326 sci_controller_release_frame(ihost, frame_index);
0327 return;
0328 }
0329
0330 if (frame_header->is_address_frame) {
0331 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
0332 iphy = &ihost->phys[index];
0333 result = sci_phy_frame_handler(iphy, frame_index);
0334 } else {
0335
0336 index = SCU_GET_COMPLETION_INDEX(ent);
0337
0338 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
0339
0340
0341
0342
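/*
 * No remote node context is associated with this frame (e.g. a signature
 * FIS from a device that has not been created yet), so route it to the
 * protocol engine's phy instead.
 */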
0343 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
0344 iphy = &ihost->phys[index];
0345 result = sci_phy_frame_handler(iphy, frame_index);
0346 } else {
0347 if (index < ihost->remote_node_entries)
0348 idev = ihost->device_table[index];
0349 else
0350 idev = NULL;
0351
0352 if (idev != NULL)
0353 result = sci_remote_device_frame_handler(idev, frame_index);
0354 else
0355 sci_controller_release_frame(ihost, frame_index);
0356 }
0357 }
0358
0359 if (result != SCI_SUCCESS) {
0360
0361
0362
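/* TODO: decide whether an unhandled frame error should be reported here. */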
0363 }
0364 }
0365
0366 static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
0367 {
0368 struct isci_remote_device *idev;
0369 struct isci_request *ireq;
0370 struct isci_phy *iphy;
0371 u32 index;
0372
0373 index = SCU_GET_COMPLETION_INDEX(ent);
0374
0375 switch (scu_get_event_type(ent)) {
0376 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
0377
0378 dev_err(&ihost->pdev->dev,
0379 "%s: SCIC Controller 0x%p received SMU command error "
0380 "0x%x\n",
0381 __func__,
0382 ihost,
0383 ent);
0384 break;
0385
0386 case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
0387 case SCU_EVENT_TYPE_SMU_ERROR:
0388 case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
0389
0390
0391
0392 dev_err(&ihost->pdev->dev,
0393 "%s: SCIC Controller 0x%p received fatal controller "
0394 "event 0x%x\n",
0395 __func__,
0396 ihost,
0397 ent);
0398 break;
0399
0400 case SCU_EVENT_TYPE_TRANSPORT_ERROR:
0401 ireq = ihost->reqs[index];
0402 sci_io_request_event_handler(ireq, ent);
0403 break;
0404
0405 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
0406 switch (scu_get_event_specifier(ent)) {
0407 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
0408 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
0409 ireq = ihost->reqs[index];
0410 if (ireq != NULL)
0411 sci_io_request_event_handler(ireq, ent);
0412 else
0413 dev_warn(&ihost->pdev->dev,
0414 "%s: SCIC Controller 0x%p received "
0415 "event 0x%x for io request object "
0416 "that doesn't exist.\n",
0417 __func__,
0418 ihost,
0419 ent);
0420
0421 break;
0422
0423 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
0424 idev = ihost->device_table[index];
0425 if (idev != NULL)
0426 sci_remote_device_event_handler(idev, ent);
0427 else
0428 dev_warn(&ihost->pdev->dev,
0429 "%s: SCIC Controller 0x%p received "
0430 "event 0x%x for remote device object "
0431 "that doesn't exist.\n",
0432 __func__,
0433 ihost,
0434 ent);
0435
0436 break;
0437 }
0438 break;
0439
0440 case SCU_EVENT_TYPE_BROADCAST_CHANGE:
0441
0442
0443
0444 case SCU_EVENT_TYPE_ERR_CNT_EVENT:
0445
0446
0447
0448 case SCU_EVENT_TYPE_OSSP_EVENT:
0449 index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
0450 iphy = &ihost->phys[index];
0451 sci_phy_event_handler(iphy, ent);
0452 break;
0453
0454 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
0455 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
0456 case SCU_EVENT_TYPE_RNC_OPS_MISC:
0457 if (index < ihost->remote_node_entries) {
0458 idev = ihost->device_table[index];
0459
0460 if (idev != NULL)
0461 sci_remote_device_event_handler(idev, ent);
0462 } else
0463 dev_err(&ihost->pdev->dev,
0464 "%s: SCIC Controller 0x%p received event 0x%x "
0465 "for remote device object 0x%0x that doesn't "
0466 "exist.\n",
0467 __func__,
0468 ihost,
0469 ent,
0470 index);
0471
0472 break;
0473
0474 default:
0475 dev_warn(&ihost->pdev->dev,
0476 "%s: SCIC Controller received unknown event code %x\n",
0477 __func__,
0478 ent);
0479 break;
0480 }
0481 }
0482
0483 static void sci_controller_process_completions(struct isci_host *ihost)
0484 {
0485 u32 completion_count = 0;
0486 u32 ent;
0487 u32 get_index;
0488 u32 get_cycle;
0489 u32 event_get;
0490 u32 event_cycle;
0491
0492 dev_dbg(&ihost->pdev->dev,
0493 "%s: completion queue beginning get:0x%08x\n",
0494 __func__,
0495 ihost->completion_queue_get);
0496
0497
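/* Break the cached get pointer into its completion/event indices and cycle bits. */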
0498 get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
0499 get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
0500
0501 event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
0502 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
0503
0504 while (
0505 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
0506 == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
0507 ) {
0508 completion_count++;
0509
0510 ent = ihost->completion_queue[get_index];
0511
0512
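/* Advance the get index, toggling the cycle bit when the queue wraps. */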
0513 get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
0514 (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
0515 get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
0516
0517 dev_dbg(&ihost->pdev->dev,
0518 "%s: completion queue entry:0x%08x\n",
0519 __func__,
0520 ent);
0521
0522 switch (SCU_GET_COMPLETION_TYPE(ent)) {
0523 case SCU_COMPLETION_TYPE_TASK:
0524 sci_controller_task_completion(ihost, ent);
0525 break;
0526
0527 case SCU_COMPLETION_TYPE_SDMA:
0528 sci_controller_sdma_completion(ihost, ent);
0529 break;
0530
0531 case SCU_COMPLETION_TYPE_UFI:
0532 sci_controller_unsolicited_frame(ihost, ent);
0533 break;
0534
0535 case SCU_COMPLETION_TYPE_EVENT:
0536 sci_controller_event_completion(ihost, ent);
0537 break;
0538
0539 case SCU_COMPLETION_TYPE_NOTIFY: {
0540 event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
0541 (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
0542 event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
0543
0544 sci_controller_event_completion(ihost, ent);
0545 break;
0546 }
0547 default:
0548 dev_warn(&ihost->pdev->dev,
0549 "%s: SCIC Controller received unknown "
0550 "completion type %x\n",
0551 __func__,
0552 ent);
0553 break;
0554 }
0555 }
0556
0557
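/* Only write the hardware get pointer back if at least one entry was consumed. */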
0558 if (completion_count > 0) {
0559 ihost->completion_queue_get =
0560 SMU_CQGR_GEN_BIT(ENABLE) |
0561 SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
0562 event_cycle |
0563 SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
0564 get_cycle |
0565 SMU_CQGR_GEN_VAL(POINTER, get_index);
0566
0567 writel(ihost->completion_queue_get,
0568 &ihost->smu_registers->completion_queue_get);
0569
0570 }
0571
0572 dev_dbg(&ihost->pdev->dev,
0573 "%s: completion queue ending get:0x%08x\n",
0574 __func__,
0575 ihost->completion_queue_get);
0576
0577 }
0578
0579 static void sci_controller_error_handler(struct isci_host *ihost)
0580 {
0581 u32 interrupt_status;
0582
0583 interrupt_status =
0584 readl(&ihost->smu_registers->interrupt_status);
0585
0586 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
0587 sci_controller_completion_queue_has_entries(ihost)) {
0588
0589 sci_controller_process_completions(ihost);
0590 writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
0591 } else {
0592 dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
0593 interrupt_status);
0594
0595 sci_change_state(&ihost->sm, SCIC_FAILED);
0596
0597 return;
0598 }
0599
0600
0601
0602
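/* Re-enable interrupts now that the error condition has been serviced. */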
0603 writel(0, &ihost->smu_registers->interrupt_mask);
0604 }
0605
0606 irqreturn_t isci_intx_isr(int vec, void *data)
0607 {
0608 irqreturn_t ret = IRQ_NONE;
0609 struct isci_host *ihost = data;
0610
0611 if (sci_controller_isr(ihost)) {
0612 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
0613 tasklet_schedule(&ihost->completion_tasklet);
0614 ret = IRQ_HANDLED;
0615 } else if (sci_controller_error_isr(ihost)) {
0616 spin_lock(&ihost->scic_lock);
0617 sci_controller_error_handler(ihost);
0618 spin_unlock(&ihost->scic_lock);
0619 ret = IRQ_HANDLED;
0620 }
0621
0622 return ret;
0623 }
0624
0625 irqreturn_t isci_error_isr(int vec, void *data)
0626 {
0627 struct isci_host *ihost = data;
0628
0629 if (sci_controller_error_isr(ihost))
0630 sci_controller_error_handler(ihost);
0631
0632 return IRQ_HANDLED;
0633 }
0634
0635
0636
0637
0638
0639
0640
0641
0642
0643 static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
0644 {
0645 if (completion_status != SCI_SUCCESS)
0646 dev_info(&ihost->pdev->dev,
0647 "controller start timed out, continuing...\n");
0648 clear_bit(IHOST_START_PENDING, &ihost->flags);
0649 wake_up(&ihost->eventq);
0650 }
0651
0652 int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
0653 {
0654 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
0655 struct isci_host *ihost = ha->lldd_ha;
0656
0657 if (test_bit(IHOST_START_PENDING, &ihost->flags))
0658 return 0;
0659
0660 sas_drain_work(ha);
0661
0662 return 1;
0663 }
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677 static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
0678 {
0679
0680 if (!ihost)
0681 return 0;
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
0692
0693
0694
0695
0696
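/*
 * The suggested start timeout is the sum of the signature FIS timeout,
 * the per-phy start timeout, and one power control interval for every
 * phy beyond the first.
 */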
0697 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
0698 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
0699 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
0700 }
0701
0702 static void sci_controller_enable_interrupts(struct isci_host *ihost)
0703 {
0704 set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
0705 writel(0, &ihost->smu_registers->interrupt_mask);
0706 }
0707
0708 void sci_controller_disable_interrupts(struct isci_host *ihost)
0709 {
0710 clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
0711 writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
0712 readl(&ihost->smu_registers->interrupt_mask);
0713 }
0714
0715 static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
0716 {
0717 u32 port_task_scheduler_value;
0718
0719 port_task_scheduler_value =
0720 readl(&ihost->scu_registers->peg0.ptsg.control);
0721 port_task_scheduler_value |=
0722 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
0723 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
0724 writel(port_task_scheduler_value,
0725 &ihost->scu_registers->peg0.ptsg.control);
0726 }
0727
0728 static void sci_controller_assign_task_entries(struct isci_host *ihost)
0729 {
0730 u32 task_assignment;
0731
0732
0733
0734
0735
0736
0737 task_assignment =
0738 readl(&ihost->smu_registers->task_context_assignment[0]);
0739
0740 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
0741 (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
0742 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
0743
0744 writel(task_assignment,
0745 &ihost->smu_registers->task_context_assignment[0]);
0746
0747 }
0748
0749 static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
0750 {
0751 u32 index;
0752 u32 completion_queue_control_value;
0753 u32 completion_queue_get_value;
0754 u32 completion_queue_put_value;
0755
0756 ihost->completion_queue_get = 0;
0757
0758 completion_queue_control_value =
0759 (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
0760 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
0761
0762 writel(completion_queue_control_value,
0763 &ihost->smu_registers->completion_queue_control);
0764
0765
0766
0767 completion_queue_get_value = (
0768 (SMU_CQGR_GEN_VAL(POINTER, 0))
0769 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
0770 | (SMU_CQGR_GEN_BIT(ENABLE))
0771 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
0772 );
0773
0774 writel(completion_queue_get_value,
0775 &ihost->smu_registers->completion_queue_get);
0776
0777
0778 completion_queue_put_value = (
0779 (SMU_CQPR_GEN_VAL(POINTER, 0))
0780 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
0781 );
0782
0783 writel(completion_queue_put_value,
0784 &ihost->smu_registers->completion_queue_put);
0785
0786
0787 for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
0788
0789
0790
0791
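/* Seed each entry with the opposite cycle bit so the queue initially reads as empty. */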
0792 ihost->completion_queue[index] = 0x80000000;
0793 }
0794 }
0795
0796 static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
0797 {
0798 u32 frame_queue_control_value;
0799 u32 frame_queue_get_value;
0800 u32 frame_queue_put_value;
0801
0802
0803 frame_queue_control_value =
0804 SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
0805
0806 writel(frame_queue_control_value,
0807 &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
0808
0809
0810 frame_queue_get_value = (
0811 SCU_UFQGP_GEN_VAL(POINTER, 0)
0812 | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
0813 );
0814
0815 writel(frame_queue_get_value,
0816 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
0817
0818 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
0819 writel(frame_queue_put_value,
0820 &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
0821 }
0822
0823 void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
0824 {
0825 if (ihost->sm.current_state_id == SCIC_STARTING) {
0826
0827
0828
0829
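/* Move to the ready state; some of the phys/ports may already be up and operational. */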
0830 sci_change_state(&ihost->sm, SCIC_READY);
0831
0832 isci_host_start_complete(ihost, status);
0833 }
0834 }
0835
0836 static bool is_phy_starting(struct isci_phy *iphy)
0837 {
0838 enum sci_phy_states state;
0839
0840 state = iphy->sm.current_state_id;
0841 switch (state) {
0842 case SCI_PHY_STARTING:
0843 case SCI_PHY_SUB_INITIAL:
0844 case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
0845 case SCI_PHY_SUB_AWAIT_IAF_UF:
0846 case SCI_PHY_SUB_AWAIT_SAS_POWER:
0847 case SCI_PHY_SUB_AWAIT_SATA_POWER:
0848 case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
0849 case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
0850 case SCI_PHY_SUB_AWAIT_OSSP_EN:
0851 case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
0852 case SCI_PHY_SUB_FINAL:
0853 return true;
0854 default:
0855 return false;
0856 }
0857 }
0858
0859 bool is_controller_start_complete(struct isci_host *ihost)
0860 {
0861 int i;
0862
0863 for (i = 0; i < SCI_MAX_PHYS; i++) {
0864 struct isci_phy *iphy = &ihost->phys[i];
0865 u32 state = iphy->sm.current_state_id;
0866
0867
0868
0869
0870
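/*
 * In APC mode every phy must be checked; in MPC mode only phys that were
 * configured into a real (non-dummy) port matter.
 */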
0871 if (is_port_config_apc(ihost))
0872 /* pass */;
0873 else if (!phy_get_non_dummy_port(iphy))
0874 continue;
0875
0876
0877
0878
0879
0880
0881
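/*
 * Start is complete only when every phy has either shown no sign of a
 * connected device or has finished link training, and all ready phys
 * have been configured into ports.
 */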
0882 if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
0883 (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
0884 (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
0885 (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
0886 return false;
0887 }
0888
0889 return true;
0890 }
0891
0892
0893
0894
0895
0896
0897
0898
0899
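/*
 * sci_controller_start_next_phy - start the next phy in the startup
 * sequence, or transition the controller to ready once all phys have had
 * the chance to start.
 */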
0900 static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
0901 {
0902 struct sci_oem_params *oem = &ihost->oem_parameters;
0903 struct isci_phy *iphy;
0904 enum sci_status status;
0905
0906 status = SCI_SUCCESS;
0907
0908 if (ihost->phy_startup_timer_pending)
0909 return status;
0910
0911 if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
0912 if (is_controller_start_complete(ihost)) {
0913 sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
0914 sci_del_timer(&ihost->phy_timer);
0915 ihost->phy_startup_timer_pending = false;
0916 }
0917 } else {
0918 iphy = &ihost->phys[ihost->next_phy_to_start];
0919
0920 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
0921 if (phy_get_non_dummy_port(iphy) == NULL) {
0922 ihost->next_phy_to_start++;
0923
0924
0925
0926
0927
0928
0929
0930
0931
0932
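/*
 * The phy was never assigned to a port in MPC mode, so skip it and
 * (recursively) try the next phy in the sequence.
 */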
0933 return sci_controller_start_next_phy(ihost);
0934 }
0935 }
0936
0937 status = sci_phy_start(iphy);
0938
0939 if (status == SCI_SUCCESS) {
0940 sci_mod_timer(&ihost->phy_timer,
0941 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
0942 ihost->phy_startup_timer_pending = true;
0943 } else {
0944 dev_warn(&ihost->pdev->dev,
0945 "%s: Controller stop operation failed "
0946 "to stop phy %d because of status "
0947 "%d.\n",
0948 __func__,
0949 ihost->phys[ihost->next_phy_to_start].phy_index,
0950 status);
0951 }
0952
0953 ihost->next_phy_to_start++;
0954 }
0955
0956 return status;
0957 }
0958
0959 static void phy_startup_timeout(struct timer_list *t)
0960 {
0961 struct sci_timer *tmr = from_timer(tmr, t, timer);
0962 struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
0963 unsigned long flags;
0964 enum sci_status status;
0965
0966 spin_lock_irqsave(&ihost->scic_lock, flags);
0967
0968 if (tmr->cancel)
0969 goto done;
0970
0971 ihost->phy_startup_timer_pending = false;
0972
0973 do {
0974 status = sci_controller_start_next_phy(ihost);
0975 } while (status != SCI_SUCCESS);
0976
0977 done:
0978 spin_unlock_irqrestore(&ihost->scic_lock, flags);
0979 }
0980
0981 static u16 isci_tci_active(struct isci_host *ihost)
0982 {
0983 return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
0984 }
0985
0986 static enum sci_status sci_controller_start(struct isci_host *ihost,
0987 u32 timeout)
0988 {
0989 enum sci_status result;
0990 u16 index;
0991
0992 if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
0993 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
0994 __func__, ihost->sm.current_state_id);
0995 return SCI_FAILURE_INVALID_STATE;
0996 }
0997
0998
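/* Build the task context index (TCi) free pool. */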
0999 BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
1000 ihost->tci_head = 0;
1001 ihost->tci_tail = 0;
1002 for (index = 0; index < ihost->task_context_entries; index++)
1003 isci_tci_free(ihost, index);
1004
1005
1006 sci_remote_node_table_initialize(&ihost->available_remote_nodes,
1007 ihost->remote_node_entries);
1008
1009
1010
1011
1012
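/* Make sure the hardware cannot interrupt us while the controller is being (re)started. */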
1013 sci_controller_disable_interrupts(ihost);
1014
1015
1016 sci_controller_enable_port_task_scheduler(ihost);
1017
1018
1019 sci_controller_assign_task_entries(ihost);
1020
1021
1022 sci_controller_initialize_completion_queue(ihost);
1023
1024
1025 sci_controller_initialize_unsolicited_frame_queue(ihost);
1026
1027
1028 for (index = 0; index < ihost->logical_port_entries; index++) {
1029 struct isci_port *iport = &ihost->ports[index];
1030
1031 result = sci_port_start(iport);
1032 if (result)
1033 return result;
1034 }
1035
1036 sci_controller_start_next_phy(ihost);
1037
1038 sci_mod_timer(&ihost->timer, timeout);
1039
1040 sci_change_state(&ihost->sm, SCIC_STARTING);
1041
1042 return SCI_SUCCESS;
1043 }
1044
1045 void isci_host_start(struct Scsi_Host *shost)
1046 {
1047 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
1048 unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
1049
1050 set_bit(IHOST_START_PENDING, &ihost->flags);
1051
1052 spin_lock_irq(&ihost->scic_lock);
1053 sci_controller_start(ihost, tmo);
1054 sci_controller_enable_interrupts(ihost);
1055 spin_unlock_irq(&ihost->scic_lock);
1056 }
1057
1058 static void isci_host_stop_complete(struct isci_host *ihost)
1059 {
1060 sci_controller_disable_interrupts(ihost);
1061 clear_bit(IHOST_STOP_PENDING, &ihost->flags);
1062 wake_up(&ihost->eventq);
1063 }
1064
1065 static void sci_controller_completion_handler(struct isci_host *ihost)
1066 {
1067
1068 if (sci_controller_completion_queue_has_entries(ihost))
1069 sci_controller_process_completions(ihost);
1070
1071
1072 writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
1073
1074 writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
1075 writel(0, &ihost->smu_registers->interrupt_mask);
1076 }
1077
1078 void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
1079 {
1080 if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
1081 !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1082 if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
1083
1084 dev_dbg(&ihost->pdev->dev,
1085 "%s: Normal - ireq/task = %p/%p\n",
1086 __func__, ireq, task);
1087 task->lldd_task = NULL;
1088 task->task_done(task);
1089 } else {
1090 dev_dbg(&ihost->pdev->dev,
1091 "%s: Error - ireq/task = %p/%p\n",
1092 __func__, ireq, task);
1093 if (sas_protocol_ata(task->task_proto))
1094 task->lldd_task = NULL;
1095 sas_task_abort(task);
1096 }
1097 } else
1098 task->lldd_task = NULL;
1099
1100 if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
1101 wake_up_all(&ihost->eventq);
1102
1103 if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
1104 isci_free_tag(ihost, ireq->io_tag);
1105 }
1106
1107
1108
1109
1110
1111
1112
1113
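/*
 * isci_host_completion_routine - tasklet body that drains the controller
 * completion queue and updates interrupt coalescing.
 * @data: the struct isci_host to service.
 */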
1114 void isci_host_completion_routine(unsigned long data)
1115 {
1116 struct isci_host *ihost = (struct isci_host *)data;
1117 u16 active;
1118
1119 spin_lock_irq(&ihost->scic_lock);
1120 sci_controller_completion_handler(ihost);
1121 spin_unlock_irq(&ihost->scic_lock);
1122
1123
1124
1125
1126
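/*
 * Each port holds a dummy task context for a silicon workaround, so
 * subtract SCI_MAX_PORTS to approximate the number of real outstanding
 * requests.
 */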
1127 active = isci_tci_active(ihost) - SCI_MAX_PORTS;
1128
1129
1130
1131
1132
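/*
 * Each increment of the TIMER encoding doubles the coalesce timeout, so
 * bump it by ilog2 of the number of outstanding requests.
 */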
1133 writel(SMU_ICC_GEN_VAL(NUMBER, active) |
1134 SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
1135 &ihost->smu_registers->interrupt_coalesce_control);
1136 }
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
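/*
 * sci_controller_stop - begin stopping the controller. @timeout is the
 * number of milliseconds allowed for the stop sequence to complete. The
 * controller must currently be in the READY state.
 */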
1156 static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
1157 {
1158 if (ihost->sm.current_state_id != SCIC_READY) {
1159 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1160 __func__, ihost->sm.current_state_id);
1161 return SCI_FAILURE_INVALID_STATE;
1162 }
1163
1164 sci_mod_timer(&ihost->timer, timeout);
1165 sci_change_state(&ihost->sm, SCIC_STOPPING);
1166 return SCI_SUCCESS;
1167 }
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
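/*
 * sci_controller_reset - transition the controller into the RESETTING
 * state, which performs a hardware reset and returns the state machine to
 * RESET. Only valid from the RESET, READY, STOPPING or FAILED states.
 */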
1181 static enum sci_status sci_controller_reset(struct isci_host *ihost)
1182 {
1183 switch (ihost->sm.current_state_id) {
1184 case SCIC_RESET:
1185 case SCIC_READY:
1186 case SCIC_STOPPING:
1187 case SCIC_FAILED:
1188
1189
1190
1191
1192 sci_change_state(&ihost->sm, SCIC_RESETTING);
1193 return SCI_SUCCESS;
1194 default:
1195 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
1196 __func__, ihost->sm.current_state_id);
1197 return SCI_FAILURE_INVALID_STATE;
1198 }
1199 }
1200
1201 static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
1202 {
1203 u32 index;
1204 enum sci_status status;
1205 enum sci_status phy_status;
1206
1207 status = SCI_SUCCESS;
1208
1209 for (index = 0; index < SCI_MAX_PHYS; index++) {
1210 phy_status = sci_phy_stop(&ihost->phys[index]);
1211
1212 if (phy_status != SCI_SUCCESS &&
1213 phy_status != SCI_FAILURE_INVALID_STATE) {
1214 status = SCI_FAILURE;
1215
1216 dev_warn(&ihost->pdev->dev,
1217 "%s: Controller stop operation failed to stop "
1218 "phy %d because of status %d.\n",
1219 __func__,
1220 ihost->phys[index].phy_index, phy_status);
1221 }
1222 }
1223
1224 return status;
1225 }
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239 void isci_host_deinit(struct isci_host *ihost)
1240 {
1241 int i;
1242
1243
1244 for (i = 0; i < isci_gpio_count(ihost); i++)
1245 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
1246
1247 set_bit(IHOST_STOP_PENDING, &ihost->flags);
1248
1249 spin_lock_irq(&ihost->scic_lock);
1250 sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
1251 spin_unlock_irq(&ihost->scic_lock);
1252
1253 wait_for_stop(ihost);
1254
1255
1256
1257
1258
1259
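/* Stop the phys only after the controller stop has completed, so ports and devices have gone idle first. */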
1260 sci_controller_stop_phys(ihost);
1261
1262
1263
1264
1265 writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
1266
1267 spin_lock_irq(&ihost->scic_lock);
1268 sci_controller_reset(ihost);
1269 spin_unlock_irq(&ihost->scic_lock);
1270
1271
1272 for (i = 0; i < ihost->logical_port_entries; i++) {
1273 struct isci_port *iport = &ihost->ports[i];
1274 del_timer_sync(&iport->timer.timer);
1275 }
1276
1277
1278 for (i = 0; i < SCI_MAX_PHYS; i++) {
1279 struct isci_phy *iphy = &ihost->phys[i];
1280 del_timer_sync(&iphy->sata_timer.timer);
1281 }
1282
1283 del_timer_sync(&ihost->port_agent.timer.timer);
1284
1285 del_timer_sync(&ihost->power_control.timer.timer);
1286
1287 del_timer_sync(&ihost->timer.timer);
1288
1289 del_timer_sync(&ihost->phy_timer.timer);
1290 }
1291
1292 static void __iomem *scu_base(struct isci_host *isci_host)
1293 {
1294 struct pci_dev *pdev = isci_host->pdev;
1295 int id = isci_host->id;
1296
1297 return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
1298 }
1299
1300 static void __iomem *smu_base(struct isci_host *isci_host)
1301 {
1302 struct pci_dev *pdev = isci_host->pdev;
1303 int id = isci_host->id;
1304
1305 return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
1306 }
1307
1308 static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
1309 {
1310 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1311
1312 sci_change_state(&ihost->sm, SCIC_RESET);
1313 }
1314
1315 static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
1316 {
1317 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1318
1319 sci_del_timer(&ihost->timer);
1320 }
1321
1322 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
1323 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
1324 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
1325 #define INTERRUPT_COALESCE_NUMBER_MAX 256
1326 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
1327 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
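/*
 * sci_controller_set_interrupt_coalescence - program interrupt coalescing.
 * @coalesce_number: number of completions to accumulate before an
 *	interrupt is raised.
 * @coalesce_timeout: maximum time, in microseconds, to hold off an
 *	interrupt; 0 disables the coalesce timer.
 */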
1346 static enum sci_status
1347 sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1348 u32 coalesce_number,
1349 u32 coalesce_timeout)
1350 {
1351 u8 timeout_encode = 0;
1352 u32 min = 0;
1353 u32 max = 0;
1354
1355
1356 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
1357 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
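/*
 * The hardware encodes the timeout as a value whose base range
 * (~853ns - 1280ns) doubles with every increment, so search for the
 * smallest encoding whose range contains the requested timeout.
 */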
1397 if (coalesce_timeout == 0)
1398 timeout_encode = 0;
1399 else {
1400
1401 coalesce_timeout = coalesce_timeout * 100;
1402 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
1403 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
1404
1405
1406 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
1407 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
1408 timeout_encode++) {
1409 if (min <= coalesce_timeout && max > coalesce_timeout)
1410 break;
1411 else if (coalesce_timeout >= max && coalesce_timeout < min * 2
1412 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
1413 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
1414 break;
1415 else {
1416 timeout_encode++;
1417 break;
1418 }
1419 } else {
1420 max = max * 2;
1421 min = min * 2;
1422 }
1423 }
1424
1425 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
1426
1427 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
1428 }
1429
1430 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
1431 SMU_ICC_GEN_VAL(TIMER, timeout_encode),
1432 &ihost->smu_registers->interrupt_coalesce_control);
1433
1434
1435 ihost->interrupt_coalesce_number = (u16)coalesce_number;
1436 ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
1437
1438 return SCI_SUCCESS;
1439 }
1440
1441
1442 static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1443 {
1444 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1445 u32 val;
1446
1447
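/* Enable clock gating for power savings while the controller is ready and idle. */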
1448 val = readl(&ihost->smu_registers->clock_gating_control);
1449 val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
1450 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
1451 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
1452 val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
1453 writel(val, &ihost->smu_registers->clock_gating_control);
1454
1455
1456 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1457 }
1458
1459 static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
1460 {
1461 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1462
1463
1464 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
1465 }
1466
1467 static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
1468 {
1469 u32 index;
1470 enum sci_status port_status;
1471 enum sci_status status = SCI_SUCCESS;
1472
1473 for (index = 0; index < ihost->logical_port_entries; index++) {
1474 struct isci_port *iport = &ihost->ports[index];
1475
1476 port_status = sci_port_stop(iport);
1477
1478 if ((port_status != SCI_SUCCESS) &&
1479 (port_status != SCI_FAILURE_INVALID_STATE)) {
1480 status = SCI_FAILURE;
1481
1482 dev_warn(&ihost->pdev->dev,
1483 "%s: Controller stop operation failed to "
1484 "stop port %d because of status %d.\n",
1485 __func__,
1486 iport->logical_port_index,
1487 port_status);
1488 }
1489 }
1490
1491 return status;
1492 }
1493
1494 static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
1495 {
1496 u32 index;
1497 enum sci_status status;
1498 enum sci_status device_status;
1499
1500 status = SCI_SUCCESS;
1501
1502 for (index = 0; index < ihost->remote_node_entries; index++) {
1503 if (ihost->device_table[index] != NULL) {
1504
1505 device_status = sci_remote_device_stop(ihost->device_table[index], 0);
1506
1507 if ((device_status != SCI_SUCCESS) &&
1508 (device_status != SCI_FAILURE_INVALID_STATE)) {
1509 dev_warn(&ihost->pdev->dev,
1510 "%s: Controller stop operation failed "
1511 "to stop device 0x%p because of "
1512 "status %d.\n",
1513 __func__,
1514 ihost->device_table[index], device_status);
1515 }
1516 }
1517 }
1518
1519 return status;
1520 }
1521
1522 static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
1523 {
1524 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1525
1526 sci_controller_stop_devices(ihost);
1527 sci_controller_stop_ports(ihost);
1528
1529 if (!sci_controller_has_remote_devices_stopping(ihost))
1530 isci_host_stop_complete(ihost);
1531 }
1532
1533 static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
1534 {
1535 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1536
1537 sci_del_timer(&ihost->timer);
1538 }
1539
1540 static void sci_controller_reset_hardware(struct isci_host *ihost)
1541 {
1542
1543 sci_controller_disable_interrupts(ihost);
1544
1545
1546 writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
1547
1548
1549 udelay(1000);
1550
1551
1552 writel(0x00000000, &ihost->smu_registers->completion_queue_get);
1553
1554
1555 writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
1556
1557
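/* Clear any latched interrupt status. */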
1558 writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
1559 }
1560
1561 static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
1562 {
1563 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1564
1565 sci_controller_reset_hardware(ihost);
1566 sci_change_state(&ihost->sm, SCIC_RESET);
1567 }
1568
1569 static const struct sci_base_state sci_controller_state_table[] = {
1570 [SCIC_INITIAL] = {
1571 .enter_state = sci_controller_initial_state_enter,
1572 },
1573 [SCIC_RESET] = {},
1574 [SCIC_INITIALIZING] = {},
1575 [SCIC_INITIALIZED] = {},
1576 [SCIC_STARTING] = {
1577 .exit_state = sci_controller_starting_state_exit,
1578 },
1579 [SCIC_READY] = {
1580 .enter_state = sci_controller_ready_state_enter,
1581 .exit_state = sci_controller_ready_state_exit,
1582 },
1583 [SCIC_RESETTING] = {
1584 .enter_state = sci_controller_resetting_state_enter,
1585 },
1586 [SCIC_STOPPING] = {
1587 .enter_state = sci_controller_stopping_state_enter,
1588 .exit_state = sci_controller_stopping_state_exit,
1589 },
1590 [SCIC_FAILED] = {}
1591 };
1592
1593 static void controller_timeout(struct timer_list *t)
1594 {
1595 struct sci_timer *tmr = from_timer(tmr, t, timer);
1596 struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
1597 struct sci_base_state_machine *sm = &ihost->sm;
1598 unsigned long flags;
1599
1600 spin_lock_irqsave(&ihost->scic_lock, flags);
1601
1602 if (tmr->cancel)
1603 goto done;
1604
1605 if (sm->current_state_id == SCIC_STARTING)
1606 sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
1607 else if (sm->current_state_id == SCIC_STOPPING) {
1608 sci_change_state(sm, SCIC_FAILED);
1609 isci_host_stop_complete(ihost);
1610 } else
1611 dev_err(&ihost->pdev->dev,
1612 "%s: Controller timer fired when controller was not "
1613 "in a state being timed.\n",
1614 __func__);
1615
1616 done:
1617 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1618 }
1619
1620 static enum sci_status sci_controller_construct(struct isci_host *ihost,
1621 void __iomem *scu_base,
1622 void __iomem *smu_base)
1623 {
1624 u8 i;
1625
1626 sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
1627
1628 ihost->scu_registers = scu_base;
1629 ihost->smu_registers = smu_base;
1630
1631 sci_port_configuration_agent_construct(&ihost->port_agent);
1632
1633
1634 for (i = 0; i < SCI_MAX_PORTS; i++)
1635 sci_port_construct(&ihost->ports[i], i, ihost);
1636 sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
1637
1638
1639 for (i = 0; i < SCI_MAX_PHYS; i++) {
1640
1641 sci_phy_construct(&ihost->phys[i],
1642 &ihost->ports[SCI_MAX_PORTS], i);
1643 }
1644
1645 ihost->invalid_phy_mask = 0;
1646
1647 sci_init_timer(&ihost->timer, controller_timeout);
1648
1649 return sci_controller_reset(ihost);
1650 }
1651
1652 int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
1653 {
1654 int i;
1655
1656 for (i = 0; i < SCI_MAX_PORTS; i++)
1657 if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
1658 return -EINVAL;
1659
1660 for (i = 0; i < SCI_MAX_PHYS; i++)
1661 if (oem->phys[i].sas_address.high == 0 &&
1662 oem->phys[i].sas_address.low == 0)
1663 return -EINVAL;
1664
1665 if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
1666 for (i = 0; i < SCI_MAX_PHYS; i++)
1667 if (oem->ports[i].phy_mask != 0)
1668 return -EINVAL;
1669 } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
1670 u8 phy_mask = 0;
1671
1672 for (i = 0; i < SCI_MAX_PHYS; i++)
1673 phy_mask |= oem->ports[i].phy_mask;
1674
1675 if (phy_mask == 0)
1676 return -EINVAL;
1677 } else
1678 return -EINVAL;
1679
1680 if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
1681 oem->controller.max_concurr_spin_up < 1)
1682 return -EINVAL;
1683
1684 if (oem->controller.do_enable_ssc) {
1685 if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
1686 return -EINVAL;
1687
1688 if (version >= ISCI_ROM_VER_1_1) {
1689 u8 test = oem->controller.ssc_sata_tx_spread_level;
1690
1691 switch (test) {
1692 case 0:
1693 case 2:
1694 case 3:
1695 case 6:
1696 case 7:
1697 break;
1698 default:
1699 return -EINVAL;
1700 }
1701
1702 test = oem->controller.ssc_sas_tx_spread_level;
1703 if (oem->controller.ssc_sas_tx_type == 0) {
1704 switch (test) {
1705 case 0:
1706 case 2:
1707 case 3:
1708 break;
1709 default:
1710 return -EINVAL;
1711 }
1712 } else if (oem->controller.ssc_sas_tx_type == 1) {
1713 switch (test) {
1714 case 0:
1715 case 3:
1716 case 6:
1717 break;
1718 default:
1719 return -EINVAL;
1720 }
1721 }
1722 }
1723 }
1724
1725 return 0;
1726 }
1727
1728 static u8 max_spin_up(struct isci_host *ihost)
1729 {
1730 if (ihost->user_parameters.max_concurr_spinup)
1731 return min_t(u8, ihost->user_parameters.max_concurr_spinup,
1732 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1733 else
1734 return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
1735 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
1736 }
1737
1738 static void power_control_timeout(struct timer_list *t)
1739 {
1740 struct sci_timer *tmr = from_timer(tmr, t, timer);
1741 struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
1742 struct isci_phy *iphy;
1743 unsigned long flags;
1744 u8 i;
1745
1746 spin_lock_irqsave(&ihost->scic_lock, flags);
1747
1748 if (tmr->cancel)
1749 goto done;
1750
1751 ihost->power_control.phys_granted_power = 0;
1752
1753 if (ihost->power_control.phys_waiting == 0) {
1754 ihost->power_control.timer_started = false;
1755 goto done;
1756 }
1757
1758 for (i = 0; i < SCI_MAX_PHYS; i++) {
1759
1760 if (ihost->power_control.phys_waiting == 0)
1761 break;
1762
1763 iphy = ihost->power_control.requesters[i];
1764 if (iphy == NULL)
1765 continue;
1766
1767 if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
1768 break;
1769
1770 ihost->power_control.requesters[i] = NULL;
1771 ihost->power_control.phys_waiting--;
1772 ihost->power_control.phys_granted_power++;
1773 sci_phy_consume_power_handler(iphy);
1774
1775 if (iphy->protocol == SAS_PROTOCOL_SSP) {
1776 u8 j;
1777
1778 for (j = 0; j < SCI_MAX_PHYS; j++) {
1779 struct isci_phy *requester = ihost->power_control.requesters[j];
1780
1781
1782
1783
1784
1785
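/*
 * If another waiting phy is attached to the same SAS address, grant it
 * power in the same interval.
 */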
1786 if (requester != NULL && requester != iphy) {
1787 u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
1788 iphy->frame_rcvd.iaf.sas_addr,
1789 sizeof(requester->frame_rcvd.iaf.sas_addr));
1790
1791 if (other == 0) {
1792 ihost->power_control.requesters[j] = NULL;
1793 ihost->power_control.phys_waiting--;
1794 sci_phy_consume_power_handler(requester);
1795 }
1796 }
1797 }
1798 }
1799 }
1800
1801
1802
1803
1804
1805 sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1806 ihost->power_control.timer_started = true;
1807
1808 done:
1809 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1810 }
1811
1812 void sci_controller_power_control_queue_insert(struct isci_host *ihost,
1813 struct isci_phy *iphy)
1814 {
1815 BUG_ON(iphy == NULL);
1816
1817 if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
1818 ihost->power_control.phys_granted_power++;
1819 sci_phy_consume_power_handler(iphy);
1820
1821
1822
1823
1824
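/*
 * Restart the power control timer so another spin-up interval begins now;
 * when it fires, the granted-power count is reset.
 */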
1825 if (ihost->power_control.timer_started)
1826 sci_del_timer(&ihost->power_control.timer);
1827
1828 sci_mod_timer(&ihost->power_control.timer,
1829 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
1830 ihost->power_control.timer_started = true;
1831
1832 } else {
1833
1834
1835
1836
1837 u8 i;
1838 struct isci_phy *current_phy;
1839
1840 for (i = 0; i < SCI_MAX_PHYS; i++) {
1841 u8 other;
1842 current_phy = &ihost->phys[i];
1843
1844 other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
1845 iphy->frame_rcvd.iaf.sas_addr,
1846 sizeof(current_phy->frame_rcvd.iaf.sas_addr));
1847
1848 if (current_phy->sm.current_state_id == SCI_PHY_READY &&
1849 current_phy->protocol == SAS_PROTOCOL_SSP &&
1850 other == 0) {
1851 sci_phy_consume_power_handler(iphy);
1852 break;
1853 }
1854 }
1855
1856 if (i == SCI_MAX_PHYS) {
1857
1858 ihost->power_control.requesters[iphy->phy_index] = iphy;
1859 ihost->power_control.phys_waiting++;
1860 }
1861 }
1862 }
1863
1864 void sci_controller_power_control_queue_remove(struct isci_host *ihost,
1865 struct isci_phy *iphy)
1866 {
1867 BUG_ON(iphy == NULL);
1868
1869 if (ihost->power_control.requesters[iphy->phy_index])
1870 ihost->power_control.phys_waiting--;
1871
1872 ihost->power_control.requesters[iphy->phy_index] = NULL;
1873 }
1874
1875 static int is_long_cable(int phy, unsigned char selection_byte)
1876 {
1877 return !!(selection_byte & (1 << phy));
1878 }
1879
1880 static int is_medium_cable(int phy, unsigned char selection_byte)
1881 {
1882 return !!(selection_byte & (1 << (phy + 4)));
1883 }
1884
1885 static enum cable_selections decode_selection_byte(
1886 int phy,
1887 unsigned char selection_byte)
1888 {
1889 return ((selection_byte & (1 << phy)) ? 1 : 0)
1890 + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
1891 }
1892
1893 static unsigned char *to_cable_select(struct isci_host *ihost)
1894 {
1895 if (is_cable_select_overridden())
1896 return ((unsigned char *)&cable_selection_override)
1897 + ihost->id;
1898 else
1899 return &ihost->oem_parameters.controller.cable_selection_mask;
1900 }
1901
1902 enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
1903 {
1904 return decode_selection_byte(phy, *to_cable_select(ihost));
1905 }
1906
1907 char *lookup_cable_names(enum cable_selections selection)
1908 {
1909 static char *cable_names[] = {
1910 [short_cable] = "short",
1911 [long_cable] = "long",
1912 [medium_cable] = "medium",
1913 [undefined_cable] = "<undefined, assumed long>"
1914 };
1915 return (selection <= undefined_cable) ? cable_names[selection]
1916 : cable_names[undefined_cable];
1917 }
1918
1919 #define AFE_REGISTER_WRITE_DELAY 10
1920
1921 static void sci_controller_afe_initialization(struct isci_host *ihost)
1922 {
1923 struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
1924 const struct sci_oem_params *oem = &ihost->oem_parameters;
1925 struct pci_dev *pdev = ihost->pdev;
1926 u32 afe_status;
1927 u32 phy_id;
1928 unsigned char cable_selection_mask = *to_cable_select(ihost);
1929
1930
1931 writel(0x0081000f, &afe->afe_dfx_master_control0);
1932 udelay(AFE_REGISTER_WRITE_DELAY);
1933
1934 if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
1935
1936
1937
1938 writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
1939 udelay(AFE_REGISTER_WRITE_DELAY);
1940 }
1941
1942
1943 if (is_a2(pdev))
1944 writel(0x00005A00, &afe->afe_bias_control);
1945 else if (is_b0(pdev) || is_c0(pdev))
1946 writel(0x00005F00, &afe->afe_bias_control);
1947 else if (is_c1(pdev))
1948 writel(0x00005500, &afe->afe_bias_control);
1949
1950 udelay(AFE_REGISTER_WRITE_DELAY);
1951
1952
1953 if (is_a2(pdev))
1954 writel(0x80040908, &afe->afe_pll_control0);
1955 else if (is_b0(pdev) || is_c0(pdev))
1956 writel(0x80040A08, &afe->afe_pll_control0);
1957 else if (is_c1(pdev)) {
1958 writel(0x80000B08, &afe->afe_pll_control0);
1959 udelay(AFE_REGISTER_WRITE_DELAY);
1960 writel(0x00000B08, &afe->afe_pll_control0);
1961 udelay(AFE_REGISTER_WRITE_DELAY);
1962 writel(0x80000B08, &afe->afe_pll_control0);
1963 }
1964
1965 udelay(AFE_REGISTER_WRITE_DELAY);
1966
1967
1968 do {
1969 afe_status = readl(&afe->afe_common_block_status);
1970 udelay(AFE_REGISTER_WRITE_DELAY);
1971 } while ((afe_status & 0x00001000) == 0);
1972
1973 if (is_a2(pdev)) {
1974
1975
1976
1977 writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
1978 udelay(AFE_REGISTER_WRITE_DELAY);
1979 }
1980
1981 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
1982 struct scu_afe_transceiver __iomem *xcvr = &afe->scu_afe_xcvr[phy_id];
1983 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
1984 int cable_length_long =
1985 is_long_cable(phy_id, cable_selection_mask);
1986 int cable_length_medium =
1987 is_medium_cable(phy_id, cable_selection_mask);
1988
1989 if (is_a2(pdev)) {
1990
1991
1992
1993 writel(0x00004512, &xcvr->afe_xcvr_control0);
1994 udelay(AFE_REGISTER_WRITE_DELAY);
1995
1996 writel(0x0050100F, &xcvr->afe_xcvr_control1);
1997 udelay(AFE_REGISTER_WRITE_DELAY);
1998 } else if (is_b0(pdev)) {
1999
2000 writel(0x00030000, &xcvr->afe_tx_ssc_control);
2001 udelay(AFE_REGISTER_WRITE_DELAY);
2002 } else if (is_c0(pdev)) {
2003
2004 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2005 udelay(AFE_REGISTER_WRITE_DELAY);
2006
2007
2008
2009
2010 writel(0x00014500, &xcvr->afe_xcvr_control0);
2011 udelay(AFE_REGISTER_WRITE_DELAY);
2012 } else if (is_c1(pdev)) {
2013
2014 writel(0x00010202, &xcvr->afe_tx_ssc_control);
2015 udelay(AFE_REGISTER_WRITE_DELAY);
2016
2017
2018
2019
2020 writel(0x0001C500, &xcvr->afe_xcvr_control0);
2021 udelay(AFE_REGISTER_WRITE_DELAY);
2022 }
2023
2024
2025
2026
2027 if (is_a2(pdev))
2028 writel(0x000003F0, &xcvr->afe_channel_control);
2029 else if (is_b0(pdev)) {
2030 writel(0x000003D7, &xcvr->afe_channel_control);
2031 udelay(AFE_REGISTER_WRITE_DELAY);
2032
2033 writel(0x000003D4, &xcvr->afe_channel_control);
2034 } else if (is_c0(pdev)) {
2035 writel(0x000001E7, &xcvr->afe_channel_control);
2036 udelay(AFE_REGISTER_WRITE_DELAY);
2037
2038 writel(0x000001E4, &xcvr->afe_channel_control);
2039 } else if (is_c1(pdev)) {
2040 writel(cable_length_long ? 0x000002F7 : 0x000001F7,
2041 &xcvr->afe_channel_control);
2042 udelay(AFE_REGISTER_WRITE_DELAY);
2043
2044 writel(cable_length_long ? 0x000002F4 : 0x000001F4,
2045 &xcvr->afe_channel_control);
2046 }
2047 udelay(AFE_REGISTER_WRITE_DELAY);
2048
2049 if (is_a2(pdev)) {
2050
2051 writel(0x00040000, &xcvr->afe_tx_control);
2052 udelay(AFE_REGISTER_WRITE_DELAY);
2053 }
2054
2055 if (is_a2(pdev) || is_b0(pdev))
2056
2057
2058
2059
2060 writel(0x00004100, &xcvr->afe_xcvr_control0);
2061 else if (is_c0(pdev))
2062 writel(0x00014100, &xcvr->afe_xcvr_control0);
2063 else if (is_c1(pdev))
2064 writel(0x0001C100, &xcvr->afe_xcvr_control0);
2065 udelay(AFE_REGISTER_WRITE_DELAY);
2066
2067
2068 if (is_a2(pdev))
2069 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2070 else if (is_b0(pdev)) {
2071 writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
2072 udelay(AFE_REGISTER_WRITE_DELAY);
2073
2074 writel(0x00040000, &xcvr->afe_tx_control);
2075 } else if (is_c0(pdev)) {
2076 writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
2077 udelay(AFE_REGISTER_WRITE_DELAY);
2078
2079 writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
2080 udelay(AFE_REGISTER_WRITE_DELAY);
2081
2082
2083 writel(0x00040000, &xcvr->afe_tx_control);
2084 } else if (is_c1(pdev)) {
2085 writel(cable_length_long ? 0x01500C0C :
2086 cable_length_medium ? 0x01400C0D : 0x02400C0D,
2087 &xcvr->afe_xcvr_control1);
2088 udelay(AFE_REGISTER_WRITE_DELAY);
2089
2090 writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
2091 udelay(AFE_REGISTER_WRITE_DELAY);
2092
2093 writel(cable_length_long ? 0x33091C1F :
2094 cable_length_medium ? 0x3315181F : 0x2B17161F,
2095 &xcvr->afe_rx_ssc_control0);
2096 udelay(AFE_REGISTER_WRITE_DELAY);
2097
2098
2099 writel(0x00040000, &xcvr->afe_tx_control);
2100 }
2101
2102 udelay(AFE_REGISTER_WRITE_DELAY);
2103
2104 writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
2105 udelay(AFE_REGISTER_WRITE_DELAY);
2106
2107 writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
2108 udelay(AFE_REGISTER_WRITE_DELAY);
2109
2110 writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
2111 udelay(AFE_REGISTER_WRITE_DELAY);
2112
2113 writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
2114 udelay(AFE_REGISTER_WRITE_DELAY);
2115 }
2116
2117
2118 writel(0x00010f00, &afe->afe_dfx_master_control0);
2119 udelay(AFE_REGISTER_WRITE_DELAY);
2120 }
2121
2122 static void sci_controller_initialize_power_control(struct isci_host *ihost)
2123 {
2124 sci_init_timer(&ihost->power_control.timer, power_control_timeout);
2125
2126 memset(ihost->power_control.requesters, 0,
2127 sizeof(ihost->power_control.requesters));
2128
2129 ihost->power_control.phys_waiting = 0;
2130 ihost->power_control.phys_granted_power = 0;
2131 }
2132
2133 static enum sci_status sci_controller_initialize(struct isci_host *ihost)
2134 {
2135 struct sci_base_state_machine *sm = &ihost->sm;
2136 enum sci_status result = SCI_FAILURE;
2137 unsigned long i, state, val;
2138
2139 if (ihost->sm.current_state_id != SCIC_RESET) {
2140 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2141 __func__, ihost->sm.current_state_id);
2142 return SCI_FAILURE_INVALID_STATE;
2143 }
2144
2145 sci_change_state(sm, SCIC_INITIALIZING);
2146
2147 sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
2148
2149 ihost->next_phy_to_start = 0;
2150 ihost->phy_startup_timer_pending = false;
2151
2152 sci_controller_initialize_power_control(ihost);
2153
2154
2155
2156
2157
2158
2159 sci_controller_afe_initialization(ihost);
2160
2161
2162
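/* Take the SCU hardware out of soft reset. */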
2163 writel(0, &ihost->smu_registers->soft_reset_control);
2164
2165
2166
2167
2168 for (i = 100; i >= 1; i--) {
2169 u32 status;
2170
2171
2172 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2173 status = readl(&ihost->smu_registers->control_status);
2174
2175 if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
2176 break;
2177 }
2178 if (i == 0)
2179 goto out;
2180
2181
2182
2183
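/* Query how many ports, task contexts and remote node contexts the hardware supports, and clamp to the driver maximums. */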
2184 val = readl(&ihost->smu_registers->device_context_capacity);
2185
2186
2187 ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
2188 ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
2189 ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
2190
2191
2192
2193
2194
2195 for (i = 0; i < ihost->logical_port_entries; i++) {
2196 struct scu_port_task_scheduler_group_registers __iomem
2197 *ptsg = &ihost->scu_registers->peg0.ptsg;
2198
2199 writel(i, &ptsg->protocol_engine[i]);
2200 }
2201
2202
2203 val = readl(&ihost->scu_registers->sdma.pdma_configuration);
2204 val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2205 writel(val, &ihost->scu_registers->sdma.pdma_configuration);
2206
2207 val = readl(&ihost->scu_registers->sdma.cdma_configuration);
2208 val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2209 writel(val, &ihost->scu_registers->sdma.cdma_configuration);
2210
2211
2212
2213
2214
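/* Initialize the phys before the ports, since port initialization touches the phy registers. */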
2215 for (i = 0; i < SCI_MAX_PHYS; i++) {
2216 result = sci_phy_initialize(&ihost->phys[i],
2217 &ihost->scu_registers->peg0.pe[i].tl,
2218 &ihost->scu_registers->peg0.pe[i].ll);
2219 if (result != SCI_SUCCESS)
2220 goto out;
2221 }
2222
2223 for (i = 0; i < ihost->logical_port_entries; i++) {
2224 struct isci_port *iport = &ihost->ports[i];
2225
2226 iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
2227 iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
2228 iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
2229 }
2230
2231 result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
2232
2233 out:
2234
2235 if (result == SCI_SUCCESS)
2236 state = SCIC_INITIALIZED;
2237 else
2238 state = SCIC_FAILED;
2239 sci_change_state(sm, state);
2240
2241 return result;
2242 }
2243
2244 static int sci_controller_dma_alloc(struct isci_host *ihost)
2245 {
2246 struct device *dev = &ihost->pdev->dev;
2247 size_t size;
2248 int i;
2249
2250
2251 if (ihost->completion_queue)
2252 return 0;
2253
2254 size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
2255 ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
2256 GFP_KERNEL);
2257 if (!ihost->completion_queue)
2258 return -ENOMEM;
2259
2260 size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
2261 ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
2262 GFP_KERNEL);
2263
2264 if (!ihost->remote_node_context_table)
2265 return -ENOMEM;
2266
2267 size = ihost->task_context_entries * sizeof(struct scu_task_context);
2268 ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
2269 GFP_KERNEL);
2270 if (!ihost->task_context_table)
2271 return -ENOMEM;
2272
2273 size = SCI_UFI_TOTAL_SIZE;
2274 ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
2275 if (!ihost->ufi_buf)
2276 return -ENOMEM;
2277
2278 for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
2279 struct isci_request *ireq;
2280 dma_addr_t dma;
2281
2282 ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
2283 if (!ireq)
2284 return -ENOMEM;
2285
2286 ireq->tc = &ihost->task_context_table[i];
2287 ireq->owning_controller = ihost;
2288 ireq->request_daddr = dma;
2289 ireq->isci_host = ihost;
2290 ihost->reqs[i] = ireq;
2291 }
2292
2293 return 0;
2294 }
2295
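/*
 * Program the silicon with the bus addresses of the memory allocated by
 * sci_controller_dma_alloc(): the completion queue, remote node context
 * table, task context table, and the unsolicited frame header and
 * address tables.
 */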
2296 static int sci_controller_mem_init(struct isci_host *ihost)
2297 {
2298 int err = sci_controller_dma_alloc(ihost);
2299
2300 if (err)
2301 return err;
2302
2303 writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
2304 writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
2305
2306 writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
2307 writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
2308
2309 writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
2310 writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
2311
2312 sci_unsolicited_frame_control_construct(ihost);
2313
	/*
	 * Inform the silicon as to the location of the UF headers and
	 * address table.
	 */
2318 writel(lower_32_bits(ihost->uf_control.headers.physical_address),
2319 &ihost->scu_registers->sdma.uf_header_base_address_lower);
2320 writel(upper_32_bits(ihost->uf_control.headers.physical_address),
2321 &ihost->scu_registers->sdma.uf_header_base_address_upper);
2322
2323 writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
2324 &ihost->scu_registers->sdma.uf_address_table_lower);
2325 writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
2326 &ihost->scu_registers->sdma.uf_address_table_upper);
2327
2328 return 0;
2329 }
2330
/**
 * isci_host_init - (re-)initialize hardware and internal (private) state
 * @ihost: host to init
 *
 * Any public facing objects (like asd_sas_port and asd_sas_phy), and
 * one-time initialization objects such as locks and waitqueues, are
 * not touched here; only the controller hardware and the core's
 * private state are (re-)initialized.
 */
2339 int isci_host_init(struct isci_host *ihost)
2340 {
2341 int i, err;
2342 enum sci_status status;
2343
2344 spin_lock_irq(&ihost->scic_lock);
2345 status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
2346 spin_unlock_irq(&ihost->scic_lock);
2347 if (status != SCI_SUCCESS) {
2348 dev_err(&ihost->pdev->dev,
2349 "%s: sci_controller_construct failed - status = %x\n",
2350 __func__,
2351 status);
2352 return -ENODEV;
2353 }
2354
2355 spin_lock_irq(&ihost->scic_lock);
2356 status = sci_controller_initialize(ihost);
2357 spin_unlock_irq(&ihost->scic_lock);
2358 if (status != SCI_SUCCESS) {
2359 dev_warn(&ihost->pdev->dev,
2360 "%s: sci_controller_initialize failed - status = 0x%x\n",
2361 __func__, status);
2363 return -ENODEV;
2364 }
2365
2366 err = sci_controller_mem_init(ihost);
2367 if (err)
2368 return err;
2369
	/* enable sgpio */
2371 writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
2372 for (i = 0; i < isci_gpio_count(ihost); i++)
2373 writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
2374 writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
2375
2376 return 0;
2377 }
2378
2379 void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
2380 struct isci_phy *iphy)
2381 {
2382 switch (ihost->sm.current_state_id) {
2383 case SCIC_STARTING:
2384 sci_del_timer(&ihost->phy_timer);
2385 ihost->phy_startup_timer_pending = false;
2386 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2387 iport, iphy);
2388 sci_controller_start_next_phy(ihost);
2389 break;
2390 case SCIC_READY:
2391 ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
2392 iport, iphy);
2393 break;
2394 default:
2395 dev_dbg(&ihost->pdev->dev,
2396 "%s: SCIC Controller linkup event from phy %d in "
2397 "unexpected state %d\n", __func__, iphy->phy_index,
2398 ihost->sm.current_state_id);
2399 }
2400 }
2401
2402 void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
2403 struct isci_phy *iphy)
2404 {
2405 switch (ihost->sm.current_state_id) {
2406 case SCIC_STARTING:
2407 case SCIC_READY:
2408 ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
2409 iport, iphy);
2410 break;
2411 default:
2412 dev_dbg(&ihost->pdev->dev,
2413 "%s: SCIC Controller linkdown event from phy %d in "
2414 "unexpected state %d\n",
2415 __func__,
2416 iphy->phy_index,
2417 ihost->sm.current_state_id);
2418 }
2419 }
2420
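/* Return true if any device in the device table is still in the STOPPING state */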
2421 bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
2422 {
2423 u32 index;
2424
2425 for (index = 0; index < ihost->remote_node_entries; index++) {
2426 if ((ihost->device_table[index] != NULL) &&
2427 (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
2428 return true;
2429 }
2430
2431 return false;
2432 }
2433
2434 void sci_controller_remote_device_stopped(struct isci_host *ihost,
2435 struct isci_remote_device *idev)
2436 {
2437 if (ihost->sm.current_state_id != SCIC_STOPPING) {
2438 dev_dbg(&ihost->pdev->dev,
2439 "SCIC Controller 0x%p remote device stopped event "
2440 "from device 0x%p in unexpected state %d\n",
2441 ihost, idev,
2442 ihost->sm.current_state_id);
2443 return;
2444 }
2445
2446 if (!sci_controller_has_remote_devices_stopping(ihost))
2447 isci_host_stop_complete(ihost);
2448 }
2449
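/* Post an encoded request/command to the hardware via the SMU post context port */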
2450 void sci_controller_post_request(struct isci_host *ihost, u32 request)
2451 {
2452 dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
2453 __func__, ihost->id, request);
2454
2455 writel(request, &ihost->smu_registers->post_context_port);
2456 }
2457
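/*
 * Map an IO tag back to its active request.  Returns NULL if the task
 * context index is out of range, the request is not active, or the
 * tag's sequence number no longer matches (i.e. the tag is stale).
 */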
2458 struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
2459 {
2460 u16 task_index;
2461 u16 task_sequence;
2462
2463 task_index = ISCI_TAG_TCI(io_tag);
2464
2465 if (task_index < ihost->task_context_entries) {
2466 struct isci_request *ireq = ihost->reqs[task_index];
2467
2468 if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
2469 task_sequence = ISCI_TAG_SEQ(io_tag);
2470
2471 if (task_sequence == ihost->io_request_sequence[task_index])
2472 return ireq;
2473 }
2474 }
2475
2476 return NULL;
2477 }
2478
2479
/**
 * sci_controller_allocate_remote_node_context() - reserve remote node
 *    context index(es) for a remote device
 * @ihost: controller that owns the pool of free remote node indices
 * @idev: remote device requesting the remote node context(s)
 * @node_id: on success, the base remote node index assigned to the device
 *
 * Allocates the number of remote node contexts required by the device
 * type (as reported by sci_remote_device_node_count()) and records the
 * device in the controller's device table.
 *
 * Return: SCI_SUCCESS if an index was available, otherwise
 * SCI_FAILURE_INSUFFICIENT_RESOURCES.
 */
2494 enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
2495 struct isci_remote_device *idev,
2496 u16 *node_id)
2497 {
2498 u16 node_index;
2499 u32 remote_node_count = sci_remote_device_node_count(idev);
2500
2501 node_index = sci_remote_node_table_allocate_remote_node(
2502 &ihost->available_remote_nodes, remote_node_count
2503 );
2504
2505 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
2506 ihost->device_table[node_index] = idev;
2507
2508 *node_id = node_index;
2509
2510 return SCI_SUCCESS;
2511 }
2512
2513 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
2514 }
2515
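/*
 * Return a previously allocated remote node context index (and any
 * additional indices the device type required) to the free pool, but
 * only if the index still maps to @idev in the device table.
 */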
2516 void sci_controller_free_remote_node_context(struct isci_host *ihost,
2517 struct isci_remote_device *idev,
2518 u16 node_id)
2519 {
2520 u32 remote_node_count = sci_remote_device_node_count(idev);
2521
2522 if (ihost->device_table[node_id] == idev) {
2523 ihost->device_table[node_id] = NULL;
2524
2525 sci_remote_node_table_release_remote_node_index(
2526 &ihost->available_remote_nodes, remote_node_count, node_id
2527 );
2528 }
2529 }
2530
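/*
 * Assemble a D2H FIS image for the upper layers: the first dword is
 * taken from the unsolicited frame header, the remainder of the FIS
 * from the frame buffer.
 */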
2531 void sci_controller_copy_sata_response(void *response_buffer,
2532 void *frame_header,
2533 void *frame_buffer)
2534 {
2535
2536 memcpy(response_buffer, frame_header, sizeof(u32));
2537
2538 memcpy(response_buffer + sizeof(u32),
2539 frame_buffer,
2540 sizeof(struct dev_to_host_fis) - sizeof(u32));
2541 }
2542
2543 void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
2544 {
2545 if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
2546 writel(ihost->uf_control.get,
2547 &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
2548 }
2549
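/*
 * The task context indices (TCIs) are managed as a circular buffer of
 * SCI_MAX_IO_REQUESTS entries: freed indices are pushed at the tail and
 * new allocations are taken from the head.
 */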
2550 void isci_tci_free(struct isci_host *ihost, u16 tci)
2551 {
2552 u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
2553
2554 ihost->tci_pool[tail] = tci;
2555 ihost->tci_tail = tail + 1;
2556 }
2557
2558 static u16 isci_tci_alloc(struct isci_host *ihost)
2559 {
2560 u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
2561 u16 tci = ihost->tci_pool[head];
2562
2563 ihost->tci_head = head + 1;
2564 return tci;
2565 }
2566
2567 static u16 isci_tci_space(struct isci_host *ihost)
2568 {
2569 return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
2570 }
2571
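/*
 * An IO tag pairs a TCI with that slot's current sequence number so
 * that a stale tag can be detected after the TCI has been reused.
 */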
2572 u16 isci_alloc_tag(struct isci_host *ihost)
2573 {
2574 if (isci_tci_space(ihost)) {
2575 u16 tci = isci_tci_alloc(ihost);
2576 u8 seq = ihost->io_request_sequence[tci];
2577
2578 return ISCI_TAG(seq, tci);
2579 }
2580
2581 return SCI_CONTROLLER_INVALID_IO_TAG;
2582 }
2583
2584 enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
2585 {
2586 u16 tci = ISCI_TAG_TCI(io_tag);
2587 u16 seq = ISCI_TAG_SEQ(io_tag);
2588
	/* prevent tail from passing head */
2590 if (isci_tci_active(ihost) == 0)
2591 return SCI_FAILURE_INVALID_IO_TAG;
2592
2593 if (seq == ihost->io_request_sequence[tci]) {
2594 ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
2595
2596 isci_tci_free(ihost, tci);
2597
2598 return SCI_SUCCESS;
2599 }
2600 return SCI_FAILURE_INVALID_IO_TAG;
2601 }
2602
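/*
 * Start an IO request: the controller must be READY; the remote device
 * gets first chance to reject the request, after which the request is
 * marked active and its task context is posted to the hardware.
 */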
2603 enum sci_status sci_controller_start_io(struct isci_host *ihost,
2604 struct isci_remote_device *idev,
2605 struct isci_request *ireq)
2606 {
2607 enum sci_status status;
2608
2609 if (ihost->sm.current_state_id != SCIC_READY) {
2610 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2611 __func__, ihost->sm.current_state_id);
2612 return SCI_FAILURE_INVALID_STATE;
2613 }
2614
2615 status = sci_remote_device_start_io(ihost, idev, ireq);
2616 if (status != SCI_SUCCESS)
2617 return status;
2618
2619 set_bit(IREQ_ACTIVE, &ireq->flags);
2620 sci_controller_post_request(ihost, ireq->post_context);
2621 return SCI_SUCCESS;
2622 }
2623
2624 enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
2625 struct isci_remote_device *idev,
2626 struct isci_request *ireq)
2627 {
	/* terminate an ongoing (i.e. started) core IO request.  This does not
	 * abort the IO request at the target, but rather removes the IO
	 * request from the host controller.
	 */
2632 enum sci_status status;
2633
2634 if (ihost->sm.current_state_id != SCIC_READY) {
2635 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2636 __func__, ihost->sm.current_state_id);
2637 return SCI_FAILURE_INVALID_STATE;
2638 }
2639 status = sci_io_request_terminate(ireq);
2640
2641 dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
2642 __func__, status, ireq, ireq->flags);
2643
2644 if ((status == SCI_SUCCESS) &&
2645 !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
2646 !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
		/* Utilize the original post context command and or in the
		 * POST_TC_ABORT request sub-type.
		 */
2650 sci_controller_post_request(
2651 ihost, ireq->post_context |
2652 SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
2653 }
2654 return status;
2655 }
2656
/**
 * sci_controller_complete_io() - perform core completion processing for
 *    an IO request
 * @ihost: controller for which to complete the IO request
 * @idev: remote device on which the IO request was executing
 * @ireq: the IO request to complete
 *
 * After this call the request is no longer active and must be
 * reconstructed before it can be reused.
 */
2668 enum sci_status sci_controller_complete_io(struct isci_host *ihost,
2669 struct isci_remote_device *idev,
2670 struct isci_request *ireq)
2671 {
2672 enum sci_status status;
2673
2674 switch (ihost->sm.current_state_id) {
2675 case SCIC_STOPPING:
		/* XXX: completions while the controller is stopping are not handled */
2677 return SCI_FAILURE;
2678 case SCIC_READY:
2679 status = sci_remote_device_complete_io(ihost, idev, ireq);
2680 if (status != SCI_SUCCESS)
2681 return status;
2682
2683 clear_bit(IREQ_ACTIVE, &ireq->flags);
2684 return SCI_SUCCESS;
2685 default:
2686 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2687 __func__, ihost->sm.current_state_id);
2688 return SCI_FAILURE_INVALID_STATE;
2689 }
2690
2691 }
2692
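/*
 * Mark a previously started request active again and re-post its task
 * context to the hardware; only valid while the controller is READY.
 */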
2693 enum sci_status sci_controller_continue_io(struct isci_request *ireq)
2694 {
2695 struct isci_host *ihost = ireq->owning_controller;
2696
2697 if (ihost->sm.current_state_id != SCIC_READY) {
2698 dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
2699 __func__, ihost->sm.current_state_id);
2700 return SCI_FAILURE_INVALID_STATE;
2701 }
2702
2703 set_bit(IREQ_ACTIVE, &ireq->flags);
2704 sci_controller_post_request(ihost, ireq->post_context);
2705 return SCI_SUCCESS;
2706 }
2707
/**
 * sci_controller_start_task() - start a task management request
 * @ihost: controller on which to start the task management request
 * @idev: remote device to which the task management request is directed
 * @ireq: the (task management) request to start
 *
 * The controller must be in the READY state; otherwise
 * SCI_FAILURE_INVALID_STATE is returned.
 */
2717 enum sci_status sci_controller_start_task(struct isci_host *ihost,
2718 struct isci_remote_device *idev,
2719 struct isci_request *ireq)
2720 {
2721 enum sci_status status;
2722
2723 if (ihost->sm.current_state_id != SCIC_READY) {
2724 dev_warn(&ihost->pdev->dev,
2725 "%s: SCIC Controller starting task from invalid state\n",
2726 __func__);
2728 return SCI_FAILURE_INVALID_STATE;
2729 }
2730
2731 status = sci_remote_device_start_task(ihost, idev, ireq);
2732 switch (status) {
2733 case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
2734 set_bit(IREQ_ACTIVE, &ireq->flags);
2735
		/*
		 * The device accepted the task but it could not be posted to
		 * the hardware in its current state; report success so the
		 * framework treats the task as started.
		 */
2741 return SCI_SUCCESS;
2742 case SCI_SUCCESS:
2743 set_bit(IREQ_ACTIVE, &ireq->flags);
2744 sci_controller_post_request(ihost, ireq->post_context);
2745 break;
2746 default:
2747 break;
2748 }
2749
2750 return status;
2751 }
2752
2753 static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
2754 {
2755 int d;
2756
	/* no support for TX_GP_CFG */
2758 if (reg_index == 0)
2759 return -EINVAL;
2760
2761 for (d = 0; d < isci_gpio_count(ihost); d++) {
2762 u32 val = 0x444;
2763 int i;
2764
2765 for (i = 0; i < 3; i++) {
2766 int bit;
2767
2768 bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
2769 write_data, reg_index,
2770 reg_count);
2771 if (bit < 0)
2772 break;
2773
			/* if od is set, on is clear */
2775 val &= ~(bit << ((i << 2) + 2));
2776 }
2777
2778 if (i < 3)
2779 break;
2780 writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
2781 }
2782
	/* unless reg_index is > 1, we should always be able to write at
	 * least one register
	 */
2786 return d > 0;
2787 }
2788
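/*
 * SGPIO write handler (called via libsas); only the SAS_GPIO_REG_TX_GP
 * register type is supported, all others return -EINVAL.
 */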
2789 int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
2790 u8 reg_count, u8 *write_data)
2791 {
2792 struct isci_host *ihost = sas_ha->lldd_ha;
2793 int written;
2794
2795 switch (reg_type) {
2796 case SAS_GPIO_REG_TX_GP:
2797 written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
2798 break;
2799 default:
2800 written = -EINVAL;
2801 }
2802
2803 return written;
2804 }