0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056 #include "isci.h"
0057 #include "port.h"
0058 #include "request.h"
0059
0060 #define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
0061 #define SCU_DUMMY_INDEX (0xFFFF)
0062
#undef C
#define C(a) (#a)
/* Map a port state id to its printable name.  PORT_STATES is an
 * x-macro list expanded through the C() stringifier above, so the
 * string table stays in the same order as the enum. */
static const char *port_state_name(enum sci_port_states state)
{
	static const char * const strings[] = PORT_STATES;

	return strings[state];
}
#undef C
0072
/* Recover the struct device for log messages by stepping backwards
 * from @iport (by its index) to the base of the owning host's ports[]
 * array and then container_of()'ing to the isci_host.
 * NOTE(review): for the dummy port the offset used is SCI_MAX_PORTS+1,
 * which only reaches ports[0] if the dummy port is stored at
 * ports[SCI_MAX_PORTS+1]; if it actually lives at ports[SCI_MAX_PORTS]
 * (as sci_port_clear_phy() suggests) this is off by one struct --
 * confirm against the isci_host layout. */
static struct device *sciport_to_dev(struct isci_port *iport)
{
	int i = iport->physical_port_index;
	struct isci_port *table;
	struct isci_host *ihost;

	if (i == SCIC_SDS_DUMMY_PORT)
		i = SCI_MAX_PORTS+1;

	table = iport - i;
	ihost = container_of(table, typeof(*ihost), ports[0]);

	return &ihost->pdev->dev;
}
0087
0088 static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
0089 {
0090 u8 index;
0091
0092 proto->all = 0;
0093 for (index = 0; index < SCI_MAX_PHYS; index++) {
0094 struct isci_phy *iphy = iport->phy_table[index];
0095
0096 if (!iphy)
0097 continue;
0098 sci_phy_get_protocols(iphy, proto);
0099 }
0100 }
0101
0102 static u32 sci_port_get_phys(struct isci_port *iport)
0103 {
0104 u32 index;
0105 u32 mask;
0106
0107 mask = 0;
0108 for (index = 0; index < SCI_MAX_PHYS; index++)
0109 if (iport->phy_table[index])
0110 mask |= (1 << index);
0111
0112 return mask;
0113 }
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125
0126
0127
/*
 * sci_port_get_properties() - fill @prop with the port's attributes.
 * @iport: port to query; may be NULL
 * @prop: output - logical index, phy mask, local SAS address and
 *	  protocols, attached (remote) SAS address
 *
 * Return: SCI_FAILURE_INVALID_PORT if @iport is NULL or the dummy
 * port, otherwise SCI_SUCCESS.
 */
enum sci_status sci_port_get_properties(struct isci_port *iport,
					struct sci_port_properties *prop)
{
	if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
		return SCI_FAILURE_INVALID_PORT;

	prop->index = iport->logical_port_index;
	prop->phy_mask = sci_port_get_phys(iport);
	sci_port_get_sas_address(iport, &prop->local.sas_address);
	sci_port_get_protocols(iport, &prop->local.protocols);
	sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);

	return SCI_SUCCESS;
}
0142
/* Re-arm broadcast-change notification on every phy in the port.
 * The read-then-write-back of link_layer_control looks like a no-op;
 * presumably a latched BCN bit in that register is cleared by writing
 * its current value back (write-1-to-clear semantics) --
 * NOTE(review): confirm against the SCU link layer register spec. */
static void sci_port_bcn_enable(struct isci_port *iport)
{
	struct isci_phy *iphy;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
		iphy = iport->phy_table[i];
		if (!iphy)
			continue;
		val = readl(&iphy->link_layer_registers->link_layer_control);
		/* write the current value back to clear the latched bit */
		writel(val, &iphy->link_layer_registers->link_layer_control);
	}
}
0158
/* Forward a broadcast-change primitive seen on @iphy to libsas, then
 * re-arm broadcast notification for the whole port. */
static void isci_port_bc_change_received(struct isci_host *ihost,
					 struct isci_port *iport,
					 struct isci_phy *iphy)
{
	dev_dbg(&ihost->pdev->dev,
		"%s: isci_phy = %p, sas_phy = %p\n",
		__func__, iphy, &iphy->sas_phy);

	sas_notify_port_event(&iphy->sas_phy,
			      PORTE_BROADCAST_RCVD, GFP_ATOMIC);
	sci_port_bcn_enable(iport);
}
0171
/*
 * isci_port_link_up() - report a link-up event to libsas.
 *
 * Under the sas_phy frame_rcvd_lock, fills in the sas_phy OOB mode,
 * received-frame size and attached SAS address according to the phy
 * protocol: SATA targets get an address synthesized from the port
 * properties; SSP targets report the address from the received
 * IDENTIFY address frame.  Unless the protocol was unrecognized,
 * libsas is then notified via PORTE_BYTES_DMAED.
 */
static void isci_port_link_up(struct isci_host *isci_host,
			      struct isci_port *iport,
			      struct isci_phy *iphy)
{
	unsigned long flags;
	struct sci_port_properties properties;
	unsigned long success = true;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n",
		__func__, iport);

	spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);

	sci_port_get_properties(iport, &properties);

	if (iphy->protocol == SAS_PROTOCOL_SATA) {
		u64 attached_sas_address;

		iphy->sas_phy.oob_mode = SATA_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);

		/* Assemble a byte-swapped 64-bit address from the hi/lo
		 * words in the port properties (SATA devices carry no
		 * SAS address of their own). */
		attached_sas_address = properties.remote.sas_address.high;
		attached_sas_address <<= 32;
		attached_sas_address |= properties.remote.sas_address.low;
		swab64s(&attached_sas_address);

		memcpy(&iphy->sas_phy.attached_sas_addr,
		       &attached_sas_address, sizeof(attached_sas_address));
	} else if (iphy->protocol == SAS_PROTOCOL_SSP) {
		iphy->sas_phy.oob_mode = SAS_OOB_MODE;
		iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);

		/* Copy the attached SAS address from the IAF frame. */
		memcpy(iphy->sas_phy.attached_sas_addr,
		       iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
	} else {
		dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
		success = false;
	}

	iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);

	spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);

	/* Only notify libsas for recognized protocols. */
	if (success)
		sas_notify_port_event(&iphy->sas_phy,
				      PORTE_BYTES_DMAED, GFP_ATOMIC);
}
0230
0231
0232
0233
0234
0235
0236
0237
0238
0239
/*
 * isci_port_link_down() - report a link-down event to libsas.
 *
 * When @isci_phy was the only member of its sas_port, every remote
 * device on @isci_port is flagged IDEV_GONE first; the phy is then
 * reported disconnected via PHYE_LOSS_OF_SIGNAL.  @isci_port may be
 * NULL (no device marking is done in that case).
 */
static void isci_port_link_down(struct isci_host *isci_host,
				struct isci_phy *isci_phy,
				struct isci_port *isci_port)
{
	struct isci_remote_device *isci_device;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p\n", __func__, isci_port);

	if (isci_port) {

		/* check whether this was the last phy on the port */
		if (isci_phy->sas_phy.port &&
		    isci_phy->sas_phy.port->num_phys == 1) {

			/* mark every device on the port as gone */
			list_for_each_entry(isci_device,
					    &isci_port->remote_dev_list,
					    node) {
				dev_dbg(&isci_host->pdev->dev,
					"%s: isci_device = %p\n",
					__func__, isci_device);
				set_bit(IDEV_GONE, &isci_device->flags);
			}
		}
	}

	/* tell libsas the link dropped */
	sas_phy_disconnected(&isci_phy->sas_phy);
	sas_notify_phy_event(&isci_phy->sas_phy,
			     PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_port = %p - Done\n", __func__, isci_port);
}
0280
0281 static bool is_port_ready_state(enum sci_port_states state)
0282 {
0283 switch (state) {
0284 case SCI_PORT_READY:
0285 case SCI_PORT_SUB_WAITING:
0286 case SCI_PORT_SUB_OPERATIONAL:
0287 case SCI_PORT_SUB_CONFIGURING:
0288 return true;
0289 default:
0290 return false;
0291 }
0292 }
0293
0294
0295 static void port_state_machine_change(struct isci_port *iport,
0296 enum sci_port_states state)
0297 {
0298 struct sci_base_state_machine *sm = &iport->sm;
0299 enum sci_port_states old_state = sm->current_state_id;
0300
0301 if (is_port_ready_state(old_state) && !is_port_ready_state(state))
0302 iport->ready_exit = true;
0303
0304 sci_change_state(sm, state);
0305 iport->ready_exit = false;
0306 }
0307
0308
0309
0310
0311
0312
0313
0314
0315
/*
 * isci_port_hard_reset_complete() - finish a port hard reset.
 * @completion_status: outcome of the reset operation.
 *
 * Records the status for the waiter.  On failure, if no phy is active,
 * a link-down is generated on the last active phy (so the OS layer
 * sees the port go away) and the port is moved back to the waiting
 * substate.  Finally clears IPORT_RESET_PENDING and wakes anyone
 * waiting on the host event queue.
 */
static void isci_port_hard_reset_complete(struct isci_port *isci_port,
					  enum sci_status completion_status)
{
	struct isci_host *ihost = isci_port->owning_controller;

	dev_dbg(&ihost->pdev->dev,
		"%s: isci_port = %p, completion_status=%x\n",
		__func__, isci_port, completion_status);

	/* Save the status of the hard reset from the port. */
	isci_port->hard_reset_status = completion_status;

	if (completion_status != SCI_SUCCESS) {

		/* The reset failed; report the lost link if nothing is up. */
		if (isci_port->active_phy_mask == 0) {
			int phy_idx = isci_port->last_active_phy;
			struct isci_phy *iphy = &ihost->phys[phy_idx];

			/* Generate the link-down now; the reset flow
			 * intercepted the original event. */
			isci_port_link_down(ihost, iphy, isci_port);
		}

		/* Return the port to waiting for a new link. */
		port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);

	}
	clear_bit(IPORT_RESET_PENDING, &isci_port->state);
	wake_up(&ihost->eventq);

}
0351
0352
0353
0354
0355
0356
0357
0358
0359
0360
0361
0362
0363 bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
0364 {
0365 struct isci_host *ihost = iport->owning_controller;
0366 struct sci_user_parameters *user = &ihost->user_parameters;
0367
0368
0369 u32 existing_phy_index = SCI_MAX_PHYS;
0370 u32 index;
0371
0372 if ((iport->physical_port_index == 1) && (phy_index != 1))
0373 return false;
0374
0375 if (iport->physical_port_index == 3 && phy_index != 3)
0376 return false;
0377
0378 if (iport->physical_port_index == 2 &&
0379 (phy_index == 0 || phy_index == 1))
0380 return false;
0381
0382 for (index = 0; index < SCI_MAX_PHYS; index++)
0383 if (iport->phy_table[index] && index != phy_index)
0384 existing_phy_index = index;
0385
0386
0387
0388
0389 if (existing_phy_index < SCI_MAX_PHYS &&
0390 user->phys[phy_index].max_speed_generation !=
0391 user->phys[existing_phy_index].max_speed_generation)
0392 return false;
0393
0394 return true;
0395 }
0396
0397
0398
0399
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410 static bool sci_port_is_phy_mask_valid(
0411 struct isci_port *iport,
0412 u32 phy_mask)
0413 {
0414 if (iport->physical_port_index == 0) {
0415 if (((phy_mask & 0x0F) == 0x0F)
0416 || ((phy_mask & 0x03) == 0x03)
0417 || ((phy_mask & 0x01) == 0x01)
0418 || (phy_mask == 0))
0419 return true;
0420 } else if (iport->physical_port_index == 1) {
0421 if (((phy_mask & 0x02) == 0x02)
0422 || (phy_mask == 0))
0423 return true;
0424 } else if (iport->physical_port_index == 2) {
0425 if (((phy_mask & 0x0C) == 0x0C)
0426 || ((phy_mask & 0x04) == 0x04)
0427 || (phy_mask == 0))
0428 return true;
0429 } else if (iport->physical_port_index == 3) {
0430 if (((phy_mask & 0x08) == 0x08)
0431 || (phy_mask == 0))
0432 return true;
0433 }
0434
0435 return false;
0436 }
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446 static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
0447 {
0448 u32 index;
0449 struct isci_phy *iphy;
0450
0451 for (index = 0; index < SCI_MAX_PHYS; index++) {
0452
0453
0454
0455 iphy = iport->phy_table[index];
0456 if (iphy && sci_port_active_phy(iport, iphy))
0457 return iphy;
0458 }
0459
0460 return NULL;
0461 }
0462
0463 static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
0464 {
0465
0466
0467
0468
0469 if (!iport->phy_table[iphy->phy_index] &&
0470 !phy_get_non_dummy_port(iphy) &&
0471 sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
0472
0473
0474
0475 iport->logical_port_index = iport->physical_port_index;
0476 iport->phy_table[iphy->phy_index] = iphy;
0477 sci_phy_set_port(iphy, iport);
0478
0479 return SCI_SUCCESS;
0480 }
0481
0482 return SCI_FAILURE;
0483 }
0484
0485 static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
0486 {
0487
0488 if (iport->phy_table[iphy->phy_index] == iphy &&
0489 phy_get_non_dummy_port(iphy) == iport) {
0490 struct isci_host *ihost = iport->owning_controller;
0491
0492
0493 sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
0494 iport->phy_table[iphy->phy_index] = NULL;
0495 return SCI_SUCCESS;
0496 }
0497
0498 return SCI_FAILURE;
0499 }
0500
0501 void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
0502 {
0503 u32 index;
0504
0505 sas->high = 0;
0506 sas->low = 0;
0507 for (index = 0; index < SCI_MAX_PHYS; index++)
0508 if (iport->phy_table[index])
0509 sci_phy_get_sas_address(iport->phy_table[index], sas);
0510 }
0511
0512 void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
0513 {
0514 struct isci_phy *iphy;
0515
0516
0517
0518
0519
0520 iphy = sci_port_get_a_connected_phy(iport);
0521 if (iphy) {
0522 if (iphy->protocol != SAS_PROTOCOL_SATA) {
0523 sci_phy_get_attached_sas_address(iphy, sas);
0524 } else {
0525 sci_phy_get_sas_address(iphy, sas);
0526 sas->low += iphy->phy_index;
0527 }
0528 } else {
0529 sas->high = 0;
0530 sas->low = 0;
0531 }
0532 }
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
/*
 * sci_port_construct_dummy_rnc() - build the reserved "dummy" remote
 * node context for @iport at remote node index @rni.
 *
 * The RNC is zeroed, then populated as a valid single-width SSP node
 * with a zero SAS address, bound to this port's physical index.  It
 * backs the dummy task context posted while the port is operational.
 */
static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
{
	union scu_remote_node_context *rnc;

	rnc = &iport->owning_controller->remote_node_context_table[rni];

	memset(rnc, 0, sizeof(union scu_remote_node_context));

	/* no real target sits behind the dummy node */
	rnc->ssp.remote_sas_address_hi = 0;
	rnc->ssp.remote_sas_address_lo = 0;

	rnc->ssp.remote_node_index = rni;
	rnc->ssp.remote_node_port_width = 1;
	rnc->ssp.logical_port_index = iport->physical_port_index;

	rnc->ssp.nexus_loss_timer_enable = false;
	rnc->ssp.check_bit = false;
	rnc->ssp.is_valid = true;
	rnc->ssp.is_remote_node_context = true;
	rnc->ssp.function_number = 0;
	rnc->ssp.arbitration_wait_time = 0;
}
0566
0567
0568
0569
0570
0571
/*
 * sci_port_construct_dummy_task() - build the reserved "dummy" SSP
 * task context for @iport using io tag @tag.
 *
 * The task context is zeroed and filled in as a valid initiator SSP
 * request tied to the port's reserved remote node index; it is posted
 * by sci_port_post_dummy_request() when the port becomes operational.
 */
static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
{
	struct isci_host *ihost = iport->owning_controller;
	struct scu_task_context *task_context;

	task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
	memset(task_context, 0, sizeof(struct scu_task_context));

	task_context->initiator_request = 1;
	task_context->connection_rate = 1;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->task_index = ISCI_TAG_TCI(tag);
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;
	task_context->remote_node_index = iport->reserved_rni;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->task_phase = 0x01;
}
0591
0592 static void sci_port_destroy_dummy_resources(struct isci_port *iport)
0593 {
0594 struct isci_host *ihost = iport->owning_controller;
0595
0596 if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
0597 isci_free_tag(ihost, iport->reserved_tag);
0598
0599 if (iport->reserved_rni != SCU_DUMMY_INDEX)
0600 sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
0601 1, iport->reserved_rni);
0602
0603 iport->reserved_rni = SCU_DUMMY_INDEX;
0604 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
0605 }
0606
0607 void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
0608 {
0609 u8 index;
0610
0611 for (index = 0; index < SCI_MAX_PHYS; index++) {
0612 if (iport->active_phy_mask & (1 << index))
0613 sci_phy_setup_transport(iport->phy_table[index], device_id);
0614 }
0615 }
0616
/* Resume @iphy at the link layer and record it in the port's mask of
 * enabled (resumed) phys. */
static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
{
	sci_phy_resume(iphy);
	iport->enabled_phy_mask |= 1 << iphy->phy_index;
}
0622
/*
 * sci_port_activate_phy() - mark @iphy active within @iport.
 * @flags: PF_RESUME resumes the phy now; PF_NOTIFY reports link-up to
 *	   the OS layer.
 *
 * SATA phys are never resumed here even with PF_RESUME set --
 * NOTE(review): presumably their resume is deferred to the SATA
 * device bring-up path; confirm against the remote device code.
 */
static void sci_port_activate_phy(struct isci_port *iport,
				  struct isci_phy *iphy,
				  u8 flags)
{
	struct isci_host *ihost = iport->owning_controller;

	if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
		sci_phy_resume(iphy);

	iport->active_phy_mask |= 1 << iphy->phy_index;

	sci_controller_clear_invalid_phy(ihost, iphy);

	if (flags & PF_NOTIFY)
		isci_port_link_up(ihost, iport, iphy);
}
0639
/*
 * sci_port_deactivate_phy() - remove @iphy from the port's active and
 * enabled masks and optionally report link-down to the OS layer.
 *
 * When the last phy goes down its index is remembered in
 * last_active_phy so a failed hard reset can later report link-down
 * on the right phy.
 */
void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
			     bool do_notify_user)
{
	struct isci_host *ihost = iport->owning_controller;

	iport->active_phy_mask &= ~(1 << iphy->phy_index);
	iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
	if (!iport->active_phy_mask)
		iport->last_active_phy = iphy->phy_index;

	iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;

	/* In automatic port configuration mode, point the phy's port
	 * engine mapping back at the phy's own index. */
	if (iport->owning_controller->oem_parameters.controller.mode_type ==
	    SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
		writel(iphy->phy_index,
		       &iport->port_pe_configuration_register[iphy->phy_index]);

	if (do_notify_user == true)
		isci_port_link_down(ihost, iphy, iport);
}
0663
0664 static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
0665 {
0666 struct isci_host *ihost = iport->owning_controller;
0667
0668
0669
0670
0671
0672
0673 if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
0674 ihost->invalid_phy_mask |= 1 << iphy->phy_index;
0675 dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
0676 }
0677 }
0678
0679
0680
0681
0682
0683
0684
0685
0686
0687
0688
0689
/*
 * sci_port_general_link_up_handler() - decide whether a link-up phy
 * may join @iport.
 * @flags: PF_NOTIFY and/or PF_RESUME, forwarded to the activation path.
 *
 * The phy is activated when its attached SAS address matches the
 * port's current attached address, or when the port has no active phy
 * yet (the first phy up defines the address).  Any other address is
 * flagged as an invalid link-up.  A successful activation while the
 * port is resetting completes the reset by returning to READY.
 */
static void sci_port_general_link_up_handler(struct isci_port *iport,
					     struct isci_phy *iphy,
					     u8 flags)
{
	struct sci_sas_address port_sas_address;
	struct sci_sas_address phy_sas_address;

	sci_port_get_attached_sas_address(iport, &port_sas_address);
	sci_phy_get_attached_sas_address(iphy, &phy_sas_address);

	if ((phy_sas_address.high == port_sas_address.high &&
	     phy_sas_address.low == port_sas_address.low) ||
	    iport->active_phy_mask == 0) {
		struct sci_base_state_machine *sm = &iport->sm;

		sci_port_activate_phy(iport, iphy, flags);
		if (sm->current_state_id == SCI_PORT_RESETTING)
			port_state_machine_change(iport, SCI_PORT_READY);
	} else
		sci_port_invalid_link_up(iport, iphy);
}
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
0729 static bool sci_port_is_wide(struct isci_port *iport)
0730 {
0731 u32 index;
0732 u32 phy_count = 0;
0733
0734 for (index = 0; index < SCI_MAX_PHYS; index++) {
0735 if (iport->phy_table[index] != NULL) {
0736 phy_count++;
0737 }
0738 }
0739
0740 return phy_count != 1;
0741 }
0742
0743
0744
0745
0746
0747
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757
/*
 * sci_port_link_detected() - filter a link-detect event on @iphy.
 *
 * A SATA link appearing on a wide port is rejected and reported as an
 * invalid link-up (presumably because SATA devices cannot be part of
 * a wide port); otherwise the phy's port engine is mapped to the port
 * matching the phy's own index.
 *
 * Return: false when the event should be ignored, true otherwise.
 */
bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
{
	if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
	    (iphy->protocol == SAS_PROTOCOL_SATA)) {
		if (sci_port_is_wide(iport)) {
			sci_port_invalid_link_up(iport, iphy);
			return false;
		} else {
			struct isci_host *ihost = iport->owning_controller;
			struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
			writel(iphy->phy_index,
			       &dst_port->port_pe_configuration_register[iphy->phy_index]);
		}
	}

	return true;
}
0775
0776 static void port_timeout(struct timer_list *t)
0777 {
0778 struct sci_timer *tmr = from_timer(tmr, t, timer);
0779 struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
0780 struct isci_host *ihost = iport->owning_controller;
0781 unsigned long flags;
0782 u32 current_state;
0783
0784 spin_lock_irqsave(&ihost->scic_lock, flags);
0785
0786 if (tmr->cancel)
0787 goto done;
0788
0789 current_state = iport->sm.current_state_id;
0790
0791 if (current_state == SCI_PORT_RESETTING) {
0792
0793
0794
0795 port_state_machine_change(iport, SCI_PORT_FAILED);
0796 } else if (current_state == SCI_PORT_STOPPED) {
0797
0798
0799
0800 dev_err(sciport_to_dev(iport),
0801 "%s: SCIC Port 0x%p failed to stop before timeout.\n",
0802 __func__,
0803 iport);
0804 } else if (current_state == SCI_PORT_STOPPING) {
0805 dev_dbg(sciport_to_dev(iport),
0806 "%s: port%d: stop complete timeout\n",
0807 __func__, iport->physical_port_index);
0808 } else {
0809
0810
0811
0812 dev_err(sciport_to_dev(iport),
0813 "%s: SCIC Port 0x%p is processing a timeout operation "
0814 "in state %d.\n", __func__, iport, current_state);
0815 }
0816
0817 done:
0818 spin_unlock_irqrestore(&ihost->scic_lock, flags);
0819 }
0820
0821
0822
0823
0824
0825
/* Program the port's virtual initiator information table (VIIT)
 * entry: local SAS address, reserved field, and the combined
 * id/port-type/valid status word. */
static void sci_port_update_viit_entry(struct isci_port *iport)
{
	struct sci_sas_address sas_address;

	sci_port_get_sas_address(iport, &sas_address);

	writel(sas_address.high,
	       &iport->viit_registers->initiator_sas_address_hi);
	writel(sas_address.low,
	       &iport->viit_registers->initiator_sas_address_lo);

	/* clear the reserved field in case it is not already zero */
	writel(0, &iport->viit_registers->reserved);

	/* the status word is written last so the entry only becomes
	 * valid once the address fields are in place */
	writel(SCU_VIIT_ENTRY_ID_VIIT |
	       SCU_VIIT_IPPT_INITIATOR |
	       ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
	       SCU_VIIT_STATUS_ALL_VALID,
	       &iport->viit_registers->status);
}
0847
0848 enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
0849 {
0850 u16 index;
0851 struct isci_phy *iphy;
0852 enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
0853
0854
0855
0856
0857 for (index = 0; index < SCI_MAX_PHYS; index++) {
0858 iphy = iport->phy_table[index];
0859 if (iphy && sci_port_active_phy(iport, iphy) &&
0860 iphy->max_negotiated_speed < max_allowed_speed)
0861 max_allowed_speed = iphy->max_negotiated_speed;
0862 }
0863
0864 return max_allowed_speed;
0865 }
0866
0867 static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
0868 {
0869 u32 pts_control_value;
0870
0871 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
0872 pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
0873 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
0874 }
0875
0876
0877
0878
0879
0880
0881
0882
0883
0884
0885 static void sci_port_post_dummy_request(struct isci_port *iport)
0886 {
0887 struct isci_host *ihost = iport->owning_controller;
0888 u16 tag = iport->reserved_tag;
0889 struct scu_task_context *tc;
0890 u32 command;
0891
0892 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
0893 tc->abort = 0;
0894
0895 command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
0896 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
0897 ISCI_TAG_TCI(tag);
0898
0899 sci_controller_post_request(ihost, command);
0900 }
0901
0902
0903
0904
0905
0906
0907
0908
0909
0910 static void sci_port_abort_dummy_request(struct isci_port *iport)
0911 {
0912 struct isci_host *ihost = iport->owning_controller;
0913 u16 tag = iport->reserved_tag;
0914 struct scu_task_context *tc;
0915 u32 command;
0916
0917 tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
0918 tc->abort = 1;
0919
0920 command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
0921 iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
0922 ISCI_TAG_TCI(tag);
0923
0924 sci_controller_post_request(ihost, command);
0925 }
0926
0927
0928
0929
0930
0931
0932
0933 static void
0934 sci_port_resume_port_task_scheduler(struct isci_port *iport)
0935 {
0936 u32 pts_control_value;
0937
0938 pts_control_value = readl(&iport->port_task_scheduler_registers->control);
0939 pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
0940 writel(pts_control_value, &iport->port_task_scheduler_registers->control);
0941 }
0942
0943 static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
0944 {
0945 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
0946
0947 sci_port_suspend_port_task_scheduler(iport);
0948
0949 iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
0950
0951 if (iport->active_phy_mask != 0) {
0952
0953 port_state_machine_change(iport,
0954 SCI_PORT_SUB_OPERATIONAL);
0955 }
0956 }
0957
0958 static void scic_sds_port_ready_substate_waiting_exit(
0959 struct sci_base_state_machine *sm)
0960 {
0961 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
0962 sci_port_resume_port_task_scheduler(iport);
0963 }
0964
/* Enter the ready-operational substate: map every member phy's port
 * engine to this port, resume phys that are active but not yet
 * enabled, refresh the VIIT entry and post the dummy task context. */
static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
	u32 index;
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
		__func__, iport->physical_port_index);

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		if (iport->phy_table[index]) {
			writel(iport->physical_port_index,
			       &iport->port_pe_configuration_register[
					iport->phy_table[index]->phy_index]);
			/* active^enabled isolates phys that are active
			 * but have not been resumed yet */
			if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
				sci_port_resume_phy(iport, iport->phy_table[index]);
		}
	}

	sci_port_update_viit_entry(iport);

	/* Keep the reserved dummy task posted while operational --
	 * NOTE(review): presumably a hardware requirement/workaround;
	 * confirm against the SCU documentation. */
	sci_port_post_dummy_request(iport);
}
0992
/* Invalidate the port's dummy remote node context: clear the valid
 * bit in memory, then post an RNC invalidate request to the hardware. */
static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];

	rnc->ssp.is_valid = false;

	/* The readl flushes posted writes and the udelay gives the
	 * hardware time to observe the cleared valid bit before the
	 * invalidate request is posted. */
	readl(&ihost->smu_registers->interrupt_status);
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}
1017
1018
1019
1020
1021
1022
1023
1024
1025
/* Leave the ready-operational substate: abort the dummy task posted
 * on entry and, when the ready state cluster is being left entirely
 * (ready_exit set by port_state_machine_change()), invalidate the
 * dummy remote node as well. */
static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
{
	struct isci_port *iport = container_of(sm, typeof(*iport), sm);
	struct isci_host *ihost = iport->owning_controller;

	/* abort the dummy task context that was posted while the port
	 * was operational */
	sci_port_abort_dummy_request(iport);

	dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
		__func__, iport->physical_port_index);

	if (iport->ready_exit)
		sci_port_invalidate_dummy_remote_node(iport);
}
1044
1045 static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
1046 {
1047 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1048 struct isci_host *ihost = iport->owning_controller;
1049
1050 if (iport->active_phy_mask == 0) {
1051 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1052 __func__, iport->physical_port_index);
1053
1054 port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
1055 } else
1056 port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
1057 }
1058
/*
 * sci_port_start() - transition a stopped port to READY.
 *
 * Lazily allocates the port's reserved remote node index and io tag
 * (backing the dummy RNC/task pair), validates the assigned phy mask
 * against the hardware rules, and enters SCI_PORT_READY.  On any
 * failure the dummy resources are released again.
 *
 * Return: SCI_SUCCESS; SCI_FAILURE_INVALID_STATE when not stopped;
 * SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION when devices are still
 * assigned; SCI_FAILURE_INSUFFICIENT_RESOURCES when an allocation
 * fails; SCI_FAILURE for an invalid phy mask.
 */
enum sci_status sci_port_start(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	enum sci_status status = SCI_SUCCESS;
	enum sci_port_states state;
	u32 phy_mask;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_STOPPED) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	if (iport->assigned_device_count > 0) {
		/* cannot start while remote devices are still bound to
		 * the port */
		return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
	}

	/* reserve a remote node index for the dummy RNC if needed */
	if (iport->reserved_rni == SCU_DUMMY_INDEX) {
		u16 rni = sci_remote_node_table_allocate_remote_node(
				&ihost->available_remote_nodes, 1);

		if (rni != SCU_DUMMY_INDEX)
			sci_port_construct_dummy_rnc(iport, rni);
		else
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		iport->reserved_rni = rni;
	}

	/* reserve an io tag for the dummy task context if needed */
	if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		u16 tag;

		tag = isci_alloc_tag(ihost);
		if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
			status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
		else
			sci_port_construct_dummy_task(iport, tag);
		iport->reserved_tag = tag;
	}

	if (status == SCI_SUCCESS) {
		phy_mask = sci_port_get_phys(iport);

		/* the port may only start with a phy mask the hardware
		 * supports for this port index */
		if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
			port_state_machine_change(iport,
						  SCI_PORT_READY);

			return SCI_SUCCESS;
		}
		status = SCI_FAILURE;
	}

	if (status != SCI_SUCCESS)
		sci_port_destroy_dummy_resources(iport);

	return status;
}
1126
1127 enum sci_status sci_port_stop(struct isci_port *iport)
1128 {
1129 enum sci_port_states state;
1130
1131 state = iport->sm.current_state_id;
1132 switch (state) {
1133 case SCI_PORT_STOPPED:
1134 return SCI_SUCCESS;
1135 case SCI_PORT_SUB_WAITING:
1136 case SCI_PORT_SUB_OPERATIONAL:
1137 case SCI_PORT_SUB_CONFIGURING:
1138 case SCI_PORT_RESETTING:
1139 port_state_machine_change(iport,
1140 SCI_PORT_STOPPING);
1141 return SCI_SUCCESS;
1142 default:
1143 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1144 __func__, port_state_name(state));
1145 return SCI_FAILURE_INVALID_STATE;
1146 }
1147 }
1148
/*
 * sci_port_hard_reset() - start a hard reset of the port.
 * @timeout: reset timeout, armed on the port timer.
 *
 * Selects the first phy that is both assigned and active, issues a
 * phy reset on it, arms the timer and enters SCI_PORT_RESETTING.
 *
 * Return: SCI_SUCCESS; SCI_FAILURE_INVALID_STATE when not in the
 * ready-operational substate; SCI_FAILURE_INVALID_PHY when no active
 * phy exists; otherwise the failing sci_phy_reset() status.
 */
static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
{
	enum sci_status status = SCI_FAILURE_INVALID_PHY;
	struct isci_phy *iphy = NULL;
	enum sci_port_states state;
	u32 phy_index;

	state = iport->sm.current_state_id;
	if (state != SCI_PORT_SUB_OPERATIONAL) {
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}

	/* the loop terminates as soon as iphy stays non-NULL */
	for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
		iphy = iport->phy_table[phy_index];
		if (iphy && !sci_port_active_phy(iport, iphy)) {
			/* assigned but inactive: keep looking */
			iphy = NULL;
		}
	}

	if (!iphy)
		return status;
	status = sci_phy_reset(iphy);

	if (status != SCI_SUCCESS)
		return status;

	sci_mod_timer(&iport->timer, timeout);
	iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;

	port_state_machine_change(iport, SCI_PORT_RESETTING);
	return SCI_SUCCESS;
}
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
/*
 * sci_port_add_phy() - add @iphy to @iport.
 *
 * While stopped, the phy is accepted only if its SAS address matches
 * the port's existing address (or the port is still empty).  In the
 * waiting/operational/configuring substates the phy is additionally
 * run through the link-up handler and the port (re-)enters the
 * configuring substate.
 *
 * Return: SCI_SUCCESS, a sci_port_set_phy() failure code,
 * SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION on an address mismatch,
 * or SCI_FAILURE_INVALID_STATE.
 */
enum sci_status sci_port_add_phy(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	sci_port_bcn_enable(iport);

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_STOPPED: {
		struct sci_sas_address port_sas_address;

		/* read the port's current SAS address */
		sci_port_get_sas_address(iport, &port_sas_address);

		if (port_sas_address.high != 0 && port_sas_address.low != 0) {
			struct sci_sas_address phy_sas_address;

			/* the phy's SAS address must match the port's */
			sci_phy_get_sas_address(iphy, &phy_sas_address);

			if (port_sas_address.high != phy_sas_address.high ||
			    port_sas_address.low != phy_sas_address.low)
				return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
		}
		return sci_port_set_phy(iport, iphy);
	}
	case SCI_PORT_SUB_WAITING:
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;

		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);

		return status;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_set_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);

		/* re-enter configuring: this may have been the last phy
		 * to join the port */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
/*
 * sci_port_remove_phy() - remove @iphy from @iport.
 *
 * Allowed while stopped, operational or configuring.  Outside the
 * stopped state the phy is also deactivated (with OS notification)
 * and the port (re-)enters the configuring substate.
 *
 * Return: SCI_SUCCESS, a sci_port_clear_phy() failure code, or
 * SCI_FAILURE_INVALID_STATE.
 */
enum sci_status sci_port_remove_phy(struct isci_port *iport,
				    struct isci_phy *iphy)
{
	enum sci_status status;
	enum sci_port_states state;

	state = iport->sm.current_state_id;

	switch (state) {
	case SCI_PORT_STOPPED:
		return sci_port_clear_phy(iport, iphy);
	case SCI_PORT_SUB_OPERATIONAL:
		status = sci_port_clear_phy(iport, iphy);
		if (status != SCI_SUCCESS)
			return status;

		sci_port_deactivate_phy(iport, iphy, true);
		iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_CONFIGURING:
		status = sci_port_clear_phy(iport, iphy);

		if (status != SCI_SUCCESS)
			return status;
		sci_port_deactivate_phy(iport, iphy, true);

		/* re-enter configuring: this may have been the last phy
		 * in the port */
		port_state_machine_change(iport,
					  SCI_PORT_SUB_CONFIGURING);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
1310
/*
 * sci_port_link_up() - handle a link-up event for @iphy on @iport.
 *
 * In the waiting substate the first phy up is activated directly and
 * the port goes operational.  In the operational substate the phy is
 * vetted by the general link-up handler.  While resetting, activation
 * happens without OS notification (the reset completion path reports
 * the port state itself).
 */
enum sci_status sci_port_link_up(struct isci_port *iport,
				 struct isci_phy *iphy)
{
	enum sci_port_states state;

	state = iport->sm.current_state_id;
	switch (state) {
	case SCI_PORT_SUB_WAITING:
		/* first phy up for the port: just enable it and go
		 * operational */
		sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);

		port_state_machine_change(iport,
					  SCI_PORT_SUB_OPERATIONAL);
		return SCI_SUCCESS;
	case SCI_PORT_SUB_OPERATIONAL:
		sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
		return SCI_SUCCESS;
	case SCI_PORT_RESETTING:
		/* the link coming back is part of the hard reset flow,
		 * so activate without notifying the OS layer */
		sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
		return SCI_SUCCESS;
	default:
		dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
			 __func__, port_state_name(state));
		return SCI_FAILURE_INVALID_STATE;
	}
}
1353
1354 enum sci_status sci_port_link_down(struct isci_port *iport,
1355 struct isci_phy *iphy)
1356 {
1357 enum sci_port_states state;
1358
1359 state = iport->sm.current_state_id;
1360 switch (state) {
1361 case SCI_PORT_SUB_OPERATIONAL:
1362 sci_port_deactivate_phy(iport, iphy, true);
1363
1364
1365
1366
1367
1368 if (iport->active_phy_mask == 0)
1369 port_state_machine_change(iport,
1370 SCI_PORT_SUB_WAITING);
1371 return SCI_SUCCESS;
1372 case SCI_PORT_RESETTING:
1373
1374
1375 sci_port_deactivate_phy(iport, iphy, false);
1376 return SCI_SUCCESS;
1377 default:
1378 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1379 __func__, port_state_name(state));
1380 return SCI_FAILURE_INVALID_STATE;
1381 }
1382 }
1383
1384 enum sci_status sci_port_start_io(struct isci_port *iport,
1385 struct isci_remote_device *idev,
1386 struct isci_request *ireq)
1387 {
1388 enum sci_port_states state;
1389
1390 state = iport->sm.current_state_id;
1391 switch (state) {
1392 case SCI_PORT_SUB_WAITING:
1393 return SCI_FAILURE_INVALID_STATE;
1394 case SCI_PORT_SUB_OPERATIONAL:
1395 iport->started_request_count++;
1396 return SCI_SUCCESS;
1397 default:
1398 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1399 __func__, port_state_name(state));
1400 return SCI_FAILURE_INVALID_STATE;
1401 }
1402 }
1403
1404 enum sci_status sci_port_complete_io(struct isci_port *iport,
1405 struct isci_remote_device *idev,
1406 struct isci_request *ireq)
1407 {
1408 enum sci_port_states state;
1409
1410 state = iport->sm.current_state_id;
1411 switch (state) {
1412 case SCI_PORT_STOPPED:
1413 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1414 __func__, port_state_name(state));
1415 return SCI_FAILURE_INVALID_STATE;
1416 case SCI_PORT_STOPPING:
1417 sci_port_decrement_request_count(iport);
1418
1419 if (iport->started_request_count == 0)
1420 port_state_machine_change(iport,
1421 SCI_PORT_STOPPED);
1422 break;
1423 case SCI_PORT_READY:
1424 case SCI_PORT_RESETTING:
1425 case SCI_PORT_FAILED:
1426 case SCI_PORT_SUB_WAITING:
1427 case SCI_PORT_SUB_OPERATIONAL:
1428 sci_port_decrement_request_count(iport);
1429 break;
1430 case SCI_PORT_SUB_CONFIGURING:
1431 sci_port_decrement_request_count(iport);
1432 if (iport->started_request_count == 0) {
1433 port_state_machine_change(iport,
1434 SCI_PORT_SUB_OPERATIONAL);
1435 }
1436 break;
1437 }
1438 return SCI_SUCCESS;
1439 }
1440
/* Enable the port task scheduler (PTS) for this port.  Note that both
 * the ENABLE and the SUSPEND bits are set, i.e. the scheduler comes up
 * enabled-but-suspended; something else is expected to release the
 * suspend later.
 */
static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	/* Read-modify-write of the PTS control register. */
	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
1450
/* Disable the port task scheduler (PTS) for this port by clearing both
 * the ENABLE and SUSPEND bits (the inverse of
 * sci_port_enable_port_task_scheduler()).
 */
static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
{
	u32 pts_control_value;

	/* Read-modify-write of the PTS control register. */
	pts_control_value = readl(&iport->port_task_scheduler_registers->control);
	pts_control_value &=
		~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
	writel(pts_control_value, &iport->port_task_scheduler_registers->control);
}
1460
/* Post the port's reserved ("dummy") remote node context to the
 * hardware and then immediately suspend it (TX and RX).  The dummy RNC
 * occupies the slot reserved at iport->reserved_rni.
 */
static void sci_port_post_dummy_remote_node(struct isci_port *iport)
{
	struct isci_host *ihost = iport->owning_controller;
	u8 phys_index = iport->physical_port_index;
	union scu_remote_node_context *rnc;
	u16 rni = iport->reserved_rni;
	u32 command;

	rnc = &ihost->remote_node_context_table[rni];
	rnc->ssp.is_valid = true;

	command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);

	/* The readl appears to act as a posted-write flush so the hardware
	 * has seen the POST_RNC command, and the udelay gives it time to
	 * process it before the suspend is posted.  NOTE(review): exact
	 * hardware requirement inferred from the access pattern — confirm
	 * against the SCU programming docs before changing.
	 */
	readl(&ihost->smu_registers->interrupt_status);
	udelay(10);

	command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
		  phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;

	sci_controller_post_request(ihost, command);
}
1488
1489 static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
1490 {
1491 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1492
1493 if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
1494
1495
1496
1497
1498 sci_port_disable_port_task_scheduler(iport);
1499 }
1500 }
1501
1502 static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
1503 {
1504 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1505
1506
1507 sci_port_enable_port_task_scheduler(iport);
1508 }
1509
1510 static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
1511 {
1512 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1513 struct isci_host *ihost = iport->owning_controller;
1514 u32 prev_state;
1515
1516 prev_state = iport->sm.previous_state_id;
1517 if (prev_state == SCI_PORT_RESETTING)
1518 isci_port_hard_reset_complete(iport, SCI_SUCCESS);
1519 else
1520 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1521 __func__, iport->physical_port_index);
1522
1523
1524 sci_port_post_dummy_remote_node(iport);
1525
1526
1527 port_state_machine_change(iport,
1528 SCI_PORT_SUB_WAITING);
1529 }
1530
1531 static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
1532 {
1533 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1534
1535 sci_del_timer(&iport->timer);
1536 }
1537
1538 static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
1539 {
1540 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1541
1542 sci_del_timer(&iport->timer);
1543
1544 sci_port_destroy_dummy_resources(iport);
1545 }
1546
1547 static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
1548 {
1549 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1550
1551 isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
1552 }
1553
/**
 * sci_port_set_hang_detection_timeout() - program the link-layer hang
 *	detection timeout on every currently active phy in the port.
 * @iport: the port whose active phys are updated
 * @timeout: timeout value to write; 0 requests disabling hang detection
 *
 * Enable/disable requests are reference counted via
 * iport->hang_detect_users so that a disable request (@timeout == 0)
 * only reaches the hardware once the last user has turned hang
 * detection off.  A non-zero @timeout is always written (refreshing the
 * value on all active phys).
 */
void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
{
	int phy_index;
	u32 phy_mask = iport->active_phy_mask;

	/* Track users; clamp the count at zero on disable. */
	if (timeout)
		++iport->hang_detect_users;
	else if (iport->hang_detect_users > 1)
		--iport->hang_detect_users;
	else
		iport->hang_detect_users = 0;

	/* Write the timeout when enabling, or when the last user disabled. */
	if (timeout || (iport->hang_detect_users == 0)) {
		for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
			if ((phy_mask >> phy_index) & 1) {
				writel(timeout,
				       &iport->phy_table[phy_index]
					  ->link_layer_registers
					  ->link_layer_hang_detection_timeout);
			}
		}
	}
}
1577
1578
/* State table for the port state machine: maps each sci_port_states
 * value to its optional enter/exit handlers.  States without a handler
 * for a given transition simply leave that member NULL.
 */
static const struct sci_base_state sci_port_state_table[] = {
	[SCI_PORT_STOPPED] = {
		.enter_state = sci_port_stopped_state_enter,
		.exit_state = sci_port_stopped_state_exit
	},
	[SCI_PORT_STOPPING] = {
		.exit_state = sci_port_stopping_state_exit
	},
	[SCI_PORT_READY] = {
		.enter_state = sci_port_ready_state_enter,
	},
	[SCI_PORT_SUB_WAITING] = {
		.enter_state = sci_port_ready_substate_waiting_enter,
		.exit_state = scic_sds_port_ready_substate_waiting_exit,
	},
	[SCI_PORT_SUB_OPERATIONAL] = {
		.enter_state = sci_port_ready_substate_operational_enter,
		.exit_state = sci_port_ready_substate_operational_exit
	},
	[SCI_PORT_SUB_CONFIGURING] = {
		.enter_state = sci_port_ready_substate_configuring_enter
	},
	[SCI_PORT_RESETTING] = {
		.exit_state = sci_port_resetting_state_exit
	},
	[SCI_PORT_FAILED] = {
		.enter_state = sci_port_failed_state_enter,
	}
};
1608
1609 void sci_port_construct(struct isci_port *iport, u8 index,
1610 struct isci_host *ihost)
1611 {
1612 sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
1613
1614 iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
1615 iport->physical_port_index = index;
1616 iport->active_phy_mask = 0;
1617 iport->enabled_phy_mask = 0;
1618 iport->last_active_phy = 0;
1619 iport->ready_exit = false;
1620
1621 iport->owning_controller = ihost;
1622
1623 iport->started_request_count = 0;
1624 iport->assigned_device_count = 0;
1625 iport->hang_detect_users = 0;
1626
1627 iport->reserved_rni = SCU_DUMMY_INDEX;
1628 iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
1629
1630 sci_init_timer(&iport->timer, port_timeout);
1631
1632 iport->port_task_scheduler_registers = NULL;
1633
1634 for (index = 0; index < SCI_MAX_PHYS; index++)
1635 iport->phy_table[index] = NULL;
1636 }
1637
1638 void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
1639 {
1640 struct isci_host *ihost = iport->owning_controller;
1641
1642
1643 isci_port_bc_change_received(ihost, iport, iphy);
1644 }
1645
/* Block until the in-flight hard reset on @iport completes, i.e. until
 * IPORT_RESET_PENDING is cleared and ihost->eventq is woken.
 */
static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
{
	wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
}
1650
1651 int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
1652 struct isci_phy *iphy)
1653 {
1654 unsigned long flags;
1655 enum sci_status status;
1656 int ret = TMF_RESP_FUNC_COMPLETE;
1657
1658 dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
1659 __func__, iport);
1660
1661 spin_lock_irqsave(&ihost->scic_lock, flags);
1662 set_bit(IPORT_RESET_PENDING, &iport->state);
1663
1664 #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
1665 status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
1666
1667 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1668
1669 if (status == SCI_SUCCESS) {
1670 wait_port_reset(ihost, iport);
1671
1672 dev_dbg(&ihost->pdev->dev,
1673 "%s: iport = %p; hard reset completion\n",
1674 __func__, iport);
1675
1676 if (iport->hard_reset_status != SCI_SUCCESS) {
1677 ret = TMF_RESP_FUNC_FAILED;
1678
1679 dev_err(&ihost->pdev->dev,
1680 "%s: iport = %p; hard reset failed (0x%x)\n",
1681 __func__, iport, iport->hard_reset_status);
1682 }
1683 } else {
1684 clear_bit(IPORT_RESET_PENDING, &iport->state);
1685 wake_up(&ihost->eventq);
1686 ret = TMF_RESP_FUNC_FAILED;
1687
1688 dev_err(&ihost->pdev->dev,
1689 "%s: iport = %p; sci_port_hard_reset call"
1690 " failed 0x%x\n",
1691 __func__, iport, status);
1692
1693 }
1694 return ret;
1695 }
1696
1697 int isci_ata_check_ready(struct domain_device *dev)
1698 {
1699 struct isci_port *iport = dev->port->lldd_port;
1700 struct isci_host *ihost = dev_to_ihost(dev);
1701 struct isci_remote_device *idev;
1702 unsigned long flags;
1703 int rc = 0;
1704
1705 spin_lock_irqsave(&ihost->scic_lock, flags);
1706 idev = isci_lookup_device(dev);
1707 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1708
1709 if (!idev)
1710 goto out;
1711
1712 if (test_bit(IPORT_RESET_PENDING, &iport->state))
1713 goto out;
1714
1715 rc = !!iport->active_phy_mask;
1716 out:
1717 isci_put_device(idev);
1718
1719 return rc;
1720 }
1721
1722 void isci_port_deformed(struct asd_sas_phy *phy)
1723 {
1724 struct isci_host *ihost = phy->ha->lldd_ha;
1725 struct isci_port *iport = phy->port->lldd_port;
1726 unsigned long flags;
1727 int i;
1728
1729
1730
1731
1732 if (!iport)
1733 return;
1734
1735 spin_lock_irqsave(&ihost->scic_lock, flags);
1736 for (i = 0; i < SCI_MAX_PHYS; i++) {
1737 if (iport->active_phy_mask & 1 << i)
1738 break;
1739 }
1740 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1741
1742 if (i >= SCI_MAX_PHYS)
1743 dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
1744 __func__, (long) (iport - &ihost->ports[0]));
1745 }
1746
1747 void isci_port_formed(struct asd_sas_phy *phy)
1748 {
1749 struct isci_host *ihost = phy->ha->lldd_ha;
1750 struct isci_phy *iphy = to_iphy(phy);
1751 struct asd_sas_port *port = phy->port;
1752 struct isci_port *iport = NULL;
1753 unsigned long flags;
1754 int i;
1755
1756
1757
1758
1759 wait_for_start(ihost);
1760
1761 spin_lock_irqsave(&ihost->scic_lock, flags);
1762 for (i = 0; i < SCI_MAX_PORTS; i++) {
1763 iport = &ihost->ports[i];
1764 if (iport->active_phy_mask & 1 << iphy->phy_index)
1765 break;
1766 }
1767 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1768
1769 if (i >= SCI_MAX_PORTS)
1770 iport = NULL;
1771
1772 port->lldd_port = iport;
1773 }