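/*
 * pm8001_sas.c - libsas interface routines shared by the PMC-Sierra/Microsemi
 * PM8001 (SPC) and PM80xx (SPCv/ve) SAS/SATA host adapter driver.
 */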

#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_tracepoints.h"
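
/**
 * pm8001_find_tag - look up the CCB tag that belongs to a sas_task
 * @task: the task sent to the LLDD
 * @tag: on return, the tag associated with the task
 *
 * Returns 1 if a tag was found, 0 otherwise.
 */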
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}
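
/**
 * pm8001_tag_free - return a tag to the HBA's tag bitmap
 * @pm8001_ha: our hba struct
 * @tag: the tag that is no longer needed
 */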
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	__clear_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}
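
/**
 * pm8001_tag_alloc - allocate a free tag from the HBA's tag bitmap
 * @pm8001_ha: our hba struct
 * @tag_out: on success, the allocated tag
 *
 * Returns 0 on success or -SAS_QUEUE_FULL when no tag is available.
 */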
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;
	unsigned int tag;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
	if (tag >= pm8001_ha->tags_num) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	__set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
	*tag_out = tag;
	return 0;
}

void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
	int i;

	for (i = 0; i < pm8001_ha->tags_num; ++i)
		pm8001_tag_free(pm8001_ha, i);
}
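
/**
 * pm8001_mem_alloc - allocate a DMA-coherent region and report an address
 * aligned to @align
 * @pdev: PCI device used for the DMA mapping
 * @virt_addr: aligned kernel virtual address of the region
 * @pphys_addr: DMA handle returned by dma_alloc_coherent()
 * @pphys_addr_hi: upper 32 bits of the aligned DMA address
 * @pphys_addr_lo: lower 32 bits of the aligned DMA address
 * @mem_size: number of bytes requested
 * @align: required alignment, a power of two (0 for none)
 */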
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;

	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
				&mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}
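
/* Return the pm8001_hba_info that owns the given domain device. */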
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;

	return pm8001_ha;
}
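
/**
 * pm8001_phy_control - libsas phy_control handler; only HBA-local phys are
 * controlled here (expander phys must be driven through SMP instead)
 * @sas_phy: the HBA phy to operate on
 * @func: the requested phy function
 * @funcdata: function-specific data, e.g. link rates for
 *            PHY_FUNC_SET_LINK_RATE
 */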
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;

	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];
	pm8001_ha->phy[phy_id].enable_completion = &completion;

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
			    PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
				+ 0x1034 + (0x4000 * (phy_id & 3));

			phy->invalid_dword_count = readl(qp);
			phy->running_disparity_error_count = readl(&qp[1]);
			phy->loss_of_dword_sync_count = readl(&qp[3]);
			phy->phy_reset_problem_count = readl(&qp[4]);
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}
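
/* Bring up all HBA phys; called through libsas when the host is scanned. */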
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);

	pm8001_ha = sha->lldd_ha;
	/* SAS re-initialization is only supported on the SPC (chip_8001) */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}

int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling events time to come in before declaring
	 * the scan finished
	 */
	if (time < HZ)
		return 0;
	/* let libsas drain any pending discovery work */
	sas_drain_work(ha);
	return 1;
}
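
/*
 * The pm8001_task_prep_*() helpers hand a prepared CCB to the chip-specific
 * request builders in PM8001_CHIP_DISP.
 */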
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc && ata_is_ncq(qc->tf.protocol)) {
		*tag = qc->tag;
		return 1;
	}

	return 0;
}

static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
}

static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that this device is attached through. */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* A root device hangs directly off an HBA port; otherwise walk up
	 * to the topmost expander and use its port id.
	 */
	if (!pdev)
		return dev->port->id;
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}

#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))

static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	enum sas_protocol task_proto = task->task_proto;
	struct sas_tmf_task *tmf = task->tmf;
	int is_tmf = !!tmf;

	switch (task_proto) {
	case SAS_PROTOCOL_SMP:
		return pm8001_task_prep_smp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SSP:
		if (is_tmf)
			return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
		return pm8001_task_prep_ssp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		return pm8001_task_prep_ata(pm8001_ha, ccb);
	case SAS_PROTOCOL_INTERNAL_ABORT:
		return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
	default:
		dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
			task_proto);
	}

	return -EINVAL;
}
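
/**
 * pm8001_queue_command - libsas lldd_execute_task entry point: allocate a CCB
 * for the task and deliver it to the controller
 * @task: the sas_task to execute
 * @gfp_flags: allocation flags passed in by libsas
 */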
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	struct task_status_struct *ts = &task->task_status;
	enum sas_protocol task_proto = task->task_proto;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_port *port = NULL;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 n_elem = 0;
	int rc = 0;

	if (!internal_abort && !dev->port) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	if (pm8001_ha->controller_fatal_error) {
		ts->resp = SAS_TASK_UNDELIVERED;
		task->task_done(task);
		return 0;
	}

	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_dev = dev->lldd_dev;
	port = &pm8001_ha->port[sas_find_local_port_id(dev)];

	if (!internal_abort &&
	    (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (sas_protocol_ata(task_proto)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		} else {
			task->task_done(task);
		}
		rc = -ENODEV;
		goto err_out;
	}

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb) {
		rc = -SAS_QUEUE_FULL;
		goto err_out;
	}

	if (!sas_protocol_ata(task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
					task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto err_out_ccb;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	task->lldd_task = ccb;
	ccb->n_elem = n_elem;

	atomic_inc(&pm8001_dev->running_req);

	rc = pm8001_deliver_command(pm8001_ha, ccb);
	if (rc) {
		atomic_dec(&pm8001_dev->running_req);
		if (!sas_protocol_ata(task_proto) && n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				task->num_scatter, task->data_dir);
err_out_ccb:
		pm8001_ccb_free(pm8001_ha, ccb);

err_out:
		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	return rc;
}
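
/*
 * pm8001_ccb_task_free - unmap the task's DMA buffers, detach the task from
 * its CCB and return the CCB to the free pool.
 */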
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	if (!task)
		return;

	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
		dma_unmap_sg(pm8001_ha->dev, task->scatter,
			task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* nothing else to unmap */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		/* For SATA/STP tasks, uldd_task points at the ata_queued_cmd */
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb->ccb_tag, 0,
			qc ? qc->tf.command : 0,
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	task->lldd_task = NULL;
	pm8001_ccb_free(pm8001_ha, ccb);
}
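
/* Grab an unused slot from the HBA's device table. */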
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;

	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
			pm8001_ha->devices[dev].id = dev;
			return &pm8001_ha->devices[dev];
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "max support %d devices, ignore ..\n",
			   PM8001_MAX_DEVICES);
	}
	return NULL;
}
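
/* Look up a registered device by the firmware-assigned device id. */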
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
	u32 device_id)
{
	u32 dev;

	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	}
	return NULL;
}

void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	u32 id = pm8001_dev->id;

	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->id = id;
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}
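
/*
 * pm8001_dev_found_notify - a new device has been discovered by libsas:
 * allocate an LLDD device slot, work out which phy it is attached through and
 * register it with the firmware, waiting for the registration to complete.
 */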
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;

		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
		     phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1;
		}
	}
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

void pm8001_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

#define PM8001_TASK_TIMEOUT 20
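
/*
 * pm8001_dev_gone_notify - a device has gone away: abort anything still in
 * flight for it, deregister it from the firmware and release its slot.
 */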
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		if (atomic_read(&pm8001_dev->running_req)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			sas_execute_internal_abort_dev(dev, 0, NULL);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}
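
/*
 * pm8001_open_reject_retry - complete matching outstanding commands with
 * SAS_OPEN_REJECT/SAS_OREJ_RSVD_RETRY so the midlayer retries them.
 */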
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			/* no target device given: only touch CCBs whose
			 * device pointer lies inside this HBA's device table
			 */
			uintptr_t d = (uintptr_t)pm8001_dev
				- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
			mb(); /* make the status update visible first */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}
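
/*
 * pm8001_I_T_nexus_reset - I_T nexus reset handler. For SATA devices the
 * remote phy is reset and any outstanding commands are aborted; for SAS
 * devices a plain phy reset is issued.
 */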
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);
	return rc;
}
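
/*
 * pm8001_I_T_nexus_event_handler - recover an I_T nexus by aborting
 * outstanding commands, deregistering the device and hard-resetting the
 * remote phy.
 */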
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset the target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset the target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);

	return rc;
}

int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);

		sas_execute_internal_abort_dev(dev, 0, NULL);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	} else {
		rc = sas_lu_reset(dev, lun);
	}

	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
	return rc;
}
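
/* Query whether an outstanding SSP task is still queued in the LU. */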
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	int rc = TMF_RESP_FUNC_FAILED;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);

		rc = sas_query_task(task, tag);
		switch (rc) {
		/* The task is still in the LU */
		case TMF_RESP_FUNC_SUCC:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is still in Lun\n");
			break;
		/* The task is not in the LU or the query failed */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is not in Lun or failed, reset the phy\n");
			break;
		}
	}
	pr_err("pm80xx: rc= %d\n", rc);
	return rc;
}
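
/* Abort a single outstanding sas_task (SCSI error-handler abort). */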
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id, port_id;
	struct sas_task_slow slow_task;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;

	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		/* the controller is in a fatal error state and will not
		 * respond to an abort request
		 */
		return TMF_RESP_FUNC_FAILED;
	}

	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		rc = sas_abort_task(task, tag);
		sas_execute_internal_abort_single(dev, tag, 0, NULL);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
			port_id = phy->port->port_id;

			/* 1. Set the device state to recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_IN_RECOVERY);
			wait_for_completion(&completion);

			/* 2. Send a phy control hard reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/*
			 * Wait for the phy control request to be acknowledged
			 * before waiting for the port reset itself; if either
			 * step fails or times out, clear the completion
			 * pointers so that late events are not delivered to
			 * stack memory.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				   "Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for the port reset to complete or
				 * time out
				 */
				pm8001_dbg(pm8001_ha, MSG,
					   "Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
					PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					pm8001_dev_gone_notify(dev);
					PM8001_CHIP_DISP->hw_event_ack_req(
						pm8001_ha, 0,
						0x07,
						port_id, phy_id, 0, 0);
					goto out;
				}
			}

			/*
			 * 4. Abort all commands for the device and wait for
			 * this task's abort to complete so its CCB is released
			 * before the caller frees the task.
			 */
			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set the device state back to operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion);
		} else {
			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);

	}
out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_info(pm8001_ha, "rc= %d\n", rc);
	return rc;
}

int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
		   pm8001_dev->device_id);
	return sas_clear_task_set(dev, lun);
}

void pm8001_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
	struct pm8001_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct pm8001_port *port = phy->port;

	if (!sas_port) {
		pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
		return;
	}
	sas_port->lldd_port = port;
}

void pm8001_setds_completion(struct domain_device *dev)
{
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (pm8001_ha->chip_id != chip_8001) {
		pm8001_dev->setds_completion = &completion_setstate;
		PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	}
}

void pm8001_tmf_aborted(struct sas_task *task)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;

	if (ccb)
		ccb->task = NULL;
}