0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015 #define dev_fmt(fmt) "pciehp: " fmt
0016
0017 #include <linux/dmi.h>
0018 #include <linux/kernel.h>
0019 #include <linux/types.h>
0020 #include <linux/jiffies.h>
0021 #include <linux/kthread.h>
0022 #include <linux/pci.h>
0023 #include <linux/pm_runtime.h>
0024 #include <linux/interrupt.h>
0025 #include <linux/slab.h>
0026
0027 #include "../pci.h"
0028 #include "pciehp.h"
0029
/*
 * Systems matched here get ctrl->inband_presence_disabled forced on in
 * pcie_init(), which makes pciehp_check_link_status() explicitly wait
 * for Presence Detect State after link-up (see pcie_wait_for_presence()).
 */
static const struct dmi_system_id inband_presence_disabled_dmi_table[] = {
	/*
	 * Match all Dell systems via the OEM string.  NOTE(review):
	 * presumably some Dell slots lack in-band presence but do not
	 * advertise it; forcing the flag only adds a bounded wait for
	 * presence, so it is harmless on unaffected slots — confirm
	 * against the change that introduced this entry.
	 */
	{
		.ident = "Dell System",
		.matches = {
			DMI_MATCH(DMI_OEM_STRING, "Dell System"),
		},
	},
	{}
};
0047
/* Return the PCIe port (bridge) device whose slot this controller drives. */
static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
{
	return ctrl->pcie->port;
}
0052
0053 static irqreturn_t pciehp_isr(int irq, void *dev_id);
0054 static irqreturn_t pciehp_ist(int irq, void *dev_id);
0055 static int pciehp_poll(void *data);
0056
0057 static inline int pciehp_request_irq(struct controller *ctrl)
0058 {
0059 int retval, irq = ctrl->pcie->irq;
0060
0061 if (pciehp_poll_mode) {
0062 ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
0063 "pciehp_poll-%s",
0064 slot_name(ctrl));
0065 return PTR_ERR_OR_ZERO(ctrl->poll_thread);
0066 }
0067
0068
0069 retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist,
0070 IRQF_SHARED, "pciehp", ctrl);
0071 if (retval)
0072 ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
0073 irq);
0074 return retval;
0075 }
0076
0077 static inline void pciehp_free_irq(struct controller *ctrl)
0078 {
0079 if (pciehp_poll_mode)
0080 kthread_stop(ctrl->poll_thread);
0081 else
0082 free_irq(ctrl->pcie->irq, ctrl);
0083 }
0084
/*
 * Poll the Slot Status register until the Command Completed bit is set
 * or @timeout milliseconds elapse.
 *
 * Returns 1 if the command completed, 0 on timeout or if the device
 * stopped responding (all-ones read).
 */
static int pcie_poll_cmd(struct controller *ctrl, int timeout)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;

	do {
		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
		if (PCI_POSSIBLE_ERROR(slot_status)) {
			ctrl_info(ctrl, "%s: no response from device\n",
				  __func__);
			return 0;
		}

		if (slot_status & PCI_EXP_SLTSTA_CC) {
			/* Acknowledge the event by writing the bit back. */
			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
						   PCI_EXP_SLTSTA_CC);
			ctrl->cmd_busy = 0;
			/* Publish cmd_busy; pairs with barrier in pcie_do_write_cmd(). */
			smp_mb();
			return 1;
		}
		msleep(10);
		timeout -= 10;
	} while (timeout >= 0);
	return 0;
}
0110
/*
 * Wait for a previously issued Slot Control write to be acted upon.
 *
 * Sleeps on ctrl->queue (woken by pciehp_isr() on Command Completed)
 * when completion interrupts are enabled, otherwise polls.  Allows
 * one second for completion — 2.5 s in poll mode, since the poll
 * thread only acknowledges events at its own cadence.
 */
static void pcie_wait_cmd(struct controller *ctrl)
{
	unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
	unsigned long duration = msecs_to_jiffies(msecs);
	unsigned long cmd_timeout = ctrl->cmd_started + duration;
	unsigned long now, timeout;
	int rc;

	/*
	 * Nothing to wait for if the controller doesn't generate
	 * Command Completed events.
	 */
	if (NO_CMD_CMPL(ctrl))
		return;

	/* No command outstanding. */
	if (!ctrl->cmd_busy)
		return;

	/*
	 * The timeout window started when the command was issued, which
	 * may be a while ago; wait only for the remainder, but at least
	 * one jiffy even if the window has fully elapsed.
	 */
	now = jiffies;
	if (time_before_eq(cmd_timeout, now))
		timeout = 1;
	else
		timeout = cmd_timeout - now;

	/* Sleep-wait only if completion interrupts are actually enabled. */
	if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
	    ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
		rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
	else
		rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));

	if (!rc)
		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
			  ctrl->slot_ctrl,
			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
}
0150
/*
 * Slot Control bits that affected controllers (pdev->broken_cmd_compl,
 * see quirk_cmd_compl()) require to actually change before they signal
 * Command Completed; used in pcie_do_write_cmd().
 */
#define CC_ERRATUM_MASK		(PCI_EXP_SLTCTL_PCC |	\
				 PCI_EXP_SLTCTL_PIC |	\
				 PCI_EXP_SLTCTL_AIC |	\
				 PCI_EXP_SLTCTL_EIC)
0155
/*
 * Read-modify-write the Slot Control register: bits in @mask are
 * replaced by the corresponding bits of @cmd.  If @wait is true, block
 * until the controller signals Command Completed.  Serialized by
 * ctrl->ctrl_lock.
 */
static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
			      u16 mask, bool wait)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl_orig, slot_ctrl;

	mutex_lock(&ctrl->ctrl_lock);

	/* Always wait for any previously issued command to complete first. */
	pcie_wait_cmd(ctrl);

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	if (PCI_POSSIBLE_ERROR(slot_ctrl)) {
		/* All-ones read: device gone or inaccessible. */
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		goto out;
	}

	slot_ctrl_orig = slot_ctrl;
	slot_ctrl &= ~mask;
	slot_ctrl |= (cmd & mask);
	ctrl->cmd_busy = 1;
	/* Publish cmd_busy before the write can trigger a completion event. */
	smp_mb();
	ctrl->slot_ctrl = slot_ctrl;
	pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
	/* Timestamp for the pcie_wait_cmd() timeout window. */
	ctrl->cmd_started = jiffies;

	/*
	 * Controllers with the Command Completed erratum
	 * (pdev->broken_cmd_compl) only raise Command Completed when the
	 * CC_ERRATUM_MASK bits actually changed; if they didn't, treat
	 * the command as already complete so pcie_wait_cmd() does not
	 * stall for the full timeout.
	 */
	if (pdev->broken_cmd_compl &&
	    (slot_ctrl_orig & CC_ERRATUM_MASK) == (slot_ctrl & CC_ERRATUM_MASK))
		ctrl->cmd_busy = 0;

	/*
	 * Optionally wait for the hardware to be ready for a new command,
	 * indicating completion of the above issued command.
	 */
	if (wait)
		pcie_wait_cmd(ctrl);

out:
	mutex_unlock(&ctrl->ctrl_lock);
}
0205
0206
0207
0208
0209
0210
0211
/* Issue a Slot Control command and wait for it to complete. */
static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, true);
}
0216
0217
/*
 * Issue a Slot Control command without waiting for completion; a later
 * command will wait via pcie_wait_cmd() before issuing.
 */
static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
{
	pcie_do_write_cmd(ctrl, cmd, mask, false);
}
0222
0223
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234 int pciehp_check_link_active(struct controller *ctrl)
0235 {
0236 struct pci_dev *pdev = ctrl_dev(ctrl);
0237 u16 lnk_status;
0238 int ret;
0239
0240 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
0241 if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
0242 return -ENODEV;
0243
0244 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
0245 ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
0246
0247 return ret;
0248 }
0249
0250 static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
0251 {
0252 u32 l;
0253 int count = 0;
0254 int delay = 1000, step = 20;
0255 bool found = false;
0256
0257 do {
0258 found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
0259 count++;
0260
0261 if (found)
0262 break;
0263
0264 msleep(step);
0265 delay -= step;
0266 } while (delay > 0);
0267
0268 if (count > 1)
0269 pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
0270 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
0271 PCI_FUNC(devfn), count, step, l);
0272
0273 return found;
0274 }
0275
0276 static void pcie_wait_for_presence(struct pci_dev *pdev)
0277 {
0278 int timeout = 1250;
0279 u16 slot_status;
0280
0281 do {
0282 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
0283 if (slot_status & PCI_EXP_SLTSTA_PDS)
0284 return;
0285 msleep(10);
0286 timeout -= 10;
0287 } while (timeout > 0);
0288 }
0289
/*
 * Wait for the link to come up after slot power-on and verify that a
 * device responds behind it.  Returns 0 on success, -1 if the link
 * failed to train or no device was found.
 */
int pciehp_check_link_status(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	bool found;
	u16 lnk_status;

	if (!pcie_wait_for_link(pdev, true)) {
		ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
		return -1;
	}

	/*
	 * With in-band presence disabled, Presence Detect State may
	 * trail the link coming up; wait for it separately.
	 */
	if (ctrl->inband_presence_disabled)
		pcie_wait_for_presence(pdev);

	found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
				  PCI_DEVFN(0, 0));

	/*
	 * Link/presence flaps up to this point were part of bring-up;
	 * drop them so they are not handled as new hotplug events.
	 */
	if (found)
		atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
			   &ctrl->pending_events);

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
	/* Still training, or zero negotiated link width: link unusable. */
	if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
	    !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
		ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
			  slot_name(ctrl), lnk_status);
		return -1;
	}

	pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);

	if (!found) {
		ctrl_info(ctrl, "Slot(%s): No device found\n",
			  slot_name(ctrl));
		return -1;
	}

	return 0;
}
0331
0332 static int __pciehp_link_set(struct controller *ctrl, bool enable)
0333 {
0334 struct pci_dev *pdev = ctrl_dev(ctrl);
0335 u16 lnk_ctrl;
0336
0337 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
0338
0339 if (enable)
0340 lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
0341 else
0342 lnk_ctrl |= PCI_EXP_LNKCTL_LD;
0343
0344 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
0345 ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
0346 return 0;
0347 }
0348
/* Clear Link Disable so the link may train; always returns 0. */
static int pciehp_link_enable(struct controller *ctrl)
{
	return __pciehp_link_set(ctrl, true);
}
0353
/*
 * Report the raw attention/power indicator control fields from Slot
 * Control, shifted down so the combined 4-bit value starts at bit 0
 * (AIC/PIC occupy bits 9:6 of the register).  Always returns 0.
 */
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 *status)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	/* Keep config space accessible while reading. */
	pci_config_pm_runtime_get(pdev);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	pci_config_pm_runtime_put(pdev);
	*status = (slot_ctrl & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
	return 0;
}
0367
/*
 * Report the attention indicator state: 1 = on, 2 = blinking, 0 = off,
 * 0xFF if the field holds a reserved value.  Always returns 0.
 */
int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	/* Keep config space accessible while reading. */
	pci_config_pm_runtime_get(pdev);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	pci_config_pm_runtime_put(pdev);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
	case PCI_EXP_SLTCTL_ATTN_IND_ON:
		*status = 1;	/* On */
		break;
	case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
		*status = 2;	/* Blink */
		break;
	case PCI_EXP_SLTCTL_ATTN_IND_OFF:
		*status = 0;	/* Off */
		break;
	default:
		*status = 0xFF;	/* Reserved encoding */
		break;
	}

	return 0;
}
0397
/*
 * Report the slot's Power Controller Control state: 1 = power on,
 * 0 = power off, 0xFF if the field holds an unexpected value.
 */
void pciehp_get_power_status(struct controller *ctrl, u8 *status)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_ctrl;

	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);

	switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
	case PCI_EXP_SLTCTL_PWR_ON:
		*status = 1;	/* On */
		break;
	case PCI_EXP_SLTCTL_PWR_OFF:
		*status = 0;	/* Off */
		break;
	default:
		*status = 0xFF;
		break;
	}
}
0419
0420 void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
0421 {
0422 struct pci_dev *pdev = ctrl_dev(ctrl);
0423 u16 slot_status;
0424
0425 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
0426 *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
0427 }
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
/*
 * pciehp_card_present() - report Presence Detect State
 * @ctrl: hotplug controller
 *
 * Returns 1 if Presence Detect State is asserted, 0 if not, or -ENODEV
 * if the port is inaccessible (device not found or all-ones read).
 * The state can change right after the check; callers must tolerate
 * that.
 */
int pciehp_card_present(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;
	int ret;

	ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status))
		return -ENODEV;

	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
}
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465
/*
 * Treat the slot as occupied if either Presence Detect State is set or
 * the Data Link Layer is active.  Returns 1, 0 or -ENODEV with the
 * same meaning as pciehp_card_present()/pciehp_check_link_active().
 */
int pciehp_card_present_or_link_active(struct controller *ctrl)
{
	int present = pciehp_card_present(ctrl);

	if (present)
		return present;

	return pciehp_check_link_active(ctrl);
}
0476
0477 int pciehp_query_power_fault(struct controller *ctrl)
0478 {
0479 struct pci_dev *pdev = ctrl_dev(ctrl);
0480 u16 slot_status;
0481
0482 pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
0483 return !!(slot_status & PCI_EXP_SLTSTA_PFD);
0484 }
0485
/*
 * Write a raw 4-bit indicator value into the AIC/PIC fields of Slot
 * Control (the inverse of pciehp_get_raw_indicator_status()).  Does
 * not wait for command completion.  Always returns 0.
 */
int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
				    u8 status)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);

	/* Keep config space accessible while writing. */
	pci_config_pm_runtime_get(pdev);
	pcie_write_cmd_nowait(ctrl, status << 6,
			      PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
	pci_config_pm_runtime_put(pdev);
	return 0;
}
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
0508
0509
0510
0511
0512
0513
/**
 * pciehp_set_indicators() - set attention indicator, power indicator, or both
 * @ctrl: PCIe hotplug controller
 * @pwr: requested PCI_EXP_SLTCTL_PIC value, or INDICATOR_NOOP to leave
 *	the power indicator untouched
 * @attn: requested PCI_EXP_SLTCTL_AIC value, or INDICATOR_NOOP to leave
 *	the attention indicator untouched
 *
 * An indicator the slot does not implement (per PWR_LED()/ATTN_LED())
 * is skipped.  Both indicators are updated with a single, non-waiting
 * Slot Control write when anything is to be changed.
 */
void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn)
{
	u16 cmd = 0, mask = 0;

	if (PWR_LED(ctrl) && pwr != INDICATOR_NOOP) {
		cmd |= (pwr & PCI_EXP_SLTCTL_PIC);
		mask |= PCI_EXP_SLTCTL_PIC;
	}

	if (ATTN_LED(ctrl) && attn != INDICATOR_NOOP) {
		cmd |= (attn & PCI_EXP_SLTCTL_AIC);
		mask |= PCI_EXP_SLTCTL_AIC;
	}

	if (cmd) {
		pcie_write_cmd_nowait(ctrl, cmd, mask);
		ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
			 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
	}
}
0534
/*
 * Power the slot on and re-enable its link.  Returns 0 on success or
 * the error from pciehp_link_enable().
 */
int pciehp_power_on_slot(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 slot_status;
	int retval;

	/*
	 * Clear a stale Power Fault Detected event and reset our
	 * latched flag BEFORE powering on, so a genuine new fault is
	 * reported again (see the PFD filtering in pciehp_isr()).
	 */
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
	if (slot_status & PCI_EXP_SLTSTA_PFD)
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
					   PCI_EXP_SLTSTA_PFD);
	ctrl->power_fault_detected = 0;

	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_ON);

	retval = pciehp_link_enable(ctrl);
	if (retval)
		ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);

	return retval;
}
0559
/* Turn the slot's power controller off, waiting for command completion. */
void pciehp_power_off_slot(struct controller *ctrl)
{
	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
		 PCI_EXP_SLTCTL_PWR_OFF);
}
0567
/*
 * Discard a Data Link Layer State Changed event that was caused by a
 * Downstream Port Containment episode from which the link recovered
 * (called from pciehp_ist() when pci_dpc_recovered() says so).
 */
static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
					  struct pci_dev *pdev, int irq)
{
	/*
	 * Let the hardirq handler finish so it cannot re-set DLLSC in
	 * pending_events after we clear it below.
	 */
	synchronize_hardirq(irq);
	atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
	if (pciehp_poll_mode)
		/* No hardirq handler in poll mode: ack directly in Slot Status. */
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
					   PCI_EXP_SLTSTA_DLLSC);
	ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
		  slot_name(ctrl));

	/*
	 * If the link is unexpectedly down after this point (e.g. the
	 * device disappeared while DPC was recovering), synthesize the
	 * link-change event so the slot is brought down cleanly.
	 */
	down_read_nested(&ctrl->reset_lock, ctrl->depth);
	if (!pciehp_check_link_active(ctrl))
		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
	up_read(&ctrl->reset_lock);
}
0593
/*
 * Hardirq half of the hotplug interrupt: read, filter and acknowledge
 * Slot Status events, handle Command Completed inline, and hand the
 * remaining events to pciehp_ist() via ctrl->pending_events.  Also
 * called directly from the poll thread with IRQ_NOTCONNECTED.
 */
static irqreturn_t pciehp_isr(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	struct device *parent = pdev->dev.parent;
	u16 status, events = 0;

	/*
	 * Nothing to do if the port is in D3cold (config space
	 * inaccessible) or if this driver never enabled Hot-Plug
	 * Interrupt Enable and we're not polling — then the shared IRQ
	 * belongs to someone else.
	 */
	if (pdev->current_state == PCI_D3cold ||
	    (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
		return IRQ_NONE;

	/*
	 * Keep the port accessible by holding a runtime PM reference on
	 * its parent.  If the parent is not runtime-active we must not
	 * touch config space from hardirq context; mask the IRQ and let
	 * pciehp_ist() rerun us (RERUN_ISR) once it can resume the port.
	 */
	if (parent) {
		pm_runtime_get_noresume(parent);
		if (!pm_runtime_active(parent)) {
			pm_runtime_put(parent);
			disable_irq_nosync(irq);
			atomic_or(RERUN_ISR, &ctrl->pending_events);
			return IRQ_WAKE_THREAD;
		}
	}

read_status:
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
	if (PCI_POSSIBLE_ERROR(status)) {
		ctrl_info(ctrl, "%s: no response from device\n", __func__);
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	/* Keep only the event bits this driver cares about. */
	status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
		  PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
		  PCI_EXP_SLTSTA_DLLSC;

	/*
	 * Report a Power Fault only once until it is cleared again
	 * (power_fault_detected is reset in pciehp_power_on_slot()).
	 */
	if (ctrl->power_fault_detected)
		status &= ~PCI_EXP_SLTSTA_PFD;
	else if (status & PCI_EXP_SLTSTA_PFD)
		ctrl->power_fault_detected = true;

	events |= status;
	if (!events) {
		if (parent)
			pm_runtime_put(parent);
		return IRQ_NONE;
	}

	if (status) {
		/* Acknowledge the events by writing them back. */
		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);

		/*
		 * In MSI mode, all event bits must be zero before the
		 * port signals a new interrupt, so re-read Slot Status
		 * in case another bit was set between our read and the
		 * write above (not needed for INTx or polling).
		 */
		if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
			goto read_status;
	}

	ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
	if (parent)
		pm_runtime_put(parent);

	/*
	 * Command Completed is handled right here: clear cmd_busy and
	 * wake anyone sleeping in pcie_wait_cmd().
	 */
	if (events & PCI_EXP_SLTSTA_CC) {
		ctrl->cmd_busy = 0;
		smp_mb();	/* publish cmd_busy before waking waiters */
		wake_up(&ctrl->queue);

		if (events == PCI_EXP_SLTSTA_CC)
			return IRQ_HANDLED;

		events &= ~PCI_EXP_SLTSTA_CC;
	}

	if (pdev->ignore_hotplug) {
		ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
		return IRQ_HANDLED;
	}

	/* Save the remaining events for consumption by the IRQ thread. */
	atomic_or(events, &ctrl->pending_events);
	return IRQ_WAKE_THREAD;
}
0698
/*
 * Threaded half of the hotplug interrupt: consumes the events that
 * pciehp_isr() collected in ctrl->pending_events and runs the slot
 * state machine.  Also called directly from the poll thread.
 */
static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
	struct controller *ctrl = (struct controller *)dev_id;
	struct pci_dev *pdev = ctrl_dev(ctrl);
	irqreturn_t ret;
	u32 events;

	ctrl->ist_running = true;
	/* Resume the port so config space is accessible. */
	pci_config_pm_runtime_get(pdev);

	/*
	 * Rerun pciehp_isr() if it bailed out earlier because the
	 * parent was suspended (RERUN_ISR); re-enable the IRQ it masked.
	 */
	if (atomic_fetch_and(~RERUN_ISR, &ctrl->pending_events) & RERUN_ISR) {
		ret = pciehp_isr(irq, dev_id);
		enable_irq(irq);
		if (ret != IRQ_WAKE_THREAD)
			goto out;
	}

	/* Make sure the hardirq handler is done publishing events. */
	synchronize_hardirq(irq);
	events = atomic_xchg(&ctrl->pending_events, 0);
	if (!events) {
		ret = IRQ_NONE;
		goto out;
	}

	/* Check Attention Button Pressed */
	if (events & PCI_EXP_SLTSTA_ABP) {
		ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
			  slot_name(ctrl));
		pciehp_handle_button_press(ctrl);
	}

	/* Check Power Fault Detected: turn power LED off, attention LED on. */
	if (events & PCI_EXP_SLTSTA_PFD) {
		ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
		pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
				      PCI_EXP_SLTCTL_ATTN_IND_ON);
	}

	/*
	 * Ignore Link Down/Up events caused by Downstream Port
	 * Containment if recovery succeeded and the slot is still up.
	 */
	if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
	    ctrl->state == ON_STATE) {
		events &= ~PCI_EXP_SLTSTA_DLLSC;
		pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
	}

	/*
	 * Disable requests take priority over Presence Detect Changed
	 * or Data Link Layer State Changed events.  reset_lock keeps
	 * this exclusive against pciehp_reset_slot().
	 */
	down_read_nested(&ctrl->reset_lock, ctrl->depth);
	if (events & DISABLE_SLOT)
		pciehp_handle_disable_request(ctrl);
	else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
		pciehp_handle_presence_or_link_change(ctrl, events);
	up_read(&ctrl->reset_lock);

	ret = IRQ_HANDLED;
out:
	pci_config_pm_runtime_put(pdev);
	ctrl->ist_running = false;
	wake_up(&ctrl->requester);
	return ret;
}
0766
/*
 * Kthread used instead of an interrupt when pciehp_poll_mode is set:
 * periodically runs the isr/ist pair until drained, then sleeps for
 * pciehp_poll_time seconds.
 */
static int pciehp_poll(void *data)
{
	struct controller *ctrl = data;

	schedule_timeout_idle(10 * HZ); /* start with a 10 second delay */

	while (!kthread_should_stop()) {
		/* poll for hotplug events, handling any that were found */
		while (pciehp_isr(IRQ_NOTCONNECTED, ctrl) == IRQ_WAKE_THREAD ||
		       atomic_read(&ctrl->pending_events))
			pciehp_ist(IRQ_NOTCONNECTED, ctrl);

		/* Clamp the user-settable interval to 1..60 seconds. */
		if (pciehp_poll_time <= 0 || pciehp_poll_time > 60)
			pciehp_poll_time = 2;

		schedule_timeout_idle(pciehp_poll_time * HZ);
	}

	return 0;
}
0787
/*
 * Enable event notification in Slot Control.
 *
 * Data Link Layer State Changed events are always enabled.  Presence
 * Detect Changed is enabled only on slots without an attention button;
 * with a button, the button drives (de)authorization instead.  Note
 * that PCI_EXP_SLTCTL_PFDE is in the mask but never in cmd: Power
 * Fault Detected notification is deliberately left disabled here and
 * the fault is observed via the other events (see pciehp_isr()).
 */
static void pcie_enable_notification(struct controller *ctrl)
{
	u16 cmd, mask;

	cmd = PCI_EXP_SLTCTL_DLLSCE;
	if (ATTN_BUTTN(ctrl))
		cmd |= PCI_EXP_SLTCTL_ABPE;
	else
		cmd |= PCI_EXP_SLTCTL_PDCE;
	/* Interrupt delivery only when not polling. */
	if (!pciehp_poll_mode)
		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;

	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
		PCI_EXP_SLTCTL_PFDE |
		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
		PCI_EXP_SLTCTL_DLLSCE);

	pcie_write_cmd_nowait(ctrl, cmd, mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
0825
0826 static void pcie_disable_notification(struct controller *ctrl)
0827 {
0828 u16 mask;
0829
0830 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
0831 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
0832 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
0833 PCI_EXP_SLTCTL_DLLSCE);
0834 pcie_write_cmd(ctrl, 0, mask);
0835 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
0836 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
0837 }
0838
/* Acknowledge stale Presence Detect and Link State Changed events. */
void pcie_clear_hotplug_events(struct controller *ctrl)
{
	pcie_capability_write_word(ctrl_dev(ctrl), PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
0844
0845 void pcie_enable_interrupt(struct controller *ctrl)
0846 {
0847 u16 mask;
0848
0849 mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
0850 pcie_write_cmd(ctrl, mask, mask);
0851 }
0852
/*
 * Mask only Hot-Plug Interrupt Enable and Data Link Layer State
 * Changed notification — unlike pcie_disable_notification(), the other
 * event enables stay set.  NOTE(review): presumably used around
 * suspend/resume transitions so an expected link drop is not handled
 * as a hot-remove — confirm with the callers of this function.
 */
void pcie_disable_interrupt(struct controller *ctrl)
{
	u16 mask;

	mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
	pcie_write_cmd(ctrl, 0, mask);
}
0867
0868
0869
0870
0871
0872
0873
0874
0875
0876
0877
/*
 * pciehp_slot_reset() - handle a reset of the hotplug port
 * @dev: PCIe port service device
 *
 * Clears the Data Link Layer State Changed event that a reset of an
 * occupied slot produced, so the attached device is not needlessly
 * unbound; if the link did not come back, synthesizes the event so the
 * slot is brought down.  No-op unless the slot is in ON_STATE.
 * Always returns 0.
 */
int pciehp_slot_reset(struct pcie_device *dev)
{
	struct controller *ctrl = get_service_data(dev);

	if (ctrl->state != ON_STATE)
		return 0;

	pcie_capability_write_word(dev->port, PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_DLLSC);

	if (!pciehp_check_link_active(ctrl))
		pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);

	return 0;
}
0893
0894
0895
0896
0897
0898
0899
0900
0901
/*
 * pciehp_reset_slot() - secondary bus reset of the hotplug port
 * @hotplug_slot: slot to reset
 * @probe: true to merely report that a reset is possible (returns 0)
 *
 * Masks Presence Detect Changed (only on slots without an attention
 * button) and Data Link Layer State Changed notification, performs the
 * bus reset, then acknowledges the stale status bits and re-enables
 * the masked notifications — so the reset is not misread as a
 * hot-remove.  Excludes pciehp_ist() event handling via reset_lock.
 */
int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
	struct controller *ctrl = to_ctrl(hotplug_slot);
	struct pci_dev *pdev = ctrl_dev(ctrl);
	u16 stat_mask = 0, ctrl_mask = 0;
	int rc;

	if (probe)
		return 0;

	down_write_nested(&ctrl->reset_lock, ctrl->depth);

	if (!ATTN_BUTTN(ctrl)) {
		ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
		stat_mask |= PCI_EXP_SLTSTA_PDC;
	}
	ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
	stat_mask |= PCI_EXP_SLTSTA_DLLSC;

	pcie_write_cmd(ctrl, 0, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);

	rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);

	/* Ack the events the reset generated, then unmask again. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);

	up_write(&ctrl->reset_lock);
	return rc;
}
0935
0936 int pcie_init_notification(struct controller *ctrl)
0937 {
0938 if (pciehp_request_irq(ctrl))
0939 return -1;
0940 pcie_enable_notification(ctrl);
0941 ctrl->notification_enabled = 1;
0942 return 0;
0943 }
0944
0945 void pcie_shutdown_notification(struct controller *ctrl)
0946 {
0947 if (ctrl->notification_enabled) {
0948 pcie_disable_notification(ctrl);
0949 pciehp_free_irq(ctrl);
0950 ctrl->notification_enabled = 0;
0951 }
0952 }
0953
/* Dump Slot Capabilities/Status/Control to the debug log at init time. */
static inline void dbg_ctrl(struct controller *ctrl)
{
	struct pci_dev *pdev = ctrl->pcie->port;
	u16 reg16;

	ctrl_dbg(ctrl, "Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
	ctrl_dbg(ctrl, "Slot Status            : 0x%04x\n", reg16);
	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
	ctrl_dbg(ctrl, "Slot Control           : 0x%04x\n", reg16);
}
0965
/* Render capability bit @y of @x as '+' (set) or '-' (clear) for logging. */
#define FLAG(x, y)	(((x) & (y)) ? '+' : '-')
0967
0968 static inline int pcie_hotplug_depth(struct pci_dev *dev)
0969 {
0970 struct pci_bus *bus = dev->bus;
0971 int depth = 0;
0972
0973 while (bus->parent) {
0974 bus = bus->parent;
0975 if (bus->self && bus->self->is_hotplug_bridge)
0976 depth++;
0977 }
0978
0979 return depth;
0980 }
0981
/*
 * pcie_init() - allocate and initialize a hotplug controller
 * @dev: PCIe port service device of the hotplug-capable port
 *
 * Reads the Slot Capabilities, applies quirks, clears stale events and
 * powers off an unoccupied but powered slot.  Returns the controller
 * or NULL on allocation failure; freed by pciehp_release_ctrl().
 */
struct controller *pcie_init(struct pcie_device *dev)
{
	struct controller *ctrl;
	u32 slot_cap, slot_cap2, link_cap;
	u8 poweron;
	struct pci_dev *pdev = dev->port;
	struct pci_bus *subordinate = pdev->subordinate;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return NULL;

	ctrl->pcie = dev;
	/* Lockdep nesting level for reset_lock (see down_read_nested() users). */
	ctrl->depth = pcie_hotplug_depth(dev->port);
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);

	/* Platform drives the LEDs itself: hide both indicators from us. */
	if (pdev->hotplug_user_indicators)
		slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP);

	/*
	 * Force No Command Completed Support on Thunderbolt controllers.
	 * NOTE(review): presumably some falsely claim Command Completed
	 * support — confirm against the commit introducing this.
	 */
	if (pdev->is_thunderbolt)
		slot_cap |= PCI_EXP_SLTCAP_NCCS;

	ctrl->slot_cap = slot_cap;
	mutex_init(&ctrl->ctrl_lock);
	mutex_init(&ctrl->state_lock);
	init_rwsem(&ctrl->reset_lock);
	init_waitqueue_head(&ctrl->requester);
	init_waitqueue_head(&ctrl->queue);
	INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
	dbg_ctrl(ctrl);

	/* Initial slot state mirrors whether a device is already enumerated. */
	down_read(&pci_bus_sem);
	ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
	up_read(&pci_bus_sem);

	/* Disable in-band presence detect if the hardware supports it. */
	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP2, &slot_cap2);
	if (slot_cap2 & PCI_EXP_SLTCAP2_IBPD) {
		pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_IBPD_DISABLE,
				      PCI_EXP_SLTCTL_IBPD_DISABLE);
		ctrl->inband_presence_disabled = 1;
	}

	/* ...or force the flag on platforms known to need it. */
	if (dmi_first_match(inband_presence_disabled_dmi_table))
		ctrl->inband_presence_disabled = 1;

	/* Read Link Capabilities (for the DLLLARC flag in the log below). */
	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);

	/* Clear all remaining event bits in Slot Status. */
	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
				   PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
				   PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_CC |
				   PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);

	ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
		  (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
		  FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_HPC),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_HPS),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
		  FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
		  FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD),
		  FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
		  pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");

	/*
	 * If an empty slot's power is on, turn it off.  The IRQ isn't
	 * requested yet, so disable notification first to avoid queuing
	 * an event for the power-off command.
	 */
	if (POWER_CTRL(ctrl)) {
		pciehp_get_power_status(ctrl, &poweron);
		if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
			pcie_disable_notification(ctrl);
			pciehp_power_off_slot(ctrl);
		}
	}

	return ctrl;
}
1069
/* Free a controller from pcie_init(), flushing pending button work first. */
void pciehp_release_ctrl(struct controller *ctrl)
{
	cancel_delayed_work_sync(&ctrl->button_work);
	kfree(ctrl);
}
1075
1076 static void quirk_cmd_compl(struct pci_dev *pdev)
1077 {
1078 u32 slot_cap;
1079
1080 if (pci_is_pcie(pdev)) {
1081 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
1082 if (slot_cap & PCI_EXP_SLTCAP_HPC &&
1083 !(slot_cap & PCI_EXP_SLTCAP_NCCS))
1084 pdev->broken_cmd_compl = 1;
1085 }
1086 }
/* Bridges known to need the Command Completed erratum quirk above. */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);