// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence PCIe endpoint controller driver.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* minimum size of a memory BAR */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

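/*
 * Convert a (PF, VF) pair into the absolute function index used by the
 * controller registers: VFs are addressed relative to their parent PF via the
 * SR-IOV "First VF Offset" and "VF Stride" capability fields, with vfn == 0
 * denoting the physical function itself.
 */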
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u32 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}

static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;

	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

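	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */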
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * Round the size up to the next power of two: fls64() is used rather
	 * than roundup_pow_of_two(), which takes an unsigned long and would
	 * truncate 64-bit sizes on 32-bit platforms.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		/* A 64-bit BAR uses two BAR registers, so it must be even. */
		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	/*
	 * All VFs of a function share a single BAR configuration, programmed
	 * through the vfn == 1 register set; for higher VFs only the inbound
	 * address translation below needs to be updated.
	 */
	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

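	/* Region 0 is reserved at init time for IRQ writes. */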
	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;

	return mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 interrupts, enum pci_barno bir,
				 u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSI-X BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset.  BAR must match MSI-X BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	/* Keep PCI_STATUS_INTERRUPT in sync with the pending INTx bitmap. */
	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
					u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq().
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}
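
/*
 * Pre-map the host's MSI address through outbound windows so an EPF can raise
 * MSI vectors with direct MMIO writes: one @entry_size window per vector is
 * mapped starting at @addr, all aliasing the same 256-byte-aligned MSI target
 * address. The base MSI data word and the offset of the MSI address within a
 * window are returned through @msi_data and @msi_addr_offset.
 */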
static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}

static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  enum pci_epc_irq_type type,
				  u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise legacy interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);

	case PCI_EPC_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_EPC_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, value, epf;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	/*
	 * Hide the FLR capability on controllers where Function Level Reset
	 * is known to misbehave.
	 */
	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}
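
/*
 * VF BARs are allocated in System Page Size units under SR-IOV, which is
 * presumably why a 64 KiB alignment is advertised for VFs below, while PF
 * BARs only need the controller's 256-byte minimum aperture.
 */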
static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header	= cdns_pcie_ep_write_header,
	.set_bar	= cdns_pcie_ep_set_bar,
	.clear_bar	= cdns_pcie_ep_clear_bar,
	.map_addr	= cdns_pcie_ep_map_addr,
	.unmap_addr	= cdns_pcie_ep_unmap_addr,
	.set_msi	= cdns_pcie_ep_set_msi,
	.get_msi	= cdns_pcie_ep_get_msi,
	.set_msix	= cdns_pcie_ep_set_msix,
	.get_msix	= cdns_pcie_ep_get_msix,
	.raise_irq	= cdns_pcie_ep_raise_irq,
	.map_msi_irq	= cdns_pcie_ep_map_msi_irq,
	.start		= cdns_pcie_ep_start,
	.get_features	= cdns_pcie_ep_get_features,
};
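
/*
 * Probe-time setup shared by the SoC glue drivers (e.g. pcie-cadence-plat):
 * map the controller registers, set aside the PCIe address space and register
 * the endpoint controller with the PCI endpoint framework.
 */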
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;
			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}
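
	/*
	 * A hypothetical device-tree fragment providing the resources and
	 * properties consumed above (all values illustrative):
	 *
	 *	pcie-ep@fb000000 {
	 *		compatible = "cdns,cdns-pcie-ep";
	 *		reg = <0x0 0xfb000000 0x0 0x01000000>,
	 *		      <0x0 0xd0000000 0x0 0x08000000>;
	 *		reg-names = "reg", "mem";
	 *		cdns,max-outbound-regions = <16>;
	 *		max-functions = /bits/ 8 <2>;
	 *		max-virtual-functions = /bits/ 8 <4 0>;
	 *	};
	 */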

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	return 0;

 free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}