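/*
 * CCSR (configuration, control and status register) setup for the
 * Freescale/NXP DPAA QMan (Queue Manager): programs the FQD and PFDR
 * private memory regions, handles the global error interrupt and seeds
 * the FQID, pool-channel and CGR-ID allocators.
 */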
#include "qman_priv.h"

u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);
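
/* CCSR register offsets */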
#define REG_QCSP_LIO_CFG(n)	(0x0000 + ((n) * 0x10))
#define REG_QCSP_IO_CFG(n)	(0x0004 + ((n) * 0x10))
#define REG_QCSP_DD_CFG(n)	(0x000c + ((n) * 0x10))
#define REG_DD_CFG		0x0200
#define REG_DCP_CFG(n)		(0x0300 + ((n) * 0x10))
#define REG_DCP_DD_CFG(n)	(0x0304 + ((n) * 0x10))
#define REG_DCP_DLM_AVG(n)	(0x030c + ((n) * 0x10))
#define REG_PFDR_FPC		0x0400
#define REG_PFDR_FP_HEAD	0x0404
#define REG_PFDR_FP_TAIL	0x0408
#define REG_PFDR_FP_LWIT	0x0410
#define REG_PFDR_CFG		0x0414
#define REG_SFDR_CFG		0x0500
#define REG_SFDR_IN_USE		0x0504
#define REG_WQ_CS_CFG(n)	(0x0600 + ((n) * 0x04))
#define REG_WQ_DEF_ENC_WQID	0x0630
#define REG_WQ_SC_DD_CFG(n)	(0x640 + ((n) * 0x04))
#define REG_WQ_PC_DD_CFG(n)	(0x680 + ((n) * 0x04))
#define REG_WQ_DC0_DD_CFG(n)	(0x6c0 + ((n) * 0x04))
#define REG_WQ_DC1_DD_CFG(n)	(0x700 + ((n) * 0x04))
#define REG_WQ_DCn_DD_CFG(n)	(0x6c0 + ((n) * 0x40)) /* n == 2 or 3 */
#define REG_CM_CFG		0x0800
#define REG_ECSR		0x0a00
#define REG_ECIR		0x0a04
#define REG_EADR		0x0a08
#define REG_ECIR2		0x0a0c
#define REG_EDATA(n)		(0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)		(0x0a80 + ((n) * 0x04))
#define REG_MCR			0x0b00
#define REG_MCP(n)		(0x0b04 + ((n) * 0x04))
#define REG_MISC_CFG		0x0be0
#define REG_HID_CFG		0x0bf0
#define REG_IDLE_STAT		0x0bf4
#define REG_IP_REV_1		0x0bf8
#define REG_IP_REV_2		0x0bfc
#define REG_FQD_BARE		0x0c00
#define REG_PFDR_BARE		0x0c20
#define REG_offset_BAR		0x0004	/* relative to REG_{FQD,PFDR}_BARE */
#define REG_offset_AR		0x0010	/* relative to REG_{FQD,PFDR}_BARE */
#define REG_QCSP_BARE		0x0c80
#define REG_QCSP_BAR		0x0c84
#define REG_CI_SCHED_CFG	0x0d00
#define REG_SRCIDR		0x0d04
#define REG_LIODNR		0x0d08
#define REG_CI_RLM_AVG		0x0d14
#define REG_ERR_ISR		0x0e00
#define REG_ERR_IER		0x0e04
#define REG_REV3_QCSP_LIO_CFG(n)	(0x1000 + ((n) * 0x10))
#define REG_REV3_QCSP_IO_CFG(n)		(0x1004 + ((n) * 0x10))
#define REG_REV3_QCSP_DD_CFG(n)		(0x100c + ((n) * 0x10))

/* Assists for the QMan management command register (MCR) */
#define MCR_INIT_PFDR		0x01000000
#define MCR_get_rslt(v)		(u8)((v) >> 24)
#define MCR_rslt_idle(r)	(!(r) || ((r) >= 0xf0))
#define MCR_rslt_ok(r)		((r) == 0xf0)
#define MCR_rslt_eaccess(r)	((r) == 0xf8)
#define MCR_rslt_inval(r)	((r) == 0xff)

/*
 * Corenet initiator scheduling settings, written to REG_CI_SCHED_CFG by
 * qm_set_corenet_initiator(): the SRCCIV issue interval plus arbitration
 * weights for the stash request queue, read/write and BMan interfaces.
 */
#define QM_CI_SCHED_CFG_SRCCIV	4
#define QM_CI_SCHED_CFG_SRQ_W	3
#define QM_CI_SCHED_CFG_RW_W	2
#define QM_CI_SCHED_CFG_BMAN_W	2

#define QM_CI_SCHED_CFG_SRCCIV_EN	BIT(31)

/* Work queue scheduler channel classes */
enum qm_wq_class {
	qm_wq_portal = 0,
	qm_wq_pool = 1,
	qm_wq_fman0 = 2,
	qm_wq_fman1 = 3,
	qm_wq_caam = 4,
	qm_wq_pme = 5,
	qm_wq_first = qm_wq_portal,
	qm_wq_last = qm_wq_pme
};

/* The two private memory regions that QMan owns */
enum qm_memory {
	qm_memory_fqd,
	qm_memory_pfdr
};

/* Error interrupt sources, as seen in ERR_ISR/ERR_IER and ECSR */
#define QM_EIRQ_CIDE	0x20000000
#define QM_EIRQ_CTDE	0x10000000
#define QM_EIRQ_CITT	0x08000000
#define QM_EIRQ_PLWI	0x04000000
#define QM_EIRQ_MBEI	0x02000000
#define QM_EIRQ_SBEI	0x01000000
#define QM_EIRQ_PEBI	0x00800000
#define QM_EIRQ_IFSI	0x00020000
#define QM_EIRQ_ICVI	0x00010000
#define QM_EIRQ_IDDI	0x00000800
#define QM_EIRQ_IDFI	0x00000400
#define QM_EIRQ_IDSI	0x00000200
#define QM_EIRQ_IDQI	0x00000100
#define QM_EIRQ_IECE	0x00000010
#define QM_EIRQ_IEOI	0x00000008
#define QM_EIRQ_IESI	0x00000004
#define QM_EIRQ_IECI	0x00000002
#define QM_EIRQ_IEQI	0x00000001

#define PORTAL_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
			 QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
#define FQID_ECSR_ERR	(QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
			 QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
			 QM_EIRQ_IFSI)
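
/*
 * Decoders for the error capture registers: ECIR/ECIR2 identify the
 * portal and/or FQID involved in a captured error, EADR the internal
 * memory and address hit by an ECC error.
 */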
struct qm_ecir {
	u32 info;	/* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
};

static bool qm_ecir_is_dcp(const struct qm_ecir *p)
{
	return p->info & BIT(29);
}

static int qm_ecir_get_pnum(const struct qm_ecir *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_ecir_get_fqid(const struct qm_ecir *p)
{
	return p->info & (BIT(24) - 1);
}

struct qm_ecir2 {
	u32 info;	/* ptyp[31], res[10-30], pnum[0-9] */
};

static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
{
	return p->info & BIT(31);
}

static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
{
	return p->info & (BIT(10) - 1);
}

struct qm_eadr {
	u32 info;	/* memid[24-27], eadr[0-11] (v3: memid[24-28], eadr[0-15]) */
};

static int qm_eadr_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0xf;
}

static int qm_eadr_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(12) - 1);
}

static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
{
	return (p->info >> 24) & 0x1f;
}

static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
{
	return p->info & (BIT(16) - 1);
}

struct qman_hwerr_txt {
	u32 mask;
	const char *txt;
};

static const struct qman_hwerr_txt qman_hwerr_txts[] = {
	{ QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
	{ QM_EIRQ_CTDE, "Corenet Target Data Error" },
	{ QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
	{ QM_EIRQ_PLWI, "PFDR Low Watermark" },
	{ QM_EIRQ_MBEI, "Multi-bit ECC Error" },
	{ QM_EIRQ_SBEI, "Single-bit ECC Error" },
	{ QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
	{ QM_EIRQ_IFSI, "Invalid Flow Control State" },
	{ QM_EIRQ_ICVI, "Invalid Command Verb" },
	{ QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
	{ QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
	{ QM_EIRQ_IDSI, "Invalid Dequeue Source" },
	{ QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
	{ QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
	{ QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
	{ QM_EIRQ_IESI, "Invalid Enqueue State" },
	{ QM_EIRQ_IECI, "Invalid Enqueue Channel" },
	{ QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
};

struct qman_error_info_mdata {
	u16 addr_mask;
	u16 bits;
	const char *txt;
};

static const struct qman_error_info_mdata error_mdata[] = {
	{ 0x01FF, 24, "FQD cache tag memory 0" },
	{ 0x01FF, 24, "FQD cache tag memory 1" },
	{ 0x01FF, 24, "FQD cache tag memory 2" },
	{ 0x01FF, 24, "FQD cache tag memory 3" },
	{ 0x0FFF, 512, "FQD cache memory" },
	{ 0x07FF, 128, "SFDR memory" },
	{ 0x01FF, 72, "WQ context memory" },
	{ 0x00FF, 240, "CGR memory" },
	{ 0x00FF, 302, "Internal Order Restoration List memory" },
	{ 0x01FF, 256, "SW portal ring memory" },
};

/* Error sources that are disabled after they first fire */
#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)

/* Pointer to the start of the QMan's CCSR space */
static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
static int __qman_probed;
static int __qman_requires_cleanup;

static inline u32 qm_ccsr_in(u32 offset)
{
	return ioread32be(qm_ccsr_start + offset/4);
}

static inline void qm_ccsr_out(u32 offset, u32 val)
{
	iowrite32be(val, qm_ccsr_start + offset/4);
}

u32 qm_get_pools_sdqcr(void)
{
	return qm_pools_sdqcr;
}

enum qm_dc_portal {
	qm_dc_portal_fman0 = 0,
	qm_dc_portal_fman1 = 1
};
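
/*
 * Program the DCP (direct-connect portal) configuration register.
 * @ed is set when software is not prepared to accept enqueue rejections
 * (ERNs) from the portal; see qman_init_ccsr().
 */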
static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
{
	DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
		    portal == qm_dc_portal_fman1);
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x1000 : 0) | (sernd & 0x3ff));
	else
		qm_ccsr_out(REG_DCP_CFG(portal),
			    (ed ? 0x100 : 0) | (sernd & 0x1f));
}

static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
				 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
				 u8 csw5, u8 csw6, u8 csw7)
{
	qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
		    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
		    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
		    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
}

static void qm_set_hid(void)
{
	qm_ccsr_out(REG_HID_CFG, 0);
}

static void qm_set_corenet_initiator(void)
{
	qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
		    (QM_CI_SCHED_CFG_SRCCIV << 24) |
		    (QM_CI_SCHED_CFG_SRQ_W << 8) |
		    (QM_CI_SCHED_CFG_RW_W << 4) |
		    QM_CI_SCHED_CFG_BMAN_W);
}

static void qm_get_version(u16 *id, u8 *major, u8 *minor)
{
	u32 v = qm_ccsr_in(REG_IP_REV_1);
	*id = (v >> 16);
	*major = (v >> 8) & 0xff;
	*minor = v & 0xff;
}
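
/*
 * Program the base address and size of one of QMan's two private memory
 * regions. Returns 0 if the region was set up here, 1 if a previous
 * boot stage already programmed it identically (cleanup is then
 * required), or a negative errno.
 */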
#define PFDR_AR_EN BIT(31)
static int qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
	void *ptr;
	u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
	u32 exp = ilog2(size);
	u32 bar, bare;

	/* choke if size isn't within range */
	DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
		    is_power_of_2(size));
	/* choke if 'ba' has lower alignment than 'size' */
	DPAA_ASSERT(!(ba & (size - 1)));

	/* Check if the region was already programmed by a previous stage */
	bar = qm_ccsr_in(offset + REG_offset_BAR);
	if (bar) {
		/* Make sure ba matches what was programmed */
		bare = qm_ccsr_in(offset);
		if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
			pr_err("Attempted to reinitialize QMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
			       ba, bare, bar);
			return -ENOMEM;
		}
		__qman_requires_cleanup = 1;
		/* Return 1 to indicate memory was previously programmed */
		return 1;
	}
	/* Need to temporarily map the area to make sure it is zeroed */
	ptr = memremap(ba, size, MEMREMAP_WB);
	if (!ptr) {
		pr_crit("memremap() of QMan private memory failed\n");
		return -ENOMEM;
	}
	memset(ptr, 0, size);

#ifdef CONFIG_PPC
	/*
	 * PPC does not flush the cache on memunmap(), but the cache must
	 * be flushed here since QMan accesses this memory non-coherently.
	 */
	flush_dcache_range((unsigned long) ptr, (unsigned long) ptr + size);
#endif
	memunmap(ptr);

	qm_ccsr_out(offset, upper_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
	qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
	return 0;
}

static void qm_set_pfdr_threshold(u32 th, u8 k)
{
	qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
	qm_ccsr_out(REG_PFDR_CFG, k);
}

static void qm_set_sfdr_threshold(u16 th)
{
	qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
}
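
/*
 * Seed the PFDR free pool via the MCR "INIT_PFDR" management command:
 * hand the range of 64-byte PFDR entries starting at @pfdr_start to the
 * hardware free list.
 */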
static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
{
	u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));

	DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);

	/* Make sure the command interface is 'idle' */
	if (!MCR_rslt_idle(rslt)) {
		dev_crit(dev, "QMAN_MCR isn't idle");
		WARN_ON(1);
	}

	/* Write the MCR command params then the verb */
	qm_ccsr_out(REG_MCP(0), pfdr_start);
	qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
	dma_wmb();
	qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
	/* Poll for the result */
	do {
		rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
	} while (!MCR_rslt_idle(rslt));
	if (MCR_rslt_ok(rslt))
		return 0;
	if (MCR_rslt_eaccess(rslt))
		return -EACCES;
	if (MCR_rslt_inval(rslt))
		return -EINVAL;
	dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
	return -ENODEV;
}

/*
 * QMan needs two global memory areas initialized at boot time:
 *   1) FQD: Frame Queue Descriptors, used to manage frame queues
 *   2) PFDR: Packed Frame Descriptor Records, used to store frames
 * Both areas are reserved via the device tree reserved-memory mechanism
 * (or allocated during probe) and programmed into QMan by
 * qman_init_ccsr().
 */
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;

#ifdef CONFIG_PPC
/*
 * Legacy PPC device trees describe the FQD/PFDR reservations with
 * dedicated compatibles, along these lines (illustrative only):
 *
 *	qman_fqd: qman-fqd {
 *		compatible = "fsl,qman-fqd";
 *		size = <0 0x400000>;
 *		alignment = <0 0x400000>;
 *	};
 *
 * The handlers below pick those reservations up at early boot.
 */
static int zero_priv_mem(phys_addr_t addr, size_t sz)
{
	/* map as cacheable, non-guarded */
	void __iomem *tmpp = ioremap_cache(addr, sz);

	if (!tmpp)
		return -ENOMEM;

	memset_io(tmpp, 0, sz);
	flush_dcache_range((unsigned long)tmpp,
			   (unsigned long)tmpp + sz);
	iounmap(tmpp);

	return 0;
}

static int qman_fqd(struct reserved_mem *rmem)
{
	fqd_a = rmem->base;
	fqd_sz = rmem->size;

	WARN_ON(!(fqd_a && fqd_sz));
	return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);

static int qman_pfdr(struct reserved_mem *rmem)
{
	pfdr_a = rmem->base;
	pfdr_sz = rmem->size;

	WARN_ON(!(pfdr_a && pfdr_sz));
	return 0;
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);

#endif

unsigned int qm_get_fqid_maxcnt(void)
{
	/* each frame queue descriptor occupies 64 bytes of FQD memory */
	return fqd_sz / 64;
}
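
/*
 * Dump the EDATA capture words for a memory with @bit_count valid bits.
 * EDATA is 16 32-bit registers and the captured data is right
 * justified, so only the last words carry meaningful bits.
 */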
static void log_edata_bits(struct device *dev, u32 bit_count)
{
	u32 i, j, mask = 0xffffffff;

	dev_warn(dev, "ErrInt, EDATA:\n");
	i = bit_count / 32;
	if (bit_count % 32) {
		i++;
		mask = ~(mask << bit_count % 32);
	}
	j = 16 - i;
	dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
	j++;
	for (; j < 16; j++)
		dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
}
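
/*
 * Report whatever the error capture registers (ECIR/ECIR2, EADR, EDATA)
 * recorded about the failing portal, FQID or internal memory.
 */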
static void log_additional_error_info(struct device *dev, u32 isr_val,
				      u32 ecsr_val)
{
	struct qm_ecir ecir_val;
	struct qm_eadr eadr_val;
	int memid;

	ecir_val.info = qm_ccsr_in(REG_ECIR);
	/* REV3 hardware reports the portal number via ECIR2 instead */
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		struct qm_ecir2 ecir2_val;

		ecir2_val.info = qm_ccsr_in(REG_ECIR2);
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
				 qm_ecir2_get_pnum(&ecir2_val));
		}
		if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI | QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_v3_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
					& qm_eadr_v3_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	} else {
		if (ecsr_val & PORTAL_ECSR_ERR) {
			dev_warn(dev, "ErrInt: %s id %d\n",
				 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
				 qm_ecir_get_pnum(&ecir_val));
		}
		if (ecsr_val & FQID_ECSR_ERR)
			dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
				 qm_ecir_get_fqid(&ecir_val));

		if (ecsr_val & (QM_EIRQ_SBEI | QM_EIRQ_MBEI)) {
			eadr_val.info = qm_ccsr_in(REG_EADR);
			memid = qm_eadr_get_memid(&eadr_val);
			dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
				 error_mdata[memid].txt,
				 error_mdata[memid].addr_mask
					& qm_eadr_get_eadr(&eadr_val));
			log_edata_bits(dev, error_mdata[memid].bits);
		}
	}
}
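
/* QMan error interrupt handler (ERR_ISR) */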
static irqreturn_t qman_isr(int irq, void *ptr)
{
	u32 isr_val, ier_val, ecsr_val, isr_mask, i;
	struct device *dev = ptr;

	ier_val = qm_ccsr_in(REG_ERR_IER);
	isr_val = qm_ccsr_in(REG_ERR_ISR);
	ecsr_val = qm_ccsr_in(REG_ECSR);
	isr_mask = isr_val & ier_val;

	if (!isr_mask)
		return IRQ_NONE;

	for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
		if (qman_hwerr_txts[i].mask & isr_mask) {
			dev_err_ratelimited(dev, "ErrInt: %s\n",
					    qman_hwerr_txts[i].txt);
			if (qman_hwerr_txts[i].mask & ecsr_val) {
				log_additional_error_info(dev, isr_mask,
							  ecsr_val);
				/* Re-arm error capture registers */
				qm_ccsr_out(REG_ECSR, ecsr_val);
			}
			if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
				dev_dbg(dev, "Disabling error 0x%x\n",
					qman_hwerr_txts[i].mask);
				ier_val &= ~qman_hwerr_txts[i].mask;
				qm_ccsr_out(REG_ERR_IER, ier_val);
			}
		}
	}
	qm_ccsr_out(REG_ERR_ISR, isr_val);

	return IRQ_HANDLED;
}
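
/*
 * One-time CCSR setup: program the private memory regions, seed the
 * PFDR free pool, set thresholds and default scheduling weights.
 */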
static int qman_init_ccsr(struct device *dev)
{
	int i, err;

	/* FQD memory */
	err = qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
	if (err < 0)
		return err;
	/* PFDR memory */
	err = qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
	if (err < 0)
		return err;
	/* Only initialize the PFDR free pool if the memory is fresh */
	if (err == 0) {
		err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
		if (err)
			return err;
	}
	/* thresholds */
	qm_set_pfdr_threshold(512, 64);
	qm_set_sfdr_threshold(128);
	/* clear any stale PFDR-enqueues-blocked state */
	qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
	/* corenet initiator settings */
	qm_set_corenet_initiator();
	/* HID settings */
	qm_set_hid();
	/* Set scheduling weights to defaults */
	for (i = qm_wq_first; i <= qm_wq_last; i++)
		qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
	/* We are not prepared to accept ERNs for hardware enqueues */
	qm_set_dc(qm_dc_portal_fman0, 1, 0);
	qm_set_dc(qm_dc_portal_fman1, 1, 0);
	return 0;
}
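
/*
 * The LIODN offset (bits 16-27 of QCSP_LIO_CFG) is captured from the
 * first portal that is fixed up and then copied to every later portal,
 * so that all software portals share one LIODN offset.
 */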
#define LIO_CFG_LIODN_MASK 0x0fff0000
void __qman_liodn_fixup(u16 channel)
{
	static int done;
	static u32 liodn_offset;
	u32 before, after;
	int idx = channel - QM_CHANNEL_SWPORTAL0;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
	else
		before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
	if (!done) {
		liodn_offset = before & LIO_CFG_LIODN_MASK;
		done = 1;
		return;
	}
	after = (before & ~LIO_CFG_LIODN_MASK) | liodn_offset;
	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
		qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
	else
		qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
}

#define IO_CFG_SDEST_MASK 0x00ff0000
void qman_set_sdest(u16 channel, unsigned int cpu_idx)
{
	int idx = channel - QM_CHANNEL_SWPORTAL0;
	u32 before, after;

	if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
		before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
		/* Each pair of vcpu shares the same SRQ(SDEST) */
		cpu_idx /= 2;
		after = (before & ~IO_CFG_SDEST_MASK) | (cpu_idx << 16);
		qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
	} else {
		before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
		after = (before & ~IO_CFG_SDEST_MASK) | (cpu_idx << 16);
		qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
	}
}
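
/*
 * Seed the generic allocator pools with the pool channels, CGR IDs and
 * FQIDs this QMan provides; the counts vary with the IP revision.
 */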
static int qman_resource_init(struct device *dev)
{
	int pool_chan_num, cgrid_num;
	int ret, i;

	switch (qman_ip_rev >> 8) {
	case 1:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	case 2:
		pool_chan_num = 3;
		cgrid_num = 64;
		break;
	case 3:
		pool_chan_num = 15;
		cgrid_num = 256;
		break;
	default:
		return -ENODEV;
	}

	ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
			   pool_chan_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
		return ret;
	}

	ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
	if (ret) {
		dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
		return ret;
	}

	/* parse the pool channels (at most 15) into the SDQCR mask */
	for (i = 0; i < pool_chan_num; i++)
		qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);

	ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
			   qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
	if (ret) {
		dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
		return ret;
	}

	return 0;
}

int qman_is_probed(void)
{
	return __qman_probed;
}
EXPORT_SYMBOL_GPL(qman_is_probed);

int qman_requires_cleanup(void)
{
	return __qman_requires_cleanup;
}

void qman_done_cleanup(void)
{
	qman_enable_irqs();
	__qman_requires_cleanup = 0;
}

static int fsl_qman_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	int ret, err_irq;
	u16 id;
	u8 major, minor;

	__qman_probed = -1;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
			node);
		return -ENXIO;
	}
	qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
	if (!qm_ccsr_start)
		return -ENXIO;

	qm_get_version(&id, &major, &minor);
	if (major == 1 && minor == 0) {
		dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
		return -ENODEV;
	} else if (major == 1 && minor == 1)
		qman_ip_rev = QMAN_REV11;
	else if (major == 1 && minor == 2)
		qman_ip_rev = QMAN_REV12;
	else if (major == 2 && minor == 0)
		qman_ip_rev = QMAN_REV20;
	else if (major == 3 && minor == 0)
		qman_ip_rev = QMAN_REV30;
	else if (major == 3 && minor == 1)
		qman_ip_rev = QMAN_REV31;
	else if (major == 3 && minor == 2)
		qman_ip_rev = QMAN_REV32;
	else {
		dev_err(dev, "Unknown QMan version\n");
		return -ENODEV;
	}

	if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
		qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
		qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
	}

	if (fqd_a) {
#ifdef CONFIG_PPC
		/*
		 * FQD memory MUST be zeroed before it is handed to QMan,
		 * even if the reservation came from a previous boot stage.
		 */
		zero_priv_mem(fqd_a, fqd_sz);
#else
		WARN(1, "Unexpected architecture using non shared-dma-mem reservations");
#endif
	} else {
		/*
		 * The order of memory regions is assumed to be FQD followed
		 * by PFDR; to ensure allocations come from the correct
		 * region, initialize then allocate each one in order.
		 */
		ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
		if (ret) {
			dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
				ret);
			return -ENODEV;
		}
	}
	dev_dbg(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);

	if (!pfdr_a) {
		/* and the same for the PFDR region */
		ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
		if (ret) {
			dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
				ret);
			return -ENODEV;
		}
	}
	dev_dbg(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);

	ret = qman_init_ccsr(dev);
	if (ret) {
		dev_err(dev, "CCSR setup failed\n");
		return ret;
	}

	err_irq = platform_get_irq(pdev, 0);
	if (err_irq <= 0) {
		dev_info(dev, "Can't get %pOF property 'interrupts'\n",
			 node);
		return -ENODEV;
	}
	ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
			       dev);
	if (ret) {
		dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
			ret, node);
		return ret;
	}
	/*
	 * Write-to-clear any stale bits, e.g. starvation asserted before
	 * the resource allocation done below.
	 */
	qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
	/* Enable Error Interrupts */
	qm_ccsr_out(REG_ERR_IER, 0xffffffff);

	qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
	if (IS_ERR(qm_fqalloc)) {
		ret = PTR_ERR(qm_fqalloc);
		dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
	if (IS_ERR(qm_qpalloc)) {
		ret = PTR_ERR(qm_qpalloc);
		dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
		return ret;
	}

	qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
	if (IS_ERR(qm_cgralloc)) {
		ret = PTR_ERR(qm_cgralloc);
		dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
		return ret;
	}

	ret = qman_resource_init(dev);
	if (ret)
		return ret;

	ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
	if (ret)
		return ret;

	ret = qman_wq_alloc();
	if (ret)
		return ret;

	__qman_probed = 1;

	return 0;
}

static const struct of_device_id fsl_qman_ids[] = {
	{
		.compatible = "fsl,qman",
	},
	{}
};

static struct platform_driver fsl_qman_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = fsl_qman_ids,
		.suppress_bind_attrs = true,
	},
	.probe = fsl_qman_probe,
};

builtin_platform_driver(fsl_qman_driver);