#include "qman_priv.h"
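
/*
 * Portal tuning constants (descriptive note, inferred from how they are used
 * below): DQRR_MAXFILL caps how many DQRR entries a portal may hold at once,
 * and the QMAN_PIRQ_* values are the interrupt thresholds/period programmed
 * into each portal by qman_create_portal().
 */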
#define DQRR_MAXFILL	15
#define EQCR_ITHRESH	4
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT	32
#define QMAN_PIRQ_DQRR_ITHRESH	12
#define QMAN_DQRR_IT_MAX	15
#define QMAN_ITP_MAX	0xFFF
#define QMAN_PIRQ_MR_ITHRESH	4
#define QMAN_PIRQ_IPERIOD	100

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
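/* Cache-inhibited register offsets */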
#define QM_REG_EQCR_PI_CINH	0x3000
#define QM_REG_EQCR_CI_CINH	0x3040
#define QM_REG_EQCR_ITR		0x3080
#define QM_REG_DQRR_PI_CINH	0x3100
#define QM_REG_DQRR_CI_CINH	0x3140
#define QM_REG_DQRR_ITR		0x3180
#define QM_REG_DQRR_DCAP	0x31C0
#define QM_REG_DQRR_SDQCR	0x3200
#define QM_REG_DQRR_VDQCR	0x3240
#define QM_REG_DQRR_PDQCR	0x3280
#define QM_REG_MR_PI_CINH	0x3300
#define QM_REG_MR_CI_CINH	0x3340
#define QM_REG_MR_ITR		0x3380
#define QM_REG_CFG		0x3500
#define QM_REG_ISR		0x3600
#define QM_REG_IER		0x3640
#define QM_REG_ISDR		0x3680
#define QM_REG_IIR		0x36C0
#define QM_REG_ITPR		0x3740
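/* Cache-enabled register offsets */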
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3040
#define QM_CL_DQRR_PI_CENA	0x3100
#define QM_CL_DQRR_CI_CENA	0x3140
#define QM_CL_MR_PI_CENA	0x3300
#define QM_CL_MR_CI_CENA	0x3340
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940

#else
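/* Cache-inhibited register offsets */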
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14
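/* Cache-enabled register offsets */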
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940
#endif
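/*
 * Cache-enabled ring access: each ring entry occupies one 64-byte cache
 * line, so indexing steps the base address in cache-line increments.
 */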
#define qm_cl(base, idx)	((void *)base + ((idx) << 6))
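/*
 * Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *     dmode == h/w dequeue mode.
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */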
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
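/* --- Portal structures --- */
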
#define QM_EQCR_SIZE		8
#define QM_DQRR_SIZE		16
#define QM_MR_SIZE		8

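/* "Enqueue Command" */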
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	__be16 seqnum;
	u8 __reserved[4];
	__be32 fqid;	/* 24-bit */
	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed __aligned(8);
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */

struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum qm_eqcr_pmode pmode;
#endif
};

struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};

struct qm_mr {
	union qm_mr_entry *ring, *cursor;
	u8 pi, ci, fill, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};
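/* MC (Management Command) command */
/* "FQ" command layout */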
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	__be32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;
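/* "CGR" command layout */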
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};
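/* MC (Management Command) result */
/* "Query FQ" */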
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;
	u8 __reserved2[30];
} __packed;
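/* "Alter FQ State Commands" */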
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};

struct qm_mc {
	union qm_mc_command *cr;
	union qm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};

struct qm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

struct qm_portal {
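	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons here.
	 */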
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;
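/* Cache-inhibited register access */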
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}
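/* Cache-enabled portal access */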
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}
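/* --- EQCR API --- */

/*
 * A sketch of the enqueue fast path built on the helpers below (this mirrors
 * qman_enqueue() further down, shown here only to orient the reader):
 *
 *	eq = qm_eqcr_start_no_stash(&p->p);		claim the cursor entry
 *	qm_fqid_set(eq, fq->fqid);			fill in FQID, tag, fd
 *	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
 *							write verb+valid-bit last
 */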
#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)

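/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */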
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~EQCR_CARRY;

	return (struct qm_eqcr_entry *)addr;
}
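/* Bit-wise logic to convert a ring pointer to a ring index */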
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}
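/* Increment the 'cursor' ring pointer, taking 'vbit' into account */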
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{
	/* increment to the next EQCR pointer and handle overflow and 'vbit' */
	struct qm_eqcr_entry *partial = eqcr->cursor + 1;

	eqcr->cursor = eqcr_carryclear(partial);
	if (partial != eqcr->cursor)
		eqcr->vbit ^= QM_EQCR_VERB_VBIT;
}

static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |		/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |		/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);		/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}

static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}

static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	eqcr->ithresh = ithresh;
	qm_out(portal, QM_REG_EQCR_ITR, ithresh);
}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return eqcr->available;
}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	return QM_EQCR_SIZE - 1 - eqcr->available;
}
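/* --- DQRR API --- */
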
#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)

static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~DQRR_CARRY;

	return (const struct qm_dqrr_entry *)addr;
}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{
	return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{
	return dqrr_carryclear(e + 1);
}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{
	qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
				   ((mf & (QM_DQRR_SIZE - 1)) << 20));
}

static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
			QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
		((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
		((dmode & 1) << 18) |			/* DP */
		((cmode & 3) << 16) |			/* DCM */
		0xa0 |					/* RE+SE */
		(0 ? 0x40 : 0) |			/* Ignore RP */
		(0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (!dqrr->fill)
		return NULL;
	return dqrr->cursor;
}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->fill);
	dqrr->cursor = dqrr_inc(dqrr->cursor);
	return --dqrr->fill;
}

static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) |	/* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |			/* DQRR_DCAP::PK */
	       idx);					/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) |	/* DQRR_DCAP::S */
	       (bitmask << 16));			/* DQRR_DCAP::DCAP_CI */
}

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}

static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	/* The DQRR interrupt threshold is a 4-bit field; reject larger values */
	if (ithresh > QMAN_DQRR_IT_MAX)
		return -EINVAL;

	qm_out(portal, QM_REG_DQRR_ITR, ithresh);

	return 0;
}
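/* --- MR API --- */
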
#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~MR_CARRY;

	return (union qm_mr_entry *)addr;
}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{
	return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{
	return mr_carryclear(e + 1);
}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}

static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (!mr->fill)
		return NULL;
	return mr->cursor;
}

static inline int qm_mr_next(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->fill);
	mr->cursor = mr_inc(mr->cursor);
	return --mr->fill;
}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);

	if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}
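/* --- Management command API --- */

/*
 * MC commands follow a start/commit/poll-result pattern; a sketch of how the
 * qman_* APIs below drive these primitives (see e.g. qman_query_fq()):
 *
 *	mcc = qm_mc_start(&p->p);
 *	qm_fqid_set(&mcc->fq, fqid);
 *	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
 *	if (!qm_mc_result_timeout(&p->p, &mcr))
 *		... timeout ...
 */
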
static inline int qm_mc_init(struct qm_portal *portal)
{
	u8 rr0, rr1;
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
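	/*
	 * The expected valid-bit polarity for the next CR command is 0
	 * if RR1 contains a valid response, and is 1 if RR0 contains a
	 * valid response. If both RR contain all 0, this indicates either
	 * that no command has been executed since reset (in which case the
	 * expected valid-bit polarity is 1).
	 */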
	rr0 = mc->rr->verb;
	rr1 = (mc->rr+1)->verb;
	if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
		mc->rridx = 1;
	else
		mc->rridx = 0;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}

static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}

static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
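	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */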
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}

static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{
	int timeout = QM_MCR_TIMEOUT;

	do {
		*mcr = qm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}

static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	fq->flags |= mask;
}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	fq->flags &= ~mask;
}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{
	return fq->flags & mask;
}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{
	return !(fq->flags & mask);
}

struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}

static inline struct qman_portal *get_portal_for_channel(u16 channel)
{
	int i;

	for (i = 0; i < num_possible_cpus(); i++) {
		if (affine_portals[i] &&
		    affine_portals[i]->config->channel == channel)
			return affine_portals[i];
	}

	return NULL;
}

static struct workqueue_struct *qm_portal_wq;

int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
{
	int res;

	if (!portal)
		return -EINVAL;

	res = qm_dqrr_set_ithresh(&portal->p, ithresh);
	if (res)
		return res;

	portal->p.dqrr.ithresh = ithresh;

	return 0;
}
EXPORT_SYMBOL(qman_dqrr_set_ithresh);

void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
{
	if (portal && ithresh)
		*ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
}
EXPORT_SYMBOL(qman_dqrr_get_ithresh);

void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
{
	if (portal && iperiod)
		*iperiod = qm_in(&portal->p, QM_REG_ITPR);
}
EXPORT_SYMBOL(qman_portal_get_iperiod);

int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
{
	if (!portal || iperiod > QMAN_ITP_MAX)
		return -EINVAL;

	qm_out(&portal->p, QM_REG_ITPR, iperiod);

	return 0;
}
EXPORT_SYMBOL(qman_portal_set_iperiod);

int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	if (!qm_portal_wq)
		return -ENOMEM;
	return 0;
}

void qman_enable_irqs(void)
{
	int i;

	for (i = 0; i < num_possible_cpus(); i++) {
		if (affine_portals[i]) {
			qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
			qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
		}
	}
}
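/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on
 */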
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static struct qman_fq **fq_table;
static u32 num_fqids;

int qman_alloc_fq_table(u32 _num_fqids)
{
	num_fqids = _num_fqids;

	fq_table = vzalloc(array3_size(sizeof(struct qman_fq *),
				       num_fqids, 2));
	if (!fq_table)
		return -ENOMEM;

	pr_debug("Allocated fq lookup table at %p, entry count %u\n",
		 fq_table, num_fqids * 2);
	return 0;
}

static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}
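/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */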
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	return idx_to_fq(fqid * 2);
}

static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}

static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit,
					      bool sched_napi);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
	u32 clear = 0;

	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI) {
		__poll_portal_fast(p, QMAN_POLL_LIMIT, true);
		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
	}
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}

static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce,
		 * we need to allow it time to produce those entries once the
		 * existing entries are consumed, so delay briefly and check
		 * again before concluding the MR is drained.
		 */
		mdelay(1);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}

static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with high-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	portal->cgrs = kmalloc_array(2, sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	qm_out(p, QM_REG_IIR, 1);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_dbg(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* FQs were retired on this portal */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISR, 0xffffffff);
	qm_out(p, QM_REG_ISDR, 0);
	if (!qman_requires_cleanup())
		qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
	affine_portals[c->cpu] = portal;
	spin_unlock(&affine_mask_lock);

	return portal;
}

static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	qm->config = NULL;
}

const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
	put_affine_portal();
	return pcfg;
}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}

static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}

static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* It's a software ERN */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
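/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined
 */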
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}
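/*
 * The only states that could conflict with other things running on the same
 * cpu are setting/clearing vdqcr_owned and clearing the NE (Not Empty) flag,
 * and both are safe: vdqcr_owned is only set by qman_volatile_dequeue() with
 * interrupts disabled, and NE is only set by qman_retire_fq(), which likewise
 * prevents interference until it is safe. Both of those run far less
 * frequently (per-FQ) than this function does, so the nett advantage is that
 * the DQRR fast path below doesn't need to "lock" anything.
 */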
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit,
					      bool sched_napi)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues.  It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq, sched_napi);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* "defer" just means "skip it, I'll consume it myself later" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter.  The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing budget;
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}

void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	p->irq_sources |= bits & QM_PIRQ_VISIBLE;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_add);

void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears the status register
	 * entries that we're interested in (as per irq_sources). If the h/w
	 * asserts a source we've just disabled, nobody else would ack it, so
	 * after shrinking the IER we read it back and write the complement
	 * to the ISR to clear any such stale assertions.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	p->irq_sources &= ~bits;
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);

const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);

u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);

struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);

int qman_start_using_portal(struct qman_portal *p, struct device *dev)
{
	return (!device_link_add(dev, p->config->dev,
				 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
}
EXPORT_SYMBOL(qman_start_using_portal);

int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit, false);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);

void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);
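/* Frame queue API */
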
static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "<unknown MCR result>";
}

int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{
	if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
		int ret = qman_alloc_fqid(&fqid);

		if (ret)
			return ret;
	}
	fq->fqid = fqid;
	fq->flags = flags;
	fq->state = qman_fq_state_oos;
	fq->cgr_groupid = 0;

	/* A context_b of 0 is allegedly special, so don't use that fqid */
	if (fqid == 0 || fqid >= num_fqids) {
		WARN(1, "bad fqid %d\n", fqid);
		return -EINVAL;
	}

	fq->idx = fqid * 2;
	if (flags & QMAN_FQ_FLAG_NO_MODIFY)
		fq->idx++;

	WARN_ON(fq_table[fq->idx]);
	fq_table[fq->idx] = fq;

	return 0;
}
EXPORT_SYMBOL(qman_create_fq);

void qman_destroy_fq(struct qman_fq *fq)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;
		return;
	default:
		break;
	}
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);

u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);

int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as
	 * a demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);

int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue a ALTERFQ_SCHED management command */
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);

int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * Retirement is a two-step affair: if the FQ can be retired
	 * immediately, the command completes with QM_MCR_RESULT_OK and we
	 * update the FQ state here; otherwise the result is PENDING, the
	 * transition will be reported asynchronously via an FQRN message,
	 * so all we flag here is that the FQ is CHANGING.
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Immediate retirement means no FQRN message follows,
			 * and FQRNIs are dropped on the floor by
			 * qm_mr_process_task(); so if the caller registered an
			 * FQ-state-change callback, synthesize an FQRNI
			 * message here so it still sees the transition.
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);

int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);

int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}

int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);

static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}

int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
{
	struct qm_mcr_querycgr query_cgr;
	int err;

	err = qman_query_cgr(cgr, &query_cgr);
	if (err)
		return err;

	*result = !!query_cgr.cgr.cs;
	return 0;
}
EXPORT_SYMBOL(qman_query_cgr_congested);
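/*
 * Volatile (VDQCR) dequeue support: a portal can own at most one volatile
 * dequeue at a time, so these helpers claim the portal's vdqcr_owned slot
 * (with local interrupts disabled) before writing the VDQCR register.
 */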
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}

static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{
	int ret;

	*p = get_affine_portal();
	ret = set_p_vdqcr(*p, fq, vdqcr);
	put_affine_portal();
	return ret;
}

static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}
2265
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;

	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive before the VDQCR in fact, leaving the
			 * VDQCR compromised...
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
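
/*
 * Example (illustrative sketch only, not part of the driver): draining up to
 * three frames from a parked or retired FQ, blocking until the volatile
 * dequeue command has fully completed. Dequeued frames are delivered through
 * the portal's normal DQRR processing, not returned here. The function name
 * is hypothetical.
 */
static int __maybe_unused example_volatile_drain(struct qman_fq *fq)
{
	return qman_volatile_dequeue(fq,
				     QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH,
				     QM_VDQCR_NUMFRAMES_SET(3));
}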

static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{
	if (avail)
		qm_eqcr_cce_prefetch(&p->p);
	else
		qm_eqcr_cce_update(&p->p);
}

int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{
	struct qman_portal *p;
	struct qm_eqcr_entry *eq;
	unsigned long irqflags;
	u8 avail;
	int ret = 0;

	p = get_affine_portal();
	local_irq_save(irqflags);

	if (p->use_eqcr_ci_stashing) {
		/*
		 * The stashing case is easy, only update if we need to do
		 * so with the EQCR CI
		 */
		eq = qm_eqcr_start_stash(&p->p);
	} else {
		/*
		 * The non-stashing case is harder, need to prefetch ahead of
		 * time
		 */
		avail = qm_eqcr_get_avail(&p->p);
		if (avail < 2)
			update_eqcr_ci(p, avail);
		eq = qm_eqcr_start_no_stash(&p->p);
	}

	if (unlikely(!eq)) {
		/* EQCR is full; tell the caller to back off and retry */
		ret = -EBUSY;
		goto out;
	}

	qm_fqid_set(eq, fq->fqid);
	eq->tag = cpu_to_be32(fq_to_tag(fq));
	eq->fd = *fd;

	qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
out:
	local_irq_restore(irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_enqueue);
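
/*
 * Example (illustrative sketch only, not part of the driver): enqueueing a
 * frame with a small bounded retry when the EQCR is momentarily full and
 * qman_enqueue() returns -EBUSY. The retry count and function name are
 * arbitrary assumptions.
 */
static int __maybe_unused example_enqueue_retry(struct qman_fq *fq,
						const struct qm_fd *fd)
{
	int i, err = -EBUSY;

	for (i = 0; i < 8 && err == -EBUSY; i++)
		err = qman_enqueue(fq, fd);
	return err;
}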

static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}

#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)

/* congestion state change notification target update control */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
	else
		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}

static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
	else
		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}

/* the CPU each CGR was created on, used by qman_delete_cgr_safe() */
static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int err_cnt = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			err_cnt++;
	}

	if (err_cnt)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       err_cnt, (err_cnt > 1) ? "s" : "");
}

int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	unsigned long irqflags;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	/* cgr_lock is also taken from interrupt context, so disable IRQs */
	spin_lock_irqsave(&p->cgr_lock, irqflags);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
				     be32_to_cpu(cgr_state.cgr.cscn_targ));
		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);
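
/*
 * Example (illustrative sketch only, not part of the driver): registering a
 * CGR with congestion state change notifications enabled and an entry
 * threshold. The callback, function names and threshold value are
 * hypothetical; qm_cgr_cs_thres_set64() is the qman.h helper for encoding
 * the threshold.
 */
static void example_cscn_cb(struct qman_portal *qm, struct qman_cgr *c,
			    int congested)
{
	pr_info("CGR %u is %scongested\n", c->cgrid,
		congested ? "" : "no longer ");
}

static int __maybe_unused example_create_cgr(struct qman_cgr *cgr)
{
	struct qm_mcc_initcgr opts;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	opts.cgr.cscn_en = QM_CGR_EN;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 1 << 20, 1);
	cgr->cb = example_cscn_cb;
	return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
}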

int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal\n");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);

struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};

static void qman_delete_cgr_smp_call(void *p)
{
	qman_delete_cgr((struct qman_cgr *)p);
}

/*
 * qman_delete_cgr() must run on the CPU whose affine portal created the CGR;
 * bounce the call there via an IPI if we are on any other CPU.
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr)
{
	preempt_disable();
	if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
		smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
					 qman_delete_cgr_smp_call, cgr, true);
		preempt_enable();
		return;
	}

	qman_delete_cgr(cgr);
	preempt_enable();
}
EXPORT_SYMBOL(qman_delete_cgr_safe);
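
/*
 * Example (illustrative sketch only, not part of the driver): tearing down a
 * CGR from any CPU and returning its ID to the allocator. The function name
 * is hypothetical; the ordering matters, as the CGR object must be deleted
 * before the CGRID is released.
 */
static void __maybe_unused example_teardown_cgr(struct qman_cgr *cgr)
{
	qman_delete_cgr_safe(cgr);
	qman_release_cgrid(cgr->cgrid);
}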

/* Cleanup FQs */

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{
	const union qm_mr_entry *msg;
	int found = 0;

	qm_mr_pvb_update(p);
	msg = qm_mr_current(p);
	while (msg) {
		if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
			found = 1;
		qm_mr_next(p);
		qm_mr_cci_consume_to_current(p);
		qm_mr_pvb_update(p);
		msg = qm_mr_current(p);
	}
	return found;
}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{
	const struct qm_dqrr_entry *dqrr;
	int found = 0;

	do {
		qm_dqrr_pvb_update(p);
		dqrr = qm_dqrr_current(p);
		if (!dqrr)
			cpu_relax();
	} while (wait && !dqrr);

	while (dqrr) {
		if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
			found = 1;
		qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
		qm_dqrr_pvb_update(p);
		qm_dqrr_next(p);
		dqrr = qm_dqrr_current(p);
	}
	return found;
}

#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)

int qman_shutdown_fq(u32 fqid)
{
	struct qman_portal *p, *channel_portal;
	struct device *dev;
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	int orl_empty, drain = 0, ret = 0;
	u32 channel, res;
	u8 state;

	p = get_affine_portal();
	dev = p->config->dev;
	/* Determine the state of the FQID */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ_NP timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
	if (state == QM_MCR_NP_STATE_OOS)
		goto out; /* Already OOS, no need to do anymore checks */

	/* Query which channel the FQ is using */
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(dev, "QUERYFQ timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	/* Need to store these since the MCR gets reused */
	channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
	qm_fqd_get_wq(&mcr->queryfq.fqd);

	if (channel < qm_channel_pool1) {
		channel_portal = get_portal_for_channel(channel);
		if (channel_portal == NULL) {
			dev_err(dev, "Can't find portal for dedicated channel 0x%x\n",
				channel);
			ret = -EIO;
			goto out;
		}
	} else
		channel_portal = p;

	switch (state) {
	case QM_MCR_NP_STATE_TEN_SCHED:
	case QM_MCR_NP_STATE_TRU_SCHED:
	case QM_MCR_NP_STATE_ACTIVE:
	case QM_MCR_NP_STATE_PARKED:
		orl_empty = 0;
		mcc = qm_mc_start(&channel_portal->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&channel_portal->p, QM_MCC_VERB_ALTER_RETIRE);
		if (!qm_mc_result_timeout(&channel_portal->p, &mcr)) {
			dev_err(dev, "ALTER_RETIRE timeout\n");
			ret = -ETIMEDOUT;
			goto out;
		}
		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_RETIRE);
		res = mcr->result; /* Make a copy as we reuse MCR below */

		if (res == QM_MCR_RESULT_OK)
			drain_mr_fqrni(&channel_portal->p);

		if (res == QM_MCR_RESULT_PENDING) {
			/*
			 * Need to wait for the FQRN in the message ring, which
			 * will only occur once the FQ has been drained.  In
			 * order for the FQ to drain the portal needs to be set
			 * to dequeue from the channel the FQ is scheduled on
			 */
			int found_fqrn = 0;

			/* Flag that we need to drain FQ */
			drain = 1;

			if (channel >= qm_channel_pool1 &&
			    channel < qm_channel_pool1 + 15) {
				/* Pool channel, enable the bit in the portal */
			} else if (channel < qm_channel_pool1) {
				/* Dedicated channel */
			} else {
				dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x\n",
					fqid, channel);
				ret = -EBUSY;
				goto out;
			}
			/* Set the sdqcr to drain this channel */
			if (channel < qm_channel_pool1)
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_DEDICATED);
			else
				qm_dqrr_sdqcr_set(&channel_portal->p,
						  QM_SDQCR_TYPE_ACTIVE |
						  QM_SDQCR_CHANNELS_POOL_CONV
						  (channel));
			do {
				/* Keep draining DQRR while checking the MR */
				qm_dqrr_drain_nomatch(&channel_portal->p);
				/* Process message ring too */
				found_fqrn = qm_mr_drain(&channel_portal->p,
							 FQRN);
				cpu_relax();
			} while (!found_fqrn);
			/* Restore SDQCR */
			qm_dqrr_sdqcr_set(&channel_portal->p,
					  channel_portal->sdqcr);

		}
		if (res != QM_MCR_RESULT_OK &&
		    res != QM_MCR_RESULT_PENDING) {
			dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
				fqid, res);
			ret = -EIO;
			goto out;
		}
		if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
			/*
			 * ORL had no entries, no need to wait until the
			 * ERNs come in
			 */
			orl_empty = 1;
		}
		/*
		 * Retirement succeeded, check to see if FQ needs
		 * to be drained
		 */
		if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
			/* FQ is Not Empty, drain using volatile DQ commands */
			do {
				u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);

				qm_dqrr_vdqcr_set(&p->p, vdqcr);
				/*
				 * Wait for a dequeue and process the dequeues,
				 * making sure to empty the ring completely
				 */
			} while (!qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
		}

		while (!orl_empty) {
			/* Wait for the ORL to have been completely drained */
			orl_empty = qm_mr_drain(&p->p, FQRL);
			cpu_relax();
		}
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_RETIRED:
		/* Send OOS Command */
		mcc = qm_mc_start(&p->p);
		qm_fqid_set(&mcc->fq, fqid);
		qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
		if (!qm_mc_result_timeout(&p->p, &mcr)) {
			ret = -ETIMEDOUT;
			goto out;
		}

		DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
			    QM_MCR_VERB_ALTER_OOS);
		if (mcr->result != QM_MCR_RESULT_OK) {
			dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
				fqid, mcr->result);
			ret = -EIO;
			goto out;
		}
		break;

	case QM_MCR_NP_STATE_OOS:
		/* Done */
		break;

	default:
		ret = -EIO;
	}

out:
	put_affine_portal();
	return ret;
}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	if (!p)
		return -ENODEV;

	addr = gen_pool_alloc(p, cnt);
	if (!addr)
		return -ENOMEM;

	/* IDs are stored offset by DPAA_GENALLOC_OFF, mask that back off */
	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

int qman_release_fqid(u32 fqid)
{
	int ret = qman_shutdown_fq(fqid);

	if (ret) {
		pr_debug("FQID %d leaked\n", fqid);
		return ret;
	}

	gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_fqid);
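
/*
 * Example (illustrative sketch only, not part of the driver): allocating a
 * single FQID from the range allocator and handing it back, which also shuts
 * the FQ down in hardware via qman_shutdown_fq(). The function name is
 * hypothetical.
 */
static int __maybe_unused example_fqid_round_trip(void)
{
	u32 fqid;
	int err;

	err = qman_alloc_fqid_range(&fqid, 1);
	if (err)
		return err;
	/* ... use the FQID with qman_create_fq()/qman_init_fq() ... */
	return qman_release_fqid(fqid);
}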

static int qpool_cleanup(u32 qp)
{
	/*
	 * We query all FQDs starting from
	 * FQID 1 until we get an "invalid FQID" error, looking for non-OOS
	 * FQDs whose destination channel is the pool-channel being released.
	 * When a non-OOS FQD is found we attempt to clean it up
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (qm_fqd_get_chan(&fqd) == qp) {
				/* The channel is the FQ's target, clean it */
				err = qman_shutdown_fq(fq.fqid);
				if (err)
					/*
					 * Couldn't shut down the FQ
					 * so the pool must be leaked
					 */
					return err;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_pool(u32 qp)
{
	int ret;

	ret = qpool_cleanup(qp);
	if (ret) {
		pr_debug("CHID %d leaked\n", qp);
		return ret;
	}

	gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{
	/*
	 * query all FQDs starting from FQID 1 until we get an "invalid FQID"
	 * error, looking for non-OOS FQDs whose CGR is the CGR being released
	 */
	struct qman_fq fq = {
		.fqid = QM_FQID_RANGE_START
	};
	int err;

	do {
		struct qm_mcr_queryfq_np np;

		err = qman_query_fq_np(&fq, &np);
		if (err == -ERANGE)
			/* FQID range exceeded, found no problems */
			return 0;
		else if (WARN_ON(err))
			return err;

		if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
			struct qm_fqd fqd;

			err = qman_query_fq(&fq, &fqd);
			if (WARN_ON(err))
				return err;
			if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
			    fqd.cgid == cgrid) {
				pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
				       cgrid, fq.fqid);
				return -EIO;
			}
		}
		/* Move to the next FQID */
		fq.fqid++;
	} while (1);
}

int qman_release_cgrid(u32 cgrid)
{
	int ret;

	ret = cgr_cleanup(cgrid);
	if (ret) {
		pr_debug("CGRID %d leaked\n", cgrid);
		return ret;
	}

	gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
EXPORT_SYMBOL(qman_release_cgrid);