/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *   names of its contributors may be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __FSL_QMAN_H
#define __FSL_QMAN_H

#include <linux/bitops.h>
#include <linux/device.h>

/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_POOL1_REV3 0x401
#define QMAN_CHANNEL_CAAM_REV3 0x840
extern u16 qm_channel_pool1;
extern u16 qm_channel_caam;

/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI    0x00100000  /* Congestion State Change */
#define QM_PIRQ_EQCI    0x00080000  /* Enqueue Command Committed */
#define QM_PIRQ_EQRI    0x00040000  /* EQCR Ring (below threshold) */
#define QM_PIRQ_DQRI    0x00020000  /* DQRR Ring (non-empty) */
#define QM_PIRQ_MRI 0x00010000  /* MR Ring (non-empty) */
/*
 * This mask contains all the interrupt sources that need handling except DQRI,
 * i.e. sources that, if present, should trigger slow-path processing.
 */
#define QM_PIRQ_SLOW    (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
             QM_PIRQ_MRI)

/* For qman_static_dequeue_*** APIs */
#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
/* for n in [1,15] */
#define QM_SDQCR_CHANNELS_POOL(n)   (0x00008000 >> (n))
/* for conversion from n of qm_channel */
static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
{
    return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
}

/* --- QMan data structures (and associated constants) --- */

/* "Frame Descriptor (FD)" */
struct qm_fd {
    union {
        struct {
            u8 cfg8b_w1;
            u8 bpid;    /* Buffer Pool ID */
            u8 cfg8b_w3;
            u8 addr_hi; /* high 8-bits of 40-bit address */
            __be32 addr_lo; /* low 32-bits of 40-bit address */
        } __packed;
        __be64 data;
    };
    __be32 cfg; /* format, offset, length / congestion */
    union {
        __be32 cmd;
        __be32 status;
    };
} __aligned(8);

#define QM_FD_FORMAT_SG     BIT(31)
#define QM_FD_FORMAT_LONG   BIT(30)
#define QM_FD_FORMAT_COMPOUND   BIT(29)
#define QM_FD_FORMAT_MASK   GENMASK(31, 29)
#define QM_FD_OFF_SHIFT     20
#define QM_FD_OFF_MASK      GENMASK(28, 20)
#define QM_FD_LEN_MASK      GENMASK(19, 0)
#define QM_FD_LEN_BIG_MASK  GENMASK(28, 0)

enum qm_fd_format {
    /*
     * 'contig' implies a contiguous buffer, whereas 'sg' implies a
     * scatter-gather table. 'big' implies a 29-bit length with no offset
     * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
     * implies a s/g-like table, where each entry itself represents a frame
     * (contiguous or scatter-gather) and the 29-bit "length" is
     * interpreted purely for congestion calculations, ie. a "congestion
     * weight".
     */
    qm_fd_contig = 0,
    qm_fd_contig_big = QM_FD_FORMAT_LONG,
    qm_fd_sg = QM_FD_FORMAT_SG,
    qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
    qm_fd_compound = QM_FD_FORMAT_COMPOUND
};

static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
{
    return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
{
    return be64_to_cpu(fd->data) & 0xffffffffffLLU;
}

static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
{
    fd->addr_hi = upper_32_bits(addr);
    fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

/*
 * The 'format' field indicates the interpretation of the remaining
 * 29 bits of the 32-bit word.
 * If 'format' is _contig or _sg, 20b length and 9b offset.
 * If 'format' is _contig_big or _sg_big, 29b length.
 * If 'format' is _compound, 29b "congestion weight".
 */
static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
{
    return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
}

static inline int qm_fd_get_offset(const struct qm_fd *fd)
{
    return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
}

static inline int qm_fd_get_length(const struct qm_fd *fd)
{
    return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
}

static inline int qm_fd_get_len_big(const struct qm_fd *fd)
{
    return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
}

static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
                   int off, int len)
{
    fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
                  ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
}

#define qm_fd_set_contig(fd, off, len) \
    qm_fd_set_param(fd, qm_fd_contig, off, len)
#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
#define qm_fd_set_contig_big(fd, len) \
    qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)

static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
    fd->data = 0;
    fd->cfg = 0;
    fd->cmd = 0;
}
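
/*
 * Example (illustrative sketch, not part of the original header): preparing a
 * frame descriptor for a contiguous buffer. 'dma' and the offset/length
 * values are hypothetical.
 *
 *     struct qm_fd fd;
 *
 *     qm_fd_clear_fd(&fd);
 *     qm_fd_addr_set64(&fd, dma);       // 40-bit DMA address
 *     qm_fd_set_contig(&fd, 64, 1500);  // 9-bit offset, 20-bit length
 */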

/* Scatter/Gather table entry */
struct qm_sg_entry {
    union {
        struct {
            u8 __reserved1[3];
            u8 addr_hi; /* high 8-bits of 40-bit address */
            __be32 addr_lo; /* low 32-bits of 40-bit address */
        };
        __be64 data;
    };
    __be32 cfg; /* E bit, F bit, length */
    u8 __reserved2;
    u8 bpid;
    __be16 offset; /* 13-bit, _res[13-15] */
} __packed;

#define QM_SG_LEN_MASK  GENMASK(29, 0)
#define QM_SG_OFF_MASK  GENMASK(12, 0)
#define QM_SG_FIN   BIT(30)
#define QM_SG_EXT   BIT(31)

static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
{
    return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
{
    return be64_to_cpu(sg->data) & 0xffffffffffLLU;
}

static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
{
    sg->addr_hi = upper_32_bits(addr);
    sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
{
    return be32_to_cpu(sg->cfg) & QM_SG_FIN;
}

static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
{
    return be32_to_cpu(sg->cfg) & QM_SG_EXT;
}

static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
{
    return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
}

static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
{
    sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
}

static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
{
    sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
}

static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
{
    return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
}
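
/*
 * Example (illustrative sketch, not part of the original header): building a
 * two-entry scatter/gather table; 'dma0', 'dma1' and the lengths are
 * hypothetical.
 *
 *     struct qm_sg_entry sgt[2] = {};
 *
 *     qm_sg_entry_set64(&sgt[0], dma0);
 *     qm_sg_entry_set_len(&sgt[0], 1024);
 *     qm_sg_entry_set64(&sgt[1], dma1);
 *     qm_sg_entry_set_f(&sgt[1], 512);   // length + Final bit
 */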

/* "Frame Dequeue Response" */
struct qm_dqrr_entry {
    u8 verb;
    u8 stat;
    __be16 seqnum;  /* 15-bit */
    u8 tok;
    u8 __reserved2[3];
    __be32 fqid;    /* 24-bit */
    __be32 context_b;
    struct qm_fd fd;
    u8 __reserved4[32];
} __packed __aligned(64);
#define QM_DQRR_VERB_VBIT       0x80
#define QM_DQRR_VERB_MASK       0x7f    /* where the verb contains; */
#define QM_DQRR_VERB_FRAME_DEQUEUE  0x60    /* "this format" */
#define QM_DQRR_STAT_FQ_EMPTY       0x80    /* FQ empty */
#define QM_DQRR_STAT_FQ_HELDACTIVE  0x40    /* FQ held active */
#define QM_DQRR_STAT_FQ_FORCEELIGIBLE   0x20    /* FQ was force-eligible'd */
#define QM_DQRR_STAT_FD_VALID       0x10    /* has a non-NULL FD */
#define QM_DQRR_STAT_UNSCHEDULED    0x02    /* Unscheduled dequeue */
#define QM_DQRR_STAT_DQCR_EXPIRED   0x01    /* VDQCR or PDQCR expired */

/* 'fqid' is a 24-bit field in every h/w descriptor */
#define QM_FQID_MASK    GENMASK(23, 0)
#define qm_fqid_set(p, v) ((p)->fqid = cpu_to_be32((v) & QM_FQID_MASK))
#define qm_fqid_get(p)    (be32_to_cpu((p)->fqid) & QM_FQID_MASK)

/* "ERN Message Response" */
/* "FQ State Change Notification" */
union qm_mr_entry {
    struct {
        u8 verb;
        u8 __reserved[63];
    };
    struct {
        u8 verb;
        u8 dca;
        __be16 seqnum;
        u8 rc;      /* Rej Code: 8-bit */
        u8 __reserved[3];
        __be32 fqid;    /* 24-bit */
        __be32 tag;
        struct qm_fd fd;
        u8 __reserved1[32];
    } __packed __aligned(64) ern;
    struct {
        u8 verb;
        u8 fqs;     /* Frame Queue Status */
        u8 __reserved1[6];
        __be32 fqid;    /* 24-bit */
        __be32 context_b;
        u8 __reserved2[48];
    } __packed fq;      /* FQRN/FQRNI/FQRL/FQPN */
};
#define QM_MR_VERB_VBIT         0x80
/*
 * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
 * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
 * from the other MR types by noting if the 0x20 bit is unset.
 */
#define QM_MR_VERB_TYPE_MASK        0x27
#define QM_MR_VERB_DC_ERN       0x20
#define QM_MR_VERB_FQRN         0x21
#define QM_MR_VERB_FQRNI        0x22
#define QM_MR_VERB_FQRL         0x23
#define QM_MR_VERB_FQPN         0x24
#define QM_MR_RC_MASK           0xf0    /* contains one of; */
#define QM_MR_RC_CGR_TAILDROP       0x00
#define QM_MR_RC_WRED           0x10
#define QM_MR_RC_ERROR          0x20
#define QM_MR_RC_ORPWINDOW_EARLY    0x30
#define QM_MR_RC_ORPWINDOW_LATE     0x40
#define QM_MR_RC_FQ_TAILDROP        0x50
#define QM_MR_RC_ORPWINDOW_RETIRED  0x60
#define QM_MR_RC_ORP_ZERO       0x70
#define QM_MR_FQS_ORLPRESENT        0x02    /* ORL fragments to come */
#define QM_MR_FQS_NOTEMPTY      0x01    /* FQ has enqueued frames */

/*
 * An identical structure of FQD fields is present in the "Init FQ" command and
 * the "Query FQ" result, so it's suctioned out into the "struct qm_fqd" type.
 * Within that, the 'stashing' and 'taildrop' pieces are also factored out; the
 * latter has two inlines to assist with converting to/from the mant+exp
 * representation.
 */
struct qm_fqd_stashing {
    /* See QM_STASHING_EXCL_<...> */
    u8 exclusive;
    /* Numbers of cachelines */
    u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
};

struct qm_fqd_oac {
    /* "Overhead Accounting Control", see QM_OAC_<...> */
    u8 oac; /* oac[6-7], _res[0-5] */
    /* Two's-complement value (-128 to +127) */
    s8 oal; /* "Overhead Accounting Length" */
};

struct qm_fqd {
    /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
    u8 orpc;
    u8 cgid;
    __be16 fq_ctrl; /* See QM_FQCTRL_<...> */
    __be16 dest_wq; /* channel[3-15], wq[0-2] */
    __be16 ics_cred; /* 15-bit */
    /*
     * For "Initialize Frame Queue" commands, the write-enable mask
     * determines whether 'td' or 'oac_init' is observed. For query
     * commands, this field is always 'td', and 'oac_query' (below) reflects
     * the Overhead ACcounting values.
     */
    union {
        __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
        struct qm_fqd_oac oac_init;
    };
    __be32 context_b;
    union {
        /* Treat it as 64-bit opaque */
        __be64 opaque;
        struct {
            __be32 hi;
            __be32 lo;
        };
        /* Treat it as s/w portal stashing config */
        /* see "FQD Context_A field used for [...]" */
        struct {
            struct qm_fqd_stashing stashing;
            /*
             * 48-bit address of FQ context to
             * stash, must be cacheline-aligned
             */
            __be16 context_hi;
            __be32 context_lo;
        } __packed;
    } context_a;
    struct qm_fqd_oac oac_query;
} __packed;

#define QM_FQD_CHAN_OFF     3
#define QM_FQD_WQ_MASK      GENMASK(2, 0)
#define QM_FQD_TD_EXP_MASK  GENMASK(4, 0)
#define QM_FQD_TD_MANT_OFF  5
#define QM_FQD_TD_MANT_MASK GENMASK(12, 5)
#define QM_FQD_TD_MAX       0xe0000000
#define QM_FQD_TD_MANT_MAX  0xff
#define QM_FQD_OAC_OFF      6
#define QM_FQD_AS_OFF       4
#define QM_FQD_DS_OFF       2
#define QM_FQD_XS_MASK      0x3

/* 64-bit converters for context_hi/lo */
static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
{
    return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
{
    return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
}

static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
{
    return qm_fqd_stashing_get64(fqd);
}

static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
{
    fqd->context_a.context_hi = cpu_to_be16(upper_32_bits(addr));
    fqd->context_a.context_lo = cpu_to_be32(lower_32_bits(addr));
}

static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
{
    fqd->context_a.hi = cpu_to_be32(upper_32_bits(addr));
    fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
}

/* convert a threshold value into mant+exp representation */
static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
                      int roundup)
{
    u32 e = 0;
    int td, oddbit = 0;

    if (val > QM_FQD_TD_MAX)
        return -ERANGE;

    while (val > QM_FQD_TD_MANT_MAX) {
        oddbit = val & 1;
        val >>= 1;
        e++;
        if (roundup && oddbit)
            val++;
    }

    td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
    td |= (e & QM_FQD_TD_EXP_MASK);
    fqd->td = cpu_to_be16(td);
    return 0;
}
/* and the other direction */
static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
{
    int td = be16_to_cpu(fqd->td);

    return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
        << (td & QM_FQD_TD_EXP_MASK);
}
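
/*
 * Worked example (illustrative, not part of the original header): a threshold
 * of 300 does not fit in the 8-bit mantissa, so one halving step gives
 * mant = 150, exp = 1, and qm_fqd_get_taildrop() recovers 150 << 1 = 300.
 * A value such as 301 is only approximable: rounded down it also encodes as
 * 150 << 1 = 300, or as 151 << 1 = 302 when 'roundup' is set.
 */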

static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
{
    struct qm_fqd_stashing *st = &fqd->context_a.stashing;

    st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
         ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
         (cs & QM_FQD_XS_MASK);
}

static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
{
    return fqd->context_a.stashing.cl;
}

static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
{
    fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
}

static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
{
    fqd->oac_init.oal = val;
}

static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
{
    fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
                   (wq & QM_FQD_WQ_MASK));
}

static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
{
    return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
}

static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
{
    return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
}

/* See "Frame Queue Descriptor (FQD)" */
/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
#define QM_FQCTRL_MASK      0x07ff  /* 'fq_ctrl' flags; */
#define QM_FQCTRL_CGE       0x0400  /* Congestion Group Enable */
#define QM_FQCTRL_TDE       0x0200  /* Tail-Drop Enable */
#define QM_FQCTRL_CTXASTASHING  0x0080  /* Context-A stashing */
#define QM_FQCTRL_CPCSTASH  0x0040  /* CPC Stash Enable */
#define QM_FQCTRL_FORCESFDR 0x0008  /* High-priority SFDRs */
#define QM_FQCTRL_AVOIDBLOCK    0x0004  /* Don't block active */
#define QM_FQCTRL_HOLDACTIVE    0x0002  /* Hold active in portal */
#define QM_FQCTRL_PREFERINCACHE 0x0001  /* Aggressively cache FQD */
#define QM_FQCTRL_LOCKINCACHE   QM_FQCTRL_PREFERINCACHE /* older naming */

/* See "FQD Context_A field used for [...]" */
/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
#define QM_STASHING_EXCL_ANNOTATION 0x04
#define QM_STASHING_EXCL_DATA       0x02
#define QM_STASHING_EXCL_CTX        0x01

/* See "Intra Class Scheduling" */
/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
#define QM_OAC_ICS      0x2 /* Accounting for Intra-Class Scheduling */
#define QM_OAC_CG       0x1 /* Accounting for Congestion Groups */

/*
 * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
 * and associated commands/responses. The WRED parameters are calculated from
 * these fields as follows;
 *   MaxTH = MA * (2 ^ Mn)
 *   Slope = SA / (2 ^ Sn)
 *    MaxP = 4 * (Pn + 1)
 */
struct qm_cgr_wr_parm {
    /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
    __be32 word;
};
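
/*
 * Example (illustrative sketch, not part of the original header): pulling the
 * fields out of 'word' per the layout above and computing MaxTH.
 *
 *     u32 w = be32_to_cpu(parm->word);
 *     u32 ma = (w >> 24) & 0xff;   // 8-bit mantissa
 *     u32 mn = (w >> 19) & 0x1f;   // 5-bit exponent
 *     u64 max_th = (u64)ma << mn;  // MaxTH = MA * (2 ^ Mn)
 */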
/*
 * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
 * management commands, this is padded to a 16-bit structure field, so that's
 * how we represent it here. The congestion state threshold is calculated from
 * these fields as follows;
 *   CS threshold = TA * (2 ^ Tn)
 */
struct qm_cgr_cs_thres {
    /* _res[13-15], TA[5-12], Tn[0-4] */
    __be16 word;
};
/*
 * This identical structure of CGR fields is present in the "Init/Modify CGR"
 * commands and the "Query CGR" result. It's suctioned out here into its own
 * struct.
 */
struct __qm_mc_cgr {
    struct qm_cgr_wr_parm wr_parm_g;
    struct qm_cgr_wr_parm wr_parm_y;
    struct qm_cgr_wr_parm wr_parm_r;
    u8 wr_en_g; /* boolean, use QM_CGR_EN */
    u8 wr_en_y; /* boolean, use QM_CGR_EN */
    u8 wr_en_r; /* boolean, use QM_CGR_EN */
    u8 cscn_en; /* boolean, use QM_CGR_EN */
    union {
        struct {
            __be16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_* */
            __be16 cscn_targ_dcp_low;
        };
        __be32 cscn_targ;   /* use QM_CGR_TARG_* */
    };
    u8 cstd_en; /* boolean, use QM_CGR_EN */
    u8 cs;      /* boolean, only used in query response */
    struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
    u8 mode;    /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
} __packed;
#define QM_CGR_EN       0x01 /* For wr_en_*, cscn_en, cstd_en */
#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT  0x8000 /* value written to portal bit */
#define QM_CGR_TARG_UDP_CTRL_DCP    0x4000 /* 0: SWP, 1: DCP */
#define QM_CGR_TARG_PORTAL(n)   (0x80000000 >> (n)) /* s/w portal, 0-9 */
#define QM_CGR_TARG_FMAN0   0x00200000 /* direct-connect portal: fman0 */
#define QM_CGR_TARG_FMAN1   0x00100000 /*              : fman1 */
/* Convert CGR thresholds to/from "cs_thres" format */
static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
{
    int thres = be16_to_cpu(th->word);

    return ((thres >> 5) & 0xff) << (thres & 0x1f);
}

static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
                    int roundup)
{
    u32 e = 0;
    int oddbit = 0;

    while (val > 0xff) {
        oddbit = val & 1;
        val >>= 1;
        e++;
        if (roundup && oddbit)
            val++;
    }
    th->word = cpu_to_be16(((val & 0xff) << 5) | (e & 0x1f));
    return 0;
}
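
/*
 * Worked example (illustrative, not part of the original header): a threshold
 * of 1000 halves twice to fit the 8-bit TA, giving TA = 250, Tn = 2, so the
 * stored threshold is exactly 250 * (2 ^ 2) = 1000:
 *
 *     struct qm_cgr_cs_thres th;
 *
 *     qm_cgr_cs_thres_set64(&th, 1000, 0);
 *     // qm_cgr_cs_thres_get64(&th) now returns 1000
 */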

/* "Initialize FQ" */
struct qm_mcc_initfq {
    u8 __reserved1[2];
    __be16 we_mask; /* Write Enable Mask */
    __be32 fqid;    /* 24-bit */
    __be16 count;   /* Initialises 'count+1' FQDs */
    struct qm_fqd fqd; /* the FQD fields go here */
    u8 __reserved2[30];
} __packed;
/* "Initialize/Modify CGR" */
struct qm_mcc_initcgr {
    u8 __reserve1[2];
    __be16 we_mask; /* Write Enable Mask */
    struct __qm_mc_cgr cgr; /* CGR fields */
    u8 __reserved2[2];
    u8 cgid;
    u8 __reserved3[32];
} __packed;

/* INITFQ-specific flags */
#define QM_INITFQ_WE_MASK       0x01ff  /* 'Write Enable' flags; */
#define QM_INITFQ_WE_OAC        0x0100
#define QM_INITFQ_WE_ORPC       0x0080
#define QM_INITFQ_WE_CGID       0x0040
#define QM_INITFQ_WE_FQCTRL     0x0020
#define QM_INITFQ_WE_DESTWQ     0x0010
#define QM_INITFQ_WE_ICSCRED        0x0008
#define QM_INITFQ_WE_TDTHRESH       0x0004
#define QM_INITFQ_WE_CONTEXTB       0x0002
#define QM_INITFQ_WE_CONTEXTA       0x0001
/* INITCGR/MODIFYCGR-specific flags */
#define QM_CGR_WE_MASK          0x07ff  /* 'Write Enable Mask'; */
#define QM_CGR_WE_WR_PARM_G     0x0400
#define QM_CGR_WE_WR_PARM_Y     0x0200
#define QM_CGR_WE_WR_PARM_R     0x0100
#define QM_CGR_WE_WR_EN_G       0x0080
#define QM_CGR_WE_WR_EN_Y       0x0040
#define QM_CGR_WE_WR_EN_R       0x0020
#define QM_CGR_WE_CSCN_EN       0x0010
#define QM_CGR_WE_CSCN_TARG     0x0008
#define QM_CGR_WE_CSTD_EN       0x0004
#define QM_CGR_WE_CS_THRES      0x0002
#define QM_CGR_WE_MODE          0x0001

#define QMAN_CGR_FLAG_USE_INIT       0x00000001
#define QMAN_CGR_MODE_FRAME          0x00000001

    /* Portal and Frame Queues */
/* Represents a managed portal */
struct qman_portal;

/*
 * This object type represents QMan frame queue descriptors (FQD), it is
 * cacheline-aligned, and initialised by qman_create_fq(). The structure is
 * defined further down.
 */
struct qman_fq;

/*
 * This object type represents a QMan congestion group, it is defined further
 * down.
 */
struct qman_cgr;

/*
 * This enum, and the callback type that returns it, are used when handling
 * dequeued frames via DQRR. Note that for "null" callbacks registered with the
 * portal object (for handling dequeues that do not demux because context_b is
 * NULL), the return value *MUST* be qman_cb_dqrr_consume.
 */
enum qman_cb_dqrr_result {
    /* DQRR entry can be consumed */
    qman_cb_dqrr_consume,
    /* Like _consume, but requests parking - FQ must be held-active */
    qman_cb_dqrr_park,
    /* Does not consume, for DCA mode only. */
    qman_cb_dqrr_defer,
    /*
     * Stop processing without consuming this ring entry. Exits the current
     * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
     * an interrupt handler, the callback would typically call
     * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
     * otherwise the interrupt will reassert immediately.
     */
    qman_cb_dqrr_stop,
    /* Like qman_cb_dqrr_stop, but consumes the current entry. */
    qman_cb_dqrr_consume_stop
};
typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
                    struct qman_fq *fq,
                    const struct qm_dqrr_entry *dqrr,
                    bool sched_napi);
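
/*
 * Example (illustrative sketch, not part of the original header): a minimal
 * dequeue callback; process_frame() is hypothetical.
 *
 *     static enum qman_cb_dqrr_result
 *     my_dqrr_cb(struct qman_portal *qm, struct qman_fq *fq,
 *                const struct qm_dqrr_entry *dqrr, bool sched_napi)
 *     {
 *         const struct qm_fd *fd = &dqrr->fd;
 *
 *         process_frame(qm_fd_addr(fd), qm_fd_get_length(fd));
 *         return qman_cb_dqrr_consume;
 *     }
 */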

/*
 * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
 * are always consumed after the callback returns.
 */
typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
               const union qm_mr_entry *msg);

/*
 * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
 * held-active + held-suspended are just "sched". Things like "retired" will not
 * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
 * then, to indicate it's completing and to gate attempts to retry the retire
 * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
 * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
 * index rather than the FQ that ring entry corresponds to), so repeated park
 * commands are allowed (if you're silly enough to try) but won't change FQ
 * state, and the resulting park notifications move FQs from "sched" to
 * "parked".
 */
enum qman_fq_state {
    qman_fq_state_oos,
    qman_fq_state_parked,
    qman_fq_state_sched,
    qman_fq_state_retired
};

#define QMAN_FQ_STATE_CHANGING       0x80000000 /* 'state' is changing */
#define QMAN_FQ_STATE_NE         0x40000000 /* retired FQ isn't empty */
#define QMAN_FQ_STATE_ORL        0x20000000 /* retired FQ has ORL */
#define QMAN_FQ_STATE_BLOCKOOS       0xe0000000 /* if any are set, no OOS */
#define QMAN_FQ_STATE_CGR_EN         0x10000000 /* CGR enabled */
#define QMAN_FQ_STATE_VDQCR      0x08000000 /* being volatile dequeued */

/*
 * Frame queue objects (struct qman_fq) are stored within memory passed to
 * qman_create_fq(), as this allows stashing of caller-provided demux callback
 * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
 * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
 * they should;
 *
 * (a) extend the qman_fq structure with their state; eg.
 *
 *     // myfq is allocated and driver_fq callbacks filled in;
 *     struct my_fq {
 *         struct qman_fq base;
 *         int an_extra_field;
 *         [ ... add other fields to be associated with each FQ ...]
 *     } *myfq = some_my_fq_allocator();
 *     struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
 *
 *     // in a dequeue callback, access extra fields from 'fq' via a cast;
 *     struct my_fq *myfq = (struct my_fq *)fq;
 *     do_something_with(myfq->an_extra_field);
 *     [...]
 *
 * (b) when and if configuring the FQ for context stashing, specify how ever
 *     many cachelines are required to stash 'struct my_fq', to accelerate not
 *     only the QMan driver but the callback as well.
 */
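
/*
 * Example (illustrative sketch, not part of the original header): computing
 * the context-stashing cacheline count for the 'struct my_fq' above, assuming
 * 64-byte cachelines.
 *
 *     int cl = DIV_ROUND_UP(sizeof(struct my_fq), 64);
 *
 *     qm_fqd_set_stashing(&opts.fqd, 0, 0, cl); // no annotation/data stash
 */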

struct qman_fq_cb {
    qman_cb_dqrr dqrr;  /* for dequeued frames */
    qman_cb_mr ern;     /* for s/w ERNs */
    qman_cb_mr fqs;     /* frame-queue state changes */
};

struct qman_fq {
    /* Caller of qman_create_fq() provides these demux callbacks */
    struct qman_fq_cb cb;
    /*
     * These are internal to the driver, don't touch. In particular, they
     * may change, be removed, or extended (so you shouldn't rely on
     * sizeof(qman_fq) being a constant).
     */
    u32 fqid, idx;
    unsigned long flags;
    enum qman_fq_state state;
    int cgr_groupid;
};

/*
 * This callback type is used when handling congestion group entry/exit.
 * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
 */
typedef void (*qman_cb_cgr)(struct qman_portal *qm,
                struct qman_cgr *cgr, int congested);

struct qman_cgr {
    /* Set these prior to qman_create_cgr() */
    u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc. */
    qman_cb_cgr cb;
    /* These are private to the driver */
    u16 chan; /* portal channel this object is created on */
    struct list_head node;
};

/* Flags to qman_create_fq() */
#define QMAN_FQ_FLAG_NO_ENQUEUE      0x00000001 /* can't enqueue */
#define QMAN_FQ_FLAG_NO_MODIFY       0x00000002 /* can only enqueue */
#define QMAN_FQ_FLAG_TO_DCPORTAL     0x00000004 /* consumed by CAAM/PME/Fman */
#define QMAN_FQ_FLAG_DYNAMIC_FQID    0x00000020 /* (de)allocate fqid */

/* Flags to qman_init_fq() */
#define QMAN_INITFQ_FLAG_SCHED       0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL       0x00000004 /* set dest portal */

/*
 * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
 * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
 * FQID(n) to fill in the frame queue ID.
 */
#define QM_VDQCR_PRECEDENCE_VDQCR   0x0
#define QM_VDQCR_PRECEDENCE_SDQCR   0x80000000
#define QM_VDQCR_EXACT          0x40000000
#define QM_VDQCR_NUMFRAMES_MASK     0x3f000000
#define QM_VDQCR_NUMFRAMES_SET(n)   (((n) & 0x3f) << 24)
#define QM_VDQCR_NUMFRAMES_GET(n)   (((n) >> 24) & 0x3f)
#define QM_VDQCR_NUMFRAMES_TILLEMPTY    QM_VDQCR_NUMFRAMES_SET(0)

#define QMAN_VOLATILE_FLAG_WAIT      0x00000001 /* wait if VDQCR is in use */
#define QMAN_VOLATILE_FLAG_WAIT_INT  0x00000002 /* if wait, interruptible? */
#define QMAN_VOLATILE_FLAG_FINISH    0x00000004 /* wait till VDQCR completes */

/* "Query FQ Non-Programmable Fields" */
struct qm_mcr_queryfq_np {
    u8 verb;
    u8 result;
    u8 __reserved1;
    u8 state;       /* QM_MCR_NP_STATE_*** */
    u32 fqd_link;       /* 24-bit, _res2[24-31] */
    u16 odp_seq;        /* 14-bit, _res3[14-15] */
    u16 orp_nesn;       /* 14-bit, _res4[14-15] */
    u16 orp_ea_hseq;    /* 15-bit, _res5[15] */
    u16 orp_ea_tseq;    /* 15-bit, _res6[15] */
    u32 orp_ea_hptr;    /* 24-bit, _res7[24-31] */
    u32 orp_ea_tptr;    /* 24-bit, _res8[24-31] */
    u32 pfdr_hptr;      /* 24-bit, _res9[24-31] */
    u32 pfdr_tptr;      /* 24-bit, _res10[24-31] */
    u8 __reserved2[5];
    u8 is;          /* 1-bit, _res12[1-7] */
    u16 ics_surp;
    u32 byte_cnt;
    u32 frm_cnt;        /* 24-bit, _res13[24-31] */
    u32 __reserved3;
    u16 ra1_sfdr;       /* QM_MCR_NP_RA1_*** */
    u16 ra2_sfdr;       /* QM_MCR_NP_RA2_*** */
    u16 __reserved4;
    u16 od1_sfdr;       /* QM_MCR_NP_OD1_*** */
    u16 od2_sfdr;       /* QM_MCR_NP_OD2_*** */
    u16 od3_sfdr;       /* QM_MCR_NP_OD3_*** */
} __packed;

#define QM_MCR_NP_STATE_FE      0x10
#define QM_MCR_NP_STATE_R       0x08
#define QM_MCR_NP_STATE_MASK        0x07    /* Reads FQD::STATE; */
#define QM_MCR_NP_STATE_OOS     0x00
#define QM_MCR_NP_STATE_RETIRED     0x01
#define QM_MCR_NP_STATE_TEN_SCHED   0x02
#define QM_MCR_NP_STATE_TRU_SCHED   0x03
#define QM_MCR_NP_STATE_PARKED      0x04
#define QM_MCR_NP_STATE_ACTIVE      0x05
#define QM_MCR_NP_PTR_MASK      0x07ff  /* for RA[12] & OD[123] */
#define QM_MCR_NP_RA1_NRA(v)        (((v) >> 14) & 0x3) /* FQD::NRA */
#define QM_MCR_NP_RA2_IT(v)     (((v) >> 14) & 0x1) /* FQD::IT */
#define QM_MCR_NP_OD1_NOD(v)        (((v) >> 14) & 0x3) /* FQD::NOD */
#define QM_MCR_NP_OD3_NPC(v)        (((v) >> 14) & 0x3) /* FQD::NPC */

enum qm_mcr_queryfq_np_masks {
    qm_mcr_fqd_link_mask = BIT(24) - 1,
    qm_mcr_odp_seq_mask = BIT(14) - 1,
    qm_mcr_orp_nesn_mask = BIT(14) - 1,
    qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
    qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
    qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
    qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
    qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
    qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
    qm_mcr_is_mask = BIT(1) - 1,
    qm_mcr_frm_cnt_mask = BIT(24) - 1,
};

#define qm_mcr_np_get(np, field) \
    ((np)->field & (qm_mcr_##field##_mask))

    /* Portal Management */
/**
 * qman_p_irqsource_add - add processing sources to be interrupt-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Adds processing sources that should be interrupt-driven (rather than
 * processed via qman_poll_***() functions).
 */
void qman_p_irqsource_add(struct qman_portal *p, u32 bits);

/**
 * qman_p_irqsource_remove - remove processing sources from being int-driven
 * @bits: bitmask of QM_PIRQ_**I processing sources
 *
 * Removes processing sources from being interrupt-driven, so that they will
 * instead be processed via qman_poll_***() functions.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);

/**
 * qman_affine_cpus - return a mask of cpus that have affine portals
 */
const cpumask_t *qman_affine_cpus(void);

/**
 * qman_affine_channel - return the channel ID of a portal
 * @cpu: the cpu whose affine portal is the subject of the query
 *
 * If @cpu is -1, the affine portal for the current CPU will be used. It is a
 * bug to call this function for any value of @cpu (other than -1) that is not a
 * member of the mask returned from qman_affine_cpus().
 */
u16 qman_affine_channel(int cpu);

/**
 * qman_get_affine_portal - return the portal pointer affine to cpu
 * @cpu: the cpu whose affine portal is the subject of the query
 */
struct qman_portal *qman_get_affine_portal(int cpu);

/**
 * qman_start_using_portal - register a device link for the portal user
 * @p: the portal that will be in use
 * @dev: the device that will use the portal
 *
 * Makes sure that the devices that use the portal are unbound when the
 * portal is unbound
 */
int qman_start_using_portal(struct qman_portal *p, struct device *dev);

/**
 * qman_p_poll_dqrr - process DQRR (fast-path) entries
 * @limit: the maximum number of DQRR entries to process
 *
 * Use of this function requires that DQRR processing not be interrupt-driven.
 * The return value represents the number of DQRR entries processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);

/**
 * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
 * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
 *
 * Adds a set of pool channels to the portal's static dequeue command register
 * (SDQCR). The requested pools are limited to those the portal has dequeue
 * access to.
 */
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
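
/*
 * Example (illustrative sketch, not part of the original header): subscribing
 * the current cpu's portal to a pool channel, converting the channel number
 * with QM_SDQCR_CHANNELS_POOL_CONV(); 'chan' is a hypothetical pool-channel
 * ID from qman_alloc_pool() (declared further down).
 *
 *     struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
 *
 *     qman_p_static_dequeue_add(p, QM_SDQCR_CHANNELS_POOL_CONV(chan));
 */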

    /* FQ management */
/**
 * qman_create_fq - Allocates a FQ
 * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
 * @flags: bit-mask of QMAN_FQ_FLAG_*** options
 * @fq: memory for storing the 'fq', with callbacks filled in
 *
 * Creates a frame queue object for the given @fqid, unless the
 * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
 * dynamically allocated (or the function fails if none are available). Once
 * created, the caller should not touch the memory at 'fq' except as extended to
 * adjacent memory for user-defined fields (see the definition of "struct
 * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
 * pre-existing frame-queues that aren't to be otherwise interfered with; it
 * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
 * causes the driver to honour any context_b modifications requested in the
 * qm_init_fq() API, as this indicates the frame queue will be consumed by a
 * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
 * software portals, the context_b field is controlled by the driver and can't
 * be modified by the caller.
 */
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);

/**
 * qman_destroy_fq - Deallocates a FQ
 * @fq: the frame queue object to release
 *
 * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
 * not deallocated but the caller regains ownership, to do with as desired. The
 * FQ must be in the 'out-of-service' or in the 'parked' state.
 */
void qman_destroy_fq(struct qman_fq *fq);

/**
 * qman_fq_fqid - Queries the frame queue ID of a FQ object
 * @fq: the frame queue object to query
 */
u32 qman_fq_fqid(struct qman_fq *fq);

/**
 * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
 * @fq: the frame queue object to modify, must be 'parked' or new.
 * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
 * @opts: the FQ-modification settings, as defined in the low-level API
 *
 * The @opts parameter comes from the low-level portal API. Select
 * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
 * rather than parked. NB, @opts can be NULL.
 *
 * Note that some fields and options within @opts may be ignored or overwritten
 * by the driver;
 * 1. the 'count' and 'fqid' fields are always ignored (this operation only
 * affects one frame queue: @fq).
 * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
 * 'fqd' structure's 'context_b' field are sometimes overwritten;
 *   - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
 *     initialised to a value used by the driver for demux.
 *   - if context_b is initialised for demux, so is context_a in case stashing
 *     is requested (see item 4).
 * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
 * objects.)
 * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
 * 'dest::channel' field will be overwritten to match the portal used to issue
 * the command. If the WE_DESTWQ write-enable bit had already been set by the
 * caller, the channel workqueue will be left as-is, otherwise the write-enable
 * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
 * isn't set, the destination channel/workqueue fields and the write-enable bit
 * are left as-is.
 * 4. if the driver overwrites context_a/b for demux, then if
 * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
 * context_a.address fields and will leave the stashing fields provided by the
 * user alone, otherwise it will zero out the context_a.stashing fields.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
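
/*
 * Example (illustrative sketch, not part of the original header): creating a
 * dynamically-allocated FQ, pointing it at a pool channel with tail-drop
 * enabled, and scheduling it; 'chan' and the threshold are hypothetical.
 *
 *     struct qm_mcc_initfq opts = {};
 *     struct qman_fq *fq = &myfq->base;   // see 'struct my_fq' above
 *
 *     err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
 *     opts.we_mask = cpu_to_be16(QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
 *                    QM_INITFQ_WE_TDTHRESH);
 *     qm_fqd_set_destwq(&opts.fqd, chan, 3);
 *     opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
 *     qm_fqd_set_taildrop(&opts.fqd, 0x10000, 1);
 *     err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
 */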

/**
 * qman_schedule_fq - Schedules a FQ
 * @fq: the frame queue object to schedule, must be 'parked'
 *
 * Schedules the frame queue, which must be Parked, which takes it to
 * Tentatively-Scheduled or Truly-Scheduled depending on its fill-level.
 */
int qman_schedule_fq(struct qman_fq *fq);

/**
 * qman_retire_fq - Retires a FQ
 * @fq: the frame queue object to retire
 * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
 *
 * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
 * the retirement was started asynchronously, otherwise it returns negative for
 * failure. When this function returns zero, @flags is set to indicate whether
 * the retired FQ is empty and/or whether it has any ORL fragments (to show up
 * as ERNs). Otherwise the corresponding flags will be known when a subsequent
 * FQRN message shows up on the portal's message ring.
 *
 * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
 * Active state), the completion will be via the message ring as a FQRN - but
 * the corresponding callback may occur before this function returns!! Ie. the
 * caller should be prepared to accept the callback as the function is called,
 * not only once it has returned.
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags);

/**
 * qman_oos_fq - Puts a FQ "out of service"
 * @fq: the frame queue object to be put out-of-service, must be 'retired'
 *
 * The frame queue must be retired and empty, and if any order restoration list
 * was released as ERNs at the time of retirement, they must all be consumed.
 */
int qman_oos_fq(struct qman_fq *fq);

/*
 * qman_volatile_dequeue - Issue a volatile dequeue command
 * @fq: the frame queue object to dequeue from
 * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
 * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
 *
 * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
 * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
 * the VDQCR is already in use, otherwise returns non-zero for failure. If
 * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
 * the VDQCR command has finished executing (ie. once the callback for the last
 * DQRR entry resulting from the VDQCR command has been called). If not using
 * the FINISH flag, completion can be determined either by detecting the
 * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
 * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
 * for the QMAN_FQ_STATE_VDQCR bit to disappear.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
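
/*
 * Example (illustrative sketch, not part of the original header): draining up
 * to three frames from a parked/retired FQ, blocking until the command
 * completes.
 *
 *     u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_SET(3);
 *
 *     err = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
 *                     QMAN_VOLATILE_FLAG_FINISH, vdqcr);
 */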

/**
 * qman_enqueue - Enqueue a frame to a frame queue
 * @fq: the frame queue object to enqueue to
 * @fd: a descriptor of the frame to be enqueued
 *
 * Fills an entry in the EQCR of the local (affine) portal to enqueue the frame
 * described by @fd. The descriptor details are copied from @fd to the EQCR
 * entry, the 'pid' field is ignored. The return value is non-zero on error,
 * such as ring full.
 */
int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);

/**
 * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
 * @result: is set by the API to the base FQID of the allocated range
 * @count: the number of FQIDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_fqid_range(u32 *result, u32 count);
#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)

/**
 * qman_release_fqid - Release the specified frame queue ID
 * @fqid: the FQID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * FQID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_fqid(u32 fqid);

/**
 * qman_query_fq_np - Queries non-programmable FQD fields
 * @fq: the frame queue object to be queried
 * @np: storage for the queried FQD fields
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
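
/*
 * Example (illustrative sketch, not part of the original header): reading the
 * current frame count of a FQ via the non-programmable fields, masking the
 * 24-bit field with the qm_mcr_np_get() helper defined above.
 *
 *     struct qm_mcr_queryfq_np np;
 *
 *     err = qman_query_fq_np(fq, &np);
 *     if (!err)
 *         pr_info("frames on fq: %u\n", qm_mcr_np_get(&np, frm_cnt));
 */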

    /* Pool-channel management */
/**
 * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
 * @result: is set by the API to the base pool-channel ID of the allocated range
 * @count: the number of pool-channel IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_pool_range(u32 *result, u32 count);
#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)

/**
 * qman_release_pool - Release the specified pool-channel ID
 * @id: the pool-chan ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * pool-channel ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_pool(u32 id);

    /* CGR management */
/**
 * qman_create_cgr - Register a congestion group object
 * @cgr: the 'cgr' object, with fields filled in
 * @flags: QMAN_CGR_FLAG_* values
 * @opts: optional state of CGR settings
 *
 * Registers this object to receive congestion entry/exit callbacks on the
 * portal affine to the cpu on which this API is executed. If opts is
 * NULL then only the callback (cgr->cb) function is registered. If @flags
 * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
 * any unspecified parameters) will be used rather than a modify hw command
 * (which only modifies the specified parameters).
 */
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
            struct qm_mcc_initcgr *opts);
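
/*
 * Example (illustrative sketch, not part of the original header): registering
 * a CGR with congestion-state notifications and a threshold of roughly 1 MB;
 * 'my_cgrid' and 'my_cgr_cb' are hypothetical.
 *
 *     struct qm_mcc_initcgr opts = {};
 *     struct qman_cgr cgr = { .cgrid = my_cgrid, .cb = my_cgr_cb };
 *
 *     opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
 *     opts.cgr.cscn_en = QM_CGR_EN;
 *     qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 1 << 20, 1);
 *     err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
 */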

/**
 * qman_delete_cgr - Deregisters a congestion group object
 * @cgr: the 'cgr' object to deregister
 *
 * "Unplugs" this CGR object from the portal affine to the cpu on which this API
 * is executed. This must be executed on the same affine portal on which it was
 * created.
 */
int qman_delete_cgr(struct qman_cgr *cgr);

/**
 * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
 * @cgr: the 'cgr' object to deregister
 *
 * This will select the proper CPU and run qman_delete_cgr() there.
 */
void qman_delete_cgr_safe(struct qman_cgr *cgr);

/**
 * qman_query_cgr_congested - Queries CGR's congestion status
 * @cgr: the 'cgr' object to query
 * @result: returns 'cgr's congestion status, 1 (true) if congested
 */
int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);

/**
 * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
 * @result: is set by the API to the base CGR ID of the allocated range
 * @count: the number of CGR IDs required
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_alloc_cgrid_range(u32 *result, u32 count);
#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)

/**
 * qman_release_cgrid - Release the specified CGR ID
 * @id: the CGR ID to be released back to the resource pool
 *
 * This function can also be used to seed the allocator with
 * CGR ID ranges that it can subsequently allocate from.
 * Returns 0 on success, or a negative error code.
 */
int qman_release_cgrid(u32 id);

/**
 * qman_is_probed - Check if qman is probed
 *
 * Returns 1 if the qman driver successfully probed, -1 if the qman driver
 * failed to probe or 0 if the qman driver has not probed yet.
 */
int qman_is_probed(void);

/**
 * qman_portals_probed - Check if all cpu bound qman portals are probed
 *
 * Returns 1 if all the required cpu bound qman portals successfully probed,
 * -1 if probe errors appeared or 0 if the qman portals have not yet finished
 * probing.
 */
int qman_portals_probed(void);

/**
 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
 * @portal: portal to get the value for
 * @ithresh: threshold pointer
 */
void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);

/**
 * qman_dqrr_set_ithresh - Set coalesce interrupt threshold
 * @portal: portal to set the new value on
 * @ithresh: new threshold value
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);

/**
 * qman_portal_get_iperiod - Get coalesce interrupt period
 * @portal: portal to get the value for
 * @iperiod: period pointer
 */
void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);

/**
 * qman_portal_set_iperiod - Set coalesce interrupt period
 * @portal: portal to set the new value on
 * @iperiod: new period value
 *
 * Returns 0 on success, or a negative error code.
 */
int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);

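/*
 * Example (illustrative sketch, not part of the original header): batching
 * DQRR interrupts by raising the coalescing threshold and period; the values
 * are hypothetical and depend on latency/throughput requirements.
 *
 *     struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
 *
 *     qman_dqrr_set_ithresh(p, 8);      // fire after 8 entries...
 *     qman_portal_set_iperiod(p, 4096); // ...or once this period elapses
 *                                       // (units are hardware-defined)
 */
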
#endif  /* __FSL_QMAN_H */