/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

#define QMAN_REV_4000   0x04000000
#define QMAN_REV_4100   0x04010000
#define QMAN_REV_4101   0x04010001
#define QMAN_REV_5000   0x05000000

#define QMAN_REV_MASK   0xffff0000

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
    void *cena_bar; /* Cache-enabled portal base address */
    void __iomem *cinh_bar; /* Cache-inhibited portal base address */
    u32 qman_version;
    u32 qman_clk;
    u32 qman_256_cycles_per_ns;
};

#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
    u8 verb;
    u8 numf;
    u8 tok;
    u8 reserved;
    __le32 dq_src;
    __le64 rsp_addr;
    u64 rsp_addr_virt;
    u8 padding[40];
};

enum qbman_pull_type_e {
    /* dequeue with priority precedence, respect intra-class scheduling */
    qbman_pull_type_prio = 1,
    /* dequeue with active FQ precedence, respect ICS */
    qbman_pull_type_active,
    /* dequeue with active FQ precedence, no ICS */
    qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK      0x7f
#define QBMAN_RESULT_DQ        0x60
#define QBMAN_RESULT_FQRN      0x21
#define QBMAN_RESULT_FQRNI     0x22
#define QBMAN_RESULT_FQPN      0x24
#define QBMAN_RESULT_FQDAN     0x25
#define QBMAN_RESULT_CDAN      0x26
#define QBMAN_RESULT_CSCN_MEM  0x27
#define QBMAN_RESULT_CGCU      0x28
#define QBMAN_RESULT_BPSCN     0x29
#define QBMAN_RESULT_CSCN_WQ   0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE   0x48
#define QBMAN_FQ_FORCE      0x49
#define QBMAN_FQ_XON        0x4d
#define QBMAN_FQ_XOFF       0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
    u8 verb;
    u8 dca;
    __le16 seqnum;
    __le16 orpid;
    __le16 reserved1;
    __le32 tgtid;
    __le32 tag;
    __le16 qdbin;
    u8 qpri;
    u8 reserved[3];
    u8 wae;
    u8 rspid;
    __le64 rsp_addr;
};

struct qbman_eq_desc_with_fd {
    struct qbman_eq_desc desc;
    u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
    u8 verb;
    u8 reserved;
    __le16 bpid;
    __le32 reserved2;
    __le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK      0xf0

#define CODE_CDAN_WE_EN    0x1
#define CODE_CDAN_WE_CTX   0x4

/* portal data structure */
struct qbman_swp {
    const struct qbman_swp_desc *desc;
    void *addr_cena;
    void __iomem *addr_cinh;

    /* Management commands */
    struct {
        u32 valid_bit; /* 0x00 or 0x80 */
    } mc;

    /* Management response */
    struct {
        u32 valid_bit; /* 0x00 or 0x80 */
    } mr;

    /* Push dequeues */
    u32 sdq;

    /* Volatile dequeues */
    struct {
        atomic_t available; /* indicates if a command can be sent */
        u32 valid_bit; /* 0x00 or 0x80 */
        struct dpaa2_dq *storage; /* NULL if DQRR */
    } vdq;

    /* DQRR */
    struct {
        u32 next_idx;
        u32 valid_bit;
        u8 dqrr_size;
        int reset_bug; /* indicates dqrr reset workaround is needed */
    } dqrr;

    struct {
        u32 pi;
        u32 pi_vb;
        u32 pi_ring_size;
        u32 pi_ci_mask;
        u32 ci;
        int available;
        u32 pend;
        u32 no_pfdr;
    } eqcr;

    spinlock_t access_spinlock;

    /* Interrupt coalescing */
    u32 irq_threshold;
    u32 irq_holdoff;
    int use_adaptive_rx_coalesce;
};

/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
                 const struct qbman_eq_desc *d,
                 const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                      const struct qbman_eq_desc *d,
                      const struct dpaa2_fd *fd,
                      uint32_t *flags,
                      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                       const struct qbman_eq_desc *d,
                       const struct dpaa2_fd *fd,
                       int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                 const struct qbman_release_desc *d,
                 const u64 *buffers,
                 unsigned int num_buffers);

/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);

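/*
 * Illustrative sketch only (not part of the mainline header): a typical
 * interrupt service path reads the portal status, acknowledges it, and
 * inhibits the portal until the deferred dequeue work has run. The helper
 * name and the "return 1 if work was found" convention are hypothetical.
 */
static inline int qbman_example_service_portal_irq(struct qbman_swp *p)
{
    u32 status = qbman_swp_interrupt_read_status(p);

    if (!status)
        return 0;       /* nothing pending on this portal */

    /* Acknowledge what was seen and mask the portal until it is serviced */
    qbman_swp_interrupt_clear_status(p, status);
    qbman_swp_interrupt_set_inhibit(p, 1);

    /* ... defer DQRR/EQCR servicing, then qbman_swp_interrupt_set_inhibit(p, 0) ... */

    return 1;
}
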
void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                 struct dpaa2_dq *storage,
                 dma_addr_t storage_phys,
                 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
                enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
                 enum qbman_pull_type_e dct);

void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
              u32 qd_bin, u32 qd_prio);


void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
              unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
               u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
               u8 we_mask, u8 cdan_en,
               u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
          const struct dpaa2_fd *fd)
{
    return qbman_swp_enqueue_ptr(s, d, fd);
}

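/*
 * Illustrative sketch only (not part of the mainline header): build a minimal
 * enqueue descriptor targeting a frame queue and retry while the EQCR is
 * busy. The helper name, the bounded retry count and the choice of not
 * requesting an enqueue response are hypothetical; fqid and fd come from the
 * caller.
 */
static inline int qbman_example_enqueue_fq(struct qbman_swp *s, u32 fqid,
                                           const struct dpaa2_fd *fd)
{
    struct qbman_eq_desc ed;
    unsigned int retries = 1000;
    int ret;

    qbman_eq_desc_clear(&ed);
    qbman_eq_desc_set_no_orp(&ed, 0);   /* no order restoration, no response */
    qbman_eq_desc_set_fq(&ed, fqid);

    do {
        ret = qbman_swp_enqueue(s, &ed, fd);
    } while (ret == -EBUSY && --retries);

    return ret;
}
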
/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
               const struct qbman_eq_desc *d,
               const struct dpaa2_fd *fd,
               uint32_t *flags,
               int num_frames)
{
    return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}

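/*
 * Illustrative sketch only (not part of the mainline header): enqueue a table
 * of frames with one descriptor, resubmitting the tail when fewer frames than
 * requested were accepted. The helper name and back-off policy are
 * hypothetical.
 */
static inline int qbman_example_enqueue_many(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd_tbl,
                                             int num_frames)
{
    int done = 0;

    while (done < num_frames) {
        int ret = qbman_swp_enqueue_multiple(s, d, fd_tbl + done,
                                             NULL, num_frames - done);
        if (ret < 0)
            return ret;     /* hard error from the portal */
        if (ret == 0)
            break;          /* EQCR full; let the caller back off */
        done += ret;
    }

    return done;
}
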
/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
                const struct qbman_eq_desc *d,
                const struct dpaa2_fd *fd,
                int num_frames)
{
    return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - check if the dequeue result is a notification
 * @dq: the dequeue result to be checked
 *
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
    return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
    return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
    return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
    return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
    return le64_to_cpu(scn->scn.ctx);
}

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this moves it to the scheduled state.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
    return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s:    the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
    return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
    return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s:    the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
    return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s:         the software portal object
 * @channelid: the channel index
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
                         u64 ctx)
{
    return qbman_swp_CDAN_set(s, channelid,
                  CODE_CDAN_WE_CTX,
                  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
    return qbman_swp_CDAN_set(s, channelid,
                  CODE_CDAN_WE_EN,
                  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
    return qbman_swp_CDAN_set(s, channelid,
                  CODE_CDAN_WE_EN,
                  0, 0);
}

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s:         the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx:       the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
                            u16 channelid,
                            u64 ctx)
{
    return qbman_swp_CDAN_set(s, channelid,
                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                  1, ctx);
}

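/*
 * Illustrative sketch only (not part of the mainline header): CDANs are
 * one-shot, so after reacting to a notification (typically by issuing a pull
 * dequeue on the channel, see qbman_swp_pull() below) the notification must
 * be re-armed. The helper name and the ctx value are hypothetical.
 */
static inline int qbman_example_rearm_cdan(struct qbman_swp *s, u16 channelid,
                                           u64 ctx)
{
    /*
     * ... channel has been drained in reaction to the CDAN at this point ...
     *
     * Re-arm: update the context and re-enable in a single command, so the
     * next CDAN message carries the new ctx value.
     */
    return qbman_swp_CDAN_set_context_enable(s, channelid, ctx);
}
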
/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
                      u8 cmd_verb)
{
    int loopvar = 2000;

    qbman_swp_mc_submit(swp, cmd, cmd_verb);

    do {
        cmd = qbman_swp_mc_result(swp);
    } while (!cmd && loopvar--);

    WARN_ON(!loopvar);

    return cmd;
}

/* Query APIs */
struct qbman_fq_query_np_rslt {
    u8 verb;
    u8 rslt;
    u8 st1;
    u8 st2;
    u8 reserved[2];
    __le16 od1_sfdr;
    __le16 od2_sfdr;
    __le16 od3_sfdr;
    __le16 ra1_sfdr;
    __le16 ra2_sfdr;
    __le32 pfdr_hptr;
    __le32 pfdr_tptr;
    __le32 frm_cnt;
    __le32 byte_cnt;
    __le16 ics_surp;
    u8 is;
    u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
             struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);

struct qbman_bp_query_rslt {
    u8 verb;
    u8 rslt;
    u8 reserved[4];
    u8 bdi;
    u8 state;
    __le32 fill;
    __le32 hdotr;
    __le16 swdet;
    __le16 swdxt;
    __le16 hwdet;
    __le16 hwdxt;
    __le16 swset;
    __le16 swsxt;
    __le16 vbpid;
    __le16 icid;
    __le64 bpscn_addr;
    __le64 bpscn_ctx;
    __le16 hw_targ;
    u8 dbe;
    u8 reserved2;
    u8 sdcnt;
    u8 hdcnt;
    u8 sscnt;
    u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
           struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);

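/*
 * Illustrative sketch only (not part of the mainline header): query a buffer
 * pool and read back the current number of free buffers. The helper name is
 * hypothetical.
 */
static inline int qbman_example_bp_free_bufs(struct qbman_swp *s, u16 bpid,
                                             u32 *num_free)
{
    struct qbman_bp_query_rslt rslt;
    int ret;

    ret = qbman_bp_query(s, bpid, &rslt);
    if (ret)
        return ret;

    *num_free = qbman_bp_info_num_free_bufs(&rslt);
    return 0;
}
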
/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
                    const struct qbman_release_desc *d,
                    const u64 *buffers,
                    unsigned int num_buffers)
{
    return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}

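/*
 * Illustrative sketch only (not part of the mainline header): seed a buffer
 * pool with up to 7 buffer addresses, retrying while the release command ring
 * is busy. The helper name and the bounded retry are hypothetical.
 */
static inline int qbman_example_seed_pool(struct qbman_swp *s, u16 bpid,
                                          const u64 *buffers,
                                          unsigned int num_buffers)
{
    struct qbman_release_desc rd;
    unsigned int retries = 1000;
    int ret;

    qbman_release_desc_clear(&rd);
    qbman_release_desc_set_bpid(&rd, bpid);

    do {
        ret = qbman_swp_release(s, &rd, buffers, num_buffers);
    } while (ret == -EBUSY && --retries);

    return ret;
}
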
/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the pull dequeue descriptor, configured with the
 *     qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
                 struct qbman_pull_desc *d)
{
    return qbman_swp_pull_ptr(s, d);
}

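/*
 * Illustrative sketch only (not part of the mainline header): configure and
 * issue a volatile dequeue of up to 16 frames from a frame queue into
 * caller-provided storage. The helper name is hypothetical; storage and
 * storage_phys must stay valid until all responses have been consumed.
 */
static inline int qbman_example_pull_fq(struct qbman_swp *s, u32 fqid,
                                        struct dpaa2_dq *storage,
                                        dma_addr_t storage_phys)
{
    struct qbman_pull_desc pd;

    qbman_pull_desc_clear(&pd);
    qbman_pull_desc_set_numframes(&pd, 16);
    qbman_pull_desc_set_fq(&pd, fqid);
    qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);

    return qbman_swp_pull(s, &pd);  /* -EBUSY if a previous pull is pending */
}
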
/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
    return qbman_swp_dqrr_next_ptr(s);
}

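/*
 * Illustrative sketch only (not part of the mainline header): drain the DQRR,
 * handing dequeue responses to a caller-supplied callback and skipping (but
 * still consuming) notification entries. The helper name and callback type
 * are hypothetical.
 */
static inline int qbman_example_drain_dqrr(struct qbman_swp *s,
                                           void (*handle_dq)(const struct dpaa2_dq *dq))
{
    const struct dpaa2_dq *dq;
    int cleaned = 0;

    while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
        if (qbman_result_is_DQ(dq))
            handle_dq(dq);          /* frame dequeue response */
        /* else: a notification (FQDAN/CDAN/CSCN/...), handled elsewhere */

        qbman_swp_dqrr_consume(s, dq);  /* hand the entry back to hardware */
        cleaned++;
    }

    return cleaned;
}
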
int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
                 u32 irq_holdoff);

void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
                  u32 *irq_holdoff);

#endif /* __FSL_QBMAN_PORTAL_H */