#include "dpaa_sys.h"

#include <soc/fsl/qman.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

#if defined(CONFIG_FSL_PAMU)
#include <asm/fsl_pamu_stash.h>
#endif

/* "Query Work Queue" management command result */
struct qm_mcr_querywq {
	u8 verb;
	u8 result;
	u16 channel_wq;		/* channel ID above the low 3 bits, wq in the low 3 bits */
	u8 __reserved[28];
	u32 wq_len[8];		/* lengths of the channel's 8 work queues */
} __packed;

static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
{
	return wq->channel_wq >> 3;
}
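
/*
 * Illustrative sketch, not part of the original header: channel_wq packs
 * the channel ID above the low three bits, so the work-queue selector can
 * be recovered with a simple mask, mirroring the accessor above. The
 * helper name is hypothetical.
 */
static inline u8 qm_mcr_querywq_get_wq_example(const struct qm_mcr_querywq *wq)
{
	return wq->channel_wq & 0x7;	/* low 3 bits: work queue 0..7 */
}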

/* Congestion-group state: one bit per CGR, packed into eight 32-bit words */
struct __qm_mcr_querycongestion {
	u32 state[8];
};

/* "Query Congestion Group State" management command result */
struct qm_mcr_querycongestion {
	u8 verb;
	u8 result;
	u8 __reserved[30];
	/* Access this field using qman_cgrs_get() */
	struct __qm_mcr_querycongestion state;
} __packed;

/* "Query CGR" management command result */
struct qm_mcr_querycgr {
	u8 verb;
	u8 result;
	u16 __reserved1;
	struct __qm_mc_cgr cgr;		/* CGR fields */
	u8 __reserved2[6];
	u8 i_bcnt_hi;			/* high 8 bits of 40-bit instantaneous count */
	__be32 i_bcnt_lo;		/* low 32 bits of 40-bit instantaneous count */
	u8 __reserved3[3];
	u8 a_bcnt_hi;			/* high 8 bits of 40-bit average count */
	__be32 a_bcnt_lo;		/* low 32 bits of 40-bit average count */
	__be32 cscn_targ_swp[4];	/* CSCN target software portals */
} __packed;

static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
{
	return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo);
}

static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
{
	return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo);
}
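
/*
 * Illustrative sketch, not part of the original header: a congestion check
 * built on the 40-bit accessors above. The function name and threshold
 * parameter are hypothetical.
 */
static inline bool qm_mcr_querycgr_above_example(const struct qm_mcr_querycgr *q,
						 u64 threshold)
{
	/* instantaneous count assembled from the i_bcnt_hi:i_bcnt_lo pair */
	return qm_mcr_querycgr_i_get64(q) > threshold;
}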

/*
 * This wrapper represents a bit-array for the state of the 256 QMan
 * congestion groups. It is also used as a *mask* of congestion groups,
 * e.g. so we can ignore those that don't concern us. It reuses the
 * structure and layout already used by the management command that
 * queries congestion state.
 */
#define CGR_BITS_PER_WORD 5	/* log2 of bits per 32-bit state word */
#define CGR_WORD(x)	((x) >> CGR_BITS_PER_WORD)
#define CGR_BIT(x)	(BIT(31) >> ((x) & 0x1f))
#define CGR_NUM	(sizeof(struct __qm_mcr_querycongestion) << 3)

struct qman_cgrs {
	struct __qm_mcr_querycongestion q;
};

static inline void qman_cgrs_init(struct qman_cgrs *c)
{
	memset(c, 0, sizeof(*c));
}

static inline void qman_cgrs_fill(struct qman_cgrs *c)
{
	memset(c, 0xff, sizeof(*c));
}

static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
{
	return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
}
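
/*
 * Illustrative sketch, not part of the original header: setting a bit in
 * the congestion-group mask mirrors qman_cgrs_get() above. CGR_WORD()
 * picks one of the eight 32-bit words and CGR_BIT() the bit within it,
 * numbered from the most significant bit down. The helper name is
 * hypothetical.
 */
static inline void qman_cgrs_set_example(struct qman_cgrs *c, u8 cgr)
{
	c->q.state[CGR_WORD(cgr)] |= CGR_BIT(cgr);
}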

static inline void qman_cgrs_cp(struct qman_cgrs *dest,
				const struct qman_cgrs *src)
{
	*dest = *src;
}

/* dest = a & b, one 32-bit word at a time */
static inline void qman_cgrs_and(struct qman_cgrs *dest,
			const struct qman_cgrs *a, const struct qman_cgrs *b)
{
	int ret;
	u32 *_d = dest->q.state;
	const u32 *_a = a->q.state;
	const u32 *_b = b->q.state;

	for (ret = 0; ret < 8; ret++)
		*_d++ = *_a++ & *_b++;
}

/* dest = a ^ b, one 32-bit word at a time */
static inline void qman_cgrs_xor(struct qman_cgrs *dest,
			const struct qman_cgrs *a, const struct qman_cgrs *b)
{
	int ret;
	u32 *_d = dest->q.state;
	const u32 *_a = a->q.state;
	const u32 *_b = b->q.state;

	for (ret = 0; ret < 8; ret++)
		*_d++ = *_a++ ^ *_b++;
}
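
/*
 * Illustrative sketch, not part of the original header: the xor/and helpers
 * above are typically combined to find congestion groups whose state
 * changed between two snapshots and that are actually being tracked. All
 * names below are hypothetical.
 */
static inline void qman_cgrs_delta_example(struct qman_cgrs *delta,
					   const struct qman_cgrs *tracked,
					   const struct qman_cgrs *old,
					   const struct qman_cgrs *cur)
{
	struct qman_cgrs changed;

	qman_cgrs_xor(&changed, old, cur);	 /* bits that flipped */
	qman_cgrs_and(delta, &changed, tracked); /* restricted to tracked CGRs */
}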

void qman_init_cgr_all(void);

struct qm_portal_config {
	/* Portal addresses: cache-enabled and cache-inhibited regions */
	void *addr_virt_ce;
	void __iomem *addr_virt_ci;
	struct device *dev;
	struct iommu_domain *iommu_domain;
	/* Allow these to be joined in lists */
	struct list_head list;
	/* User-visible portal configuration settings */
	/* portal is affined to this cpu */
	int cpu;
	/* portal interrupt line */
	int irq;
	/*
	 * the portal's dedicated channel id, used when initialising
	 * frame queues to target this portal when scheduled
	 */
	u16 channel;
	/* bitmask of pool channels this portal has dequeue access to */
	u32 pools;
};

/* Revision info (for errata and feature handling) */
#define QMAN_REV11 0x0101
#define QMAN_REV12 0x0102
#define QMAN_REV20 0x0200
#define QMAN_REV30 0x0300
#define QMAN_REV31 0x0301
#define QMAN_REV32 0x0302
extern u16 qman_ip_rev;		/* 0 if uninitialised, otherwise QMAN_REVx */

#define QM_FQID_RANGE_START 1	/* FQID 0 is reserved for internal use */
extern struct gen_pool *qm_fqalloc;	/* FQID allocator */
extern struct gen_pool *qm_qpalloc;	/* pool-channel allocator */
extern struct gen_pool *qm_cgralloc;	/* CGR ID allocator */
u32 qm_get_pools_sdqcr(void);

int qman_wq_alloc(void);
#ifdef CONFIG_FSL_PAMU
#define qman_liodn_fixup __qman_liodn_fixup
#else
static inline void qman_liodn_fixup(u16 channel)
{
}
#endif
void __qman_liodn_fixup(u16 channel);
void qman_set_sdest(u16 channel, unsigned int cpu_idx);

struct qman_portal *qman_create_affine_portal(
			const struct qm_portal_config *config,
			const struct qman_cgrs *cgrs);
const struct qm_portal_config *qman_destroy_affine_portal(void);

/*
 * qman_query_fq - queries FQD fields (via h/w query command)
 * @fq: the frame queue object to be queried
 * @fqd: storage for the queried FQD fields
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);

int qman_alloc_fq_table(u32 num_fqids);

/* QMan s/w corenet portal, low-level i/face */

/*
 * For the portal's static dequeue command register (SDQCR): choose one
 * SOURCE, one COUNT and one dequeue TYPE, plus an optional 8-bit TOKEN.
 * With SOURCE == CHANNELS, select the dedicated channel and/or pool
 * channels; DEDICATED_PRECEDENCE gives the portal channel priority.
 * With SOURCE == SPECIFICWQ, select either a specific work queue, or a
 * channel (dedicated or pool) plus a work-queue priority, via the
 * SPECIFICWQ_* macros.
 */
#define QM_SDQCR_SOURCE_CHANNELS	0x0
#define QM_SDQCR_SOURCE_SPECIFICWQ	0x40000000
#define QM_SDQCR_COUNT_EXACT1		0x0
#define QM_SDQCR_COUNT_UPTO3		0x20000000
#define QM_SDQCR_DEDICATED_PRECEDENCE	0x10000000
#define QM_SDQCR_TYPE_MASK		0x03000000
#define QM_SDQCR_TYPE_NULL		0x0
#define QM_SDQCR_TYPE_PRIO_QOS		0x01000000
#define QM_SDQCR_TYPE_ACTIVE_QOS	0x02000000
#define QM_SDQCR_TYPE_ACTIVE		0x03000000
#define QM_SDQCR_TOKEN_MASK		0x00ff0000
#define QM_SDQCR_TOKEN_SET(v)		(((v) & 0xff) << 16)
#define QM_SDQCR_TOKEN_GET(v)		(((v) >> 16) & 0xff)
#define QM_SDQCR_CHANNELS_DEDICATED	0x00008000
#define QM_SDQCR_SPECIFICWQ_MASK	0x000000f7
#define QM_SDQCR_SPECIFICWQ_DEDICATED	0x00000000
#define QM_SDQCR_SPECIFICWQ_POOL(n)	((n) << 4)
#define QM_SDQCR_SPECIFICWQ_WQ(n)	(n)
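
/*
 * Illustrative sketch, not part of the original header: one way to compose
 * an SDQCR value from the fields above: dequeue from the portal's dedicated
 * channel, up to three frames per command, active FQs only, with an
 * arbitrary token. The macro name and token value are hypothetical.
 */
#define QM_SDQCR_EXAMPLE (QM_SDQCR_SOURCE_CHANNELS | \
			  QM_SDQCR_COUNT_UPTO3 | \
			  QM_SDQCR_TYPE_ACTIVE | \
			  QM_SDQCR_TOKEN_SET(0xab) | \
			  QM_SDQCR_CHANNELS_DEDICATED)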

/*
 * For the portal's volatile dequeue command register (VDQCR): use FQID(n)
 * to fill in the frame queue ID.
 */
#define QM_VDQCR_FQID_MASK		0x00ffffff
#define QM_VDQCR_FQID(n)		((n) & QM_VDQCR_FQID_MASK)

/*
 * Used by all portal interrupt registers except 'inhibit': the low 16 bits
 * report channels with frame availability.
 */
#define QM_PIRQ_DQAVAIL	0x0000ffff

/* The DQAVAIL interrupt field breaks down into these per-channel bits: */
#define QM_DQAVAIL_PORTAL	0x8000		/* the portal's dedicated channel */
#define QM_DQAVAIL_POOL(n)	(0x8000 >> (n))	/* pool channel n, n = 1..15 */
#define QM_DQAVAIL_MASK		0xffff
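
/*
 * Illustrative sketch, not part of the original header: testing whether a
 * DQAVAIL status word reports frames available on pool channel n (1..15).
 * The helper name is hypothetical.
 */
static inline bool qm_dqavail_pool_example(u32 dqavail, unsigned int n)
{
	return dqavail & QM_DQAVAIL_POOL(n);
}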

/* This mask contains all the "irqsource" bits visible to API users */
#define QM_PIRQ_VISIBLE	(QM_PIRQ_SLOW | QM_PIRQ_DQRI)

extern struct qman_portal *affine_portals[NR_CPUS];
extern struct qman_portal *qman_dma_portal;
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal);

unsigned int qm_get_fqid_maxcnt(void);

int qman_shutdown_fq(u32 fqid);

int qman_requires_cleanup(void);
void qman_done_cleanup(void);
void qman_enable_irqs(void);