0001 /* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
0002  *
0003  * Redistribution and use in source and binary forms, with or without
0004  * modification, are permitted provided that the following conditions are met:
0005  *     * Redistributions of source code must retain the above copyright
0006  *   notice, this list of conditions and the following disclaimer.
0007  *     * Redistributions in binary form must reproduce the above copyright
0008  *   notice, this list of conditions and the following disclaimer in the
0009  *   documentation and/or other materials provided with the distribution.
0010  *     * Neither the name of Freescale Semiconductor nor the
0011  *   names of its contributors may be used to endorse or promote products
0012  *   derived from this software without specific prior written permission.
0013  *
0014  * ALTERNATIVELY, this software may be distributed under the terms of the
0015  * GNU General Public License ("GPL") as published by the Free Software
0016  * Foundation, either version 2 of that License or (at your option) any
0017  * later version.
0018  *
0019  * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
0020  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
0021  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
0022  * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
0023  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
0024  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
0025  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
0026  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
0027  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
0028  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
0029  */
0030 
0031 #include "bman_priv.h"
0032 
0033 #define IRQNAME     "BMan portal %d"
0034 #define MAX_IRQNAME 16  /* big enough for "BMan portal %d" */
0035 
0036 /* Portal register assists */
0037 
0038 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
0039 /* Cache-inhibited register offsets */
0040 #define BM_REG_RCR_PI_CINH  0x3000
0041 #define BM_REG_RCR_CI_CINH  0x3100
0042 #define BM_REG_RCR_ITR      0x3200
0043 #define BM_REG_CFG      0x3300
0044 #define BM_REG_SCN(n)       (0x3400 + ((n) << 6))
0045 #define BM_REG_ISR      0x3e00
0046 #define BM_REG_IER      0x3e40
0047 #define BM_REG_ISDR     0x3e80
0048 #define BM_REG_IIR      0x3ec0
0049 
0050 /* Cache-enabled register offsets */
0051 #define BM_CL_CR        0x0000
0052 #define BM_CL_RR0       0x0100
0053 #define BM_CL_RR1       0x0140
0054 #define BM_CL_RCR       0x1000
0055 #define BM_CL_RCR_PI_CENA   0x3000
0056 #define BM_CL_RCR_CI_CENA   0x3100
0057 
0058 #else
0059 /* Cache-inhibited register offsets */
0060 #define BM_REG_RCR_PI_CINH  0x0000
0061 #define BM_REG_RCR_CI_CINH  0x0004
0062 #define BM_REG_RCR_ITR      0x0008
0063 #define BM_REG_CFG      0x0100
0064 #define BM_REG_SCN(n)       (0x0200 + ((n) << 2))
0065 #define BM_REG_ISR      0x0e00
0066 #define BM_REG_IER      0x0e04
0067 #define BM_REG_ISDR     0x0e08
0068 #define BM_REG_IIR      0x0e0c
0069 
0070 /* Cache-enabled register offsets */
0071 #define BM_CL_CR        0x0000
0072 #define BM_CL_RR0       0x0100
0073 #define BM_CL_RR1       0x0140
0074 #define BM_CL_RCR       0x1000
0075 #define BM_CL_RCR_PI_CENA   0x3000
0076 #define BM_CL_RCR_CI_CENA   0x3100
0077 #endif
0078 
0079 /*
0080  * Portal modes.
0081  *   Enum types:
0082  *     pmode == production mode
0083  *     cmode == consumption mode
0084  *   Enum values use 3-letter codes. The first letter matches the portal mode,
0085  *   the remaining two letters indicate:
0086  *     ci == cache-inhibited portal register
0087  *     ce == cache-enabled portal register
0088  *     vb == in-band valid-bit (cache-enabled)
0089  */
0090 enum bm_rcr_pmode {     /* matches BCSP_CFG::RPM */
0091     bm_rcr_pci = 0,     /* PI index, cache-inhibited */
0092     bm_rcr_pce = 1,     /* PI index, cache-enabled */
0093     bm_rcr_pvb = 2      /* valid-bit */
0094 };
0095 enum bm_rcr_cmode {     /* s/w-only */
0096     bm_rcr_cci,     /* CI index, cache-inhibited */
0097     bm_rcr_cce      /* CI index, cache-enabled */
0098 };
0099 
0100 
0101 /* --- Portal structures --- */
0102 
0103 #define BM_RCR_SIZE     8
0104 
0105 /* Release Command */
0106 struct bm_rcr_entry {
0107     union {
0108         struct {
0109             u8 _ncw_verb; /* writes to this are non-coherent */
0110             u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
0111             u8 __reserved1[62];
0112         };
0113         struct bm_buffer bufs[8];
0114     };
0115 };
0116 #define BM_RCR_VERB_VBIT        0x80
0117 #define BM_RCR_VERB_CMD_MASK        0x70    /* one of two values; */
0118 #define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
0119 #define BM_RCR_VERB_CMD_BPID_MULTI  0x30
0120 #define BM_RCR_VERB_BUFCOUNT_MASK   0x0f    /* values 1..8 */
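
/*
 * Annotation (not part of the original driver): a minimal sketch of how a
 * release verb is composed from the macros above. 'num' is a placeholder for
 * the 1..8 buffer count; the ring's valid-bit is OR'ed in later, at commit
 * time in bm_rcr_pvb_commit():
 *
 *   u8 verb = BM_RCR_VERB_CMD_BPID_SINGLE |
 *             (num & BM_RCR_VERB_BUFCOUNT_MASK);
 *   // bm_rcr_pvb_commit() then writes (verb | rcr->vbit) to _ncw_verb
 */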
0121 
0122 struct bm_rcr {
0123     struct bm_rcr_entry *ring, *cursor;
0124     u8 ci, available, ithresh, vbit;
0125 #ifdef CONFIG_FSL_DPAA_CHECKING
0126     u32 busy;
0127     enum bm_rcr_pmode pmode;
0128     enum bm_rcr_cmode cmode;
0129 #endif
0130 };
0131 
0132 /* MC (Management Command) command */
0133 struct bm_mc_command {
0134     u8 _ncw_verb; /* writes to this are non-coherent */
0135     u8 bpid; /* used by acquire command */
0136     u8 __reserved[62];
0137 };
0138 #define BM_MCC_VERB_VBIT        0x80
0139 #define BM_MCC_VERB_CMD_MASK        0x70    /* where the verb contains; */
0140 #define BM_MCC_VERB_CMD_ACQUIRE     0x10
0141 #define BM_MCC_VERB_CMD_QUERY       0x40
0142 #define BM_MCC_VERB_ACQUIRE_BUFCOUNT    0x0f    /* values 1..8 go here */
0143 
0144 /* MC result, Acquire and Query Response */
0145 union bm_mc_result {
0146     struct {
0147         u8 verb;
0148         u8 bpid;
0149         u8 __reserved[62];
0150     };
0151     struct bm_buffer bufs[8];
0152 };
0153 #define BM_MCR_VERB_VBIT        0x80
0154 #define BM_MCR_VERB_CMD_MASK        BM_MCC_VERB_CMD_MASK
0155 #define BM_MCR_VERB_CMD_ACQUIRE     BM_MCC_VERB_CMD_ACQUIRE
0156 #define BM_MCR_VERB_CMD_QUERY       BM_MCC_VERB_CMD_QUERY
0157 #define BM_MCR_VERB_CMD_ERR_INVALID 0x60
0158 #define BM_MCR_VERB_CMD_ERR_ECC     0x70
0159 #define BM_MCR_VERB_ACQUIRE_BUFCOUNT    BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
0160 #define BM_MCR_TIMEOUT          10000 /* us */
0161 
0162 struct bm_mc {
0163     struct bm_mc_command *cr;
0164     union bm_mc_result *rr;
0165     u8 rridx, vbit;
0166 #ifdef CONFIG_FSL_DPAA_CHECKING
0167     enum {
0168         /* Can only be _mc_start()ed */
0169         mc_idle,
0170         /* Can only be _mc_commit()ed or _mc_abort()ed */
0171         mc_user,
0172         /* Can only be _mc_retry()ed */
0173         mc_hw
0174     } state;
0175 #endif
0176 };
0177 
0178 struct bm_addr {
0179     void *ce;       /* cache-enabled */
0180     __be32 *ce_be;      /* Same as above but for direct access */
0181     void __iomem *ci;   /* cache-inhibited */
0182 };
0183 
0184 struct bm_portal {
0185     struct bm_addr addr;
0186     struct bm_rcr rcr;
0187     struct bm_mc mc;
0188 } ____cacheline_aligned;
0189 
0190 /* Cache-inhibited register access. */
0191 static inline u32 bm_in(struct bm_portal *p, u32 offset)
0192 {
0193     return ioread32be(p->addr.ci + offset);
0194 }
0195 
0196 static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
0197 {
0198     iowrite32be(val, p->addr.ci + offset);
0199 }
0200 
0201 /* Cache Enabled Portal Access */
0202 static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
0203 {
0204     dpaa_invalidate(p->addr.ce + offset);
0205 }
0206 
0207 static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
0208 {
0209     dpaa_touch_ro(p->addr.ce + offset);
0210 }
0211 
0212 static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
0213 {
0214     return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
0215 }
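
/*
 * Annotation (not part of the original driver): a minimal sketch of how the
 * two register windows are used together, mirroring portal_isr() and
 * bm_rcr_cce_update() below. Registers with read/write side effects go
 * through the cache-inhibited window; ring index shadows go through the
 * cache-enabled one:
 *
 *   u32 is = bm_in(portal, BM_REG_ISR);                 // CINH read
 *   u8 ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) &
 *           (BM_RCR_SIZE - 1);                          // CENA read
 *   bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);        // force a refetch later
 */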
0216 
0217 struct bman_portal {
0218     struct bm_portal p;
0219     /* interrupt sources processed by portal_isr(), configurable */
0220     unsigned long irq_sources;
0221     /* probing time config params for cpu-affine portals */
0222     const struct bm_portal_config *config;
0223     char irqname[MAX_IRQNAME];
0224 };
0225 
0226 static cpumask_t affine_mask;
0227 static DEFINE_SPINLOCK(affine_mask_lock);
0228 static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
0229 
0230 static inline struct bman_portal *get_affine_portal(void)
0231 {
0232     return &get_cpu_var(bman_affine_portal);
0233 }
0234 
0235 static inline void put_affine_portal(void)
0236 {
0237     put_cpu_var(bman_affine_portal);
0238 }
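
/*
 * Annotation (not part of the original driver): get_affine_portal() wraps
 * get_cpu_var(), so the caller runs on its current CPU's portal with
 * preemption disabled and must pair every get with a put, as the APIs further
 * down do:
 *
 *   struct bman_portal *p = get_affine_portal();
 *   // ...issue commands on &p->p...
 *   put_affine_portal();
 */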
0239 
0240 /*
0241  * This object type refers to a pool; it isn't *the* pool. There may be
0242  * more than one such object per BMan buffer pool, e.g. if different users of the
0243  * pool are operating via different portals.
0244  */
0245 struct bman_pool {
0246     /* index of the buffer pool to encapsulate (0-63) */
0247     u32 bpid;
0248     /* Used for hash-table admin when using depletion notifications. */
0249     struct bman_portal *portal;
0250     struct bman_pool *next;
0251 };
0252 
0253 static u32 poll_portal_slow(struct bman_portal *p, u32 is);
0254 
0255 static irqreturn_t portal_isr(int irq, void *ptr)
0256 {
0257     struct bman_portal *p = ptr;
0258     struct bm_portal *portal = &p->p;
0259     u32 clear = p->irq_sources;
0260     u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
0261 
0262     if (unlikely(!is))
0263         return IRQ_NONE;
0264 
0265     clear |= poll_portal_slow(p, is);
0266     bm_out(portal, BM_REG_ISR, clear);
0267     return IRQ_HANDLED;
0268 }
0269 
0270 /* --- RCR API --- */
0271 
0272 #define RCR_SHIFT   ilog2(sizeof(struct bm_rcr_entry))
0273 #define RCR_CARRY   (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
0274 
0275 /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
0276 static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
0277 {
0278     uintptr_t addr = (uintptr_t)p;
0279 
0280     addr &= ~RCR_CARRY;
0281 
0282     return (struct bm_rcr_entry *)addr;
0283 }
0284 
0285 #ifdef CONFIG_FSL_DPAA_CHECKING
0286 /* Bit-wise logic to convert a ring pointer to a ring index */
0287 static int rcr_ptr2idx(struct bm_rcr_entry *e)
0288 {
0289     return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
0290 }
0291 #endif
0292 
0293 /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
0294 static inline void rcr_inc(struct bm_rcr *rcr)
0295 {
0296     /* increment to the next RCR pointer and handle overflow and 'vbit' */
0297     struct bm_rcr_entry *partial = rcr->cursor + 1;
0298 
0299     rcr->cursor = rcr_carryclear(partial);
0300     if (partial != rcr->cursor)
0301         rcr->vbit ^= BM_RCR_VERB_VBIT;
0302 }
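
/*
 * Annotation (not part of the original driver): a worked example of the
 * carry-bit trick. struct bm_rcr_entry is 64 bytes, so RCR_SHIFT is 6 and
 * RCR_CARRY is 8 << 6 = 0x200; the ring sits at a 512-byte-aligned offset in
 * the cache-enabled region, so stepping the cursor past the last entry sets
 * that bit and rcr_carryclear() masks it off, wrapping back to the ring base:
 *
 *   ring = ...000, cursor = ring + 7 * 64 = ...1c0
 *   cursor + 1    = ...200   // carry bit set
 *   carryclear    = ...000   // wrapped, so rcr_inc() toggles the valid-bit
 */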
0303 
0304 static int bm_rcr_get_avail(struct bm_portal *portal)
0305 {
0306     struct bm_rcr *rcr = &portal->rcr;
0307 
0308     return rcr->available;
0309 }
0310 
0311 static int bm_rcr_get_fill(struct bm_portal *portal)
0312 {
0313     struct bm_rcr *rcr = &portal->rcr;
0314 
0315     return BM_RCR_SIZE - 1 - rcr->available;
0316 }
0317 
0318 static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
0319 {
0320     struct bm_rcr *rcr = &portal->rcr;
0321 
0322     rcr->ithresh = ithresh;
0323     bm_out(portal, BM_REG_RCR_ITR, ithresh);
0324 }
0325 
0326 static void bm_rcr_cce_prefetch(struct bm_portal *portal)
0327 {
0328     __maybe_unused struct bm_rcr *rcr = &portal->rcr;
0329 
0330     DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
0331     bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
0332 }
0333 
0334 static u8 bm_rcr_cce_update(struct bm_portal *portal)
0335 {
0336     struct bm_rcr *rcr = &portal->rcr;
0337     u8 diff, old_ci = rcr->ci;
0338 
0339     DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
0340     rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
0341     bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
0342     diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
0343     rcr->available += diff;
0344     return diff;
0345 }
0346 
0347 static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
0348 {
0349     struct bm_rcr *rcr = &portal->rcr;
0350 
0351     DPAA_ASSERT(!rcr->busy);
0352     if (!rcr->available)
0353         return NULL;
0354 #ifdef CONFIG_FSL_DPAA_CHECKING
0355     rcr->busy = 1;
0356 #endif
0357     dpaa_zero(rcr->cursor);
0358     return rcr->cursor;
0359 }
0360 
0361 static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
0362 {
0363     struct bm_rcr *rcr = &portal->rcr;
0364     struct bm_rcr_entry *rcursor;
0365 
0366     DPAA_ASSERT(rcr->busy);
0367     DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
0368     DPAA_ASSERT(rcr->available >= 1);
0369     dma_wmb();
0370     rcursor = rcr->cursor;
0371     rcursor->_ncw_verb = myverb | rcr->vbit;
0372     dpaa_flush(rcursor);
0373     rcr_inc(rcr);
0374     rcr->available--;
0375 #ifdef CONFIG_FSL_DPAA_CHECKING
0376     rcr->busy = 0;
0377 #endif
0378 }
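
/*
 * Annotation (not part of the original driver): the producer-side pattern
 * used by bman_release() below is start -> fill -> commit. 'p' and 'num' are
 * placeholders:
 *
 *   struct bm_rcr_entry *r = bm_rcr_start(p);   // NULL if the ring is full
 *   if (r) {
 *           // fill r->bufs[] (and the bpid) here, then publish the entry
 *           bm_rcr_pvb_commit(p, BM_RCR_VERB_CMD_BPID_SINGLE |
 *                                (num & BM_RCR_VERB_BUFCOUNT_MASK));
 *   }
 */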
0379 
0380 static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
0381                enum bm_rcr_cmode cmode)
0382 {
0383     struct bm_rcr *rcr = &portal->rcr;
0384     u32 cfg;
0385     u8 pi;
0386 
0387     rcr->ring = portal->addr.ce + BM_CL_RCR;
0388     rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
0389     pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
0390     rcr->cursor = rcr->ring + pi;
0391     rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
0392         BM_RCR_VERB_VBIT : 0;
0393     rcr->available = BM_RCR_SIZE - 1
0394         - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
0395     rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
0396 #ifdef CONFIG_FSL_DPAA_CHECKING
0397     rcr->busy = 0;
0398     rcr->pmode = pmode;
0399     rcr->cmode = cmode;
0400 #endif
0401     cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
0402         | (pmode & 0x3); /* BCSP_CFG::RPM */
0403     bm_out(portal, BM_REG_CFG, cfg);
0404     return 0;
0405 }
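
/*
 * Annotation (not part of the original driver): a worked example of the state
 * recovered above. The low three bits of the PI/CI registers index the
 * 8-entry ring, and bit 3 (== BM_RCR_SIZE) tracks wraps, hence the valid-bit
 * choice. E.g. PI == 0xb and CI == 0x9 give pi = 3, ci = 1,
 * vbit = BM_RCR_VERB_VBIT and available = 8 - 1 - dpaa_cyc_diff(8, 1, 3) = 5.
 */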
0406 
0407 static void bm_rcr_finish(struct bm_portal *portal)
0408 {
0409 #ifdef CONFIG_FSL_DPAA_CHECKING
0410     struct bm_rcr *rcr = &portal->rcr;
0411     int i;
0412 
0413     DPAA_ASSERT(!rcr->busy);
0414 
0415     i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
0416     if (i != rcr_ptr2idx(rcr->cursor))
0417         pr_crit("losing uncommitted RCR entries\n");
0418 
0419     i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
0420     if (i != rcr->ci)
0421         pr_crit("missing existing RCR completions\n");
0422     if (rcr->ci != rcr_ptr2idx(rcr->cursor))
0423         pr_crit("RCR destroyed unquiesced\n");
0424 #endif
0425 }
0426 
0427 /* --- Management command API --- */
0428 static int bm_mc_init(struct bm_portal *portal)
0429 {
0430     struct bm_mc *mc = &portal->mc;
0431 
0432     mc->cr = portal->addr.ce + BM_CL_CR;
0433     mc->rr = portal->addr.ce + BM_CL_RR0;
0434     mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
0435             0 : 1;
0436     mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
0437 #ifdef CONFIG_FSL_DPAA_CHECKING
0438     mc->state = mc_idle;
0439 #endif
0440     return 0;
0441 }
0442 
0443 static void bm_mc_finish(struct bm_portal *portal)
0444 {
0445 #ifdef CONFIG_FSL_DPAA_CHECKING
0446     struct bm_mc *mc = &portal->mc;
0447 
0448     DPAA_ASSERT(mc->state == mc_idle);
0449     if (mc->state != mc_idle)
0450         pr_crit("Losing incomplete MC command\n");
0451 #endif
0452 }
0453 
0454 static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
0455 {
0456     struct bm_mc *mc = &portal->mc;
0457 
0458     DPAA_ASSERT(mc->state == mc_idle);
0459 #ifdef CONFIG_FSL_DPAA_CHECKING
0460     mc->state = mc_user;
0461 #endif
0462     dpaa_zero(mc->cr);
0463     return mc->cr;
0464 }
0465 
0466 static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
0467 {
0468     struct bm_mc *mc = &portal->mc;
0469     union bm_mc_result *rr = mc->rr + mc->rridx;
0470 
0471     DPAA_ASSERT(mc->state == mc_user);
0472     dma_wmb();
0473     mc->cr->_ncw_verb = myverb | mc->vbit;
0474     dpaa_flush(mc->cr);
0475     dpaa_invalidate_touch_ro(rr);
0476 #ifdef CONFIG_FSL_DPAA_CHECKING
0477     mc->state = mc_hw;
0478 #endif
0479 }
0480 
0481 static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
0482 {
0483     struct bm_mc *mc = &portal->mc;
0484     union bm_mc_result *rr = mc->rr + mc->rridx;
0485 
0486     DPAA_ASSERT(mc->state == mc_hw);
0487     /*
0488      * The inactive response register's verb byte always returns zero until
0489      * its command is submitted and completed. This includes the valid-bit,
0490      * in case you were wondering...
0491      */
0492     if (!rr->verb) {
0493         dpaa_invalidate_touch_ro(rr);
0494         return NULL;
0495     }
0496     mc->rridx ^= 1;
0497     mc->vbit ^= BM_MCC_VERB_VBIT;
0498 #ifdef CONFIG_FSL_DPAA_CHECKING
0499     mc->state = mc_idle;
0500 #endif
0501     return rr;
0502 }
0503 
0504 static inline int bm_mc_result_timeout(struct bm_portal *portal,
0505                        union bm_mc_result **mcr)
0506 {
0507     int timeout = BM_MCR_TIMEOUT;
0508 
0509     do {
0510         *mcr = bm_mc_result(portal);
0511         if (*mcr)
0512             break;
0513         udelay(1);
0514     } while (--timeout);
0515 
0516     return timeout;
0517 }
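
/*
 * Annotation (not part of the original driver): the management-command flow
 * used by bm_shutdown_pool() and bman_acquire() below is start -> fill ->
 * commit -> poll. 'p', 'bpid', 'num' and 'res' are placeholders:
 *
 *   struct bm_mc_command *cmd = bm_mc_start(p);
 *   union bm_mc_result *res;
 *
 *   cmd->bpid = bpid;
 *   bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE |
 *                   (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
 *   if (!bm_mc_result_timeout(p, &res))
 *           return -ETIMEDOUT;                          // no response within 10 ms
 *   num = res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;     // buffers actually returned
 */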
0518 
0519 /* Disable all BSCN interrupts for the portal */
0520 static void bm_isr_bscn_disable(struct bm_portal *portal)
0521 {
0522     bm_out(portal, BM_REG_SCN(0), 0);
0523     bm_out(portal, BM_REG_SCN(1), 0);
0524 }
0525 
0526 static int bman_create_portal(struct bman_portal *portal,
0527                   const struct bm_portal_config *c)
0528 {
0529     struct bm_portal *p;
0530     int ret;
0531 
0532     p = &portal->p;
0533     /*
0534      * Prep the low-level portal struct with the mapped addresses from the
0535      * config; everything that follows depends on it, and "config" is kept
0536      * mainly for (de)reference.
0537      */
0538     p->addr.ce = c->addr_virt_ce;
0539     p->addr.ce_be = c->addr_virt_ce;
0540     p->addr.ci = c->addr_virt_ci;
0541     if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
0542         dev_err(c->dev, "RCR initialisation failed\n");
0543         goto fail_rcr;
0544     }
0545     if (bm_mc_init(p)) {
0546         dev_err(c->dev, "MC initialisation failed\n");
0547         goto fail_mc;
0548     }
0549     /*
0550      * Default to all BPIDs disabled, we enable as required at
0551      * run-time.
0552      */
0553     bm_isr_bscn_disable(p);
0554 
0555     /* Write-to-clear any stale interrupt status bits */
0556     bm_out(p, BM_REG_ISDR, 0xffffffff);
0557     portal->irq_sources = 0;
0558     bm_out(p, BM_REG_IER, 0);
0559     bm_out(p, BM_REG_ISR, 0xffffffff);
0560     snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
0561     if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
0562         dev_err(c->dev, "request_irq() failed\n");
0563         goto fail_irq;
0564     }
0565 
0566     if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
0567         goto fail_affinity;
0568 
0569     /* Need RCR to be empty before continuing */
0570     ret = bm_rcr_get_fill(p);
0571     if (ret) {
0572         dev_err(c->dev, "RCR unclean\n");
0573         goto fail_rcr_empty;
0574     }
0575     /* Success */
0576     portal->config = c;
0577 
0578     bm_out(p, BM_REG_ISDR, 0);
0579     bm_out(p, BM_REG_IIR, 0);
0580 
0581     return 0;
0582 
0583 fail_rcr_empty:
0584 fail_affinity:
0585     free_irq(c->irq, portal);
0586 fail_irq:
0587     bm_mc_finish(p);
0588 fail_mc:
0589     bm_rcr_finish(p);
0590 fail_rcr:
0591     return -EIO;
0592 }
0593 
0594 struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
0595 {
0596     struct bman_portal *portal;
0597     int err;
0598 
0599     portal = &per_cpu(bman_affine_portal, c->cpu);
0600     err = bman_create_portal(portal, c);
0601     if (err)
0602         return NULL;
0603 
0604     spin_lock(&affine_mask_lock);
0605     cpumask_set_cpu(c->cpu, &affine_mask);
0606     spin_unlock(&affine_mask_lock);
0607 
0608     return portal;
0609 }
0610 
0611 static u32 poll_portal_slow(struct bman_portal *p, u32 is)
0612 {
0613     u32 ret = is;
0614 
0615     if (is & BM_PIRQ_RCRI) {
0616         bm_rcr_cce_update(&p->p);
0617         bm_rcr_set_ithresh(&p->p, 0);
0618         bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
0619         is &= ~BM_PIRQ_RCRI;
0620     }
0621 
0622     /* There should be no status register bits left undefined */
0623     DPAA_ASSERT(!is);
0624     return ret;
0625 }
0626 
0627 int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
0628 {
0629     unsigned long irqflags;
0630 
0631     local_irq_save(irqflags);
0632     p->irq_sources |= bits & BM_PIRQ_VISIBLE;
0633     bm_out(&p->p, BM_REG_IER, p->irq_sources);
0634     local_irq_restore(irqflags);
0635     return 0;
0636 }
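
/*
 * Annotation (not part of the original driver): a portal owner that prefers
 * interrupts over polling for release-ring completions would enable the RCRI
 * source, after which portal_isr()/poll_portal_slow() above consume and
 * acknowledge it:
 *
 *   bman_p_irqsource_add(p, BM_PIRQ_RCRI);
 */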
0637 
0638 int bm_shutdown_pool(u32 bpid)
0639 {
0640     int err = 0;
0641     struct bm_mc_command *bm_cmd;
0642     union bm_mc_result *bm_res;
0643 
0644 
0645     struct bman_portal *p = get_affine_portal();
0646     while (1) {
0647         /* Acquire buffers until empty */
0648         bm_cmd = bm_mc_start(&p->p);
0649         bm_cmd->bpid = bpid;
0650         bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
0651         if (!bm_mc_result_timeout(&p->p, &bm_res)) {
0652             pr_crit("BMan Acquire Command timed out\n");
0653             err = -ETIMEDOUT;
0654             goto done;
0655         }
0656         if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
0657             /* Pool is empty */
0658             goto done;
0659         }
0660     }
0661 done:
0662     put_affine_portal();
0663     return err;
0664 }
0665 
0666 struct gen_pool *bm_bpalloc;
0667 
0668 static int bm_alloc_bpid_range(u32 *result, u32 count)
0669 {
0670     unsigned long addr;
0671 
0672     addr = gen_pool_alloc(bm_bpalloc, count);
0673     if (!addr)
0674         return -ENOMEM;
0675 
0676     *result = addr & ~DPAA_GENALLOC_OFF;
0677 
0678     return 0;
0679 }
0680 
0681 static int bm_release_bpid(u32 bpid)
0682 {
0683     int ret;
0684 
0685     ret = bm_shutdown_pool(bpid);
0686     if (ret) {
0687         pr_debug("BPID %d leaked\n", bpid);
0688         return ret;
0689     }
0690 
0691     gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
0692     return 0;
0693 }
0694 
0695 struct bman_pool *bman_new_pool(void)
0696 {
0697     struct bman_pool *pool = NULL;
0698     u32 bpid;
0699 
0700     if (bm_alloc_bpid_range(&bpid, 1))
0701         return NULL;
0702 
0703     pool = kmalloc(sizeof(*pool), GFP_KERNEL);
0704     if (!pool)
0705         goto err;
0706 
0707     pool->bpid = bpid;
0708 
0709     return pool;
0710 err:
0711     bm_release_bpid(bpid);
0712     return NULL;
0713 }
0714 EXPORT_SYMBOL(bman_new_pool);
0715 
0716 void bman_free_pool(struct bman_pool *pool)
0717 {
0718     bm_release_bpid(pool->bpid);
0719 
0720     kfree(pool);
0721 }
0722 EXPORT_SYMBOL(bman_free_pool);
0723 
0724 int bman_get_bpid(const struct bman_pool *pool)
0725 {
0726     return pool->bpid;
0727 }
0728 EXPORT_SYMBOL(bman_get_bpid);
0729 
0730 static void update_rcr_ci(struct bman_portal *p, int avail)
0731 {
0732     if (avail)
0733         bm_rcr_cce_prefetch(&p->p);
0734     else
0735         bm_rcr_cce_update(&p->p);
0736 }
0737 
0738 int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
0739 {
0740     struct bman_portal *p;
0741     struct bm_rcr_entry *r;
0742     unsigned long irqflags;
0743     int avail, timeout = 1000; /* 1ms */
0744     int i = num - 1;
0745 
0746     DPAA_ASSERT(num > 0 && num <= 8);
0747 
0748     do {
0749         p = get_affine_portal();
0750         local_irq_save(irqflags);
0751         avail = bm_rcr_get_avail(&p->p);
0752         if (avail < 2)
0753             update_rcr_ci(p, avail);
0754         r = bm_rcr_start(&p->p);
0755         local_irq_restore(irqflags);
0756         put_affine_portal();
0757         if (likely(r))
0758             break;
0759 
0760         udelay(1);
0761     } while (--timeout);
0762 
0763     if (unlikely(!timeout))
0764         return -ETIMEDOUT;
0765 
0766     p = get_affine_portal();
0767     local_irq_save(irqflags);
0768     /*
0769      * copy all but the first entry directly; bufs[0] overlays the verb
0770      * byte, so writing it wholesale could trigger badness with the valid-bit
0771      */
0772     bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
0773     bm_buffer_set_bpid(r->bufs, pool->bpid);
0774     if (i)
0775         memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
0776 
0777     bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
0778               (num & BM_RCR_VERB_BUFCOUNT_MASK));
0779 
0780     local_irq_restore(irqflags);
0781     put_affine_portal();
0782     return 0;
0783 }
0784 EXPORT_SYMBOL(bman_release);
0785 
0786 int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
0787 {
0788     struct bman_portal *p = get_affine_portal();
0789     struct bm_mc_command *mcc;
0790     union bm_mc_result *mcr;
0791     int ret;
0792 
0793     DPAA_ASSERT(num > 0 && num <= 8);
0794 
0795     mcc = bm_mc_start(&p->p);
0796     mcc->bpid = pool->bpid;
0797     bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
0798              (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
0799     if (!bm_mc_result_timeout(&p->p, &mcr)) {
0800         put_affine_portal();
0801         pr_crit("BMan Acquire Timeout\n");
0802         return -ETIMEDOUT;
0803     }
0804     ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
0805     if (bufs)
0806         memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
0807 
0808     put_affine_portal();
0809     if (ret != num)
0810         ret = -ENOMEM;
0811     return ret;
0812 }
0813 EXPORT_SYMBOL(bman_acquire);
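
/*
 * Annotation (not part of the original driver): a minimal usage sketch of the
 * exported pool API. The function name, the caller-supplied DMA addresses and
 * the __maybe_unused marker are assumptions made for illustration only.
 */
static int __maybe_unused bman_pool_example(dma_addr_t *addrs, u8 num)
{
    struct bman_pool *pool;
    struct bm_buffer bufs[8];
    int i, ret;

    if (num < 1 || num > 8)
        return -EINVAL;

    /* Allocate a dynamic BPID and a pool object to drive it */
    pool = bman_new_pool();
    if (!pool)
        return -ENOMEM;

    /* Seed the pool; bman_release() applies the pool's BPID itself */
    for (i = 0; i < num; i++)
        bm_buffer_set64(&bufs[i], addrs[i]);
    ret = bman_release(pool, bufs, num);
    if (ret)
        goto out;

    /* Take the buffers back; returns the count acquired or -ENOMEM */
    ret = bman_acquire(pool, bufs, num);
    if (ret < 0)
        goto out;
    ret = 0;
out:
    /* Drains any remaining buffers and frees the BPID and pool object */
    bman_free_pool(pool);
    return ret;
}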
0814 
0815 const struct bm_portal_config *
0816 bman_get_bm_portal_config(const struct bman_portal *portal)
0817 {
0818     return portal->config;
0819 }