/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *   names of its contributors may be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

u16 bman_ip_rev;
EXPORT_SYMBOL(bman_ip_rev);

/* Register offsets */
#define REG_FBPR_FPC        0x0800
#define REG_ECSR            0x0a00
#define REG_ECIR            0x0a04
#define REG_EADR            0x0a08
#define REG_EDATA(n)        (0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)         (0x0a80 + ((n) * 0x04))
#define REG_IP_REV_1        0x0bf8
#define REG_IP_REV_2        0x0bfc
#define REG_FBPR_BARE       0x0c00
#define REG_FBPR_BAR        0x0c04
#define REG_FBPR_AR         0x0c10
#define REG_SRCIDR          0x0d04
#define REG_LIODNR          0x0d08
#define REG_ERR_ISR         0x0e00
#define REG_ERR_IER         0x0e04
#define REG_ERR_ISDR        0x0e08

/* Used by all error interrupt registers except 'inhibit' */
#define BM_EIRQ_IVCI    0x00000010  /* Invalid Command Verb */
#define BM_EIRQ_FLWI    0x00000008  /* FBPR Low Watermark */
#define BM_EIRQ_MBEI    0x00000004  /* Multi-bit ECC Error */
#define BM_EIRQ_SBEI    0x00000002  /* Single-bit ECC Error */
#define BM_EIRQ_BSCN    0x00000001  /* Pool State Change Notification */

struct bman_hwerr_txt {
    u32 mask;
    const char *txt;
};

static const struct bman_hwerr_txt bman_hwerr_txts[] = {
    { BM_EIRQ_IVCI, "Invalid Command Verb" },
    { BM_EIRQ_FLWI, "FBPR Low Watermark" },
    { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
    { BM_EIRQ_SBEI, "Single-bit ECC Error" },
    { BM_EIRQ_BSCN, "Pool State Change Notification" },
};

/* Only trigger the low water mark interrupt once */
#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI

/* Pointer to the start of the BMan's CCSR space */
static u32 __iomem *bm_ccsr_start;

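/*
 * Register accessors: bm_ccsr_start is declared as a u32 pointer, so the
 * byte offset into the register map is converted to a word index before
 * the big-endian MMIO access.
 */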
static inline u32 bm_ccsr_in(u32 offset)
{
    return ioread32be(bm_ccsr_start + offset/4);
}
static inline void bm_ccsr_out(u32 offset, u32 val)
{
    iowrite32be(val, bm_ccsr_start + offset/4);
}

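/* REG_IP_REV_1 layout: block ID in bits 31:16, major rev in 15:8, minor in 7:0 */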
static void bm_get_version(u16 *id, u8 *major, u8 *minor)
{
    u32 v = bm_ccsr_in(REG_IP_REV_1);
    *id = (v >> 16);
    *major = (v >> 8) & 0xff;
    *minor = v & 0xff;
}

/* signal transactions for FBPRs with higher priority */
#define FBPR_AR_RPRIO_HI BIT(30)

/* Track if probe has occurred and if cleanup is required */
static int __bman_probed;
static int __bman_requires_cleanup;

static int bm_set_memory(u64 ba, u32 size)
{
    u32 bar, bare;
    u32 exp = ilog2(size);
    /* choke if size isn't within range */
    DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
           is_power_of_2(size));
    /* choke if '[e]ba' has lower-alignment than 'size' */
    DPAA_ASSERT(!(ba & (size - 1)));

    /* Check to see if BMan has already been initialized */
    bar = bm_ccsr_in(REG_FBPR_BAR);
    if (bar) {
        /* Make sure ba == what was programmed */
        bare = bm_ccsr_in(REG_FBPR_BARE);
        if (bare != upper_32_bits(ba) || bar != lower_32_bits(ba)) {
            pr_err("Attempted to reinitialize BMan with different BAR, got 0x%llx read BARE=0x%x BAR=0x%x\n",
                   ba, bare, bar);
            return -ENOMEM;
        }
        pr_info("BMan BAR already configured\n");
        __bman_requires_cleanup = 1;
        return 1;
    }

    bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
    bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
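    /* FBPR_AR is programmed with log2(size) - 1 to describe the region size */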
    bm_ccsr_out(REG_FBPR_AR, exp - 1);
    return 0;
}

/*
 * Location and size of BMan private memory
 *
 * Ideally we would use the DMA API to turn rmem->base into a DMA address
 * (especially if iommu translations ever get involved).  Unfortunately, the
 * DMA API currently does not allow mapping anything that is not backed with
 * a struct page.
 */
static dma_addr_t fbpr_a;
static size_t fbpr_sz;

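/*
 * Early reserved-memory hook: record the base and size of the FBPR carveout
 * described by the "fsl,bman-fbpr" node so the probe routine can program it
 * into the block.
 */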
static int bman_fbpr(struct reserved_mem *rmem)
{
    fbpr_a = rmem->base;
    fbpr_sz = rmem->size;

    WARN_ON(!(fbpr_a && fbpr_sz));

    return 0;
}
RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);

static irqreturn_t bman_isr(int irq, void *ptr)
{
    u32 isr_val, ier_val, ecsr_val, isr_mask, i;
    struct device *dev = ptr;

    ier_val = bm_ccsr_in(REG_ERR_IER);
    isr_val = bm_ccsr_in(REG_ERR_ISR);
    ecsr_val = bm_ccsr_in(REG_ECSR);
    isr_mask = isr_val & ier_val;

    if (!isr_mask)
        return IRQ_NONE;

    for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
        if (bman_hwerr_txts[i].mask & isr_mask) {
            dev_err_ratelimited(dev, "ErrInt: %s\n",
                        bman_hwerr_txts[i].txt);
            if (bman_hwerr_txts[i].mask & ecsr_val) {
                /* Re-arm error capture registers */
                bm_ccsr_out(REG_ECSR, ecsr_val);
            }
            if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
                dev_dbg(dev, "Disabling error 0x%x\n",
                    bman_hwerr_txts[i].mask);
                ier_val &= ~bman_hwerr_txts[i].mask;
                bm_ccsr_out(REG_ERR_IER, ier_val);
            }
        }
    }
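    /* Write the captured status back to clear the handled error bits */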
    bm_ccsr_out(REG_ERR_ISR, isr_val);

    return IRQ_HANDLED;
}

int bman_is_probed(void)
{
    return __bman_probed;
}
EXPORT_SYMBOL_GPL(bman_is_probed);

int bman_requires_cleanup(void)
{
    return __bman_requires_cleanup;
}

void bman_done_cleanup(void)
{
    __bman_requires_cleanup = 0;
}

static int fsl_bman_probe(struct platform_device *pdev)
{
    int ret, err_irq;
    struct device *dev = &pdev->dev;
    struct device_node *node = dev->of_node;
    struct resource *res;
    u16 id, bm_pool_cnt;
    u8 major, minor;

    __bman_probed = -1;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
            node);
        return -ENXIO;
    }
    bm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
    if (!bm_ccsr_start)
        return -ENXIO;

    bm_get_version(&id, &major, &minor);
    if (major == 1 && minor == 0) {
        bman_ip_rev = BMAN_REV10;
        bm_pool_cnt = BM_POOL_MAX;
    } else if (major == 2 && minor == 0) {
        bman_ip_rev = BMAN_REV20;
        bm_pool_cnt = 8;
    } else if (major == 2 && minor == 1) {
        bman_ip_rev = BMAN_REV21;
        bm_pool_cnt = BM_POOL_MAX;
    } else {
        dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
            id, major, minor);
        return -ENODEV;
    }

    /*
     * If FBPR memory wasn't defined using the qbman compatible string,
     * try using the of_reserved_mem_device method
     */
    if (!fbpr_a) {
        ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
        if (ret) {
            dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
                ret);
            return -ENODEV;
        }
    }

    dev_dbg(dev, "Allocated FBPR 0x%llx 0x%zx\n", fbpr_a, fbpr_sz);

    bm_set_memory(fbpr_a, fbpr_sz);

    err_irq = platform_get_irq(pdev, 0);
    if (err_irq <= 0) {
        dev_info(dev, "Can't get %pOF IRQ\n", node);
        return -ENODEV;
    }
    ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
                   dev);
    if (ret) {
        dev_err(dev, "devm_request_irq() failed %d for '%pOF'\n",
            ret, node);
        return ret;
    }
    /* Disable Buffer Pool State Change */
    bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
    /*
     * Write-to-clear any stale bits (e.g. starvation being asserted prior
     * to resource allocation during driver init).
     */
    bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
    /* Enable Error Interrupts */
    bm_ccsr_out(REG_ERR_IER, 0xffffffff);

    bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
    if (IS_ERR(bm_bpalloc)) {
        ret = PTR_ERR(bm_bpalloc);
        dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
        return ret;
    }

    /* seed BMan resource pool */
    ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
    if (ret) {
        dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
            0, bm_pool_cnt - 1, ret);
        return ret;
    }

    __bman_probed = 1;

    return 0;
}

static const struct of_device_id fsl_bman_ids[] = {
    {
        .compatible = "fsl,bman",
    },
    {}
};

static struct platform_driver fsl_bman_driver = {
    .driver = {
        .name = KBUILD_MODNAME,
        .of_match_table = fsl_bman_ids,
        .suppress_bind_attrs = true,
    },
    .probe = fsl_bman_probe,
};

builtin_platform_driver(fsl_bman_driver);