0001 // SPDX-License-Identifier: GPL-2.0-only
0002 // Copyright (C) 2017 Broadcom
0003 
0004 /*
0005  * Broadcom FlexRM Mailbox Driver
0006  *
0007  * Each Broadcom FlexSparx4 offload engine is implemented as an
0008  * extension to Broadcom FlexRM ring manager. The FlexRM ring
0009  * manager provides a set of rings which can be used to submit
0010  * work to a FlexSparx4 offload engine.
0011  *
0012  * This driver creates a mailbox controller using a set of FlexRM
0013  * rings where each mailbox channel represents a separate FlexRM ring.
0014  */
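/*
 * Example client usage (illustrative only): flexrm_mbox_of_xlate() below
 * expects three mailbox cells per channel, so a client node would
 * typically reference a ring as:
 *
 *	mboxes = <&flexrm_mbox 0 0x1 0xffff>;
 *
 * where the cells are the ring (channel) index, the MSI count threshold
 * and the MSI timer value respectively (0x1 and 0xffff are the defaults
 * programmed by flexrm_mbox_probe()).
 */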
0015 
0016 #include <asm/barrier.h>
0017 #include <asm/byteorder.h>
0018 #include <linux/atomic.h>
0019 #include <linux/bitmap.h>
0020 #include <linux/debugfs.h>
0021 #include <linux/delay.h>
0022 #include <linux/device.h>
0023 #include <linux/dma-mapping.h>
0024 #include <linux/dmapool.h>
0025 #include <linux/err.h>
0026 #include <linux/interrupt.h>
0027 #include <linux/kernel.h>
0028 #include <linux/mailbox_controller.h>
0029 #include <linux/mailbox_client.h>
0030 #include <linux/mailbox/brcm-message.h>
0031 #include <linux/module.h>
0032 #include <linux/msi.h>
0033 #include <linux/of_address.h>
0034 #include <linux/of_irq.h>
0035 #include <linux/platform_device.h>
0036 #include <linux/spinlock.h>
0037 
0038 /* ====== FlexRM register defines ===== */
0039 
0040 /* FlexRM configuration */
0041 #define RING_REGS_SIZE                  0x10000
0042 #define RING_DESC_SIZE                  8
0043 #define RING_DESC_INDEX(offset)             \
0044             ((offset) / RING_DESC_SIZE)
0045 #define RING_DESC_OFFSET(index)             \
0046             ((index) * RING_DESC_SIZE)
0047 #define RING_MAX_REQ_COUNT              1024
0048 #define RING_BD_ALIGN_ORDER             12
0049 #define RING_BD_ALIGN_CHECK(addr)           \
0050             (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
0051 #define RING_BD_TOGGLE_INVALID(offset)          \
0052             (((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
0053 #define RING_BD_TOGGLE_VALID(offset)            \
0054             (!RING_BD_TOGGLE_INVALID(offset))
0055 #define RING_BD_DESC_PER_REQ                32
0056 #define RING_BD_DESC_COUNT              \
0057             (RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
0058 #define RING_BD_SIZE                    \
0059             (RING_BD_DESC_COUNT * RING_DESC_SIZE)
0060 #define RING_CMPL_ALIGN_ORDER               13
0061 #define RING_CMPL_DESC_COUNT                RING_MAX_REQ_COUNT
0062 #define RING_CMPL_SIZE                  \
0063             (RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
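/*
 * RING_VER_MAGIC is ASCII "v001" (most-significant byte first); a ring is
 * considered present when its RING_VER register reads back this value.
 */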
0064 #define RING_VER_MAGIC                  0x76303031
0065 
0066 /* Per-Ring register offsets */
0067 #define RING_VER                    0x000
0068 #define RING_BD_START_ADDR              0x004
0069 #define RING_BD_READ_PTR                0x008
0070 #define RING_BD_WRITE_PTR               0x00c
0071 #define RING_BD_READ_PTR_DDR_LS             0x010
0072 #define RING_BD_READ_PTR_DDR_MS             0x014
0073 #define RING_CMPL_START_ADDR                0x018
0074 #define RING_CMPL_WRITE_PTR             0x01c
0075 #define RING_NUM_REQ_RECV_LS                0x020
0076 #define RING_NUM_REQ_RECV_MS                0x024
0077 #define RING_NUM_REQ_TRANS_LS               0x028
0078 #define RING_NUM_REQ_TRANS_MS               0x02c
0079 #define RING_NUM_REQ_OUTSTAND               0x030
0080 #define RING_CONTROL                    0x034
0081 #define RING_FLUSH_DONE                 0x038
0082 #define RING_MSI_ADDR_LS                0x03c
0083 #define RING_MSI_ADDR_MS                0x040
0084 #define RING_MSI_CONTROL                0x048
0085 #define RING_BD_READ_PTR_DDR_CONTROL            0x04c
0086 #define RING_MSI_DATA_VALUE             0x064
0087 
0088 /* Register RING_BD_START_ADDR fields */
0089 #define BD_LAST_UPDATE_HW_SHIFT             28
0090 #define BD_LAST_UPDATE_HW_MASK              0x1
0091 #define BD_START_ADDR_VALUE(pa)             \
0092     ((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
0093 #define BD_START_ADDR_DECODE(val)           \
0094     ((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
0095 
0096 /* Register RING_CMPL_START_ADDR fields */
0097 #define CMPL_START_ADDR_VALUE(pa)           \
0098     ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))
0099 
0100 /* Register RING_CONTROL fields */
0101 #define CONTROL_MASK_DISABLE_CONTROL            12
0102 #define CONTROL_FLUSH_SHIFT             5
0103 #define CONTROL_ACTIVE_SHIFT                4
0104 #define CONTROL_RATE_ADAPT_MASK             0xf
0105 #define CONTROL_RATE_DYNAMIC                0x0
0106 #define CONTROL_RATE_FAST               0x8
0107 #define CONTROL_RATE_MEDIUM             0x9
0108 #define CONTROL_RATE_SLOW               0xa
0109 #define CONTROL_RATE_IDLE               0xb
0110 
0111 /* Register RING_FLUSH_DONE fields */
0112 #define FLUSH_DONE_MASK                 0x1
0113 
0114 /* Register RING_MSI_CONTROL fields */
0115 #define MSI_TIMER_VAL_SHIFT             16
0116 #define MSI_TIMER_VAL_MASK              0xffff
0117 #define MSI_ENABLE_SHIFT                15
0118 #define MSI_ENABLE_MASK                 0x1
0119 #define MSI_COUNT_SHIFT                 0
0120 #define MSI_COUNT_MASK                  0x3ff
0121 
0122 /* Register RING_BD_READ_PTR_DDR_CONTROL fields */
0123 #define BD_READ_PTR_DDR_TIMER_VAL_SHIFT         16
0124 #define BD_READ_PTR_DDR_TIMER_VAL_MASK          0xffff
0125 #define BD_READ_PTR_DDR_ENABLE_SHIFT            15
0126 #define BD_READ_PTR_DDR_ENABLE_MASK         0x1
0127 
0128 /* ====== FlexRM ring descriptor defines ===== */
0129 
0130 /* Completion descriptor format */
0131 #define CMPL_OPAQUE_SHIFT           0
0132 #define CMPL_OPAQUE_MASK            0xffff
0133 #define CMPL_ENGINE_STATUS_SHIFT        16
0134 #define CMPL_ENGINE_STATUS_MASK         0xffff
0135 #define CMPL_DME_STATUS_SHIFT           32
0136 #define CMPL_DME_STATUS_MASK            0xffff
0137 #define CMPL_RM_STATUS_SHIFT            48
0138 #define CMPL_RM_STATUS_MASK         0xffff
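/*
 * A 64-bit completion descriptor thus decomposes as:
 *   [63:48] RM status | [47:32] DME status |
 *   [31:16] engine status | [15:0] opaque (request id)
 */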
0139 
0140 /* Completion DME status code */
0141 #define DME_STATUS_MEM_COR_ERR          BIT(0)
0142 #define DME_STATUS_MEM_UCOR_ERR         BIT(1)
0143 #define DME_STATUS_FIFO_UNDERFLOW       BIT(2)
0144 #define DME_STATUS_FIFO_OVERFLOW        BIT(3)
0145 #define DME_STATUS_RRESP_ERR            BIT(4)
0146 #define DME_STATUS_BRESP_ERR            BIT(5)
0147 #define DME_STATUS_ERROR_MASK           (DME_STATUS_MEM_COR_ERR | \
0148                          DME_STATUS_MEM_UCOR_ERR | \
0149                          DME_STATUS_FIFO_UNDERFLOW | \
0150                          DME_STATUS_FIFO_OVERFLOW | \
0151                          DME_STATUS_RRESP_ERR | \
0152                          DME_STATUS_BRESP_ERR)
0153 
0154 /* Completion RM status code */
0155 #define RM_STATUS_CODE_SHIFT            0
0156 #define RM_STATUS_CODE_MASK         0x3ff
0157 #define RM_STATUS_CODE_GOOD         0x0
0158 #define RM_STATUS_CODE_AE_TIMEOUT       0x3ff
0159 
0160 /* General descriptor format */
0161 #define DESC_TYPE_SHIFT             60
0162 #define DESC_TYPE_MASK              0xf
0163 #define DESC_PAYLOAD_SHIFT          0
0164 #define DESC_PAYLOAD_MASK           0x0fffffffffffffff
0165 
0166 /* Null descriptor format  */
0167 #define NULL_TYPE               0
0168 #define NULL_TOGGLE_SHIFT           58
0169 #define NULL_TOGGLE_MASK            0x1
0170 
0171 /* Header descriptor format */
0172 #define HEADER_TYPE             1
0173 #define HEADER_TOGGLE_SHIFT         58
0174 #define HEADER_TOGGLE_MASK          0x1
0175 #define HEADER_ENDPKT_SHIFT         57
0176 #define HEADER_ENDPKT_MASK          0x1
0177 #define HEADER_STARTPKT_SHIFT           56
0178 #define HEADER_STARTPKT_MASK            0x1
0179 #define HEADER_BDCOUNT_SHIFT            36
0180 #define HEADER_BDCOUNT_MASK         0x1f
0181 #define HEADER_BDCOUNT_MAX          HEADER_BDCOUNT_MASK
0182 #define HEADER_FLAGS_SHIFT          16
0183 #define HEADER_FLAGS_MASK           0xffff
0184 #define HEADER_OPAQUE_SHIFT         0
0185 #define HEADER_OPAQUE_MASK          0xffff
0186 
0187 /* Source (SRC) descriptor format */
0188 #define SRC_TYPE                2
0189 #define SRC_LENGTH_SHIFT            44
0190 #define SRC_LENGTH_MASK             0xffff
0191 #define SRC_ADDR_SHIFT              0
0192 #define SRC_ADDR_MASK               0x00000fffffffffff
0193 
0194 /* Destination (DST) descriptor format */
0195 #define DST_TYPE                3
0196 #define DST_LENGTH_SHIFT            44
0197 #define DST_LENGTH_MASK             0xffff
0198 #define DST_ADDR_SHIFT              0
0199 #define DST_ADDR_MASK               0x00000fffffffffff
0200 
0201 /* Immediate (IMM) descriptor format */
0202 #define IMM_TYPE                4
0203 #define IMM_DATA_SHIFT              0
0204 #define IMM_DATA_MASK               0x0fffffffffffffff
0205 
0206 /* Next pointer (NPTR) descriptor format */
0207 #define NPTR_TYPE               5
0208 #define NPTR_TOGGLE_SHIFT           58
0209 #define NPTR_TOGGLE_MASK            0x1
0210 #define NPTR_ADDR_SHIFT             0
0211 #define NPTR_ADDR_MASK              0x00000fffffffffff
0212 
0213 /* Mega source (MSRC) descriptor format */
0214 #define MSRC_TYPE               6
0215 #define MSRC_LENGTH_SHIFT           44
0216 #define MSRC_LENGTH_MASK            0xffff
0217 #define MSRC_ADDR_SHIFT             0
0218 #define MSRC_ADDR_MASK              0x00000fffffffffff
0219 
0220 /* Mega destination (MDST) descriptor format */
0221 #define MDST_TYPE               7
0222 #define MDST_LENGTH_SHIFT           44
0223 #define MDST_LENGTH_MASK            0xffff
0224 #define MDST_ADDR_SHIFT             0
0225 #define MDST_ADDR_MASK              0x00000fffffffffff
0226 
0227 /* Source with tlast (SRCT) descriptor format */
0228 #define SRCT_TYPE               8
0229 #define SRCT_LENGTH_SHIFT           44
0230 #define SRCT_LENGTH_MASK            0xffff
0231 #define SRCT_ADDR_SHIFT             0
0232 #define SRCT_ADDR_MASK              0x00000fffffffffff
0233 
0234 /* Destination with tlast (DSTT) descriptor format */
0235 #define DSTT_TYPE               9
0236 #define DSTT_LENGTH_SHIFT           44
0237 #define DSTT_LENGTH_MASK            0xffff
0238 #define DSTT_ADDR_SHIFT             0
0239 #define DSTT_ADDR_MASK              0x00000fffffffffff
0240 
0241 /* Immediate with tlast (IMMT) descriptor format */
0242 #define IMMT_TYPE               10
0243 #define IMMT_DATA_SHIFT             0
0244 #define IMMT_DATA_MASK              0x0fffffffffffffff
0245 
0246 /* Descriptor helper macros */
0247 #define DESC_DEC(_d, _s, _m)            (((_d) >> (_s)) & (_m))
0248 #define DESC_ENC(_d, _v, _s, _m)        \
0249             do { \
0250                 (_d) &= ~((u64)(_m) << (_s)); \
0251                 (_d) |= (((u64)(_v) & (_m)) << (_s)); \
0252             } while (0)
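/*
 * Example (illustrative): DESC_ENC(d, HEADER_TYPE, DESC_TYPE_SHIFT,
 * DESC_TYPE_MASK) clears bits [63:60] of 'd' and stores 0x1 (HEADER_TYPE)
 * there; DESC_DEC(d, DESC_TYPE_SHIFT, DESC_TYPE_MASK) reads it back.
 */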
0253 
0254 /* ====== FlexRM data structures ===== */
0255 
0256 struct flexrm_ring {
0257     /* Unprotected members */
0258     int num;
0259     struct flexrm_mbox *mbox;
0260     void __iomem *regs;
0261     bool irq_requested;
0262     unsigned int irq;
0263     cpumask_t irq_aff_hint;
0264     unsigned int msi_timer_val;
0265     unsigned int msi_count_threshold;
0266     struct brcm_message *requests[RING_MAX_REQ_COUNT];
0267     void *bd_base;
0268     dma_addr_t bd_dma_base;
0269     u32 bd_write_offset;
0270     void *cmpl_base;
0271     dma_addr_t cmpl_dma_base;
0272     /* Atomic stats */
0273     atomic_t msg_send_count;
0274     atomic_t msg_cmpl_count;
0275     /* Protected members */
0276     spinlock_t lock;
0277     DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
0278     u32 cmpl_read_offset;
0279 };
0280 
0281 struct flexrm_mbox {
0282     struct device *dev;
0283     void __iomem *regs;
0284     u32 num_rings;
0285     struct flexrm_ring *rings;
0286     struct dma_pool *bd_pool;
0287     struct dma_pool *cmpl_pool;
0288     struct dentry *root;
0289     struct mbox_controller controller;
0290 };
0291 
0292 /* ====== FlexRM ring descriptor helper routines ===== */
0293 
0294 static u64 flexrm_read_desc(void *desc_ptr)
0295 {
0296     return le64_to_cpu(*((u64 *)desc_ptr));
0297 }
0298 
0299 static void flexrm_write_desc(void *desc_ptr, u64 desc)
0300 {
0301     *((u64 *)desc_ptr) = cpu_to_le64(desc);
0302 }
0303 
0304 static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
0305 {
0306     return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
0307 }
0308 
0309 static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
0310 {
0311     u32 status;
0312 
0313     status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
0314               CMPL_DME_STATUS_MASK);
0315     if (status & DME_STATUS_ERROR_MASK)
0316         return -EIO;
0317 
0318     status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
0319               CMPL_RM_STATUS_MASK);
0320     status &= RM_STATUS_CODE_MASK;
0321     if (status == RM_STATUS_CODE_AE_TIMEOUT)
0322         return -ETIMEDOUT;
0323 
0324     return 0;
0325 }
0326 
0327 static bool flexrm_is_next_table_desc(void *desc_ptr)
0328 {
0329     u64 desc = flexrm_read_desc(desc_ptr);
0330     u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0331 
0332     return type == NPTR_TYPE;
0333 }
0334 
0335 static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
0336 {
0337     u64 desc = 0;
0338 
0339     DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0340     DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
0341     DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
0342 
0343     return desc;
0344 }
0345 
0346 static u64 flexrm_null_desc(u32 toggle)
0347 {
0348     u64 desc = 0;
0349 
0350     DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0351     DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
0352 
0353     return desc;
0354 }
0355 
0356 static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
0357 {
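    /*
     * One HEADER descriptor covers up to HEADER_BDCOUNT_MAX (31)
     * non-header descriptors, so this is simply
     * DIV_ROUND_UP(nhcnt, HEADER_BDCOUNT_MAX).
     */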
0358     u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;
0359 
0360     if (nhcnt % HEADER_BDCOUNT_MAX)
0361         hcnt += 1;
0362 
0363     return hcnt;
0364 }
0365 
0366 static void flexrm_flip_header_toggle(void *desc_ptr)
0367 {
0368     u64 desc = flexrm_read_desc(desc_ptr);
0369 
0370     if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
0371         desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
0372     else
0373         desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);
0374 
0375     flexrm_write_desc(desc_ptr, desc);
0376 }
0377 
0378 static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
0379                    u32 bdcount, u32 flags, u32 opaque)
0380 {
0381     u64 desc = 0;
0382 
0383     DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0384     DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
0385     DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
0386     DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
0387     DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
0388     DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
0389     DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
0390 
0391     return desc;
0392 }
0393 
0394 static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
0395                  u64 desc, void **desc_ptr, u32 *toggle,
0396                  void *start_desc, void *end_desc)
0397 {
0398     u64 d;
0399     u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;
0400 
0401     /* Sanity check */
0402     if (nhcnt <= nhpos)
0403         return;
0404 
0405     /*
0406      * Each request or packet starts with a HEADER descriptor followed
0407      * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
0408      * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
0409      * following a HEADER descriptor is represented by BDCOUNT field
0410      * of HEADER descriptor. The max value of BDCOUNT field is 31 which
0411      * means we can only have 31 non-HEADER descriptors following one
0412      * HEADER descriptor.
0413      *
0414      * In general use, the number of non-HEADER descriptors can easily go
0415      * beyond 31. To tackle this situation, we have packet (or request)
0416      * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
0417      *
0418      * To use packet extension, the first HEADER descriptor of request
0419      * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
0420      * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
0421      * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
0422      * TOGGLE bit of the first HEADER will be set to invalid state to
0423      * ensure that FlexRM does not start fetching descriptors till all
0424      * descriptors are enqueued. The user of this function will flip
0425      * the TOGGLE bit of first HEADER after all descriptors are
0426      * enqueued.
0427      */
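    /*
     * Worked example (illustrative): a request with 70 non-header
     * descriptors (nhcnt = 70) emits a HEADER whenever
     * nhpos % HEADER_BDCOUNT_MAX == 0, i.e. three groups:
     *
     *   nhpos  0: HEADER(STARTPKT=1, ENDPKT=0, BDCOUNT=31) + 31 BDs
     *   nhpos 31: HEADER(STARTPKT=0, ENDPKT=0, BDCOUNT=31) + 31 BDs
     *   nhpos 62: HEADER(STARTPKT=0, ENDPKT=1, BDCOUNT=8)  + 8 BDs
     */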
0428 
0429     if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
0430         /* Prepare the header descriptor */
0431         nhavail = (nhcnt - nhpos);
0432         _toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
0433         _startpkt = (nhpos == 0) ? 0x1 : 0x0;
0434         _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
0435         _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
0436                 nhavail : HEADER_BDCOUNT_MAX;
0441         d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
0442                     _bdcount, 0x0, reqid);
0443 
0444         /* Write header descriptor */
0445         flexrm_write_desc(*desc_ptr, d);
0446 
0447         /* Point to next descriptor */
0448         *desc_ptr += sizeof(desc);
0449         if (*desc_ptr == end_desc)
0450             *desc_ptr = start_desc;
0451 
0452         /* Skip next pointer descriptors */
0453         while (flexrm_is_next_table_desc(*desc_ptr)) {
0454             *toggle = (*toggle) ? 0 : 1;
0455             *desc_ptr += sizeof(desc);
0456             if (*desc_ptr == end_desc)
0457                 *desc_ptr = start_desc;
0458         }
0459     }
0460 
0461     /* Write desired descriptor */
0462     flexrm_write_desc(*desc_ptr, desc);
0463 
0464     /* Point to next descriptor */
0465     *desc_ptr += sizeof(desc);
0466     if (*desc_ptr == end_desc)
0467         *desc_ptr = start_desc;
0468 
0469     /* Skip next pointer descriptors */
0470     while (flexrm_is_next_table_desc(*desc_ptr)) {
0471         *toggle = (*toggle) ? 0 : 1;
0472         *desc_ptr += sizeof(desc);
0473         if (*desc_ptr == end_desc)
0474             *desc_ptr = start_desc;
0475     }
0476 }
0477 
0478 static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
0479 {
0480     u64 desc = 0;
0481 
0482     DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0483     DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
0484     DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
0485 
0486     return desc;
0487 }
0488 
0489 static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
0490 {
0491     u64 desc = 0;
0492 
0493     DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0494     DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
0495     DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
0496 
0497     return desc;
0498 }
0499 
0500 static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
0501 {
0502     u64 desc = 0;
0503 
0504     DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0505     DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
0506     DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
0507 
0508     return desc;
0509 }
0510 
0511 static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
0512 {
0513     u64 desc = 0;
0514 
0515     DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0516     DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
0517     DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
0518 
0519     return desc;
0520 }
0521 
0522 static u64 flexrm_imm_desc(u64 data)
0523 {
0524     u64 desc = 0;
0525 
0526     DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0527     DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);
0528 
0529     return desc;
0530 }
0531 
0532 static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
0533 {
0534     u64 desc = 0;
0535 
0536     DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0537     DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
0538     DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);
0539 
0540     return desc;
0541 }
0542 
0543 static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
0544 {
0545     u64 desc = 0;
0546 
0547     DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0548     DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
0549     DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);
0550 
0551     return desc;
0552 }
0553 
0554 static u64 flexrm_immt_desc(u64 data)
0555 {
0556     u64 desc = 0;
0557 
0558     DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
0559     DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);
0560 
0561     return desc;
0562 }
0563 
0564 static bool flexrm_spu_sanity_check(struct brcm_message *msg)
0565 {
0566     struct scatterlist *sg;
0567 
0568     if (!msg->spu.src || !msg->spu.dst)
0569         return false;
0570     for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
0571         if (sg->length & 0xf) {
0572             if (sg->length > SRC_LENGTH_MASK)
0573                 return false;
0574         } else {
0575             if (sg->length > (MSRC_LENGTH_MASK * 16))
0576                 return false;
0577         }
0578     }
0579     for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
0580         if (sg->length & 0xf) {
0581             if (sg->length > DST_LENGTH_MASK)
0582                 return false;
0583         } else {
0584             if (sg->length > (MDST_LENGTH_MASK * 16))
0585                 return false;
0586         }
0587     }
0588 
0589     return true;
0590 }
0591 
0592 static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
0593 {
0594     u32 cnt = 0;
0595     unsigned int dst_target = 0;
0596     struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
0597 
0598     while (src_sg || dst_sg) {
0599         if (src_sg) {
0600             cnt++;
0601             dst_target = src_sg->length;
0602             src_sg = sg_next(src_sg);
0603         } else
0604             dst_target = UINT_MAX;
0605 
0606         while (dst_target && dst_sg) {
0607             cnt++;
0608             if (dst_sg->length < dst_target)
0609                 dst_target -= dst_sg->length;
0610             else
0611                 dst_target = 0;
0612             dst_sg = sg_next(dst_sg);
0613         }
0614     }
0615 
0616     return cnt;
0617 }
0618 
0619 static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
0620 {
0621     int rc;
0622 
0623     rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
0624             DMA_TO_DEVICE);
0625     if (rc < 0)
0626         return rc;
0627 
0628     rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
0629             DMA_FROM_DEVICE);
0630     if (rc < 0) {
0631         dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
0632                  DMA_TO_DEVICE);
0633         return rc;
0634     }
0635 
0636     return 0;
0637 }
0638 
0639 static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
0640 {
0641     dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
0642              DMA_FROM_DEVICE);
0643     dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
0644              DMA_TO_DEVICE);
0645 }
0646 
0647 static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
0648                      u32 reqid, void *desc_ptr, u32 toggle,
0649                      void *start_desc, void *end_desc)
0650 {
0651     u64 d;
0652     u32 nhpos = 0;
0653     void *orig_desc_ptr = desc_ptr;
0654     unsigned int dst_target = 0;
0655     struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
0656 
0657     while (src_sg || dst_sg) {
0658         if (src_sg) {
0659             if (sg_dma_len(src_sg) & 0xf)
0660                 d = flexrm_src_desc(sg_dma_address(src_sg),
0661                              sg_dma_len(src_sg));
0662             else
0663                 d = flexrm_msrc_desc(sg_dma_address(src_sg),
0664                               sg_dma_len(src_sg)/16);
0665             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0666                          d, &desc_ptr, &toggle,
0667                          start_desc, end_desc);
0668             nhpos++;
0669             dst_target = sg_dma_len(src_sg);
0670             src_sg = sg_next(src_sg);
0671         } else
0672             dst_target = UINT_MAX;
0673 
0674         while (dst_target && dst_sg) {
0675             if (sg_dma_len(dst_sg) & 0xf)
0676                 d = flexrm_dst_desc(sg_dma_address(dst_sg),
0677                              sg_dma_len(dst_sg));
0678             else
0679                 d = flexrm_mdst_desc(sg_dma_address(dst_sg),
0680                               sg_dma_len(dst_sg)/16);
0681             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0682                          d, &desc_ptr, &toggle,
0683                          start_desc, end_desc);
0684             nhpos++;
0685             if (sg_dma_len(dst_sg) < dst_target)
0686                 dst_target -= sg_dma_len(dst_sg);
0687             else
0688                 dst_target = 0;
0689             dst_sg = sg_next(dst_sg);
0690         }
0691     }
0692 
0693     /* Null descriptor with invalid toggle bit */
0694     flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
0695 
0696     /* Ensure that descriptors have been written to memory */
0697     wmb();
0698 
0699     /* Flip toggle bit in header */
0700     flexrm_flip_header_toggle(orig_desc_ptr);
0701 
0702     return desc_ptr;
0703 }
0704 
0705 static bool flexrm_sba_sanity_check(struct brcm_message *msg)
0706 {
0707     u32 i;
0708 
0709     if (!msg->sba.cmds || !msg->sba.cmds_count)
0710         return false;
0711 
0712     for (i = 0; i < msg->sba.cmds_count; i++) {
0713         if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
0714              (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
0715             (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
0716             return false;
0717         if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
0718             (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
0719             return false;
0720         if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
0721             (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
0722             return false;
0723         if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
0724             (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
0725             return false;
0726         if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
0727             (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
0728             return false;
0729     }
0730 
0731     return true;
0732 }
0733 
0734 static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
0735 {
0736     u32 i, cnt;
0737 
0738     cnt = 0;
0739     for (i = 0; i < msg->sba.cmds_count; i++) {
0740         cnt++;
0741 
0742         if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
0743             (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
0744             cnt++;
0745 
0746         if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
0747             cnt++;
0748 
0749         if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
0750             cnt++;
0751     }
0752 
0753     return cnt;
0754 }
0755 
0756 static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
0757                      u32 reqid, void *desc_ptr, u32 toggle,
0758                      void *start_desc, void *end_desc)
0759 {
0760     u64 d;
0761     u32 i, nhpos = 0;
0762     struct brcm_sba_command *c;
0763     void *orig_desc_ptr = desc_ptr;
0764 
0765     /* Convert SBA commands into descriptors */
0766     for (i = 0; i < msg->sba.cmds_count; i++) {
0767         c = &msg->sba.cmds[i];
0768 
0769         if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
0770             (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
0771             /* Destination response descriptor */
0772             d = flexrm_dst_desc(c->resp, c->resp_len);
0773             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0774                          d, &desc_ptr, &toggle,
0775                          start_desc, end_desc);
0776             nhpos++;
0777         } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
0778             /* Destination response with tlast descriptor */
0779             d = flexrm_dstt_desc(c->resp, c->resp_len);
0780             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0781                          d, &desc_ptr, &toggle,
0782                          start_desc, end_desc);
0783             nhpos++;
0784         }
0785 
0786         if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
0787             /* Destination with tlast descriptor */
0788             d = flexrm_dstt_desc(c->data, c->data_len);
0789             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0790                          d, &desc_ptr, &toggle,
0791                          start_desc, end_desc);
0792             nhpos++;
0793         }
0794 
0795         if (c->flags & BRCM_SBA_CMD_TYPE_B) {
0796             /* Command as immediate descriptor */
0797             d = flexrm_imm_desc(c->cmd);
0798             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0799                          d, &desc_ptr, &toggle,
0800                          start_desc, end_desc);
0801             nhpos++;
0802         } else {
0803             /* Command as immediate descriptor with tlast */
0804             d = flexrm_immt_desc(c->cmd);
0805             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0806                          d, &desc_ptr, &toggle,
0807                          start_desc, end_desc);
0808             nhpos++;
0809         }
0810 
0811         if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
0812             (c->flags & BRCM_SBA_CMD_TYPE_C)) {
0813             /* Source with tlast descriptor */
0814             d = flexrm_srct_desc(c->data, c->data_len);
0815             flexrm_enqueue_desc(nhpos, nhcnt, reqid,
0816                          d, &desc_ptr, &toggle,
0817                          start_desc, end_desc);
0818             nhpos++;
0819         }
0820     }
0821 
0822     /* Null descriptor with invalid toggle bit */
0823     flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
0824 
0825     /* Ensure that descriptors have been written to memory */
0826     wmb();
0827 
0828     /* Flip toggle bit in header */
0829     flexrm_flip_header_toggle(orig_desc_ptr);
0830 
0831     return desc_ptr;
0832 }
0833 
0834 static bool flexrm_sanity_check(struct brcm_message *msg)
0835 {
0836     if (!msg)
0837         return false;
0838 
0839     switch (msg->type) {
0840     case BRCM_MESSAGE_SPU:
0841         return flexrm_spu_sanity_check(msg);
0842     case BRCM_MESSAGE_SBA:
0843         return flexrm_sba_sanity_check(msg);
0844     default:
0845         return false;
0846     }
0847 }
0848 
0849 static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
0850 {
0851     if (!msg)
0852         return 0;
0853 
0854     switch (msg->type) {
0855     case BRCM_MESSAGE_SPU:
0856         return flexrm_spu_estimate_nonheader_desc_count(msg);
0857     case BRCM_MESSAGE_SBA:
0858         return flexrm_sba_estimate_nonheader_desc_count(msg);
0859     default:
0860         return 0;
0861     }
0862 }
0863 
0864 static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
0865 {
0866     if (!dev || !msg)
0867         return -EINVAL;
0868 
0869     switch (msg->type) {
0870     case BRCM_MESSAGE_SPU:
0871         return flexrm_spu_dma_map(dev, msg);
0872     default:
0873         break;
0874     }
0875 
0876     return 0;
0877 }
0878 
0879 static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
0880 {
0881     if (!dev || !msg)
0882         return;
0883 
0884     switch (msg->type) {
0885     case BRCM_MESSAGE_SPU:
0886         flexrm_spu_dma_unmap(dev, msg);
0887         break;
0888     default:
0889         break;
0890     }
0891 }
0892 
0893 static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
0894                 u32 reqid, void *desc_ptr, u32 toggle,
0895                 void *start_desc, void *end_desc)
0896 {
0897     if (!msg || !desc_ptr || !start_desc || !end_desc)
0898         return ERR_PTR(-ENOTSUPP);
0899 
0900     if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
0901         return ERR_PTR(-ERANGE);
0902 
0903     switch (msg->type) {
0904     case BRCM_MESSAGE_SPU:
0905         return flexrm_spu_write_descs(msg, nhcnt, reqid,
0906                            desc_ptr, toggle,
0907                            start_desc, end_desc);
0908     case BRCM_MESSAGE_SBA:
0909         return flexrm_sba_write_descs(msg, nhcnt, reqid,
0910                            desc_ptr, toggle,
0911                            start_desc, end_desc);
0912     default:
0913         return ERR_PTR(-ENOTSUPP);
0914     }
0915 }
0916 
0917 /* ====== FlexRM driver helper routines ===== */
0918 
0919 static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
0920                        struct seq_file *file)
0921 {
0922     int i;
0923     const char *state;
0924     struct flexrm_ring *ring;
0925 
0926     seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
0927            "Ring#", "State", "BD_Addr", "BD_Size",
0928            "Cmpl_Addr", "Cmpl_Size");
0929 
0930     for (i = 0; i < mbox->num_rings; i++) {
0931         ring = &mbox->rings[i];
0932         if (readl(ring->regs + RING_CONTROL) &
0933             BIT(CONTROL_ACTIVE_SHIFT))
0934             state = "active";
0935         else
0936             state = "inactive";
0937         seq_printf(file,
0938                "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
0939                ring->num, state,
0940                (unsigned long long)ring->bd_dma_base,
0941                (u32)RING_BD_SIZE,
0942                (unsigned long long)ring->cmpl_dma_base,
0943                (u32)RING_CMPL_SIZE);
0944     }
0945 }
0946 
0947 static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
0948                       struct seq_file *file)
0949 {
0950     int i;
0951     u32 val, bd_read_offset;
0952     struct flexrm_ring *ring;
0953 
0954     seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
0955            "Ring#", "BD_Read", "BD_Write",
0956            "Cmpl_Read", "Submitted", "Completed");
0957 
0958     for (i = 0; i < mbox->num_rings; i++) {
0959         ring = &mbox->rings[i];
0960         bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
0961         val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
0962         bd_read_offset *= RING_DESC_SIZE;
0963         bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
0964                     ring->bd_dma_base);
0965         seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
0966                ring->num,
0967                (u32)bd_read_offset,
0968                (u32)ring->bd_write_offset,
0969                (u32)ring->cmpl_read_offset,
0970                (u32)atomic_read(&ring->msg_send_count),
0971                (u32)atomic_read(&ring->msg_cmpl_count));
0972     }
0973 }
0974 
0975 static int flexrm_new_request(struct flexrm_ring *ring,
0976                 struct brcm_message *batch_msg,
0977                 struct brcm_message *msg)
0978 {
0979     void *next;
0980     unsigned long flags;
0981     u32 val, count, nhcnt;
0982     u32 read_offset, write_offset;
0983     bool exit_cleanup = false;
0984     int ret = 0, reqid;
0985 
0986     /* Do sanity check on message */
0987     if (!flexrm_sanity_check(msg))
0988         return -EIO;
0989     msg->error = 0;
0990 
0991     /* Reserve a free request id; bail out if the ring is already full */
0992     spin_lock_irqsave(&ring->lock, flags);
0993     reqid = bitmap_find_free_region(ring->requests_bmap,
0994                     RING_MAX_REQ_COUNT, 0);
0995     spin_unlock_irqrestore(&ring->lock, flags);
0996     if (reqid < 0)
0997         return -ENOSPC;
0998     ring->requests[reqid] = msg;
0999 
1000     /* Do DMA mappings for the message */
1001     ret = flexrm_dma_map(ring->mbox->dev, msg);
1002     if (ret < 0) {
1003         ring->requests[reqid] = NULL;
1004         spin_lock_irqsave(&ring->lock, flags);
1005         bitmap_release_region(ring->requests_bmap, reqid, 0);
1006         spin_unlock_irqrestore(&ring->lock, flags);
1007         return ret;
1008     }
1009 
1010     /* Determine current HW BD read offset */
1011     read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
1012     val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
1013     read_offset *= RING_DESC_SIZE;
1014     read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
1015 
1016     /*
1017      * Number of required descriptors = number of non-header descriptors +
1018      *               number of header descriptors +
1019      *               1x null descriptor
1020      */
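    /*
     * For example, a message needing 10 non-header descriptors requires
     * 1 header + 10 non-header + 1 null = 12 free slots in the BD ring.
     */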
1021     nhcnt = flexrm_estimate_nonheader_desc_count(msg);
1022     count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
1023 
1024     /* Check for available descriptor space. */
1025     write_offset = ring->bd_write_offset;
1026     while (count) {
1027         if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
1028             count--;
1029         write_offset += RING_DESC_SIZE;
1030         if (write_offset == RING_BD_SIZE)
1031             write_offset = 0x0;
1032         if (write_offset == read_offset)
1033             break;
1034     }
1035     if (count) {
1036         ret = -ENOSPC;
1037         exit_cleanup = true;
1038         goto exit;
1039     }
1040 
1041     /* Write descriptors to ring */
1042     next = flexrm_write_descs(msg, nhcnt, reqid,
1043             ring->bd_base + ring->bd_write_offset,
1044             RING_BD_TOGGLE_VALID(ring->bd_write_offset),
1045             ring->bd_base, ring->bd_base + RING_BD_SIZE);
1046     if (IS_ERR(next)) {
1047         ret = PTR_ERR(next);
1048         exit_cleanup = true;
1049         goto exit;
1050     }
1051 
1052     /* Save ring BD write offset */
1053     ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
1054 
1055     /* Increment number of messages sent */
1056     atomic_inc_return(&ring->msg_send_count);
1057 
1058 exit:
1059     /* Update error status in message */
1060     msg->error = ret;
1061 
1062     /* Cleanup if we failed */
1063     if (exit_cleanup) {
1064         flexrm_dma_unmap(ring->mbox->dev, msg);
1065         ring->requests[reqid] = NULL;
1066         spin_lock_irqsave(&ring->lock, flags);
1067         bitmap_release_region(ring->requests_bmap, reqid, 0);
1068         spin_unlock_irqrestore(&ring->lock, flags);
1069     }
1070 
1071     return ret;
1072 }
1073 
1074 static int flexrm_process_completions(struct flexrm_ring *ring)
1075 {
1076     u64 desc;
1077     int err, count = 0;
1078     unsigned long flags;
1079     struct brcm_message *msg = NULL;
1080     u32 reqid, cmpl_read_offset, cmpl_write_offset;
1081     struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
1082 
1083     spin_lock_irqsave(&ring->lock, flags);
1084 
1085     /*
1086      * Get current completion read and write offset
1087      *
1088      * Note: We should read completion write pointer at least once
1089      * after we get a MSI interrupt because HW maintains internal
1090      * MSI status which will allow next MSI interrupt only after
1091      * completion write pointer is read.
1092      */
1093     cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1094     cmpl_write_offset *= RING_DESC_SIZE;
1095     cmpl_read_offset = ring->cmpl_read_offset;
1096     ring->cmpl_read_offset = cmpl_write_offset;
1097 
1098     spin_unlock_irqrestore(&ring->lock, flags);
1099 
1100     /* For each completed request notify mailbox clients */
1101     reqid = 0;
1102     while (cmpl_read_offset != cmpl_write_offset) {
1103         /* Dequeue next completion descriptor */
1104         desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
1105 
1106         /* Next read offset */
1107         cmpl_read_offset += RING_DESC_SIZE;
1108         if (cmpl_read_offset == RING_CMPL_SIZE)
1109             cmpl_read_offset = 0;
1110 
1111         /* Decode error from completion descriptor */
1112         err = flexrm_cmpl_desc_to_error(desc);
1113         if (err < 0) {
1114             dev_warn(ring->mbox->dev,
1115             "ring%d got completion desc=0x%lx with error %d\n",
1116             ring->num, (unsigned long)desc, err);
1117         }
1118 
1119         /* Determine request id from completion descriptor */
1120         reqid = flexrm_cmpl_desc_to_reqid(desc);
1121 
1122         /* Determine message pointer based on reqid */
1123         msg = ring->requests[reqid];
1124         if (!msg) {
1125             dev_warn(ring->mbox->dev,
1126             "ring%d null msg pointer for completion desc=0x%lx\n",
1127             ring->num, (unsigned long)desc);
1128             continue;
1129         }
1130 
1131         /* Release reqid for recycling */
1132         ring->requests[reqid] = NULL;
1133         spin_lock_irqsave(&ring->lock, flags);
1134         bitmap_release_region(ring->requests_bmap, reqid, 0);
1135         spin_unlock_irqrestore(&ring->lock, flags);
1136 
1137         /* Unmap DMA mappings */
1138         flexrm_dma_unmap(ring->mbox->dev, msg);
1139 
1140         /* Give-back message to mailbox client */
1141         msg->error = err;
1142         mbox_chan_received_data(chan, msg);
1143 
1144         /* Increment number of completions processed */
1145         atomic_inc_return(&ring->msg_cmpl_count);
1146         count++;
1147     }
1148 
1149     return count;
1150 }
1151 
1152 /* ====== FlexRM Debugfs callbacks ====== */
1153 
1154 static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset)
1155 {
1156     struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
1157 
1158     /* Write config in file */
1159     flexrm_write_config_in_seqfile(mbox, file);
1160 
1161     return 0;
1162 }
1163 
1164 static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
1165 {
1166     struct flexrm_mbox *mbox = dev_get_drvdata(file->private);
1167 
1168     /* Write stats in file */
1169     flexrm_write_stats_in_seqfile(mbox, file);
1170 
1171     return 0;
1172 }
1173 
1174 /* ====== FlexRM interrupt handler ===== */
1175 
1176 static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
1177 {
1178     /* We only have MSI for completions so just wake up the IRQ thread */
1179     /* Ring related errors will be reported via completion descriptors */
1180 
1181     return IRQ_WAKE_THREAD;
1182 }
1183 
1184 static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
1185 {
1186     flexrm_process_completions(dev_id);
1187 
1188     return IRQ_HANDLED;
1189 }
1190 
1191 /* ====== FlexRM mailbox callbacks ===== */
1192 
1193 static int flexrm_send_data(struct mbox_chan *chan, void *data)
1194 {
1195     int i, rc;
1196     struct flexrm_ring *ring = chan->con_priv;
1197     struct brcm_message *msg = data;
1198 
1199     if (msg->type == BRCM_MESSAGE_BATCH) {
1200         for (i = msg->batch.msgs_queued;
1201              i < msg->batch.msgs_count; i++) {
1202             rc = flexrm_new_request(ring, msg,
1203                          &msg->batch.msgs[i]);
1204             if (rc) {
1205                 msg->error = rc;
1206                 return rc;
1207             }
1208             msg->batch.msgs_queued++;
1209         }
1210         return 0;
1211     }
1212 
1213     return flexrm_new_request(ring, NULL, data);
1214 }
1215 
1216 static bool flexrm_peek_data(struct mbox_chan *chan)
1217 {
1218     int cnt = flexrm_process_completions(chan->con_priv);
1219 
1220     return cnt > 0;
1221 }
1222 
1223 static int flexrm_startup(struct mbox_chan *chan)
1224 {
1225     u64 d;
1226     u32 val, off;
1227     int ret = 0;
1228     dma_addr_t next_addr;
1229     struct flexrm_ring *ring = chan->con_priv;
1230 
1231     /* Allocate BD memory */
1232     ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
1233                        GFP_KERNEL, &ring->bd_dma_base);
1234     if (!ring->bd_base) {
1235         dev_err(ring->mbox->dev,
1236             "can't allocate BD memory for ring%d\n",
1237             ring->num);
1238         ret = -ENOMEM;
1239         goto fail;
1240     }
1241 
1242     /* Configure next table pointer entries in BD memory */
1243     for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
1244         next_addr = off + RING_DESC_SIZE;
1245         if (next_addr == RING_BD_SIZE)
1246             next_addr = 0;
1247         next_addr += ring->bd_dma_base;
1248         if (RING_BD_ALIGN_CHECK(next_addr))
1249             d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
1250                             next_addr);
1251         else
1252             d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
1253         flexrm_write_desc(ring->bd_base + off, d);
1254     }
1255 
1256     /* Allocate completion memory */
1257     ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
1258                      GFP_KERNEL, &ring->cmpl_dma_base);
1259     if (!ring->cmpl_base) {
1260         dev_err(ring->mbox->dev,
1261             "can't allocate completion memory for ring%d\n",
1262             ring->num);
1263         ret = -ENOMEM;
1264         goto fail_free_bd_memory;
1265     }
1266 
1267     /* Request IRQ */
1268     if (ring->irq == UINT_MAX) {
1269         dev_err(ring->mbox->dev,
1270             "ring%d IRQ not available\n", ring->num);
1271         ret = -ENODEV;
1272         goto fail_free_cmpl_memory;
1273     }
1274     ret = request_threaded_irq(ring->irq,
1275                    flexrm_irq_event,
1276                    flexrm_irq_thread,
1277                    0, dev_name(ring->mbox->dev), ring);
1278     if (ret) {
1279         dev_err(ring->mbox->dev,
1280             "failed to request ring%d IRQ\n", ring->num);
1281         goto fail_free_cmpl_memory;
1282     }
1283     ring->irq_requested = true;
1284 
1285     /* Set IRQ affinity hint */
1286     ring->irq_aff_hint = CPU_MASK_NONE;
1287     val = ring->mbox->num_rings;
1288     val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
1289     cpumask_set_cpu((ring->num / val) % num_online_cpus(),
1290             &ring->irq_aff_hint);
1291     ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
1292     if (ret) {
1293         dev_err(ring->mbox->dev,
1294             "failed to set IRQ affinity hint for ring%d\n",
1295             ring->num);
1296         goto fail_free_irq;
1297     }
1298 
1299     /* Disable/inactivate ring */
1300     writel_relaxed(0x0, ring->regs + RING_CONTROL);
1301 
1302     /* Program BD start address */
1303     val = BD_START_ADDR_VALUE(ring->bd_dma_base);
1304     writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
1305 
1306     /* BD write pointer will be same as HW write pointer */
1307     ring->bd_write_offset =
1308             readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
1309     ring->bd_write_offset *= RING_DESC_SIZE;
1310 
1311     /* Program completion start address */
1312     val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
1313     writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
1314 
1315     /* Completion read pointer will be same as HW write pointer */
1316     ring->cmpl_read_offset =
1317             readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1318     ring->cmpl_read_offset *= RING_DESC_SIZE;
1319 
1320     /* Read ring Tx, Rx, and Outstanding counts to clear */
1321     readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
1322     readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
1323     readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
1324     readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
1325     readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
1326 
1327     /* Configure RING_MSI_CONTROL */
1328     val = 0;
1329     val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
1330     val |= BIT(MSI_ENABLE_SHIFT);
1331     val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
1332     writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
1333 
1334     /* Enable/activate ring */
1335     val = BIT(CONTROL_ACTIVE_SHIFT);
1336     writel_relaxed(val, ring->regs + RING_CONTROL);
1337 
1338     /* Reset stats to zero */
1339     atomic_set(&ring->msg_send_count, 0);
1340     atomic_set(&ring->msg_cmpl_count, 0);
1341 
1342     return 0;
1343 
1344 fail_free_irq:
1345     free_irq(ring->irq, ring);
1346     ring->irq_requested = false;
1347 fail_free_cmpl_memory:
1348     dma_pool_free(ring->mbox->cmpl_pool,
1349               ring->cmpl_base, ring->cmpl_dma_base);
1350     ring->cmpl_base = NULL;
1351 fail_free_bd_memory:
1352     dma_pool_free(ring->mbox->bd_pool,
1353               ring->bd_base, ring->bd_dma_base);
1354     ring->bd_base = NULL;
1355 fail:
1356     return ret;
1357 }
1358 
1359 static void flexrm_shutdown(struct mbox_chan *chan)
1360 {
1361     u32 reqid;
1362     unsigned int timeout;
1363     struct brcm_message *msg;
1364     struct flexrm_ring *ring = chan->con_priv;
1365 
1366     /* Disable/inactivate ring */
1367     writel_relaxed(0x0, ring->regs + RING_CONTROL);
1368 
1369     /* Set ring flush state */
1370     timeout = 1000; /* timeout of 1s */
1371     writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
1372             ring->regs + RING_CONTROL);
1373     do {
1374         if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1375             FLUSH_DONE_MASK)
1376             break;
1377         mdelay(1);
1378     } while (--timeout);
1379     if (!timeout)
1380         dev_err(ring->mbox->dev,
1381             "setting ring%d flush state timedout\n", ring->num);
1382 
1383     /* Clear ring flush state */
1384     timeout = 1000; /* timeout of 1s */
1385     writel_relaxed(0x0, ring->regs + RING_CONTROL);
1386     do {
1387         if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1388               FLUSH_DONE_MASK))
1389             break;
1390         mdelay(1);
1391     } while (--timeout);
1392     if (!timeout)
1393         dev_err(ring->mbox->dev,
1394             "clearing ring%d flush state timedout\n", ring->num);
1395 
1396     /* Abort all in-flight requests */
1397     for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
1398         msg = ring->requests[reqid];
1399         if (!msg)
1400             continue;
1401 
1402         /* Release reqid for recycling */
1403         ring->requests[reqid] = NULL;
1404 
1405         /* Unmap DMA mappings */
1406         flexrm_dma_unmap(ring->mbox->dev, msg);
1407 
1408         /* Give-back message to mailbox client */
1409         msg->error = -EIO;
1410         mbox_chan_received_data(chan, msg);
1411     }
1412 
1413     /* Clear requests bitmap */
1414     bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
1415 
1416     /* Release IRQ */
1417     if (ring->irq_requested) {
1418         irq_update_affinity_hint(ring->irq, NULL);
1419         free_irq(ring->irq, ring);
1420         ring->irq_requested = false;
1421     }
1422 
1423     /* Free-up completion descriptor ring */
1424     if (ring->cmpl_base) {
1425         dma_pool_free(ring->mbox->cmpl_pool,
1426                   ring->cmpl_base, ring->cmpl_dma_base);
1427         ring->cmpl_base = NULL;
1428     }
1429 
1430     /* Free-up BD descriptor ring */
1431     if (ring->bd_base) {
1432         dma_pool_free(ring->mbox->bd_pool,
1433                   ring->bd_base, ring->bd_dma_base);
1434         ring->bd_base = NULL;
1435     }
1436 }
1437 
1438 static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
1439     .send_data  = flexrm_send_data,
1440     .startup    = flexrm_startup,
1441     .shutdown   = flexrm_shutdown,
1442     .peek_data  = flexrm_peek_data,
1443 };
1444 
1445 static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
1446                     const struct of_phandle_args *pa)
1447 {
1448     struct mbox_chan *chan;
1449     struct flexrm_ring *ring;
1450 
1451     if (pa->args_count < 3)
1452         return ERR_PTR(-EINVAL);
1453 
1454     if (pa->args[0] >= cntlr->num_chans)
1455         return ERR_PTR(-ENOENT);
1456 
1457     if (pa->args[1] > MSI_COUNT_MASK)
1458         return ERR_PTR(-EINVAL);
1459 
1460     if (pa->args[2] > MSI_TIMER_VAL_MASK)
1461         return ERR_PTR(-EINVAL);
1462 
1463     chan = &cntlr->chans[pa->args[0]];
1464     ring = chan->con_priv;
1465     ring->msi_count_threshold = pa->args[1];
1466     ring->msi_timer_val = pa->args[2];
1467 
1468     return chan;
1469 }
1470 
1471 /* ====== FlexRM platform driver ===== */
1472 
1473 static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
1474 {
1475     struct device *dev = msi_desc_to_dev(desc);
1476     struct flexrm_mbox *mbox = dev_get_drvdata(dev);
1477     struct flexrm_ring *ring = &mbox->rings[desc->msi_index];
1478 
1479     /* Configure per-Ring MSI registers */
1480     writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
1481     writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
1482     writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
1483 }
1484 
1485 static int flexrm_mbox_probe(struct platform_device *pdev)
1486 {
1487     int index, ret = 0;
1488     void __iomem *regs;
1489     void __iomem *regs_end;
1490     struct resource *iomem;
1491     struct flexrm_ring *ring;
1492     struct flexrm_mbox *mbox;
1493     struct device *dev = &pdev->dev;
1494 
1495     /* Allocate driver mailbox struct */
1496     mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
1497     if (!mbox) {
1498         ret = -ENOMEM;
1499         goto fail;
1500     }
1501     mbox->dev = dev;
1502     platform_set_drvdata(pdev, mbox);
1503 
1504     /* Get resource for registers */
1505     iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1506     if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
1507         ret = -ENODEV;
1508         goto fail;
1509     }
1510 
1511     /* Map registers of all rings */
1512     mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
1513     if (IS_ERR(mbox->regs)) {
1514         ret = PTR_ERR(mbox->regs);
1515         goto fail;
1516     }
1517     regs_end = mbox->regs + resource_size(iomem);
1518 
1519     /* Scan and count available rings */
1520     mbox->num_rings = 0;
1521     for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
1522         if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
1523             mbox->num_rings++;
1524     }
1525     if (!mbox->num_rings) {
1526         ret = -ENODEV;
1527         goto fail;
1528     }
1529 
1530     /* Allocate driver ring structs */
1531     ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
1532     if (!ring) {
1533         ret = -ENOMEM;
1534         goto fail;
1535     }
1536     mbox->rings = ring;
1537 
1538     /* Initialize members of driver ring structs */
1539     regs = mbox->regs;
1540     for (index = 0; index < mbox->num_rings; index++) {
1541         ring = &mbox->rings[index];
1542         ring->num = index;
1543         ring->mbox = mbox;
1544         while ((regs < regs_end) &&
1545                (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
1546             regs += RING_REGS_SIZE;
1547         if (regs_end <= regs) {
1548             ret = -ENODEV;
1549             goto fail;
1550         }
1551         ring->regs = regs;
1552         regs += RING_REGS_SIZE;
1553         ring->irq = UINT_MAX;
1554         ring->irq_requested = false;
1555         ring->msi_timer_val = MSI_TIMER_VAL_MASK;
1556         ring->msi_count_threshold = 0x1;
1557         memset(ring->requests, 0, sizeof(ring->requests));
1558         ring->bd_base = NULL;
1559         ring->bd_dma_base = 0;
1560         ring->cmpl_base = NULL;
1561         ring->cmpl_dma_base = 0;
1562         atomic_set(&ring->msg_send_count, 0);
1563         atomic_set(&ring->msg_cmpl_count, 0);
1564         spin_lock_init(&ring->lock);
1565         bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);
1566         ring->cmpl_read_offset = 0;
1567     }
1568 
1569     /* FlexRM is capable of 40-bit physical addresses only */
1570     ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
1571     if (ret) {
1572         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1573         if (ret)
1574             goto fail;
1575     }
1576 
1577     /* Create DMA pool for ring BD memory */
1578     mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
1579                     1 << RING_BD_ALIGN_ORDER, 0);
1580     if (!mbox->bd_pool) {
1581         ret = -ENOMEM;
1582         goto fail;
1583     }
1584 
1585     /* Create DMA pool for ring completion memory */
1586     mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
1587                       1 << RING_CMPL_ALIGN_ORDER, 0);
1588     if (!mbox->cmpl_pool) {
1589         ret = -ENOMEM;
1590         goto fail_destroy_bd_pool;
1591     }
1592 
1593     /* Allocate platform MSIs for each ring */
1594     ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
1595                         flexrm_mbox_msi_write);
1596     if (ret)
1597         goto fail_destroy_cmpl_pool;
1598 
1599     /* Save alloced IRQ numbers for each ring */
1600     for (index = 0; index < mbox->num_rings; index++)
1601         mbox->rings[index].irq = msi_get_virq(dev, index);
1602 
1603     /* Check availability of debugfs */
1604     if (!debugfs_initialized())
1605         goto skip_debugfs;
1606 
1607     /* Create debugfs root entry */
1608     mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL);
1609 
1610     /* Create debugfs config entry */
1611     debugfs_create_devm_seqfile(mbox->dev, "config", mbox->root,
1612                     flexrm_debugfs_conf_show);
1613 
1614     /* Create debugfs stats entry */
1615     debugfs_create_devm_seqfile(mbox->dev, "stats", mbox->root,
1616                     flexrm_debugfs_stats_show);
1617 
1618 skip_debugfs:
1619 
1620     /* Initialize mailbox controller */
1621     mbox->controller.txdone_irq = false;
1622     mbox->controller.txdone_poll = false;
1623     mbox->controller.ops = &flexrm_mbox_chan_ops;
1624     mbox->controller.dev = dev;
1625     mbox->controller.num_chans = mbox->num_rings;
1626     mbox->controller.of_xlate = flexrm_mbox_of_xlate;
1627     mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
1628                 sizeof(*mbox->controller.chans), GFP_KERNEL);
1629     if (!mbox->controller.chans) {
1630         ret = -ENOMEM;
1631         goto fail_free_debugfs_root;
1632     }
1633     for (index = 0; index < mbox->num_rings; index++)
1634         mbox->controller.chans[index].con_priv = &mbox->rings[index];
1635 
1636     /* Register mailbox controller */
1637     ret = devm_mbox_controller_register(dev, &mbox->controller);
1638     if (ret)
1639         goto fail_free_debugfs_root;
1640 
1641     dev_info(dev, "registered flexrm mailbox with %d channels\n",
1642             mbox->controller.num_chans);
1643 
1644     return 0;
1645 
1646 fail_free_debugfs_root:
1647     debugfs_remove_recursive(mbox->root);
1648     platform_msi_domain_free_irqs(dev);
1649 fail_destroy_cmpl_pool:
1650     dma_pool_destroy(mbox->cmpl_pool);
1651 fail_destroy_bd_pool:
1652     dma_pool_destroy(mbox->bd_pool);
1653 fail:
1654     return ret;
1655 }
1656 
1657 static int flexrm_mbox_remove(struct platform_device *pdev)
1658 {
1659     struct device *dev = &pdev->dev;
1660     struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
1661 
1662     debugfs_remove_recursive(mbox->root);
1663 
1664     platform_msi_domain_free_irqs(dev);
1665 
1666     dma_pool_destroy(mbox->cmpl_pool);
1667     dma_pool_destroy(mbox->bd_pool);
1668 
1669     return 0;
1670 }
1671 
1672 static const struct of_device_id flexrm_mbox_of_match[] = {
1673     { .compatible = "brcm,iproc-flexrm-mbox", },
1674     {},
1675 };
1676 MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
1677 
1678 static struct platform_driver flexrm_mbox_driver = {
1679     .driver = {
1680         .name = "brcm-flexrm-mbox",
1681         .of_match_table = flexrm_mbox_of_match,
1682     },
1683     .probe      = flexrm_mbox_probe,
1684     .remove     = flexrm_mbox_remove,
1685 };
1686 module_platform_driver(flexrm_mbox_driver);
1687 
1688 MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1689 MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
1690 MODULE_LICENSE("GPL v2");