Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 //
0003 // Register map access API - SPI AVMM support
0004 //
0005 // Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
0006 
0007 #include <linux/module.h>
0008 #include <linux/regmap.h>
0009 #include <linux/spi/spi.h>
0010 
0011 /*
0012  * This driver implements the regmap operations for a generic SPI
0013  * master to access the registers of the spi slave chip which has an
 * Avalon bus in it.
0015  *
0016  * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
0017  * in the spi slave chip. The IP acts as a bridge to convert encoded streams of
0018  * bytes from the host to the internal register read/write on Avalon bus. In
0019  * order to issue register access requests to the slave chip, the host should
0020  * send formatted bytes that conform to the transfer protocol.
0021  * The transfer protocol contains 3 layers: transaction layer, packet layer
0022  * and physical layer.
0023  *
0024  * Reference Documents could be found at:
0025  * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
0026  *
0027  * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
0028  * introduction to the protocol.
0029  *
0030  * Chapter "Avalon Packets to Transactions Converter Core" describes
0031  * the transaction layer.
0032  *
0033  * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
0034  * describes the packet layer.
0035  *
0036  * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
0037  * physical layer.
0038  *
0039  *
0040  * When host issues a regmap read/write, the driver will transform the request
0041  * to byte stream layer by layer. It formats the register addr, value and
0042  * length to the transaction layer request, then converts the request to packet
0043  * layer bytes stream and then to physical layer bytes stream. Finally the
0044  * driver sends the formatted byte stream over SPI bus to the slave chip.
0045  *
0046  * The spi-avmm IP on the slave chip decodes the byte stream and initiates
0047  * register read/write on its internal Avalon bus, and then encodes the
0048  * response to byte stream and sends back to host.
0049  *
0050  * The driver receives the byte stream, reverses the 3 layers transformation,
0051  * and finally gets the response value (read out data for register read,
0052  * successful written size for register write).
0053  */
0054 
0055 #define PKT_SOP         0x7a
0056 #define PKT_EOP         0x7b
0057 #define PKT_CHANNEL     0x7c
0058 #define PKT_ESC         0x7d
0059 
0060 #define PHY_IDLE        0x4a
0061 #define PHY_ESC         0x4d
0062 
0063 #define TRANS_CODE_WRITE    0x0
0064 #define TRANS_CODE_SEQ_WRITE    0x4
0065 #define TRANS_CODE_READ     0x10
0066 #define TRANS_CODE_SEQ_READ 0x14
0067 #define TRANS_CODE_NO_TRANS 0x7f
0068 
0069 #define SPI_AVMM_XFER_TIMEOUT   (msecs_to_jiffies(200))
0070 
0071 /* slave's register addr is 32 bits */
0072 #define SPI_AVMM_REG_SIZE       4UL
0073 /* slave's register value is 32 bits */
0074 #define SPI_AVMM_VAL_SIZE       4UL
0075 
0076 /*
 * The max rx size could be larger, but to limit buffer consumption we cap
 * a single transfer at 1KB.
0079  */
0080 #define MAX_READ_CNT        256UL
0081 #define MAX_WRITE_CNT       1UL
0082 
0083 struct trans_req_header {
0084     u8 code;
0085     u8 rsvd;
0086     __be16 size;
0087     __be32 addr;
0088 } __packed;
0089 
0090 struct trans_resp_header {
0091     u8 r_code;
0092     u8 rsvd;
0093     __be16 size;
0094 } __packed;
0095 
0096 #define TRANS_REQ_HD_SIZE   (sizeof(struct trans_req_header))
0097 #define TRANS_RESP_HD_SIZE  (sizeof(struct trans_resp_header))
0098 
0099 /*
0100  * In transaction layer,
0101  * the write request format is: Transaction request header + data
0102  * the read request format is: Transaction request header
0103  * the write response format is: Transaction response header
0104  * the read response format is: pure data, no Transaction response header
0105  */
0106 #define TRANS_WR_TX_SIZE(n) (TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
0107 #define TRANS_RD_TX_SIZE    TRANS_REQ_HD_SIZE
0108 #define TRANS_TX_MAX        TRANS_WR_TX_SIZE(MAX_WRITE_CNT)
0109 
0110 #define TRANS_RD_RX_SIZE(n) (SPI_AVMM_VAL_SIZE * (n))
0111 #define TRANS_WR_RX_SIZE    TRANS_RESP_HD_SIZE
0112 #define TRANS_RX_MAX        TRANS_RD_RX_SIZE(MAX_READ_CNT)
0113 
0114 /* tx & rx share one transaction layer buffer */
0115 #define TRANS_BUF_SIZE      ((TRANS_TX_MAX > TRANS_RX_MAX) ?    \
0116                  TRANS_TX_MAX : TRANS_RX_MAX)
0117 
0118 /*
0119  * In tx phase, the host prepares all the phy layer bytes of a request in the
0120  * phy buffer and sends them in a batch.
0121  *
0122  * The packet layer and physical layer defines several special chars for
0123  * various purpose, when a transaction layer byte hits one of these special
0124  * chars, it should be escaped. The escape rule is, "Escape char first,
0125  * following the byte XOR'ed with 0x20".
0126  *
0127  * This macro defines the max possible length of the phy data. In the worst
0128  * case, all transaction layer bytes need to be escaped (so the data length
0129  * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
0130  * we should make sure the length is aligned to SPI BPW.
0131  */
0132 #define PHY_TX_MAX      ALIGN(2 * TRANS_TX_MAX + 4, 4)
0133 
0134 /*
0135  * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from slave, the max
0136  * length of the rx bit stream is unpredictable. So the driver reads the words
0137  * one by one, and parses each word immediately into transaction layer buffer.
0138  * Only one word length of phy buffer is used for rx.
0139  */
0140 #define PHY_BUF_SIZE        PHY_TX_MAX
0141 
0142 /**
0143  * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
0144  *
0145  * @spi: spi slave associated with this bridge.
0146  * @word_len: bytes of word for spi transfer.
0147  * @trans_len: length of valid data in trans_buf.
0148  * @phy_len: length of valid data in phy_buf.
0149  * @trans_buf: the bridge buffer for transaction layer data.
0150  * @phy_buf: the bridge buffer for physical layer data.
0151  * @swap_words: the word swapping cb for phy data. NULL if not needed.
0152  *
0153  * As a device's registers are implemented on the AVMM bus address space, it
0154  * requires the driver to issue formatted requests to spi slave to AVMM bus
0155  * master bridge to perform register access.
0156  */
struct spi_avmm_bridge {
	struct spi_device *spi;
	/* bytes per SPI word: 1 (BPW 8) or 4 (BPW 32), see ctx_gen */
	unsigned char word_len;
	unsigned int trans_len;
	unsigned int phy_len;
	/* bridge buffer used in translation between protocol layers */
	char trans_buf[TRANS_BUF_SIZE];
	char phy_buf[PHY_BUF_SIZE];
	void (*swap_words)(char *buf, unsigned int len);
};
0167 
0168 static void br_swap_words_32(char *buf, unsigned int len)
0169 {
0170     u32 *p = (u32 *)buf;
0171     unsigned int count;
0172 
0173     count = len / 4;
0174     while (count--) {
0175         *p = swab32p(p);
0176         p++;
0177     }
0178 }
0179 
0180 /*
0181  * Format transaction layer data in br->trans_buf according to the register
0182  * access request, Store valid transaction layer data length in br->trans_len.
0183  */
0184 static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
0185                    u32 *wr_val, u32 count)
0186 {
0187     struct trans_req_header *header;
0188     unsigned int trans_len;
0189     u8 code;
0190     __le32 *data;
0191     int i;
0192 
0193     if (is_read) {
0194         if (count == 1)
0195             code = TRANS_CODE_READ;
0196         else
0197             code = TRANS_CODE_SEQ_READ;
0198     } else {
0199         if (count == 1)
0200             code = TRANS_CODE_WRITE;
0201         else
0202             code = TRANS_CODE_SEQ_WRITE;
0203     }
0204 
0205     header = (struct trans_req_header *)br->trans_buf;
0206     header->code = code;
0207     header->rsvd = 0;
0208     header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
0209     header->addr = cpu_to_be32(reg);
0210 
0211     trans_len = TRANS_REQ_HD_SIZE;
0212 
0213     if (!is_read) {
0214         trans_len += SPI_AVMM_VAL_SIZE * count;
0215         if (trans_len > sizeof(br->trans_buf))
0216             return -ENOMEM;
0217 
0218         data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);
0219 
0220         for (i = 0; i < count; i++)
0221             *data++ = cpu_to_le32(*wr_val++);
0222     }
0223 
0224     /* Store valid trans data length for next layer */
0225     br->trans_len = trans_len;
0226 
0227     return 0;
0228 }
0229 
/*
 * Convert transaction layer data (in br->trans_buf) to phy layer data, store
 * them in br->phy_buf. Pad the phy_buf aligned with SPI's BPW. Store valid phy
 * layer data length in br->phy_len.
 *
 * phy_buf len should be aligned with SPI's BPW. Spare bytes should be padded
 * with PHY_IDLE, then the slave will just drop them.
 *
 * The driver will not simply pad 4a at the tail. The concern is that the
 * driver does not store MISO data during the tx phase, so if the driver pads
 * 4a at the tail, it is possible that the slave is fast enough to respond
 * during the padding time. As a result those rx bytes are lost. In the
 * following case, 7a, 7c, 00 will be lost.
 * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
 * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
 *
 * So the driver moves EOP and the bytes after EOP to the end of the aligned
 * size, then fills the hole with PHY_IDLE. As follows:
 * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
 * after pad  ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
 * Then, since the slave does not get the entire packet before the tx phase is
 * over, it cannot respond to anything either.
 */
static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
{
	char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
	unsigned int aligned_phy_len, move_size;
	bool need_esc = false;

	tb = br->trans_buf;
	tb_end = tb + br->trans_len;
	pb = br->phy_buf;
	pb_limit = pb + ARRAY_SIZE(br->phy_buf);

	*pb++ = PKT_SOP;

	/*
	 * The driver doesn't support multiple channels so the channel number
	 * is always 0.
	 */
	*pb++ = PKT_CHANNEL;
	*pb++ = 0x0;

	for (; pb < pb_limit && tb < tb_end; pb++) {
		if (need_esc) {
			/* Previous output byte was an ESC: emit XOR'ed data. */
			*pb = *tb++ ^ 0x20;
			need_esc = false;
			continue;
		}

		/* EOP should be inserted before the last valid char */
		if (tb == tb_end - 1 && !pb_eop) {
			*pb = PKT_EOP;
			pb_eop = pb;
			continue;
		}

		/*
		 * insert an ESCAPE char if the data value equals any special
		 * char.
		 */
		switch (*tb) {
		case PKT_SOP:
		case PKT_EOP:
		case PKT_CHANNEL:
		case PKT_ESC:
			*pb = PKT_ESC;
			need_esc = true;
			break;
		case PHY_IDLE:
		case PHY_ESC:
			*pb = PHY_ESC;
			need_esc = true;
			break;
		default:
			*pb = *tb++;
			break;
		}
	}

	/* The phy buffer is used out but transaction layer data remains */
	if (tb < tb_end)
		return -ENOMEM;

	/* Store valid phy data length for spi transfer */
	br->phy_len = pb - br->phy_buf;

	/* No padding needed when each SPI word is a single byte. */
	if (br->word_len == 1)
		return 0;

	/* Do phy buf padding if word_len > 1 byte. */
	aligned_phy_len = ALIGN(br->phy_len, br->word_len);
	if (aligned_phy_len > sizeof(br->phy_buf))
		return -ENOMEM;

	if (aligned_phy_len == br->phy_len)
		return 0;

	/* move EOP and bytes after EOP to the end of aligned size */
	move_size = pb - pb_eop;
	memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);

	/* fill the hole with PHY_IDLEs */
	memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);

	/* update the phy data length */
	br->phy_len = aligned_phy_len;

	return 0;
}
0340 
0341 /*
0342  * In tx phase, the slave only returns PHY_IDLE (0x4a). So the driver will
0343  * ignore rx in tx phase.
0344  */
0345 static int br_do_tx(struct spi_avmm_bridge *br)
0346 {
0347     /* reorder words for spi transfer */
0348     if (br->swap_words)
0349         br->swap_words(br->phy_buf, br->phy_len);
0350 
0351     /* send all data in phy_buf  */
0352     return spi_write(br->spi, br->phy_buf, br->phy_len);
0353 }
0354 
/*
 * This function reads the rx byte stream from SPI word by word and converts
 * it to transaction layer data in br->trans_buf. It also stores the length
 * of rx transaction layer data in br->trans_len.
 *
 * The slave may send an unknown number of PHY_IDLEs in rx phase, so we cannot
 * prepare a fixed length buffer to receive all of the rx data in a batch. We
 * have to read word by word and convert them to transaction layer data at
 * once.
 */
static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
{
	bool eop_found = false, channel_found = false, esc_found = false;
	bool valid_word = false, last_try = false;
	struct device *dev = &br->spi->dev;
	char *pb, *tb_limit, *tb = NULL;
	unsigned long poll_timeout;
	int ret, i;

	tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
	pb = br->phy_buf;
	poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
	/* tb stays NULL until the first SOP byte is seen. */
	while (tb < tb_limit) {
		ret = spi_read(br->spi, pb, br->word_len);
		if (ret)
			return ret;

		/* reorder the word back */
		if (br->swap_words)
			br->swap_words(pb, br->word_len);

		valid_word = false;
		for (i = 0; i < br->word_len; i++) {
			/* drop everything before first SOP */
			if (!tb && pb[i] != PKT_SOP)
				continue;

			/* drop PHY_IDLE */
			if (pb[i] == PHY_IDLE)
				continue;

			valid_word = true;

			/*
			 * We don't support multiple channels, so error out if
			 * a non-zero channel number is found.
			 */
			if (channel_found) {
				if (pb[i] != 0) {
					dev_err(dev, "%s channel num != 0\n",
						__func__);
					return -EFAULT;
				}

				channel_found = false;
				continue;
			}

			switch (pb[i]) {
			case PKT_SOP:
				/*
				 * reset the parsing if a second SOP appears.
				 */
				tb = br->trans_buf;
				eop_found = false;
				channel_found = false;
				esc_found = false;
				break;
			case PKT_EOP:
				/*
				 * No special char is expected after ESC char.
				 * No special char (except ESC & PHY_IDLE) is
				 * expected after EOP char.
				 *
				 * The special chars are all dropped.
				 */
				if (esc_found || eop_found)
					return -EFAULT;

				eop_found = true;
				break;
			case PKT_CHANNEL:
				if (esc_found || eop_found)
					return -EFAULT;

				channel_found = true;
				break;
			case PKT_ESC:
			case PHY_ESC:
				if (esc_found)
					return -EFAULT;

				esc_found = true;
				break;
			default:
				/* Record the normal byte in trans_buf. */
				if (esc_found) {
					*tb++ = pb[i] ^ 0x20;
					esc_found = false;
				} else {
					*tb++ = pb[i];
				}

				/*
				 * We get the last normal byte after EOP, it is
				 * time we finish. Normally the function should
				 * return here.
				 */
				if (eop_found) {
					br->trans_len = tb - br->trans_buf;
					return 0;
				}
			}
		}

		if (valid_word) {
			/* update poll timeout when we get valid word */
			poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
			last_try = false;
		} else {
			/*
			 * We time out when rx stays invalid for some time.
			 * But it is possible we were scheduled out for a long
			 * time after a spi_read, so the SW timeout fires when
			 * we are scheduled back in even though the HW may
			 * have been ready long ago. So do one extra read: if
			 * it yields a valid word we continue rx, otherwise a
			 * real HW issue has happened.
			 */
			if (last_try)
				return -ETIMEDOUT;

			if (time_after(jiffies, poll_timeout))
				last_try = true;
		}
	}

	/*
	 * We have used out all transfer layer buffer but cannot find the end
	 * of the byte stream.
	 */
	dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
		__func__);

	return -EFAULT;
}
0501 
0502 /*
0503  * For read transactions, the avmm bus will directly return register values
0504  * without transaction response header.
0505  */
0506 static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
0507                 u32 *val, unsigned int expected_count)
0508 {
0509     unsigned int i, trans_len = br->trans_len;
0510     __le32 *data;
0511 
0512     if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
0513         return -EFAULT;
0514 
0515     data = (__le32 *)br->trans_buf;
0516     for (i = 0; i < expected_count; i++)
0517         *val++ = le32_to_cpu(*data++);
0518 
0519     return 0;
0520 }
0521 
0522 /*
0523  * For write transactions, the slave will return a transaction response
0524  * header.
0525  */
0526 static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
0527                 unsigned int expected_count)
0528 {
0529     unsigned int trans_len = br->trans_len;
0530     struct trans_resp_header *resp;
0531     u8 code;
0532     u16 val_len;
0533 
0534     if (trans_len != TRANS_RESP_HD_SIZE)
0535         return -EFAULT;
0536 
0537     resp = (struct trans_resp_header *)br->trans_buf;
0538 
0539     code = resp->r_code ^ 0x80;
0540     val_len = be16_to_cpu(resp->size);
0541     if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
0542         return -EFAULT;
0543 
0544     /* error out if the trans code doesn't align with the val size */
0545     if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
0546         (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
0547         return -EFAULT;
0548 
0549     return 0;
0550 }
0551 
0552 static int do_reg_access(void *context, bool is_read, unsigned int reg,
0553              unsigned int *value, unsigned int count)
0554 {
0555     struct spi_avmm_bridge *br = context;
0556     int ret;
0557 
0558     /* invalidate bridge buffers first */
0559     br->trans_len = 0;
0560     br->phy_len = 0;
0561 
0562     ret = br_trans_tx_prepare(br, is_read, reg, value, count);
0563     if (ret)
0564         return ret;
0565 
0566     ret = br_pkt_phy_tx_prepare(br);
0567     if (ret)
0568         return ret;
0569 
0570     ret = br_do_tx(br);
0571     if (ret)
0572         return ret;
0573 
0574     ret = br_do_rx_and_pkt_phy_parse(br);
0575     if (ret)
0576         return ret;
0577 
0578     if (is_read)
0579         return br_rd_trans_rx_parse(br, value, count);
0580     else
0581         return br_wr_trans_rx_parse(br, count);
0582 }
0583 
0584 static int regmap_spi_avmm_gather_write(void *context,
0585                     const void *reg_buf, size_t reg_len,
0586                     const void *val_buf, size_t val_len)
0587 {
0588     if (reg_len != SPI_AVMM_REG_SIZE)
0589         return -EINVAL;
0590 
0591     if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
0592         return -EINVAL;
0593 
0594     return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
0595                  val_len / SPI_AVMM_VAL_SIZE);
0596 }
0597 
0598 static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
0599 {
0600     if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
0601         return -EINVAL;
0602 
0603     return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
0604                         data + SPI_AVMM_REG_SIZE,
0605                         bytes - SPI_AVMM_REG_SIZE);
0606 }
0607 
0608 static int regmap_spi_avmm_read(void *context,
0609                 const void *reg_buf, size_t reg_len,
0610                 void *val_buf, size_t val_len)
0611 {
0612     if (reg_len != SPI_AVMM_REG_SIZE)
0613         return -EINVAL;
0614 
0615     if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
0616         return -EINVAL;
0617 
0618     return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
0619                  (val_len / SPI_AVMM_VAL_SIZE));
0620 }
0621 
0622 static struct spi_avmm_bridge *
0623 spi_avmm_bridge_ctx_gen(struct spi_device *spi)
0624 {
0625     struct spi_avmm_bridge *br;
0626 
0627     if (!spi)
0628         return ERR_PTR(-ENODEV);
0629 
0630     /* Only support BPW == 8 or 32 now. Try 32 BPW first. */
0631     spi->mode = SPI_MODE_1;
0632     spi->bits_per_word = 32;
0633     if (spi_setup(spi)) {
0634         spi->bits_per_word = 8;
0635         if (spi_setup(spi))
0636             return ERR_PTR(-EINVAL);
0637     }
0638 
0639     br = kzalloc(sizeof(*br), GFP_KERNEL);
0640     if (!br)
0641         return ERR_PTR(-ENOMEM);
0642 
0643     br->spi = spi;
0644     br->word_len = spi->bits_per_word / 8;
0645     if (br->word_len == 4) {
0646         /*
0647          * The protocol requires little endian byte order but MSB
0648          * first. So driver needs to swap the byte order word by word
0649          * if word length > 1.
0650          */
0651         br->swap_words = br_swap_words_32;
0652     }
0653 
0654     return br;
0655 }
0656 
/* Release a bridge context created by spi_avmm_bridge_ctx_gen(). */
static void spi_avmm_bridge_ctx_free(void *context)
{
	struct spi_avmm_bridge *br = context;

	kfree(br);
}
0661 
static const struct regmap_bus regmap_spi_avmm_bus = {
	.write = regmap_spi_avmm_write,
	.gather_write = regmap_spi_avmm_gather_write,
	.read = regmap_spi_avmm_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
	/* bounded by the bridge buffer sizing (MAX_READ_CNT/MAX_WRITE_CNT) */
	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
	.free_context = spi_avmm_bridge_ctx_free,
};
0672 
0673 struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
0674                       const struct regmap_config *config,
0675                       struct lock_class_key *lock_key,
0676                       const char *lock_name)
0677 {
0678     struct spi_avmm_bridge *bridge;
0679     struct regmap *map;
0680 
0681     bridge = spi_avmm_bridge_ctx_gen(spi);
0682     if (IS_ERR(bridge))
0683         return ERR_CAST(bridge);
0684 
0685     map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
0686                 bridge, config, lock_key, lock_name);
0687     if (IS_ERR(map)) {
0688         spi_avmm_bridge_ctx_free(bridge);
0689         return ERR_CAST(map);
0690     }
0691 
0692     return map;
0693 }
0694 EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);
0695 
0696 struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
0697                        const struct regmap_config *config,
0698                        struct lock_class_key *lock_key,
0699                        const char *lock_name)
0700 {
0701     struct spi_avmm_bridge *bridge;
0702     struct regmap *map;
0703 
0704     bridge = spi_avmm_bridge_ctx_gen(spi);
0705     if (IS_ERR(bridge))
0706         return ERR_CAST(bridge);
0707 
0708     map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
0709                  bridge, config, lock_key, lock_name);
0710     if (IS_ERR(map)) {
0711         spi_avmm_bridge_ctx_free(bridge);
0712         return ERR_CAST(map);
0713     }
0714 
0715     return map;
0716 }
0717 EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
0718 
0719 MODULE_LICENSE("GPL v2");