// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPI AVMM support
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>

/*
 * This driver implements the regmap operations for a generic SPI master to
 * access the registers of an SPI slave chip that contains an Avalon bus.
 *
 * The slave chip integrates an "SPI slave to Avalon Master Bridge" (spi-avmm)
 * IP, which converts encoded byte streams from the host into register
 * read/write transactions on its internal Avalon bus. The transfer protocol
 * has three layers: the transaction layer, the packet layer and the physical
 * layer.
 *
 * The transaction layer wraps a register access into a request header (see
 * struct trans_req_header), optionally followed by the values to write. The
 * slave answers with the read data or a response header (see struct
 * trans_resp_header).
 *
 * The packet layer delimits a transaction with Start-Of-Packet (SOP) and
 * End-Of-Packet (EOP) special chars plus a CHANNEL char and channel number,
 * escaping any payload byte that collides with a special char.
 *
 * The physical layer defines an IDLE char, which the slave repeats on the
 * SPI bus while it has no data ready, and its own escape char so that
 * payload bytes equal to IDLE or ESC can still be transferred.
 */
/* packet layer special characters */
#define PKT_SOP 0x7a
#define PKT_EOP 0x7b
#define PKT_CHANNEL 0x7c
#define PKT_ESC 0x7d

/* physical layer special characters */
#define PHY_IDLE 0x4a
#define PHY_ESC 0x4d
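
/*
 * Escape rule (illustrative, derived from the encoding loop below): a payload
 * byte equal to a packet layer special char is replaced by PKT_ESC followed
 * by the byte XORed with 0x20, e.g. 0x7a becomes "7d 5a"; a payload byte
 * equal to a physical layer special char is replaced by PHY_ESC followed by
 * the byte XORed with 0x20, e.g. 0x4a becomes "4d 6a".
 */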

/* transaction layer transfer codes */
#define TRANS_CODE_WRITE 0x0
#define TRANS_CODE_SEQ_WRITE 0x4
#define TRANS_CODE_READ 0x10
#define TRANS_CODE_SEQ_READ 0x14
#define TRANS_CODE_NO_TRANS 0x7f

#define SPI_AVMM_XFER_TIMEOUT (msecs_to_jiffies(200))

/* a slave register address is 32 bits wide */
#define SPI_AVMM_REG_SIZE 4UL
/* a slave register value is 32 bits wide */
#define SPI_AVMM_VAL_SIZE 4UL

/*
 * The read count could be made larger, but 256 values keeps the bridge
 * buffers reasonably small. Writes are limited to one value per transaction.
 */
#define MAX_READ_CNT 256UL
#define MAX_WRITE_CNT 1UL

struct trans_req_header {
	u8 code;
	u8 rsvd;
	__be16 size;
	__be32 addr;
} __packed;

struct trans_resp_header {
	u8 r_code;
	u8 rsvd;
	__be16 size;
} __packed;

#define TRANS_REQ_HD_SIZE (sizeof(struct trans_req_header))
#define TRANS_RESP_HD_SIZE (sizeof(struct trans_resp_header))

/*
 * In a read transaction the host only sends the request header; in a write
 * transaction the header is followed by the values to write. The slave
 * answers a read with the read values and a write with a response header.
 */
#define TRANS_WR_TX_SIZE(n) (TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
#define TRANS_RD_TX_SIZE TRANS_REQ_HD_SIZE
#define TRANS_TX_MAX TRANS_WR_TX_SIZE(MAX_WRITE_CNT)

#define TRANS_RD_RX_SIZE(n) (SPI_AVMM_VAL_SIZE * (n))
#define TRANS_WR_RX_SIZE TRANS_RESP_HD_SIZE
#define TRANS_RX_MAX TRANS_RD_RX_SIZE(MAX_READ_CNT)

/* tx and rx share one transaction layer buffer */
#define TRANS_BUF_SIZE ((TRANS_TX_MAX > TRANS_RX_MAX) ? \
			TRANS_TX_MAX : TRANS_RX_MAX)

/*
 * In the worst case every transaction byte needs escaping (2 bytes each) and
 * the packet layer adds 4 extra bytes (SOP, CHANNEL, channel number and EOP).
 * The result is aligned to 4 so it can be sent as whole 32-bit words.
 */
#define PHY_TX_MAX ALIGN(2 * TRANS_TX_MAX + 4, 4)

/*
 * The phy buffer is only used to build tx packets. On rx, the incoming
 * stream is read and parsed one SPI word at a time straight into the
 * transaction buffer, so a tx-sized phy buffer is sufficient.
 */
#define PHY_BUF_SIZE PHY_TX_MAX

/**
 * struct spi_avmm_bridge - SPI slave to Avalon master bridge context
 *
 * @spi: the SPI slave device that carries the bridge IP.
 * @word_len: number of bytes per SPI word (1 or 4).
 * @trans_len: length of the valid data in trans_buf.
 * @phy_len: length of the valid data in phy_buf.
 * @trans_buf: buffer for transaction layer data.
 * @phy_buf: buffer for physical layer (packet encoded) data.
 * @swap_words: word swapping callback for phy data, NULL if not needed.
 */
struct spi_avmm_bridge {
	struct spi_device *spi;
	unsigned char word_len;
	unsigned int trans_len;
	unsigned int phy_len;
	/* bridge buffers used in translation between the protocol layers */
	char trans_buf[TRANS_BUF_SIZE];
	char phy_buf[PHY_BUF_SIZE];
	void (*swap_words)(char *buf, unsigned int len);
};

/* Swap the byte order within each 32-bit word of the buffer. */
static void br_swap_words_32(char *buf, unsigned int len)
{
	u32 *p = (u32 *)buf;
	unsigned int count;

	count = len / 4;
	while (count--) {
		*p = swab32p(p);
		p++;
	}
}

/*
 * Format the transaction layer byte stream for a register access request
 * into the bridge's trans_buf.
 */
static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
			       u32 *wr_val, u32 count)
{
	struct trans_req_header *header;
	unsigned int trans_len;
	u8 code;
	__le32 *data;
	int i;

	if (is_read) {
		if (count == 1)
			code = TRANS_CODE_READ;
		else
			code = TRANS_CODE_SEQ_READ;
	} else {
		if (count == 1)
			code = TRANS_CODE_WRITE;
		else
			code = TRANS_CODE_SEQ_WRITE;
	}

	header = (struct trans_req_header *)br->trans_buf;
	header->code = code;
	header->rsvd = 0;
	header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
	header->addr = cpu_to_be32(reg);

	trans_len = TRANS_REQ_HD_SIZE;

	if (!is_read) {
		trans_len += SPI_AVMM_VAL_SIZE * count;
		if (trans_len > sizeof(br->trans_buf))
			return -ENOMEM;

		data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);

		for (i = 0; i < count; i++)
			*data++ = cpu_to_le32(*wr_val++);
	}

	/* store the valid trans data length for the packet layer */
	br->trans_len = trans_len;

	return 0;
}

/*
 * Turn the transaction layer byte stream in trans_buf into a packet layer
 * byte stream in phy_buf:
 *
 *  - prepend SOP and a CHANNEL char followed by the channel number (always 0),
 *  - insert EOP before the last transaction byte,
 *  - escape every transaction byte that collides with a packet or physical
 *    layer special char (the ESC char followed by the byte XORed with 0x20),
 *  - when transferring in 32-bit words, insert PHY_IDLE padding before the
 *    EOP so that the packet length is a multiple of the word size while the
 *    EOP and the last byte stay at the very end.
 */
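
/*
 * Worked example (illustrative, derived from the definitions above): a single
 * 32-bit write of 0xcafebabe to register address 0x24 produces the
 * transaction layer stream
 *
 *	00 00 00 04 00 00 00 24 be ba fe ca
 *
 * (code, rsvd, big endian size, big endian address, little endian value).
 * None of these bytes collide with a special char, so the packet layer only
 * prepends SOP, CHANNEL and the channel number and inserts EOP before the
 * last byte:
 *
 *	7a 7c 00 00 00 00 04 00 00 00 24 be ba fe 7b ca
 *
 * The length is already a multiple of 4, so no PHY_IDLE padding is needed
 * when transferring in 32-bit words.
 */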
static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
{
	char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
	unsigned int aligned_phy_len, move_size;
	bool need_esc = false;

	tb = br->trans_buf;
	tb_end = tb + br->trans_len;
	pb = br->phy_buf;
	pb_limit = pb + ARRAY_SIZE(br->phy_buf);

	*pb++ = PKT_SOP;

	/*
	 * The driver does not support multiple channels, so the channel
	 * number is always 0.
	 */
	*pb++ = PKT_CHANNEL;
	*pb++ = 0x0;

	for (; pb < pb_limit && tb < tb_end; pb++) {
		if (need_esc) {
			*pb = *tb++ ^ 0x20;
			need_esc = false;
			continue;
		}

		/* EOP must be inserted before the last valid char */
		if (tb == tb_end - 1 && !pb_eop) {
			*pb = PKT_EOP;
			pb_eop = pb;
			continue;
		}

		/*
		 * insert an ESCAPE char if the data value equals any special
		 * char
		 */
		switch (*tb) {
		case PKT_SOP:
		case PKT_EOP:
		case PKT_CHANNEL:
		case PKT_ESC:
			*pb = PKT_ESC;
			need_esc = true;
			break;
		case PHY_IDLE:
		case PHY_ESC:
			*pb = PHY_ESC;
			need_esc = true;
			break;
		default:
			*pb = *tb++;
			break;
		}
	}

	/* the phy buffer is used up but transaction layer data remains */
	if (tb < tb_end)
		return -ENOMEM;

	/* store the valid phy data length for the SPI transfer */
	br->phy_len = pb - br->phy_buf;

	if (br->word_len == 1)
		return 0;

	/* if the word width is 4 bytes, the phy data must be aligned */
	aligned_phy_len = ALIGN(br->phy_len, br->word_len);
	if (aligned_phy_len > sizeof(br->phy_buf))
		return -ENOMEM;

	if (aligned_phy_len == br->phy_len)
		return 0;

	/* move the EOP and the last byte(s) to the end of the aligned size */
	move_size = pb - pb_eop;
	memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);

	/* fill the gap with PHY_IDLE chars */
	memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);

	/* update the phy data length */
	br->phy_len = aligned_phy_len;

	return 0;
}

/* Transmit the phy buffer on the SPI bus. */
static int br_do_tx(struct spi_avmm_bridge *br)
{
	/* reorder the bytes into word transfer order if necessary */
	if (br->swap_words)
		br->swap_words(br->phy_buf, br->phy_len);

	/* send all the phy data in one transfer */
	return spi_write(br->spi, br->phy_buf, br->phy_len);
}

/*
 * Read the slave's response from the SPI bus and parse it back into a
 * transaction layer byte stream in trans_buf. The slave repeats PHY_IDLE
 * while it has nothing to send, so the driver polls word by word until a
 * complete packet (SOP ... EOP plus the last byte) has been received or the
 * poll times out.
 */
static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
{
	bool eop_found = false, channel_found = false, esc_found = false;
	bool valid_word = false, last_try = false;
	struct device *dev = &br->spi->dev;
	char *pb, *tb_limit, *tb = NULL;
	unsigned long poll_timeout;
	int ret, i;

	tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
	pb = br->phy_buf;
	poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
	while (tb < tb_limit) {
		ret = spi_read(br->spi, pb, br->word_len);
		if (ret)
			return ret;

		/* reorder the word back for parsing if necessary */
		if (br->swap_words)
			br->swap_words(pb, br->word_len);

		valid_word = false;
		for (i = 0; i < br->word_len; i++) {
			/* drop everything before the SOP */
			if (!tb && pb[i] != PKT_SOP)
				continue;

			/* skip PHY_IDLE, the slave has nothing to send yet */
			if (pb[i] == PHY_IDLE)
				continue;

			valid_word = true;

			/*
			 * a CHANNEL char must be followed by the channel
			 * number; only channel 0 is supported
			 */
			if (channel_found) {
				if (pb[i] != 0) {
					dev_err(dev, "%s channel num != 0\n",
						__func__);
					return -EFAULT;
				}

				channel_found = false;
				continue;
			}

			switch (pb[i]) {
			case PKT_SOP:
				/*
				 * If another SOP shows up in the middle of a
				 * packet, restart parsing from scratch.
				 */
				tb = br->trans_buf;
				eop_found = false;
				channel_found = false;
				esc_found = false;
				break;
			case PKT_EOP:
				/*
				 * EOP is not valid right after an ESC or a
				 * previous EOP. The packet ends with the first
				 * normal byte after EOP, which is handled in
				 * the default case below.
				 */
				if (esc_found || eop_found)
					return -EFAULT;

				eop_found = true;
				break;
			case PKT_CHANNEL:
				if (esc_found || eop_found)
					return -EFAULT;

				channel_found = true;
				break;
			case PKT_ESC:
			case PHY_ESC:
				if (esc_found)
					return -EFAULT;

				esc_found = true;
				break;
			default:
				/* record a normal byte in trans_buf */
				if (esc_found) {
					*tb++ = pb[i] ^ 0x20;
					esc_found = false;
				} else {
					*tb++ = pb[i];
				}

				/*
				 * If EOP was already seen, the byte just
				 * recorded was the last one of the packet,
				 * parsing is done.
				 */
				if (eop_found) {
					br->trans_len = tb - br->trans_buf;
					return 0;
				}
			}
		}

		if (valid_word) {
			/* restart the timeout after each valid word */
			poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
			last_try = false;
		} else {
			/*
			 * The timeout may expire while this task was scheduled
			 * out after a spi_read, even though the hardware has
			 * long been ready. So give the slave one extra read
			 * after the timeout before returning -ETIMEDOUT.
			 */
			if (last_try)
				return -ETIMEDOUT;

			if (time_after(jiffies, poll_timeout))
				last_try = true;
		}
	}

	/* trans_buf is full but the packet has not ended */
	dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
		__func__);

	return -EFAULT;
}

/*
 * For a read transaction, the slave returns exactly the requested values in
 * the transaction layer stream, little endian per 32-bit value.
 */
static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
				u32 *val, unsigned int expected_count)
{
	unsigned int i, trans_len = br->trans_len;
	__le32 *data;

	if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
		return -EFAULT;

	data = (__le32 *)br->trans_buf;
	for (i = 0; i < expected_count; i++)
		*val++ = le32_to_cpu(*data++);

	return 0;
}

/*
 * For a write transaction, the slave returns a response header. Its code is
 * the request code with bit 7 set and its size field repeats the number of
 * bytes written.
 */
static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
				unsigned int expected_count)
{
	unsigned int trans_len = br->trans_len;
	struct trans_resp_header *resp;
	u8 code;
	u16 val_len;

	if (trans_len != TRANS_RESP_HD_SIZE)
		return -EFAULT;

	resp = (struct trans_resp_header *)br->trans_buf;

	code = resp->r_code ^ 0x80;
	val_len = be16_to_cpu(resp->size);
	if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
		return -EFAULT;

	/* error out if the response code does not match the request */
	if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
	    (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
		return -EFAULT;

	return 0;
}

static int do_reg_access(void *context, bool is_read, unsigned int reg,
			 unsigned int *value, unsigned int count)
{
	struct spi_avmm_bridge *br = context;
	int ret;

	/* invalidate the bridge buffers first */
	br->trans_len = 0;
	br->phy_len = 0;

	ret = br_trans_tx_prepare(br, is_read, reg, value, count);
	if (ret)
		return ret;

	ret = br_pkt_phy_tx_prepare(br);
	if (ret)
		return ret;

	ret = br_do_tx(br);
	if (ret)
		return ret;

	ret = br_do_rx_and_pkt_phy_parse(br);
	if (ret)
		return ret;

	if (is_read)
		return br_rd_trans_rx_parse(br, value, count);
	else
		return br_wr_trans_rx_parse(br, count);
}

static int regmap_spi_avmm_gather_write(void *context,
					const void *reg_buf, size_t reg_len,
					const void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
			     val_len / SPI_AVMM_VAL_SIZE);
}

static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
{
	if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
		return -EINVAL;

	return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
					    data + SPI_AVMM_REG_SIZE,
					    bytes - SPI_AVMM_REG_SIZE);
}

static int regmap_spi_avmm_read(void *context,
				const void *reg_buf, size_t reg_len,
				void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
			     (val_len / SPI_AVMM_VAL_SIZE));
}

static struct spi_avmm_bridge *
spi_avmm_bridge_ctx_gen(struct spi_device *spi)
{
	struct spi_avmm_bridge *br;

	if (!spi)
		return ERR_PTR(-ENODEV);

	/* try the largest word width (32 bits) first, fall back to 8 bits */
	spi->mode = SPI_MODE_1;
	spi->bits_per_word = 32;
	if (spi_setup(spi)) {
		spi->bits_per_word = 8;
		if (spi_setup(spi))
			return ERR_PTR(-EINVAL);
	}

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br)
		return ERR_PTR(-ENOMEM);

	br->spi = spi;
	br->word_len = spi->bits_per_word / 8;
	if (br->word_len == 4) {
		/*
		 * The protocol requires little endian byte order on the wire,
		 * but 32-bit SPI words go out MSB first, so the driver swaps
		 * the byte order word by word when the word width is 4 bytes.
		 */
		br->swap_words = br_swap_words_32;
	}

	return br;
}

static void spi_avmm_bridge_ctx_free(void *context)
{
	kfree(context);
}

static const struct regmap_bus regmap_spi_avmm_bus = {
	.write = regmap_spi_avmm_write,
	.gather_write = regmap_spi_avmm_gather_write,
	.read = regmap_spi_avmm_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
	.free_context = spi_avmm_bridge_ctx_free,
};

struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
				      const struct regmap_config *config,
				      struct lock_class_key *lock_key,
				      const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
			    bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);

struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
					   const struct regmap_config *config,
					   struct lock_class_key *lock_key,
					   const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
				 bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
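
/*
 * Usage sketch (illustrative only): a client SPI driver would normally
 * create its regmap through the devm_regmap_init_spi_avmm() wrapper declared
 * in <linux/regmap.h>, which expands to __devm_regmap_init_spi_avmm() above.
 * The regmap_config values and register address below are hypothetical
 * examples, not requirements of this bridge.
 *
 *	static const struct regmap_config sample_cfg = {
 *		.reg_bits = 32,
 *		.reg_stride = 4,
 *		.val_bits = 32,
 *	};
 *
 *	static int sample_probe(struct spi_device *spi)
 *	{
 *		struct regmap *map;
 *		unsigned int val;
 *		int ret;
 *
 *		map = devm_regmap_init_spi_avmm(spi, &sample_cfg);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *
 *		ret = regmap_write(map, 0x24, 0xcafebabe);
 *		if (ret)
 *			return ret;
 *
 *		return regmap_read(map, 0x24, &val);
 *	}
 */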

MODULE_LICENSE("GPL v2");