// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2015 MediaTek Inc.
// Author: Leilk Liu <leilk.liu@mediatek.com>

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>

#define SPI_CFG0_REG 0x0000
#define SPI_CFG1_REG 0x0004
#define SPI_TX_SRC_REG 0x0008
#define SPI_RX_DST_REG 0x000c
#define SPI_TX_DATA_REG 0x0010
#define SPI_RX_DATA_REG 0x0014
#define SPI_CMD_REG 0x0018
#define SPI_STATUS0_REG 0x001c
#define SPI_PAD_SEL_REG 0x0024
#define SPI_CFG2_REG 0x0028
#define SPI_TX_SRC_REG_64 0x002c
#define SPI_RX_DST_REG_64 0x0030
#define SPI_CFG3_IPM_REG 0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET 0
#define SPI_CFG0_SCK_LOW_OFFSET 8
#define SPI_CFG0_CS_HOLD_OFFSET 16
#define SPI_CFG0_CS_SETUP_OFFSET 24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16

#define SPI_CFG1_CS_IDLE_OFFSET 0
#define SPI_CFG1_PACKET_LOOP_OFFSET 8
#define SPI_CFG1_PACKET_LENGTH_OFFSET 16
#define SPI_CFG1_GET_TICK_DLY_OFFSET 29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1 30

#define SPI_CFG1_GET_TICK_DLY_MASK 0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1 0xc0000000

#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET 0
#define SPI_CFG2_SCK_LOW_OFFSET 16

#define SPI_CMD_ACT BIT(0)
#define SPI_CMD_RESUME BIT(1)
#define SPI_CMD_RST BIT(2)
#define SPI_CMD_PAUSE_EN BIT(4)
#define SPI_CMD_DEASSERT BIT(5)
#define SPI_CMD_SAMPLE_SEL BIT(6)
#define SPI_CMD_CS_POL BIT(7)
#define SPI_CMD_CPHA BIT(8)
#define SPI_CMD_CPOL BIT(9)
#define SPI_CMD_RX_DMA BIT(10)
#define SPI_CMD_TX_DMA BIT(11)
#define SPI_CMD_TXMSBF BIT(12)
#define SPI_CMD_RXMSBF BIT(13)
#define SPI_CMD_RX_ENDIAN BIT(14)
#define SPI_CMD_TX_ENDIAN BIT(15)
#define SPI_CMD_FINISH_IE BIT(16)
#define SPI_CMD_PAUSE_IE BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22

#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)

#define PIN_MODE_CFG(x) ((x) / 2)

#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
#define SPI_CFG3_IPM_XMODE_EN BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL 3

#define MTK_SPI_PAUSE_INT_STATUS 0x2

#define MTK_SPI_MAX_FIFO_SIZE 32U
#define MTK_SPI_PACKET_SIZE 1024
#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP SZ_256

#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1

#define MTK_SPI_32BITS_MASK (0xffffffff)

#define DMA_ADDR_EXT_BITS (36)
#define DMA_ADDR_DEF_BITS (32)

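/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel:	Enable pad (pins) selection in SPI controller
 * @must_tx:		Must explicitly send dummy TX bytes to do RX-only transfer
 * @enhance_timing:	Enable adjusting cfg register to enhance time accuracy
 * @dma_ext:		DMA address extension supported
 * @no_need_unprepare:	Don't unprepare the SPI clk during runtime
 * @ipm_design:		Adjust/extend registers to support IPM design IP features
 */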
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};

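/**
 * struct mtk_spi - SPI driver instance
 * @base:		Start address of the SPI controller registers
 * @state:		SPI controller state (idle or paused)
 * @pad_num:		Number of pad_sel entries
 * @pad_sel:		Groups of pins to select
 * @parent_clk:		Parent of sel_clk
 * @sel_clk:		SPI master mux clock
 * @spi_clk:		Peripheral clock
 * @spi_hclk:		AHB bus clock
 * @cur_transfer:	Currently processed SPI transfer
 * @xfer_len:		Number of bytes to transfer
 * @num_xfered:		Number of transferred bytes
 * @tx_sgl:		TX transfer scatterlist
 * @rx_sgl:		RX transfer scatterlist
 * @tx_sgl_len:		Size of TX DMA transfer
 * @rx_sgl_len:		Size of RX DMA transfer
 * @dev_comp:		Device data structure
 * @spi_clk_hz:		Current SPI clock in Hz
 * @spimem_done:	SPI-MEM operation completion
 * @use_spimem:		Enables SPI-MEM
 * @dev:		Device used for SPI-MEM
 * @tx_dma:		DMA start for SPI-MEM TX
 * @rx_dma:		DMA start for SPI-MEM RX
 */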
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

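/*
 * A piece of default chip info unless the platform
 * supplies it.
 */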
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

	/* convert the CS delays from ns into SPI clock cycles */
	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					    << SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}

static int mtk_spi_hw_init(struct spi_master *master,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the MSB/LSB-first bits for TX and RX */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the TX/RX endianness to match the CPU */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);
	return 0;
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(master, msg->spi);
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

static void mtk_spi_prepare_transfer(struct spi_master *master,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}

static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer->speed_hz);
	mtk_spi_setup_packet(master);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(master, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi->cs_gpiod, 0);

	return 0;
}

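/*
 * Interrupt flow: for FIFO transfers, drain the RX FIFO, then either
 * finalize the current transfer or refill the TX FIFO with the next
 * chunk and restart. For DMA transfers, walk the TX/RX scatterlists
 * and program the next segment until both lists are exhausted.
 */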
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	if (!master->can_dma(master, NULL, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf +
					mdata->num_xfered +
					(cnt * 4),
					&reg_val,
					remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}

		len = trans->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(master);

		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
			      trans->tx_buf + mdata->num_xfered, cnt);

		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val,
			       trans->tx_buf + (cnt * 4) + mdata->num_xfered,
			       remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}

		mtk_spi_enable_transfer(master);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}

static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	/*
	 * Budget 8 clock cycles per byte, scaled to milliseconds
	 * (8 * 1000 = 8000); the result is then doubled and given a
	 * one second margin below.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32;
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

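/*
 * Execute a SPI-MEM operation in DMA mode: the opcode, address and
 * dummy bytes (plus TX data, if any) are packed into a bounce buffer
 * and clocked out in a single DMA transfer; RX data lands in a
 * separate DMA-mapped buffer.
 */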
static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->master, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->master);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);

	mtk_spi_enable_transfer(mem->spi->master);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	master = devm_spi_alloc_master(dev, sizeof(*mdata));
	if (!master)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi master\n");

	master->auto_runtime_pm = true;
	master->dev.of_node = dev->of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;
	master->set_cs_timing = mtk_spi_set_hw_cs_timing;
	master->use_gpio_descriptors = true;

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		master->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		master->mode_bits |= SPI_LOOP;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		master->mem_ops = &mtk_spi_mem_ops;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
				"No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
		}
	}

	platform_set_drvdata(pdev, master);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(dev), master);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register irq\n");

	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);

		if (!master->cs_gpiods && master->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
				"cs_gpios not specified and num_chipselect > 1\n");
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register master\n");
	}

	return 0;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_unprepare(mdata->spi_clk);
		clk_unprepare(mdata->spi_hclk);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
#endif

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");