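/*
 * Shared code for the MMC/SDHC controller found on Cavium SoCs,
 * used by the OCTEON and ThunderX platform drivers.
 */
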
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>

#include "cavium.h"

const char *cvm_mmc_irq_names[] = {
	"MMC Buffer",
	"MMC Command",
	"MMC DMA",
	"MMC Command Error",
	"MMC DMA Error",
	"MMC Switch",
	"MMC Switch Error",
	"MMC DMA int Fifo",
	"MMC DMA int",
};
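
/*
 * The host hardware assumes that each command has a fixed command and
 * response type. Those defaults are correct for MMC devices, but non-MMC
 * devices like SD cards use command and response types the hardware does
 * not expect. The types can be overridden by supplying an XOR value that
 * is applied to the hardware's assumption; cvm_mmc_get_cr_mods() below
 * computes that XOR from this per-opcode table and the flags passed in
 * from the MMC core.
 */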
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
	{0, 0},		/* CMD0 */
	{0, 3},		/* CMD1 */
	{0, 2},		/* CMD2 */
	{0, 1},		/* CMD3 */
	{0, 0},		/* CMD4 */
	{0, 1},		/* CMD5 */
	{0, 1},		/* CMD6 */
	{0, 1},		/* CMD7 */
	{1, 1},		/* CMD8 */
	{0, 2},		/* CMD9 */
	{0, 2},		/* CMD10 */
	{1, 1},		/* CMD11 */
	{0, 1},		/* CMD12 */
	{0, 1},		/* CMD13 */
	{1, 1},		/* CMD14 */
	{0, 0},		/* CMD15 */
	{0, 1},		/* CMD16 */
	{1, 1},		/* CMD17 */
	{1, 1},		/* CMD18 */
	{3, 1},		/* CMD19 */
	{2, 1},		/* CMD20 */
	{0, 0},		/* CMD21 */
	{0, 0},		/* CMD22 */
	{0, 1},		/* CMD23 */
	{2, 1},		/* CMD24 */
	{2, 1},		/* CMD25 */
	{2, 1},		/* CMD26 */
	{2, 1},		/* CMD27 */
	{0, 1},		/* CMD28 */
	{0, 1},		/* CMD29 */
	{1, 1},		/* CMD30 */
	{1, 1},		/* CMD31 */
	{0, 0},		/* CMD32 */
	{0, 0},		/* CMD33 */
	{0, 0},		/* CMD34 */
	{0, 1},		/* CMD35 */
	{0, 1},		/* CMD36 */
	{0, 0},		/* CMD37 */
	{0, 1},		/* CMD38 */
	{0, 4},		/* CMD39 */
	{0, 5},		/* CMD40 */
	{0, 0},		/* CMD41 */
	{2, 1},		/* CMD42 */
	{0, 0},		/* CMD43 */
	{0, 0},		/* CMD44 */
	{0, 0},		/* CMD45 */
	{0, 0},		/* CMD46 */
	{0, 0},		/* CMD47 */
	{0, 0},		/* CMD48 */
	{0, 0},		/* CMD49 */
	{0, 0},		/* CMD50 */
	{0, 0},		/* CMD51 */
	{0, 0},		/* CMD52 */
	{0, 0},		/* CMD53 */
	{0, 0},		/* CMD54 */
	{0, 1},		/* CMD55 */
	{0xff, 0xff},	/* CMD56 */
	{0, 0},		/* CMD57 */
	{0, 0},		/* CMD58 */
	{0, 0},		/* CMD59 */
	{0, 0},		/* CMD60 */
	{0, 0},		/* CMD61 */
	{0, 0},		/* CMD62 */
	{0, 0}		/* CMD63 */
};

static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
	struct cvm_mmc_cr_type *cr;
	u8 hardware_ctype, hardware_rtype;
	u8 desired_ctype = 0, desired_rtype = 0;
	struct cvm_mmc_cr_mods r;

	cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
	hardware_ctype = cr->ctype;
	hardware_rtype = cr->rtype;
	if (cmd->opcode == MMC_GEN_CMD)
		hardware_ctype = (cmd->arg & 1) ? 1 : 2;

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_ADTC:
		desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
		break;
	case MMC_CMD_AC:
	case MMC_CMD_BC:
	case MMC_CMD_BCR:
		desired_ctype = 0;
		break;
	}

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		desired_rtype = 0;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		desired_rtype = 1;
		break;
	case MMC_RSP_R2:
		desired_rtype = 2;
		break;
	case MMC_RSP_R3:
		desired_rtype = 3;
		break;
	}
	r.ctype_xor = desired_ctype ^ hardware_ctype;
	r.rtype_xor = desired_rtype ^ hardware_rtype;
	return r;
}

static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 emm_switch;

	emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
	if (emm_switch & MIO_EMM_SWITCH_ERR0)
		dev_err(host->dev, "Switch power class error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (emm_switch & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}

static void clear_bus_id(u64 *reg)
{
	u64 bus_id_mask = GENMASK_ULL(61, 60);

	*reg &= ~bus_id_mask;
}

static void set_bus_id(u64 *reg, int bus_id)
{
	clear_bus_id(reg);
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}

static int get_bus_id(u64 reg)
{
	return FIELD_GET(GENMASK_ULL(61, 60), reg);
}

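/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */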
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int retries = 100;
	u64 rsp_sts;
	int bus_id;

	/*
	 * Modes setting is only taken from slot 0. Work around that
	 * hardware issue by first switching to slot 0.
	 */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* wait for the switch to finish */
	do {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	} while (--retries);

	check_switch_errors(host);
}

static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
	/* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
	u64 match = 0x3001070fffffffffull;

	return (slot->cached_switch & match) != (new_val & match);
}

static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
	u64 timeout;

	if (!slot->clock)
		return;

	if (ns)
		timeout = (slot->clock * ns) / NSEC_PER_SEC;
	else
		timeout = (slot->clock * 850ull) / 1000ull;	/* ~850 ms default */
	writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
}

static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
	set_bus_id(&emm_switch, slot->bus_id);

	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;

	msleep(20);

	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}

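/* Switch to another slot if needed */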
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}

static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}

static void do_write(struct mmc_request *req)
{
	req->data->bytes_xfered = req->data->blocks * req->data->blksz;
	req->data->error = 0;
}

static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}

static int get_dma_dir(struct mmc_data *data)
{
	return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}

static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return finish_dma_sg(host, data);
	else
		return finish_dma_single(host, data);
}

static int check_status(u64 rsp_sts)
{
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
	    rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
		return -EILSEQ;
	if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
	    rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}

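/* Try to clean up a failed DMA. */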
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}

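/* One interrupt handler per host controller, serving all of its bus slots. */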
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	u64 emm_int, rsp_sts;
	bool host_done;

	if (host->need_irq_handler_lock)
		spin_lock(&host->irq_handler_lock);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock(&host->irq_handler_lock);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}

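/*
 * Program DMA_CFG and, if needed, DMA_ADR.
 * Returns 0 on error, the DMA address otherwise.
 */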
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}

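/*
 * Queue the complete scatter-gather list into the DMA FIFO.
 * Returns 0 on error, 1 otherwise.
 */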
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
	if (count > 16)
		goto error;

	/* Enable the FIFO by removing the CLR bit */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		/* Program the DMA address */
		addr = sg_dma_address(sg);
		if (addr & 7)
			goto error;
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		/*
		 * Hosts with scatter-gather support have a separate
		 * register for the DMA address, so there is no need to
		 * check host->big_dma_addr here.
		 */
		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

		/* enable interrupts on the last element */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
				       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/*
		 * The write copies the address and the command to the
		 * FIFO and increments the FIFO's COUNT field.
		 */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
		pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n",
			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
	}

	/*
	 * Unlike prepare_dma_single, do not return the address here, as
	 * a single address makes no sense for scatter-gather. The DMA
	 * fixup is only required on models without scatter-gather
	 * support, so that is not a problem.
	 */
	return 1;

error:
	WARN_ON_ONCE(1);
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	/* Disable the FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}

static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return prepare_dma_sg(host, data);
	else
		return prepare_dma_single(host, data);
}

static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);

	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u multi: %d\n",
		 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}

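/*
 * Issue a multi-block transfer via the hardware DMA engine. Called from
 * cvm_mmc_request() for MMC_READ/WRITE_MULTIPLE_BLOCK commands.
 */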
static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev, "Error: %s no data\n", __func__);
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power-on reset value.
	 */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}

static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}

static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto inc from offset zero, dbuf zero */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}

static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only
	 * a single user of the bus at a time. The lock is acquired on
	 * all entry points from the MMC layer.
	 *
	 * For requests the lock is only released after the completion
	 * interrupt!
	 */
	host->acquire_bus(host);

	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else
		set_wdog(slot, 0);

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

	writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}

static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert the bus width to the hardware definition */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		bus_width = 2;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = 1;
		break;
	case MMC_BUS_WIDTH_1:
		bus_width = 0;
		break;
	}

	/* DDR is available for 4/8 bit bus width */
	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
		bus_width |= 4;

	/* Change the clock frequency */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);

	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}

static const struct mmc_host_ops cvm_mmc_ops = {
	.request = cvm_mmc_request,
	.set_ios = cvm_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
};

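/* Clamp the requested clock to the host limits and record it in the slot. */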
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
	struct mmc_host *mmc = slot->mmc;

	clock = min(clock, mmc->f_max);
	clock = max(clock, mmc->f_min);
	slot->clock = clock;
}

static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program the initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);

	/* Make the changes take effect on this bus slot. */
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set the watchdog timeout value and the default reset value for
	 * the status mask register. Finally, program the relative card
	 * address to 1 to match the cached RCA used for CMD7.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}

static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
		return ret;
	}

	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %pOF\n", node);
		return -EINVAL;
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;
	/*
	 * Legacy firmware may have no regulator entry; fall back to a
	 * hard-coded voltage to get a sane OCR.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set the bus width */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/* Set the maximum and minimum frequency */
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
	mmc->f_min = 400000;

	/* Sampling register settings; clock period in picoseconds */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}

int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
	struct cvm_mmc_slot *slot;
	struct mmc_host *mmc;
	int ret, id;

	mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->mmc = mmc;
	slot->host = host;

	ret = cvm_mmc_of_parse(dev, slot);
	if (ret < 0)
		goto error;
	id = ret;

	/* Set up host parameters */
	mmc->ops = &cvm_mmc_ops;

	/*
	 * With only a 3.3v supply we cannot support any of the UHS
	 * modes. We do support the high-speed DDR modes up to 52MHz.
	 */
	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;

	if (host->use_sg)
		mmc->max_segs = 16;
	else
		mmc->max_segs = 1;

	/* The DMA size field can address up to 8 MB */
	mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
				  dma_get_max_seg_size(host->dev));
	mmc->max_req_size = mmc->max_seg_size;
	/* External DMA is in 512 byte blocks */
	mmc->max_blk_size = 512;
	/* The DMA block count field is 15 bits */
	mmc->max_blk_count = 32767;

	slot->clock = mmc->f_min;
	slot->bus_id = id;
	slot->cached_rca = 1;

	host->acquire_bus(host);
	host->slot[id] = slot;
	cvm_mmc_switch_to(slot);
	cvm_mmc_init_lowlevel(slot);
	host->release_bus(host);

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(dev, "mmc_add_host() returned %d\n", ret);
		slot->host->slot[id] = NULL;
		goto error;
	}
	return 0;

error:
	mmc_free_host(slot->mmc);
	return ret;
}

int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
	mmc_remove_host(slot->mmc);
	slot->host->slot[slot->bus_id] = NULL;
	mmc_free_host(slot->mmc);
	return 0;
}