// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
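/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/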
static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

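/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.
 */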
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(mmc_dev(host->mmc));
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(mmc_dev(host->mmc));
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

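	/*
	 * A change to the card detect bits indicates a change in present
	 * state, refer sdhci_set_card_detection(). A card detect interrupt
	 * might have been missed while the host controller was being reset,
	 * so trigger a rescan to check.
	 */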
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

/* The LED class driver drives the LED, so these are no-ops here */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

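/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/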
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

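	/*
	 * If the data buffers are already mapped, reuse the previous
	 * mapping and its sg count.
	 */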
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(mmc_dev(host->mmc),
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

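	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */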
	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		/*
		 * The block layer forces a minimum segment size of PAGE_SIZE,
		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
		 * multiple descriptors, noting that the ADMA table is sized
		 * for 4KiB chunks anyway, so it will be big enough.
		 */
		while (len > host->max_adma) {
			int n = 32 * 1024; /* 32KiB */

			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
			addr += n;
			len -= n;
		}

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = false;

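	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use the maximum. The hardware may
	 * take longer to time out, but that's much better than having a
	 * too-short timeout value.
	 */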
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return host->max_timeout_count;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return host->max_timeout_count;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return host->max_timeout_count;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

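	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */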
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count > host->max_timeout_count) {
			if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
				DBG("Too large timeout 0x%x requested for CMD%d!\n",
				    count, cmd->opcode);
			count = host->max_timeout_count;
			*too_big = true;
			break;
		}
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);

void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}

static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

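		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */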
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}

static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}

static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							  struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

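	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g sdhci-of-dwmshc) expect it.
	 */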
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

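	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */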
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
			WARN_ON(host->deferred_cmd);
			host->deferred_cmd = cmd->mrq->cmd;
		}
	} else {
		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_MMC_HS:
	case MMC_TIMING_SD_HS:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
		break;
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
			if (host->clk_mul &&
			    (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
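/*
 * Example of the SDHCI 3.0 base clock path above: with max_clk = 200 MHz and
 * a requested clock of 50 MHz, the divisor loop stops at div = 4, giving
 * real_div = 4 and an actual clock of 200 MHz / 4 = 50 MHz; the value
 * written to the divider field is div >> 1 = 2.
 */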

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 150 ms */
	timeout = ktime_add_ms(ktime_get(), 150);
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		if (clk & SDHCI_CLOCK_INT_STABLE)
			break;
		if (timedout) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
		clk |= SDHCI_CLOCK_PLL_EN;
		clk &= ~SDHCI_CLOCK_INT_STABLE;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		/* Wait max 150 ms */
		timeout = ktime_add_ms(ktime_get(), 150);
		while (1) {
			bool timedout = ktime_after(ktime_get(), timeout);

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			if (clk & SDHCI_CLOCK_INT_STABLE)
				break;
			if (timedout) {
				pr_err("%s: PLL clock never stabilised.\n",
				       mmc_hostname(host->mmc));
				sdhci_err_stats_inc(host, CTRL_TIMEOUT);
				sdhci_dumpregs(host);
				return;
			}
			udelay(10);
		}
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. This is not
		 * an error to rejoice over, so do not whine.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
		/*
		 * 3.4 ~ 3.6V are valid only for those platforms where it's
		 * known that the voltage range is supported by hardware.
		 */
		case MMC_VDD_34_35:
		case MMC_VDD_35_36:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before they
		 * can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

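/*
 * Some controllers need to configure a valid bus voltage on their power
 * register regardless of whether an external regulator is taking care of
 * power supply. This helper function takes care of it if set as the
 * controller's sdhci_ops.set_power callback.
 */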
void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
				     unsigned char mode,
				     unsigned short vdd)
{
	if (!IS_ERR(host->mmc->supply.vmmc)) {
		struct mmc_host *mmc = host->mmc;

		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
	}
	sdhci_set_power_noreg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);

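/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/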
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	bool present;

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	if (sdhci_present_error(host, mrq->cmd, present))
		goto out_finish;

	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;

	if (!sdhci_send_command_retry(host, cmd, flags))
		goto out_finish;

	spin_unlock_irqrestore(&host->lock, flags);

	return;

out_finish:
	sdhci_finish_mrq(host, mrq);
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_request);

int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_command *cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);

	if (sdhci_present_error(host, mrq->cmd, true)) {
		sdhci_finish_mrq(host, mrq);
		goto out_finish;
	}

	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;

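	/*
	 * The HSQ may send a command in interrupt context without polling
	 * the busy signaling, which means we should return BUSY if controller
	 * has not released inhibit bits to allow HSQ trying to send request
	 * again in non-atomic context. So we should not finish this request
	 * here.
	 */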
2205 if (!sdhci_send_command(host, cmd))
2206 ret = -EBUSY;
2207 else
2208 sdhci_led_activate(host);
2209
2210 out_finish:
2211 spin_unlock_irqrestore(&host->lock, flags);
2212 return ret;
2213 }
2214 EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2215
2216 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2217 {
2218 u8 ctrl;
2219
2220 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2221 if (width == MMC_BUS_WIDTH_8) {
2222 ctrl &= ~SDHCI_CTRL_4BITBUS;
2223 ctrl |= SDHCI_CTRL_8BITBUS;
2224 } else {
2225 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2226 ctrl &= ~SDHCI_CTRL_8BITBUS;
2227 if (width == MMC_BUS_WIDTH_4)
2228 ctrl |= SDHCI_CTRL_4BITBUS;
2229 else
2230 ctrl &= ~SDHCI_CTRL_4BITBUS;
2231 }
2232 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2233 }
2234 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2235
2236 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2237 {
2238 u16 ctrl_2;
2239
2240 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2241
2242 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2243 if ((timing == MMC_TIMING_MMC_HS200) ||
2244 (timing == MMC_TIMING_UHS_SDR104))
2245 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2246 else if (timing == MMC_TIMING_UHS_SDR12)
2247 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2248 else if (timing == MMC_TIMING_UHS_SDR25)
2249 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2250 else if (timing == MMC_TIMING_UHS_SDR50)
2251 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2252 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2253 (timing == MMC_TIMING_MMC_DDR52))
2254 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2255 else if (timing == MMC_TIMING_MMC_HS400)
2256 ctrl_2 |= SDHCI_CTRL_HS400;
2257 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2258 }
2259 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2260
2261 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2262 {
2263 struct sdhci_host *host = mmc_priv(mmc);
2264 u8 ctrl;
2265
2266 if (ios->power_mode == MMC_POWER_UNDEFINED)
2267 return;
2268
2269 if (host->flags & SDHCI_DEVICE_DEAD) {
2270 if (!IS_ERR(mmc->supply.vmmc) &&
2271 ios->power_mode == MMC_POWER_OFF)
2272 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2273 return;
2274 }
2275
2276
2277
2278
2279
2280 if (ios->power_mode == MMC_POWER_OFF) {
2281 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2282 sdhci_reinit(host);
2283 }
2284
2285 if (host->version >= SDHCI_SPEC_300 &&
2286 (ios->power_mode == MMC_POWER_UP) &&
2287 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2288 sdhci_enable_preset_value(host, false);
2289
2290 if (!ios->clock || ios->clock != host->clock) {
2291 host->ops->set_clock(host, ios->clock);
2292 host->clock = ios->clock;
2293
2294 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2295 host->clock) {
2296 host->timeout_clk = mmc->actual_clock ?
2297 mmc->actual_clock / 1000 :
2298 host->clock / 1000;
2299 mmc->max_busy_timeout =
2300 host->ops->get_max_timeout_count ?
2301 host->ops->get_max_timeout_count(host) :
2302 1 << 27;
2303 mmc->max_busy_timeout /= host->timeout_clk;
2304 }
2305 }
2306
2307 if (host->ops->set_power)
2308 host->ops->set_power(host, ios->power_mode, ios->vdd);
2309 else
2310 sdhci_set_power(host, ios->power_mode, ios->vdd);
2311
2312 if (host->ops->platform_send_init_74_clocks)
2313 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2314
2315 host->ops->set_bus_width(host, ios->bus_width);
2316
2317 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2318
2319 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2320 if (ios->timing == MMC_TIMING_SD_HS ||
2321 ios->timing == MMC_TIMING_MMC_HS ||
2322 ios->timing == MMC_TIMING_MMC_HS400 ||
2323 ios->timing == MMC_TIMING_MMC_HS200 ||
2324 ios->timing == MMC_TIMING_MMC_DDR52 ||
2325 ios->timing == MMC_TIMING_UHS_SDR50 ||
2326 ios->timing == MMC_TIMING_UHS_SDR104 ||
2327 ios->timing == MMC_TIMING_UHS_DDR50 ||
2328 ios->timing == MMC_TIMING_UHS_SDR25)
2329 ctrl |= SDHCI_CTRL_HISPD;
2330 else
2331 ctrl &= ~SDHCI_CTRL_HISPD;
2332 }
2333
2334 if (host->version >= SDHCI_SPEC_300) {
2335 u16 clk, ctrl_2;
2336
2337 if (!host->preset_enabled) {
2338 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2339 /*
2340 * We only need to set Driver Strength if the
2341 * preset value enable is not set.
2342 */
2343 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2344 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2345 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2346 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2347 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2348 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2349 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2350 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2351 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2352 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2353 else {
2354 pr_warn("%s: invalid driver type, default to driver type B\n",
2355 mmc_hostname(mmc));
2356 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2357 }
2358
2359 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2360 } else {
2361 /*
2362 * According to SDHC Spec v3.00, if the Preset Value
2363 * Enable in the Host Control 2 register is set, we
2364 * need to reset SD Clock Enable before changing High
2365 * Speed Enable to avoid generating clock glitches.
2366 */
2367
2368 /* Reset SD Clock Enable */
2369 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2370 clk &= ~SDHCI_CLOCK_CARD_EN;
2371 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2372
2373 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2374
2375 /* Re-enable SD Clock */
2376 host->ops->set_clock(host, host->clock);
2377 }
2378
2379 /* Reset SD Clock Enable */
2380 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2381 clk &= ~SDHCI_CLOCK_CARD_EN;
2382 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2383
2384 host->ops->set_uhs_signaling(host, ios->timing);
2385 host->timing = ios->timing;
2386
2387 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2388 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2389 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2390 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2391 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2392 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2393 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2394 u16 preset;
2395
2396 sdhci_enable_preset_value(host, true);
2397 preset = sdhci_get_preset_value(host);
2398 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2399 preset);
2400 }
2401
2402 /* Re-enable SD Clock */
2403 host->ops->set_clock(host, host->clock);
2404 } else
2405 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2406
2407 /*
2408 * Some (ENE) controllers misbehave on some ios operations,
2409 * signalling timeout and CRC errors even on CMD0. Resetting
2410 * on each ios seems to solve the problem.
2411 */
2412 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2413 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2414 }
2415 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2416
2417 static int sdhci_get_cd(struct mmc_host *mmc)
2418 {
2419 struct sdhci_host *host = mmc_priv(mmc);
2420 int gpio_cd = mmc_gpio_get_cd(mmc);
2421
2422 if (host->flags & SDHCI_DEVICE_DEAD)
2423 return 0;
2424
2425 /* If nonremovable, assume that the card is always present */
2426 if (!mmc_card_is_removable(mmc))
2427 return 1;
2428
2429 /*
2430 * Try slot gpio detect; if defined, it takes precedence
2431 * over the built-in controller functionality.
2432 */
2433 if (gpio_cd >= 0)
2434 return !!gpio_cd;
2435
2436 /* If polling, assume that the card is always present */
2437 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2438 return 1;
2439
2440 /* Host native card detect */
2441 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2442 }
2443
2444 int sdhci_get_cd_nogpio(struct mmc_host *mmc)
2445 {
2446 struct sdhci_host *host = mmc_priv(mmc);
2447 unsigned long flags;
2448 int ret = 0;
2449
2450 spin_lock_irqsave(&host->lock, flags);
2451
2452 if (host->flags & SDHCI_DEVICE_DEAD)
2453 goto out;
2454
2455 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2456 out:
2457 spin_unlock_irqrestore(&host->lock, flags);
2458
2459 return ret;
2460 }
2461 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
2462
2463 static int sdhci_check_ro(struct sdhci_host *host)
2464 {
2465 unsigned long flags;
2466 int is_readonly;
2467
2468 spin_lock_irqsave(&host->lock, flags);
2469
2470 if (host->flags & SDHCI_DEVICE_DEAD)
2471 is_readonly = 0;
2472 else if (host->ops->get_ro)
2473 is_readonly = host->ops->get_ro(host);
2474 else if (mmc_can_gpio_ro(host->mmc))
2475 is_readonly = mmc_gpio_get_ro(host->mmc);
2476 else
2477 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2478 & SDHCI_WRITE_PROTECT);
2479
2480 spin_unlock_irqrestore(&host->lock, flags);
2481
2482 /* This quirk needs to be replaced by a callback-function later */
2483 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2484 !is_readonly : is_readonly;
2485 }
2486
2487 #define SAMPLE_COUNT 5
2488
2489 static int sdhci_get_ro(struct mmc_host *mmc)
2490 {
2491 struct sdhci_host *host = mmc_priv(mmc);
2492 int i, ro_count;
2493
2494 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2495 return sdhci_check_ro(host);
2496
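/*
 * With SDHCI_QUIRK_UNSTABLE_RO_DETECT the write-protect state is sampled
 * several times, 30 ms apart, and reported as read-only only if more than
 * half of the samples agree.
 */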
2497 ro_count = 0;
2498 for (i = 0; i < SAMPLE_COUNT; i++) {
2499 if (sdhci_check_ro(host)) {
2500 if (++ro_count > SAMPLE_COUNT / 2)
2501 return 1;
2502 }
2503 msleep(30);
2504 }
2505 return 0;
2506 }
2507
2508 static void sdhci_hw_reset(struct mmc_host *mmc)
2509 {
2510 struct sdhci_host *host = mmc_priv(mmc);
2511
2512 if (host->ops && host->ops->hw_reset)
2513 host->ops->hw_reset(host);
2514 }
2515
2516 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2517 {
2518 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2519 if (enable)
2520 host->ier |= SDHCI_INT_CARD_INT;
2521 else
2522 host->ier &= ~SDHCI_INT_CARD_INT;
2523
2524 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2525 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2526 }
2527 }
2528
2529 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2530 {
2531 struct sdhci_host *host = mmc_priv(mmc);
2532 unsigned long flags;
2533
2534 if (enable)
2535 pm_runtime_get_noresume(mmc_dev(mmc));
2536
2537 spin_lock_irqsave(&host->lock, flags);
2538 sdhci_enable_sdio_irq_nolock(host, enable);
2539 spin_unlock_irqrestore(&host->lock, flags);
2540
2541 if (!enable)
2542 pm_runtime_put_noidle(mmc_dev(mmc));
2543 }
2544 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2545
2546 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2547 {
2548 struct sdhci_host *host = mmc_priv(mmc);
2549 unsigned long flags;
2550
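/* Re-arm the card interrupt that sdhci_irq() masked off before signalling */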
2551 spin_lock_irqsave(&host->lock, flags);
2552 sdhci_enable_sdio_irq_nolock(host, true);
2553 spin_unlock_irqrestore(&host->lock, flags);
2554 }
2555
2556 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2557 struct mmc_ios *ios)
2558 {
2559 struct sdhci_host *host = mmc_priv(mmc);
2560 u16 ctrl;
2561 int ret;
2562
2563 /*
2564 * Signal Voltage Switching is only applicable for Host Controllers
2565 * v3.00 and above.
2566 */
2567 if (host->version < SDHCI_SPEC_300)
2568 return 0;
2569
2570 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2571
2572 switch (ios->signal_voltage) {
2573 case MMC_SIGNAL_VOLTAGE_330:
2574 if (!(host->flags & SDHCI_SIGNALING_330))
2575 return -EINVAL;
2576 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2577 ctrl &= ~SDHCI_CTRL_VDD_180;
2578 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2579
2580 if (!IS_ERR(mmc->supply.vqmmc)) {
2581 ret = mmc_regulator_set_vqmmc(mmc, ios);
2582 if (ret < 0) {
2583 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2584 mmc_hostname(mmc));
2585 return -EIO;
2586 }
2587 }
2588
2589 usleep_range(5000, 5500);
2590
2591 /* 3.3V regulator output should be stable within 5 ms */
2592 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2593 if (!(ctrl & SDHCI_CTRL_VDD_180))
2594 return 0;
2595
2596 pr_warn("%s: 3.3V regulator output did not become stable\n",
2597 mmc_hostname(mmc));
2598
2599 return -EAGAIN;
2600 case MMC_SIGNAL_VOLTAGE_180:
2601 if (!(host->flags & SDHCI_SIGNALING_180))
2602 return -EINVAL;
2603 if (!IS_ERR(mmc->supply.vqmmc)) {
2604 ret = mmc_regulator_set_vqmmc(mmc, ios);
2605 if (ret < 0) {
2606 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2607 mmc_hostname(mmc));
2608 return -EIO;
2609 }
2610 }
2611
2612 /*
2613 * Enable 1.8V Signal Enable in the Host Control2
2614 * register
2615 */
2616 ctrl |= SDHCI_CTRL_VDD_180;
2617 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2618
2619 /* Some controllers need to do more when switching */
2620 if (host->ops->voltage_switch)
2621 host->ops->voltage_switch(host);
2622
2623 /* 1.8V regulator output should be stable within 5 ms */
2624 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2625 if (ctrl & SDHCI_CTRL_VDD_180)
2626 return 0;
2627
2628 pr_warn("%s: 1.8V regulator output did not become stable\n",
2629 mmc_hostname(mmc));
2630
2631 return -EAGAIN;
2632 case MMC_SIGNAL_VOLTAGE_120:
2633 if (!(host->flags & SDHCI_SIGNALING_120))
2634 return -EINVAL;
2635 if (!IS_ERR(mmc->supply.vqmmc)) {
2636 ret = mmc_regulator_set_vqmmc(mmc, ios);
2637 if (ret < 0) {
2638 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2639 mmc_hostname(mmc));
2640 return -EIO;
2641 }
2642 }
2643 return 0;
2644 default:
2645 /* No signal voltage switch required */
2646 return 0;
2647 }
2648 }
2649 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2650
2651 static int sdhci_card_busy(struct mmc_host *mmc)
2652 {
2653 struct sdhci_host *host = mmc_priv(mmc);
2654 u32 present_state;
2655
2656 /* Check whether DAT[0] is 0 */
2657 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2658
2659 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2660 }
2661
2662 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2663 {
2664 struct sdhci_host *host = mmc_priv(mmc);
2665 unsigned long flags;
2666
2667 spin_lock_irqsave(&host->lock, flags);
2668 host->flags |= SDHCI_HS400_TUNING;
2669 spin_unlock_irqrestore(&host->lock, flags);
2670
2671 return 0;
2672 }
2673
2674 void sdhci_start_tuning(struct sdhci_host *host)
2675 {
2676 u16 ctrl;
2677
2678 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2679 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2680 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2681 ctrl |= SDHCI_CTRL_TUNED_CLK;
2682 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2683
2684 /*
2685 * As per the Host Controller spec v3.00, the tuning command
2686 * generates a Buffer Read Ready interrupt, so enable that.
2687 *
2688 * Note: The spec clearly says that when the tuning sequence
2689 * is being performed, the controller does not generate
2690 * interrupts other than Buffer Read Ready interrupt. But
2691 * to make sure we don't hit a controller bug, we _only_
2692 * enable Buffer Read Ready interrupt here.
2693 */
2694 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2695 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2696 }
2697 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2698
2699 void sdhci_end_tuning(struct sdhci_host *host)
2700 {
2701 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2702 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2703 }
2704 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2705
2706 void sdhci_reset_tuning(struct sdhci_host *host)
2707 {
2708 u16 ctrl;
2709
2710 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2711 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2712 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2713 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2714 }
2715 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2716
2717 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2718 {
2719 sdhci_reset_tuning(host);
2720
2721 sdhci_do_reset(host, SDHCI_RESET_CMD);
2722 sdhci_do_reset(host, SDHCI_RESET_DATA);
2723
2724 sdhci_end_tuning(host);
2725
2726 mmc_send_abort_tuning(host->mmc, opcode);
2727 }
2728 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2729
2730 /*
2731 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit:
2732 * the SDHCI tuning command does not have a data payload (or rather the
2733 * hardware handles it automatically) so mmc_send_tuning() would return
2734 * -EIO. Also the tuning command interrupt setup differs from other
2735 * commands and there is no timeout interrupt, so special handling is needed.
2736 */
2737 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2738 {
2739 struct mmc_host *mmc = host->mmc;
2740 struct mmc_command cmd = {};
2741 struct mmc_request mrq = {};
2742 unsigned long flags;
2743 u32 b = host->sdma_boundary;
2744
2745 spin_lock_irqsave(&host->lock, flags);
2746
2747 cmd.opcode = opcode;
2748 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2749 cmd.mrq = &mrq;
2750
2751 mrq.cmd = &cmd;
2752
2753 /*
2754 * In response to the tuning command the card sends a 64-byte tuning
2755 * block (128 bytes for HS200 on an 8-bit bus), so set the block size here.
2756 */
2757 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2758 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2759 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2760 else
2761 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2762
2763 /*
2764 * The tuning block is sent by the card to the host controller,
2765 * so we set the TRNS_READ bit in the Transfer Mode register.
2766 * This also takes away the need for setting the Argument
2767 * register.
2768 */
2769 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2770
2771 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2772 spin_unlock_irqrestore(&host->lock, flags);
2773 host->tuning_done = 0;
2774 return;
2775 }
2776
2777 host->cmd = NULL;
2778
2779 sdhci_del_timer(host, &mrq);
2780
2781 host->tuning_done = 0;
2782
2783 spin_unlock_irqrestore(&host->lock, flags);
2784
2785 /* Wait for Buffer Read Ready interrupt */
2786 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2787 msecs_to_jiffies(50));
2788
2789 }
2790 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2791
2792 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2793 {
2794 int i;
2795
2796 /*
2797 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
2798 * of loops reaches tuning loop count.
2799 */
2800 for (i = 0; i < host->tuning_loop_count; i++) {
2801 u16 ctrl;
2802
2803 sdhci_send_tuning(host, opcode);
2804
2805 if (!host->tuning_done) {
2806 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2807 mmc_hostname(host->mmc));
2808 sdhci_abort_tuning(host, opcode);
2809 return -ETIMEDOUT;
2810 }
2811
2812 /* Spec does not require a delay between tuning cycles */
2813 if (host->tuning_delay > 0)
2814 mdelay(host->tuning_delay);
2815
2816 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2817 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2818 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2819 return 0; /* Success! */
2820 break;
2821 }
2822
2823 }
2824
2825 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2826 mmc_hostname(host->mmc));
2827 sdhci_reset_tuning(host);
2828 return -EAGAIN;
2829 }
2830
2831 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2832 {
2833 struct sdhci_host *host = mmc_priv(mmc);
2834 int err = 0;
2835 unsigned int tuning_count = 0;
2836 bool hs400_tuning;
2837
2838 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2839
2840 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2841 tuning_count = host->tuning_count;
2842
2843 /*
2844 * The Host Controller needs tuning in case of SDR104 and DDR50
2845 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2846 * the Capabilities register.
2847 * If the Host Controller supports the HS200 mode then the
2848 * tuning function has to be executed.
2849 */
2850 switch (host->timing) {
2851 /* HS400 tuning is done in HS200 mode */
2852 case MMC_TIMING_MMC_HS400:
2853 err = -EINVAL;
2854 goto out;
2855
2856 case MMC_TIMING_MMC_HS200:
2857 /*
2858 * Periodic re-tuning for HS400 is not expected to be needed, so
2859 * disable it here.
2860 */
2861 if (hs400_tuning)
2862 tuning_count = 0;
2863 break;
2864
2865 case MMC_TIMING_UHS_SDR104:
2866 case MMC_TIMING_UHS_DDR50:
2867 break;
2868
2869 case MMC_TIMING_UHS_SDR50:
2870 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2871 break;
2872 fallthrough;
2873
2874 default:
2875 goto out;
2876 }
2877
2878 if (host->ops->platform_execute_tuning) {
2879 err = host->ops->platform_execute_tuning(host, opcode);
2880 goto out;
2881 }
2882
2883 mmc->retune_period = tuning_count;
2884
2885 if (host->tuning_delay < 0)
2886 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2887
2888 sdhci_start_tuning(host);
2889
2890 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2891
2892 sdhci_end_tuning(host);
2893 out:
2894 host->flags &= ~SDHCI_HS400_TUNING;
2895
2896 return err;
2897 }
2898 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2899
2900 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2901 {
2902 /* Host Controller v3.00 defines preset value registers */
2903 if (host->version < SDHCI_SPEC_300)
2904 return;
2905
2906 /*
2907 * We only enable or disable Preset Value if they are not already
2908 * enabled or disabled respectively. Otherwise, we bail out.
2909 */
2910 if (host->preset_enabled != enable) {
2911 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2912
2913 if (enable)
2914 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2915 else
2916 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2917
2918 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2919
2920 if (enable)
2921 host->flags |= SDHCI_PV_ENABLED;
2922 else
2923 host->flags &= ~SDHCI_PV_ENABLED;
2924
2925 host->preset_enabled = enable;
2926 }
2927 }
2928
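/*
 * ->pre_req()/->post_req() let the core map the next request's DMA buffers
 * while the current transfer is still in flight; data->host_cookie tracks
 * whether a mapping is currently held.
 */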
2929 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2930 int err)
2931 {
2932 struct mmc_data *data = mrq->data;
2933
2934 if (data->host_cookie != COOKIE_UNMAPPED)
2935 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
2936 mmc_get_dma_dir(data));
2937
2938 data->host_cookie = COOKIE_UNMAPPED;
2939 }
2940
2941 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2942 {
2943 struct sdhci_host *host = mmc_priv(mmc);
2944
2945 mrq->data->host_cookie = COOKIE_UNMAPPED;
2946
2947 /*
2948 * No pre-mapping in the pre hook if we're using the bounce buffer,
2949 * for that we would need two bounce buffers since one buffer is
2950 * in flight when this is getting called.
2951 */
2952 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2953 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2954 }
2955
2956 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2957 {
2958 if (host->data_cmd) {
2959 host->data_cmd->error = err;
2960 sdhci_finish_mrq(host, host->data_cmd->mrq);
2961 }
2962
2963 if (host->cmd) {
2964 host->cmd->error = err;
2965 sdhci_finish_mrq(host, host->cmd->mrq);
2966 }
2967 }
2968
2969 static void sdhci_card_event(struct mmc_host *mmc)
2970 {
2971 struct sdhci_host *host = mmc_priv(mmc);
2972 unsigned long flags;
2973 int present;
2974
2975 /* First check if client has provided their own card event */
2976 if (host->ops->card_event)
2977 host->ops->card_event(host);
2978
2979 present = mmc->ops->get_cd(mmc);
2980
2981 spin_lock_irqsave(&host->lock, flags);
2982
2983 /* Check sdhci_has_requests() first in case we are runtime suspended */
2984 if (sdhci_has_requests(host) && !present) {
2985 pr_err("%s: Card removed during transfer!\n",
2986 mmc_hostname(mmc));
2987 pr_err("%s: Resetting controller.\n",
2988 mmc_hostname(mmc));
2989
2990 sdhci_do_reset(host, SDHCI_RESET_CMD);
2991 sdhci_do_reset(host, SDHCI_RESET_DATA);
2992
2993 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2994 }
2995
2996 spin_unlock_irqrestore(&host->lock, flags);
2997 }
2998
2999 static const struct mmc_host_ops sdhci_ops = {
3000 .request = sdhci_request,
3001 .post_req = sdhci_post_req,
3002 .pre_req = sdhci_pre_req,
3003 .set_ios = sdhci_set_ios,
3004 .get_cd = sdhci_get_cd,
3005 .get_ro = sdhci_get_ro,
3006 .card_hw_reset = sdhci_hw_reset,
3007 .enable_sdio_irq = sdhci_enable_sdio_irq,
3008 .ack_sdio_irq = sdhci_ack_sdio_irq,
3009 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
3010 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
3011 .execute_tuning = sdhci_execute_tuning,
3012 .card_event = sdhci_card_event,
3013 .card_busy = sdhci_card_busy,
3014 };
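/*
 * sdhci_alloc_host() copies this template into host->mmc_host_ops so that
 * glue drivers can override individual callbacks before sdhci_add_host().
 */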
3015
3016 /*****************************************************************************\
3017 *                                                                           *
3018 * Request done                                                              *
3019 *                                                                           *
3020 \*****************************************************************************/
3021
3022 static bool sdhci_request_done(struct sdhci_host *host)
3023 {
3024 unsigned long flags;
3025 struct mmc_request *mrq;
3026 int i;
3027
3028 spin_lock_irqsave(&host->lock, flags);
3029
3030 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3031 mrq = host->mrqs_done[i];
3032 if (mrq)
3033 break;
3034 }
3035
3036 if (!mrq) {
3037 spin_unlock_irqrestore(&host->lock, flags);
3038 return true;
3039 }
3040
3041 /*
3042 * The controller needs a reset of internal state machines
3043 * every time the end of the command is detected.
3044 */
3045 if (sdhci_needs_reset(host, mrq)) {
3046 /*
3047 * Do not finish until command and data lines are available for
3048 * reset. Note there can only be one other mrq, so it cannot also
3049 * be in mrqs_done, otherwise host->cmd and host->data_cmd would
3050 * both be null.
3051 */
3052 if (host->cmd || host->data_cmd) {
3053 spin_unlock_irqrestore(&host->lock, flags);
3054 return true;
3055 }
3056
3057 /* Some controllers need this kick or reset won't work here */
3058 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3059 /* This is to force an update */
3060 host->ops->set_clock(host, host->clock);
3061
3062 /*
3063 * Spec says we should do both at the same time, but Ricoh
3064 * controllers do not seem to like that.
3065 */
3066 sdhci_do_reset(host, SDHCI_RESET_CMD);
3067 sdhci_do_reset(host, SDHCI_RESET_DATA);
3068
3069 host->pending_reset = false;
3070 }
3071
3072 /*
3073 * Always unmap the data buffers if they were mapped by
3074 * sdhci_prepare_data() whenever we finish with a request.
3075 * This avoids leaking DMA mappings on error.
3076 */
3077 if (host->flags & SDHCI_REQ_USE_DMA) {
3078 struct mmc_data *data = mrq->data;
3079
3080 if (host->use_external_dma && data &&
3081 (mrq->cmd->error || data->error)) {
3082 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3083
3084 host->mrqs_done[i] = NULL;
3085 spin_unlock_irqrestore(&host->lock, flags);
3086 dmaengine_terminate_sync(chan);
3087 spin_lock_irqsave(&host->lock, flags);
3088 sdhci_set_mrq_done(host, mrq);
3089 }
3090
3091 if (data && data->host_cookie == COOKIE_MAPPED) {
3092 if (host->bounce_buffer) {
3093 /*
3094 * On reads, copy the bounced data into the
3095 * sglist
3096 */
3097 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3098 unsigned int length = data->bytes_xfered;
3099
3100 if (length > host->bounce_buffer_size) {
3101 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3102 mmc_hostname(host->mmc),
3103 host->bounce_buffer_size,
3104 data->bytes_xfered);
3105
3106 length = host->bounce_buffer_size;
3107 }
3108 dma_sync_single_for_cpu(
3109 mmc_dev(host->mmc),
3110 host->bounce_addr,
3111 host->bounce_buffer_size,
3112 DMA_FROM_DEVICE);
3113 sg_copy_from_buffer(data->sg,
3114 data->sg_len,
3115 host->bounce_buffer,
3116 length);
3117 } else {
3118 /* No copying, just switch ownership */
3119 dma_sync_single_for_cpu(
3120 mmc_dev(host->mmc),
3121 host->bounce_addr,
3122 host->bounce_buffer_size,
3123 mmc_get_dma_dir(data));
3124 }
3125 } else {
3126 /* Unmap the raw data */
3127 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3128 data->sg_len,
3129 mmc_get_dma_dir(data));
3130 }
3131 data->host_cookie = COOKIE_UNMAPPED;
3132 }
3133 }
3134
3135 host->mrqs_done[i] = NULL;
3136
3137 spin_unlock_irqrestore(&host->lock, flags);
3138
3139 if (host->ops->request_done)
3140 host->ops->request_done(host, mrq);
3141 else
3142 mmc_request_done(host->mmc, mrq);
3143
3144 return false;
3145 }
3146
3147 static void sdhci_complete_work(struct work_struct *work)
3148 {
3149 struct sdhci_host *host = container_of(work, struct sdhci_host,
3150 complete_work);
3151
3152 while (!sdhci_request_done(host))
3153 ;
3154 }
3155
3156 static void sdhci_timeout_timer(struct timer_list *t)
3157 {
3158 struct sdhci_host *host;
3159 unsigned long flags;
3160
3161 host = from_timer(host, t, timer);
3162
3163 spin_lock_irqsave(&host->lock, flags);
3164
3165 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3166 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3167 mmc_hostname(host->mmc));
3168 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3169 sdhci_dumpregs(host);
3170
3171 host->cmd->error = -ETIMEDOUT;
3172 sdhci_finish_mrq(host, host->cmd->mrq);
3173 }
3174
3175 spin_unlock_irqrestore(&host->lock, flags);
3176 }
3177
3178 static void sdhci_timeout_data_timer(struct timer_list *t)
3179 {
3180 struct sdhci_host *host;
3181 unsigned long flags;
3182
3183 host = from_timer(host, t, data_timer);
3184
3185 spin_lock_irqsave(&host->lock, flags);
3186
3187 if (host->data || host->data_cmd ||
3188 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3189 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3190 mmc_hostname(host->mmc));
3191 sdhci_err_stats_inc(host, REQ_TIMEOUT);
3192 sdhci_dumpregs(host);
3193
3194 if (host->data) {
3195 host->data->error = -ETIMEDOUT;
3196 __sdhci_finish_data(host, true);
3197 queue_work(host->complete_wq, &host->complete_work);
3198 } else if (host->data_cmd) {
3199 host->data_cmd->error = -ETIMEDOUT;
3200 sdhci_finish_mrq(host, host->data_cmd->mrq);
3201 } else {
3202 host->cmd->error = -ETIMEDOUT;
3203 sdhci_finish_mrq(host, host->cmd->mrq);
3204 }
3205 }
3206
3207 spin_unlock_irqrestore(&host->lock, flags);
3208 }
3209
3210 /*****************************************************************************\
3211 *                                                                           *
3212 * Interrupt handling                                                        *
3213 *                                                                           *
3214 \*****************************************************************************/
3215
3216 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3217 {
3218 /* Handle auto-CMD12 error */
3219 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3220 struct mmc_request *mrq = host->data_cmd->mrq;
3221 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3222 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3223 SDHCI_INT_DATA_TIMEOUT :
3224 SDHCI_INT_DATA_CRC;
3225
3226 /* Treat auto-CMD12 error the same as data error */
3227 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3228 *intmask_p |= data_err_bit;
3229 return;
3230 }
3231 }
3232
3233 if (!host->cmd) {
3234 /*
3235 * SDHCI recovers from errors by resetting the cmd and data
3236 * circuits. Until that is done, there very well might be more
3237 * interrupts, so ignore them in that case.
3238 */
3239 if (host->pending_reset)
3240 return;
3241 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3242 mmc_hostname(host->mmc), (unsigned)intmask);
3243 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3244 sdhci_dumpregs(host);
3245 return;
3246 }
3247
3248 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3249 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3250 if (intmask & SDHCI_INT_TIMEOUT) {
3251 host->cmd->error = -ETIMEDOUT;
3252 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3253 } else {
3254 host->cmd->error = -EILSEQ;
3255 if (!mmc_op_tuning(host->cmd->opcode))
3256 sdhci_err_stats_inc(host, CMD_CRC);
3257 }
3258
3259 if (host->cmd->data &&
3260 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3261 SDHCI_INT_CRC) {
3262 host->cmd = NULL;
3263 *intmask_p |= SDHCI_INT_DATA_CRC;
3264 return;
3265 }
3266
3267 __sdhci_finish_mrq(host, host->cmd->mrq);
3268 return;
3269 }
3270
3271 /* Handle auto-CMD23 error */
3272 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3273 struct mmc_request *mrq = host->cmd->mrq;
3274 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3275 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3276 -ETIMEDOUT :
3277 -EILSEQ;
3278
3279 sdhci_err_stats_inc(host, AUTO_CMD);
3280
3281 if (sdhci_auto_cmd23(host, mrq)) {
3282 mrq->sbc->error = err;
3283 __sdhci_finish_mrq(host, mrq);
3284 return;
3285 }
3286 }
3287
3288 if (intmask & SDHCI_INT_RESPONSE)
3289 sdhci_finish_command(host);
3290 }
3291
3292 static void sdhci_adma_show_error(struct sdhci_host *host)
3293 {
3294 void *desc = host->adma_table;
3295 dma_addr_t dma = host->adma_addr;
3296
3297 sdhci_dumpregs(host);
3298
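/* Walk the descriptor chain until the descriptor with the END attribute */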
3299 while (true) {
3300 struct sdhci_adma2_64_desc *dma_desc = desc;
3301
3302 if (host->flags & SDHCI_USE_64_BIT_DMA)
3303 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3304 (unsigned long long)dma,
3305 le32_to_cpu(dma_desc->addr_hi),
3306 le32_to_cpu(dma_desc->addr_lo),
3307 le16_to_cpu(dma_desc->len),
3308 le16_to_cpu(dma_desc->cmd));
3309 else
3310 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3311 (unsigned long long)dma,
3312 le32_to_cpu(dma_desc->addr_lo),
3313 le16_to_cpu(dma_desc->len),
3314 le16_to_cpu(dma_desc->cmd));
3315
3316 desc += host->desc_sz;
3317 dma += host->desc_sz;
3318
3319 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3320 break;
3321 }
3322 }
3323
3324 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3325 {
3326 u32 command;
3327
3328 /*
3329 * CMD19 generates _only_ a Buffer Read Ready interrupt when
3330 * sdhci_send_tuning() is used, so treat it as tuning completion.
3331 * Exclude the PIO tuning case (host->data is set): there
3332 * sdhci_transfer_pio() must still be called, otherwise
3333 * SDHCI_INT_DATA_AVAIL stays asserted and causes an IRQ storm.
3334 */
3335 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
3336 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3337 if (command == MMC_SEND_TUNING_BLOCK ||
3338 command == MMC_SEND_TUNING_BLOCK_HS200) {
3339 host->tuning_done = 1;
3340 wake_up(&host->buf_ready_int);
3341 return;
3342 }
3343 }
3344
3345 if (!host->data) {
3346 struct mmc_command *data_cmd = host->data_cmd;
3347
3348 /*
3349 * The "data complete" interrupt is also used to
3350 * indicate that a busy state has ended. See comment
3351 * above in sdhci_cmd_irq().
3352 */
3353 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3354 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3355 host->data_cmd = NULL;
3356 data_cmd->error = -ETIMEDOUT;
3357 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3358 __sdhci_finish_mrq(host, data_cmd->mrq);
3359 return;
3360 }
3361 if (intmask & SDHCI_INT_DATA_END) {
3362 host->data_cmd = NULL;
3363 /*
3364 * Some cards handle busy-end interrupt
3365 * before the command completed, so make
3366 * sure we do things in the proper order.
3367 */
3368 if (host->cmd == data_cmd)
3369 return;
3370
3371 __sdhci_finish_mrq(host, data_cmd->mrq);
3372 return;
3373 }
3374 }
3375
3376 /*
3377 * SDHCI recovers from errors by resetting the cmd and data
3378 * circuits. Until that is done, there very well might be more
3379 * interrupts, so ignore them in that case.
3380 */
3381 if (host->pending_reset)
3382 return;
3383
3384 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3385 mmc_hostname(host->mmc), (unsigned)intmask);
3386 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3387 sdhci_dumpregs(host);
3388
3389 return;
3390 }
3391
3392 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3393 host->data->error = -ETIMEDOUT;
3394 sdhci_err_stats_inc(host, DAT_TIMEOUT);
3395 } else if (intmask & SDHCI_INT_DATA_END_BIT) {
3396 host->data->error = -EILSEQ;
3397 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3398 sdhci_err_stats_inc(host, DAT_CRC);
3399 } else if ((intmask & SDHCI_INT_DATA_CRC) &&
3400 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3401 != MMC_BUS_TEST_R) {
3402 host->data->error = -EILSEQ;
3403 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3404 sdhci_err_stats_inc(host, DAT_CRC);
3405 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
3406 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3407 intmask);
3408 sdhci_adma_show_error(host);
3409 sdhci_err_stats_inc(host, ADMA);
3410 host->data->error = -EIO;
3411 if (host->ops->adma_workaround)
3412 host->ops->adma_workaround(host, intmask);
3413 }
3414
3415 if (host->data->error)
3416 sdhci_finish_data(host);
3417 else {
3418 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3419 sdhci_transfer_pio(host);
3420
3421 /*
3422 * We currently don't do anything fancy with DMA
3423 * boundaries, but as we can't disable the feature
3424 * we need to at least restart the transfer.
3425 *
3426 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3427 * should return a valid address to continue from, but as
3428 * some controllers are faulty, don't trust them.
3429 */
3430 if (intmask & SDHCI_INT_DMA_END) {
3431 dma_addr_t dmastart, dmanow;
3432
3433 dmastart = sdhci_sdma_address(host);
3434 dmanow = dmastart + host->data->bytes_xfered;
3435
3436 /* Force update to the next DMA block boundary */
3438 dmanow = (dmanow &
3439 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3440 SDHCI_DEFAULT_BOUNDARY_SIZE;
3441 host->data->bytes_xfered = dmanow - dmastart;
3442 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3443 &dmastart, host->data->bytes_xfered, &dmanow);
3444 sdhci_set_sdma_addr(host, dmanow);
3445 }
3446
3447 if (intmask & SDHCI_INT_DATA_END) {
3448 if (host->cmd == host->data_cmd) {
3449 /*
3450 * Data managed to finish before the
3451 * command completed. Make sure we do
3452 * things in the proper order.
3453 */
3454 host->data_early = 1;
3455 } else {
3456 sdhci_finish_data(host);
3457 }
3458 }
3459 }
3460 }
3461
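/*
 * Completion is deferred to thread context when a reset is pending, when
 * the host asked for it (always_defer_done), or when DMA mappings made for
 * the request still need to be torn down.
 */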
3462 static inline bool sdhci_defer_done(struct sdhci_host *host,
3463 struct mmc_request *mrq)
3464 {
3465 struct mmc_data *data = mrq->data;
3466
3467 return host->pending_reset || host->always_defer_done ||
3468 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3469 data->host_cookie == COOKIE_MAPPED);
3470 }
3471
3472 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3473 {
3474 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3475 irqreturn_t result = IRQ_NONE;
3476 struct sdhci_host *host = dev_id;
3477 u32 intmask, mask, unexpected = 0;
3478 int max_loops = 16;
3479 int i;
3480
3481 spin_lock(&host->lock);
3482
3483 if (host->runtime_suspended) {
3484 spin_unlock(&host->lock);
3485 return IRQ_NONE;
3486 }
3487
3488 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
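/* All ones suggests the controller is no longer present (e.g. powered off or removed) */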
3489 if (!intmask || intmask == 0xffffffff) {
3490 result = IRQ_NONE;
3491 goto out;
3492 }
3493
3494 do {
3495 DBG("IRQ status 0x%08x\n", intmask);
3496
3497 if (host->ops->irq) {
3498 intmask = host->ops->irq(host, intmask);
3499 if (!intmask)
3500 goto cont;
3501 }
3502
3503 /* Clear selected interrupts. */
3504 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3505 SDHCI_INT_BUS_POWER);
3506 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3507
3508 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3509 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3510 SDHCI_CARD_PRESENT;
3511
3512 /*
3513 * There is an observation on i.mx esdhc: the INSERT
3514 * bit will be immediately set again when it gets
3515 * cleared, if a card is inserted. We have to mask
3516 * the irq to prevent an interrupt storm which would
3517 * freeze the system. The REMOVE bit suffers the
3518 * same situation.
3519 *
3520 * More testing is needed here to ensure it works
3521 * for other platforms though.
3522 */
3523 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3524 SDHCI_INT_CARD_REMOVE);
3525 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3526 SDHCI_INT_CARD_INSERT;
3527 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3528 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3529
3530 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3531 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3532
3533 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3534 SDHCI_INT_CARD_REMOVE);
3535 result = IRQ_WAKE_THREAD;
3536 }
3537
3538 if (intmask & SDHCI_INT_CMD_MASK)
3539 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3540
3541 if (intmask & SDHCI_INT_DATA_MASK)
3542 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3543
3544 if (intmask & SDHCI_INT_BUS_POWER)
3545 pr_err("%s: Card is consuming too much power!\n",
3546 mmc_hostname(host->mmc));
3547
3548 if (intmask & SDHCI_INT_RETUNE)
3549 mmc_retune_needed(host->mmc);
3550
3551 if ((intmask & SDHCI_INT_CARD_INT) &&
3552 (host->ier & SDHCI_INT_CARD_INT)) {
3553 sdhci_enable_sdio_irq_nolock(host, false);
3554 sdio_signal_irq(host->mmc);
3555 }
3556
3557 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3558 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3559 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3560 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3561
3562 if (intmask) {
3563 unexpected |= intmask;
3564 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3565 }
3566 cont:
3567 if (result == IRQ_NONE)
3568 result = IRQ_HANDLED;
3569
3570 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3571 } while (intmask && --max_loops);
3572
3573 /* Determine if mrqs can be completed immediately */
3574 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3575 struct mmc_request *mrq = host->mrqs_done[i];
3576
3577 if (!mrq)
3578 continue;
3579
3580 if (sdhci_defer_done(host, mrq)) {
3581 result = IRQ_WAKE_THREAD;
3582 } else {
3583 mrqs_done[i] = mrq;
3584 host->mrqs_done[i] = NULL;
3585 }
3586 }
3587 out:
3588 if (host->deferred_cmd)
3589 result = IRQ_WAKE_THREAD;
3590
3591 spin_unlock(&host->lock);
3592
3593 /* Process mrqs ready for immediate completion */
3594 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3595 if (!mrqs_done[i])
3596 continue;
3597
3598 if (host->ops->request_done)
3599 host->ops->request_done(host, mrqs_done[i]);
3600 else
3601 mmc_request_done(host->mmc, mrqs_done[i]);
3602 }
3603
3604 if (unexpected) {
3605 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3606 mmc_hostname(host->mmc), unexpected);
3607 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3608 sdhci_dumpregs(host);
3609 }
3610
3611 return result;
3612 }
3613
3614 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3615 {
3616 struct sdhci_host *host = dev_id;
3617 struct mmc_command *cmd;
3618 unsigned long flags;
3619 u32 isr;
3620
3621 while (!sdhci_request_done(host))
3622 ;
3623
3624 spin_lock_irqsave(&host->lock, flags);
3625
3626 isr = host->thread_isr;
3627 host->thread_isr = 0;
3628
3629 cmd = host->deferred_cmd;
3630 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3631 sdhci_finish_mrq(host, cmd->mrq);
3632
3633 spin_unlock_irqrestore(&host->lock, flags);
3634
3635 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3636 struct mmc_host *mmc = host->mmc;
3637
3638 mmc->ops->card_event(mmc);
3639 mmc_detect_change(mmc, msecs_to_jiffies(200));
3640 }
3641
3642 return IRQ_HANDLED;
3643 }
3644
3645 /*****************************************************************************\
3646 *                                                                           *
3647 * Suspend/resume                                                            *
3648 *                                                                           *
3649 \*****************************************************************************/
3650
3651 #ifdef CONFIG_PM
3652
3653 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3654 {
3655 return mmc_card_is_removable(host->mmc) &&
3656 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3657 !mmc_can_gpio_cd(host->mmc);
3658 }
3659
3660 /*
3661 * To enable wakeup events, the corresponding events have to be enabled in
3662 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3663 * Table' in the SD Host Controller Standard Specification.
3664 * It is useless to restore SDHCI_INT_ENABLE state in
3665 * sdhci_disable_irq_wakeups() since it will be set by
3666 * sdhci_enable_card_detection() or sdhci_init().
3667 */
3668 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3669 {
3670 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3671 SDHCI_WAKE_ON_INT;
3672 u32 irq_val = 0;
3673 u8 wake_val = 0;
3674 u8 val;
3675
3676 if (sdhci_cd_irq_can_wakeup(host)) {
3677 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3678 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3679 }
3680
3681 if (mmc_card_wake_sdio_irq(host->mmc)) {
3682 wake_val |= SDHCI_WAKE_ON_INT;
3683 irq_val |= SDHCI_INT_CARD_INT;
3684 }
3685
3686 if (!irq_val)
3687 return false;
3688
3689 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3690 val &= ~mask;
3691 val |= wake_val;
3692 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3693
3694 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3695
3696 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3697
3698 return host->irq_wake_enabled;
3699 }
3700
3701 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3702 {
3703 u8 val;
3704 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3705 | SDHCI_WAKE_ON_INT;
3706
3707 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3708 val &= ~mask;
3709 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3710
3711 disable_irq_wake(host->irq);
3712
3713 host->irq_wake_enabled = false;
3714 }
3715
3716 int sdhci_suspend_host(struct sdhci_host *host)
3717 {
3718 sdhci_disable_card_detection(host);
3719
3720 mmc_retune_timer_stop(host->mmc);
3721
3722 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3723 !sdhci_enable_irq_wakeups(host)) {
3724 host->ier = 0;
3725 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3726 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3727 free_irq(host->irq, host);
3728 }
3729
3730 return 0;
3731 }
3732
3733 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3734
3735 int sdhci_resume_host(struct sdhci_host *host)
3736 {
3737 struct mmc_host *mmc = host->mmc;
3738 int ret = 0;
3739
3740 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3741 if (host->ops->enable_dma)
3742 host->ops->enable_dma(host);
3743 }
3744
3745 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3746 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3747 /* Card keeps power but host controller does not */
3748 sdhci_init(host, 0);
3749 host->pwr = 0;
3750 host->clock = 0;
3751 mmc->ops->set_ios(mmc, &mmc->ios);
3752 } else {
3753 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
3754 }
3755
3756 if (host->irq_wake_enabled) {
3757 sdhci_disable_irq_wakeups(host);
3758 } else {
3759 ret = request_threaded_irq(host->irq, sdhci_irq,
3760 sdhci_thread_irq, IRQF_SHARED,
3761 mmc_hostname(mmc), host);
3762 if (ret)
3763 return ret;
3764 }
3765
3766 sdhci_enable_card_detection(host);
3767
3768 return ret;
3769 }
3770
3771 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3772
3773 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3774 {
3775 unsigned long flags;
3776
3777 mmc_retune_timer_stop(host->mmc);
3778
3779 spin_lock_irqsave(&host->lock, flags);
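/* Keep only the SDIO card interrupt enabled while runtime suspended */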
3780 host->ier &= SDHCI_INT_CARD_INT;
3781 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3782 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3783 spin_unlock_irqrestore(&host->lock, flags);
3784
3785 synchronize_hardirq(host->irq);
3786
3787 spin_lock_irqsave(&host->lock, flags);
3788 host->runtime_suspended = true;
3789 spin_unlock_irqrestore(&host->lock, flags);
3790
3791 return 0;
3792 }
3793 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3794
3795 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3796 {
3797 struct mmc_host *mmc = host->mmc;
3798 unsigned long flags;
3799 int host_flags = host->flags;
3800
3801 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3802 if (host->ops->enable_dma)
3803 host->ops->enable_dma(host);
3804 }
3805
3806 sdhci_init(host, soft_reset);
3807
3808 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3809 mmc->ios.power_mode != MMC_POWER_OFF) {
3810 /* Force clock and power re-program */
3811 host->pwr = 0;
3812 host->clock = 0;
3813 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3814 mmc->ops->set_ios(mmc, &mmc->ios);
3815
3816 if ((host_flags & SDHCI_PV_ENABLED) &&
3817 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3818 spin_lock_irqsave(&host->lock, flags);
3819 sdhci_enable_preset_value(host, true);
3820 spin_unlock_irqrestore(&host->lock, flags);
3821 }
3822
3823 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3824 mmc->ops->hs400_enhanced_strobe)
3825 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3826 }
3827
3828 spin_lock_irqsave(&host->lock, flags);
3829
3830 host->runtime_suspended = false;
3831
3832 /* Enable SDIO IRQ */
3833 if (sdio_irq_claimed(mmc))
3834 sdhci_enable_sdio_irq_nolock(host, true);
3835
3836 /* Enable Card Detection */
3837 sdhci_enable_card_detection(host);
3838
3839 spin_unlock_irqrestore(&host->lock, flags);
3840
3841 return 0;
3842 }
3843 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3844
3845 #endif
3846
3847 /*****************************************************************************\
3848 *                                                                           *
3849 * Command Queue Engine                                                      *
3850 *                                                                           *
3851 \*****************************************************************************/
3852
3853 void sdhci_cqe_enable(struct mmc_host *mmc)
3854 {
3855 struct sdhci_host *host = mmc_priv(mmc);
3856 unsigned long flags;
3857 u8 ctrl;
3858
3859 spin_lock_irqsave(&host->lock, flags);
3860
3861 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3862 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3863 /*
3864 * Hosts from v4.10 support the ADMA3 DMA type. ADMA3 uses
3865 * integrated descriptors, which suits command queuing since
3866 * both command and transfer descriptors are fetched together.
3867 */
3868 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3869 ctrl |= SDHCI_CTRL_ADMA3;
3870 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3871 ctrl |= SDHCI_CTRL_ADMA64;
3872 else
3873 ctrl |= SDHCI_CTRL_ADMA32;
3874 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3875
3876 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3877 SDHCI_BLOCK_SIZE);
3878
3879 /* Set maximum timeout */
3880 sdhci_set_timeout(host, NULL);
3881
3882 host->ier = host->cqe_ier;
3883
3884 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3885 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3886
3887 host->cqe_on = true;
3888
3889 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3890 mmc_hostname(mmc), host->ier,
3891 sdhci_readl(host, SDHCI_INT_STATUS));
3892
3893 spin_unlock_irqrestore(&host->lock, flags);
3894 }
3895 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3896
3897 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3898 {
3899 struct sdhci_host *host = mmc_priv(mmc);
3900 unsigned long flags;
3901
3902 spin_lock_irqsave(&host->lock, flags);
3903
3904 sdhci_set_default_irqs(host);
3905
3906 host->cqe_on = false;
3907
3908 if (recovery) {
3909 sdhci_do_reset(host, SDHCI_RESET_CMD);
3910 sdhci_do_reset(host, SDHCI_RESET_DATA);
3911 }
3912
3913 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3914 mmc_hostname(mmc), host->ier,
3915 sdhci_readl(host, SDHCI_INT_STATUS));
3916
3917 spin_unlock_irqrestore(&host->lock, flags);
3918 }
3919 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3920
3921 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3922 int *data_error)
3923 {
3924 u32 mask;
3925
3926 if (!host->cqe_on)
3927 return false;
3928
3929 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
3930 *cmd_error = -EILSEQ;
3931 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3932 sdhci_err_stats_inc(host, CMD_CRC);
3933 } else if (intmask & SDHCI_INT_TIMEOUT) {
3934 *cmd_error = -ETIMEDOUT;
3935 sdhci_err_stats_inc(host, CMD_TIMEOUT);
3936 } else
3937 *cmd_error = 0;
3938
3939 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
3940 *data_error = -EILSEQ;
3941 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3942 sdhci_err_stats_inc(host, DAT_CRC);
3943 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3944 *data_error = -ETIMEDOUT;
3945 sdhci_err_stats_inc(host, DAT_TIMEOUT);
3946 } else if (intmask & SDHCI_INT_ADMA_ERROR) {
3947 *data_error = -EIO;
3948 sdhci_err_stats_inc(host, ADMA);
3949 } else
3950 *data_error = 0;
3951
3952 /* Clear selected interrupts. */
3953 mask = intmask & host->cqe_ier;
3954 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3955
3956 if (intmask & SDHCI_INT_BUS_POWER)
3957 pr_err("%s: Card is consuming too much power!\n",
3958 mmc_hostname(host->mmc));
3959
3960 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3961 if (intmask) {
3962 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3963 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3964 mmc_hostname(host->mmc), intmask);
3965 sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3966 sdhci_dumpregs(host);
3967 }
3968
3969 return true;
3970 }
3971 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3972
3973 /*****************************************************************************\
3974 *                                                                           *
3975 * Device allocation/registration                                            *
3976 *                                                                           *
3977 \*****************************************************************************/
3978
3979 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3980 size_t priv_size)
3981 {
3982 struct mmc_host *mmc;
3983 struct sdhci_host *host;
3984
3985 WARN_ON(dev == NULL);
3986
3987 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3988 if (!mmc)
3989 return ERR_PTR(-ENOMEM);
3990
3991 host = mmc_priv(mmc);
3992 host->mmc = mmc;
3993 host->mmc_host_ops = sdhci_ops;
3994 mmc->ops = &host->mmc_host_ops;
3995
3996 host->flags = SDHCI_SIGNALING_330;
3997
3998 host->cqe_ier = SDHCI_CQE_INT_MASK;
3999 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
4000
4001 host->tuning_delay = -1;
4002 host->tuning_loop_count = MAX_TUNING_LOOP;
4003
4004 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
4005
4006 /*
4007 * The DMA table descriptor count is calculated as the maximum
4008 * number of segments times 2, to allow for an alignment
4009 * descriptor for each segment, plus 1 for a nop end descriptor.
4010 */
4011 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
4012 host->max_adma = 65536;
4013
4014 host->max_timeout_count = 0xE;
4015
4016 return host;
4017 }
4018
4019 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
4020
4021 static int sdhci_set_dma_mask(struct sdhci_host *host)
4022 {
4023 struct mmc_host *mmc = host->mmc;
4024 struct device *dev = mmc_dev(mmc);
4025 int ret = -EINVAL;
4026
4027 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
4028 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4029
4030 /* Try 64-bit mask if hardware is capable of it */
4031 if (host->flags & SDHCI_USE_64_BIT_DMA) {
4032 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4033 if (ret) {
4034 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
4035 mmc_hostname(mmc));
4036 host->flags &= ~SDHCI_USE_64_BIT_DMA;
4037 }
4038 }
4039
4040 /* 32-bit mask as default & fallback */
4041 if (ret) {
4042 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4043 if (ret)
4044 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
4045 mmc_hostname(mmc));
4046 }
4047
4048 return ret;
4049 }
4050
4051 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
4052 const u32 *caps, const u32 *caps1)
4053 {
4054 u16 v;
4055 u64 dt_caps_mask = 0;
4056 u64 dt_caps = 0;
4057
4058 if (host->read_caps)
4059 return;
4060
4061 host->read_caps = true;
4062
4063 if (debug_quirks)
4064 host->quirks = debug_quirks;
4065
4066 if (debug_quirks2)
4067 host->quirks2 = debug_quirks2;
4068
4069 sdhci_do_reset(host, SDHCI_RESET_ALL);
4070
4071 if (host->v4_mode)
4072 sdhci_do_enable_v4_mode(host);
4073
4074 device_property_read_u64(mmc_dev(host->mmc),
4075 "sdhci-caps-mask", &dt_caps_mask);
4076 device_property_read_u64(mmc_dev(host->mmc),
4077 "sdhci-caps", &dt_caps);
4078
4079 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4080 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4081
4082 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4083 return;
4084
4085 if (caps) {
4086 host->caps = *caps;
4087 } else {
4088 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4089 host->caps &= ~lower_32_bits(dt_caps_mask);
4090 host->caps |= lower_32_bits(dt_caps);
4091 }
4092
4093 if (host->version < SDHCI_SPEC_300)
4094 return;
4095
4096 if (caps1) {
4097 host->caps1 = *caps1;
4098 } else {
4099 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4100 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4101 host->caps1 |= upper_32_bits(dt_caps);
4102 }
4103 }
4104 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
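/*
 * Illustrative only: boards can override broken capability bits via the
 * "sdhci-caps"/"sdhci-caps-mask" 64-bit device properties read above,
 * e.g. in a devicetree node:
 *
 *	sdhci-caps-mask = /bits/ 64 <0x0000000000000007>;
 *
 * which clears the low three bits of the CAPABILITIES register before use.
 */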
4105
4106 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4107 {
4108 struct mmc_host *mmc = host->mmc;
4109 unsigned int max_blocks;
4110 unsigned int bounce_size;
4111 int ret;
4112
4113 /*
4114 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4115 * has diminishing returns, this is probably because SD/MMC
4116 * cards are usually optimized to handle this size of requests.
4117 */
4118 bounce_size = SZ_64K;
4119
4120 /*
4121 * Adjust downwards to the maximum request size if that is
4122 * smaller than the bounce buffer size we picked.
4123 */
4124 if (mmc->max_req_size < bounce_size)
4125 bounce_size = mmc->max_req_size;
4126 max_blocks = bounce_size / 512;
4127
4128 /*
4129 * When we just support one segment, we can get significant
4130 * speedups by the help of a bounce buffer to group scattered
4131 * reads/writes together.
4132 */
4133 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
4134 bounce_size,
4135 GFP_KERNEL);
4136 if (!host->bounce_buffer) {
4137 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4138 mmc_hostname(mmc),
4139 bounce_size);
4140 /*
4141 * Returning here makes sure we proceed with
4142 * mmc->max_segs == 1.
4143 */
4144 return;
4145 }
4146
4147 host->bounce_addr = dma_map_single(mmc_dev(mmc),
4148 host->bounce_buffer,
4149 bounce_size,
4150 DMA_BIDIRECTIONAL);
4151 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
4152 if (ret) {
4153 devm_kfree(mmc_dev(mmc), host->bounce_buffer);
4154 host->bounce_buffer = NULL;
4155 /* Again fall back to max_segs == 1 */
4156 return;
4157 }
4158
4159 host->bounce_buffer_size = bounce_size;
4160
4161 /* Lie about this since we're bouncing */
4162 mmc->max_segs = max_blocks;
4163 mmc->max_seg_size = bounce_size;
4164 mmc->max_req_size = bounce_size;
4165
4166 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4167 mmc_hostname(mmc), max_blocks, bounce_size);
4168 }
4169
4170 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4171 {
4172 /*
4173 * According to SD Host Controller spec v4.10, bit[27] added from
4174 * version 4.10 in the Capabilities Register is used as 64-bit
4175 * System Address support for V4 mode.
4176 */
4177 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4178 return host->caps & SDHCI_CAN_64BIT_V4;
4179
4180 return host->caps & SDHCI_CAN_64BIT;
4181 }
4182
4183 int sdhci_setup_host(struct sdhci_host *host)
4184 {
4185 struct mmc_host *mmc;
4186 u32 max_current_caps;
4187 unsigned int ocr_avail;
4188 unsigned int override_timeout_clk;
4189 u32 max_clk;
4190 int ret = 0;
4191 bool enable_vqmmc = false;
4192
4193 WARN_ON(host == NULL);
4194 if (host == NULL)
4195 return -EINVAL;
4196
4197 mmc = host->mmc;
4198
4199 /*
4200 * If there are external regulators, get them. Note this must be done
4201 * early before resetting the host and reading the capabilities so that
4202 * the host can take the appropriate action if regulators are not
4203 * available.
4204 */
4205 if (!mmc->supply.vqmmc) {
4206 ret = mmc_regulator_get_supply(mmc);
4207 if (ret)
4208 return ret;
4209 enable_vqmmc = true;
4210 }
4211
4212 DBG("Version: 0x%08x | Present: 0x%08x\n",
4213 sdhci_readw(host, SDHCI_HOST_VERSION),
4214 sdhci_readl(host, SDHCI_PRESENT_STATE));
4215 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4216 sdhci_readl(host, SDHCI_CAPABILITIES),
4217 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4218
4219 sdhci_read_caps(host);
4220
4221 override_timeout_clk = host->timeout_clk;
4222
4223 if (host->version > SDHCI_SPEC_420) {
4224 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4225 mmc_hostname(mmc), host->version);
4226 }
4227
4228 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4229 host->flags |= SDHCI_USE_SDMA;
4230 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4231 DBG("Controller doesn't have SDMA capability\n");
4232 else
4233 host->flags |= SDHCI_USE_SDMA;
4234
4235 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4236 (host->flags & SDHCI_USE_SDMA)) {
4237 DBG("Disabling DMA as it is marked broken\n");
4238 host->flags &= ~SDHCI_USE_SDMA;
4239 }
4240
4241 if ((host->version >= SDHCI_SPEC_200) &&
4242 (host->caps & SDHCI_CAN_DO_ADMA2))
4243 host->flags |= SDHCI_USE_ADMA;
4244
4245 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4246 (host->flags & SDHCI_USE_ADMA)) {
4247 DBG("Disabling ADMA as it is marked broken\n");
4248 host->flags &= ~SDHCI_USE_ADMA;
4249 }
4250
4251 if (sdhci_can_64bit_dma(host))
4252 host->flags |= SDHCI_USE_64_BIT_DMA;
4253
4254 if (host->use_external_dma) {
4255 ret = sdhci_external_dma_init(host);
4256 if (ret == -EPROBE_DEFER)
4257 goto unreg;
4258 /*
4259 * Fall back to the DMA/PIO integrated in standard SDHCI
4260 * instead of external DMA devices.
4261 */
4262 else if (ret)
4263 sdhci_switch_external_dma(host, false);
4264 /* Disable internal DMA sources */
4265 else
4266 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4267 }
4268
4269 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4270 if (host->ops->set_dma_mask)
4271 ret = host->ops->set_dma_mask(host);
4272 else
4273 ret = sdhci_set_dma_mask(host);
4274
4275 if (!ret && host->ops->enable_dma)
4276 ret = host->ops->enable_dma(host);
4277
4278 if (ret) {
4279 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4280 mmc_hostname(mmc));
4281 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4282
4283 ret = 0;
4284 }
4285 }
4286
4287 /* SDMA does not support 64-bit DMA if v4 mode not set */
4288 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4289 host->flags &= ~SDHCI_USE_SDMA;
4290
4291 if (host->flags & SDHCI_USE_ADMA) {
4292 dma_addr_t dma;
4293 void *buf;
4294
4295 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4296 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4297 else if (!host->alloc_desc_sz)
4298 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4299
4300 host->desc_sz = host->alloc_desc_sz;
4301 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4302
4303 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4304
4305 /*
4306 * The align buffer and the ADMA descriptor table share one allocation.
4307 */
4308 buf = dma_alloc_coherent(mmc_dev(mmc),
4309 host->align_buffer_sz + host->adma_table_sz,
4310 &dma, GFP_KERNEL);
4311 if (!buf) {
4312 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4313 mmc_hostname(mmc));
4314 host->flags &= ~SDHCI_USE_ADMA;
4315 } else if ((dma + host->align_buffer_sz) &
4316 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4317 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4318 mmc_hostname(mmc));
4319 host->flags &= ~SDHCI_USE_ADMA;
4320 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4321 host->adma_table_sz, buf, dma);
4322 } else {
4323 host->align_buffer = buf;
4324 host->align_addr = dma;
4325
4326 host->adma_table = buf + host->align_buffer_sz;
4327 host->adma_addr = dma + host->align_buffer_sz;
4328 }
4329 }
4330
4331 /*
4332 * If we use DMA, then it's up to the caller to set the DMA
4333 * mask, but PIO does not need the hw shim so we set a new
4334 * mask here in that case.
4335 */
4336 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4337 host->dma_mask = DMA_BIT_MASK(64);
4338 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4339 }
4340
4341 if (host->version >= SDHCI_SPEC_300)
4342 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4343 else
4344 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4345
4346 host->max_clk *= 1000000;
4347 if (host->max_clk == 0 || host->quirks &
4348 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4349 if (!host->ops->get_max_clock) {
4350 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4351 mmc_hostname(mmc));
4352 ret = -ENODEV;
4353 goto undma;
4354 }
4355 host->max_clk = host->ops->get_max_clock(host);
4356 }
4357
4358 /*
4359 * In case of Host Controller v3.00, find out whether clock
4360 * multiplier is supported.
4361 */
4362 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4363
4364 /*
4365 * In case the value in Clock Multiplier is 0, then programmable
4366 * clock mode is not supported, otherwise the actual clock
4367 * multiplier is one more than the value of Clock Multiplier
4368 * in the Capabilities Register.
4369 */
4370 if (host->clk_mul)
4371 host->clk_mul += 1;
4372
4373 /*
4374 * Set host parameters.
4375 */
4376 max_clk = host->max_clk;
4377
4378 if (host->ops->get_min_clock)
4379 mmc->f_min = host->ops->get_min_clock(host);
4380 else if (host->version >= SDHCI_SPEC_300) {
4381 if (host->clk_mul)
4382 max_clk = host->max_clk * host->clk_mul;
4383
4384 /*
4385 * Divided Clock Mode minimum rate is below the Programmable Clock Mode one.
4386 */
4387 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4388 } else
4389 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4390
4391 if (!mmc->f_max || mmc->f_max > max_clk)
4392 mmc->f_max = max_clk;
4393
4394 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4395 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4396
4397 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4398 host->timeout_clk *= 1000;
4399
4400 if (host->timeout_clk == 0) {
4401 if (!host->ops->get_timeout_clock) {
4402 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4403 mmc_hostname(mmc));
4404 ret = -ENODEV;
4405 goto undma;
4406 }
4407
4408 host->timeout_clk =
4409 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4410 1000);
4411 }
4412
4413 if (override_timeout_clk)
4414 host->timeout_clk = override_timeout_clk;
4415
4416 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4417 host->ops->get_max_timeout_count(host) : 1 << 27;
4418 mmc->max_busy_timeout /= host->timeout_clk;
4419 }
4420
4421 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4422 !host->ops->get_max_timeout_count)
4423 mmc->max_busy_timeout = 0;
4424
4425 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4426 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4427
4428 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4429 host->flags |= SDHCI_AUTO_CMD12;
4430
4431 /*
4432 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
4433 * For v4 mode, SDMA may use Auto-CMD23 as well.
4434 */
4435 if ((host->version >= SDHCI_SPEC_300) &&
4436 ((host->flags & SDHCI_USE_ADMA) ||
4437 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4438 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4439 host->flags |= SDHCI_AUTO_CMD23;
4440 DBG("Auto-CMD23 available\n");
4441 } else {
4442 DBG("Auto-CMD23 unavailable\n");
4443 }
4444
4445 /*
4446 * A controller may support 8-bit width, but the board itself
4447 * might not have the pins brought out. Boards that support
4448 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4449 * their platform code before calling sdhci_add_host(), and we
4450 * won't assume 8-bit width for hosts without that CAP.
4451 */
4452 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4453 mmc->caps |= MMC_CAP_4_BIT_DATA;
4454
4455 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4456 mmc->caps &= ~MMC_CAP_CMD23;
4457
4458 if (host->caps & SDHCI_CAN_DO_HISPD)
4459 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4460
4461 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4462 mmc_card_is_removable(mmc) &&
4463 mmc_gpio_get_cd(mmc) < 0)
4464 mmc->caps |= MMC_CAP_NEEDS_POLL;
4465
4466 if (!IS_ERR(mmc->supply.vqmmc)) {
4467 if (enable_vqmmc) {
4468 ret = regulator_enable(mmc->supply.vqmmc);
4469 host->sdhci_core_to_disable_vqmmc = !ret;
4470 }
4471
4472 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4473 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4474 1950000))
4475 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4476 SDHCI_SUPPORT_SDR50 |
4477 SDHCI_SUPPORT_DDR50);
4478
4479 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4480 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4481 3600000))
4482 host->flags &= ~SDHCI_SIGNALING_330;
4483
4484 if (ret) {
4485 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4486 mmc_hostname(mmc), ret);
4487 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4488 }
4489
4490 }
4491
4492 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4493 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4494 SDHCI_SUPPORT_DDR50);
4495 /*
4496 * The SDHCI controller in a SoC might support HS200/HS400
4497 * (indicated by the mmc-hs200-1_8v/mmc-hs400-1_8v DT properties),
4498 * but if the board is modeled such that the IO lines are not
4499 * connected to 1.8V then HS200/HS400 cannot be supported.
4500 * Disable HS200/HS400 if the board does not have 1.8V connected
4501 * to the IO lines. (Applicable for other modes in 1.8V)
4502 */
4503 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4504 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4505 }
4506
4507 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4508 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4509 SDHCI_SUPPORT_DDR50))
4510 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4511
4512 /* SDR104 support also implies SDR50 support */
4513 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4514 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4515 /* SD3.0: SDR104 is supported, so (for eMMC) the caps2
4516 * field can be promoted to support HS200.
4517 */
4518 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4519 mmc->caps2 |= MMC_CAP2_HS200;
4520 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4521 mmc->caps |= MMC_CAP_UHS_SDR50;
4522 }
4523
4524 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4525 (host->caps1 & SDHCI_SUPPORT_HS400))
4526 mmc->caps2 |= MMC_CAP2_HS400;
4527
4528 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4529 (IS_ERR(mmc->supply.vqmmc) ||
4530 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4531 1300000)))
4532 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4533
4534 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4535 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4536 mmc->caps |= MMC_CAP_UHS_DDR50;
4537
4538 /* Does the host need tuning for SDR50? */
4539 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4540 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4541
4542 /* Driver Type(s) (A, C, D) supported by the host */
4543 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4544 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4545 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4546 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4547 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4548 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4549
4550 /* Initial value for re-tuning timer count */
4551 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4552 host->caps1);
4553
4554 /*
4555 * In case Re-tuning Timer is not disabled, the actual value of
4556 * re-tuning timer will be 2 ^ (n - 1).
4557 */
4558 if (host->tuning_count)
4559 host->tuning_count = 1 << (host->tuning_count - 1);
4560
4561 /* Re-tuning mode supported by the Host Controller */
4562 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4563
4564 ocr_avail = 0;
4565
4566 /*
4567 * According to SD Host Controller spec v3.00, if the Host System
4568 * can afford more than 150mA, Host Driver should set XPC to 1. Also
4569 * the value is meaningful only if Voltage Support in the Capabilities
4570 * register is set. The actual current value is 4 times the register
4571 * value.
4572 */
4573 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4574 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4575 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4576 if (curr > 0) {
4577
4578 /* convert to SDHCI_MAX_CURRENT format */
4579 curr = curr/1000;  /* convert to mA */
4580 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4581
4582 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4583 max_current_caps =
4584 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4585 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4586 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4587 }
4588 }
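/*
 * Worked example: an 800000 uA regulator limit becomes 800 mA,
 * then 800 / 4 = 200 register units, so 200 * 4 = 800 mA is
 * reported for each voltage rail below.
 */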
4589
4590 if (host->caps & SDHCI_CAN_VDD_330) {
4591 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4592
4593 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4594 max_current_caps) *
4595 SDHCI_MAX_CURRENT_MULTIPLIER;
4596 }
4597 if (host->caps & SDHCI_CAN_VDD_300) {
4598 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4599
4600 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4601 max_current_caps) *
4602 SDHCI_MAX_CURRENT_MULTIPLIER;
4603 }
4604 if (host->caps & SDHCI_CAN_VDD_180) {
4605 ocr_avail |= MMC_VDD_165_195;
4606
4607 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4608 max_current_caps) *
4609 SDHCI_MAX_CURRENT_MULTIPLIER;
4610 }

/* If OCR set by host, use it instead. */
4613 if (host->ocr_mask)
4614 ocr_avail = host->ocr_mask;

/* If OCR set by external regulators, give it highest prio. */
4617 if (mmc->ocr_avail)
4618 ocr_avail = mmc->ocr_avail;
4619
4620 mmc->ocr_avail = ocr_avail;
4621 mmc->ocr_avail_sdio = ocr_avail;
4622 if (host->ocr_avail_sdio)
4623 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
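/*
 * For SD, additionally strip 1.65-1.95V unless the host driver
 * supplied its own SD OCR mask: plain SD cards are powered at
 * around 3.3V, not 1.8V.
 */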
4624 mmc->ocr_avail_sd = ocr_avail;
4625 if (host->ocr_avail_sd)
4626 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4627 else
4628 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4629 mmc->ocr_avail_mmc = ocr_avail;
4630 if (host->ocr_avail_mmc)
4631 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4632
if (mmc->ocr_avail == 0) {
pr_err("%s: Hardware doesn't report any supported voltages.\n",
mmc_hostname(mmc));
4636 ret = -ENODEV;
4637 goto unreg;
4638 }
4639
4640 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4641 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4642 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4643 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4644 host->flags |= SDHCI_SIGNALING_180;
4645
4646 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4647 host->flags |= SDHCI_SIGNALING_120;
4648
4649 spin_lock_init(&host->lock);

/*
 * Maximum number of bytes in one request. Limited by the SDMA
 * boundary size (512 KiB).
 */
4656 mmc->max_req_size = 524288;

/*
 * Maximum number of segments. Depends on if the hardware
 * can do scatter/gather or not.
 */
4662 if (host->flags & SDHCI_USE_ADMA) {
4663 mmc->max_segs = SDHCI_MAX_SEGS;
4664 } else if (host->flags & SDHCI_USE_SDMA) {
4665 mmc->max_segs = 1;
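/*
 * SDMA cannot chain buffers, so a request must fit in a single
 * DMA mapping; clamp to what the DMA layer can map in one go.
 */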
4666 mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4667 dma_max_mapping_size(mmc_dev(mmc)));
} else { /* PIO */
4669 mmc->max_segs = SDHCI_MAX_SEGS;
4670 }
4671

/*
 * Maximum segment size. Could be one segment with the maximum number
 * of bytes. When doing hardware scatter/gather, each entry cannot
 * be larger than 64 KiB though.
 */
4677 if (host->flags & SDHCI_USE_ADMA) {
4678 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
host->max_adma = 65532; /* 32-bit alignment */
4680 mmc->max_seg_size = 65535;
4681 } else {
4682 mmc->max_seg_size = 65536;
4683 }
4684 } else {
4685 mmc->max_seg_size = mmc->max_req_size;
4686 }
4687

/*
 * Maximum block size. This varies from controller to controller and
 * is specified in the capabilities register.
 */
4692 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4693 mmc->max_blk_size = 2;
4694 } else {
4695 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4696 SDHCI_MAX_BLOCK_SHIFT;
4697 if (mmc->max_blk_size >= 3) {
4698 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4699 mmc_hostname(mmc));
4700 mmc->max_blk_size = 0;
4701 }
4702 }
4703
4704 mmc->max_blk_size = 512 << mmc->max_blk_size;
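/* The encoded field is a power of two: 0 -> 512, 1 -> 1024, 2 -> 2048 bytes. */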
4705
/*
 * Maximum block count.
 */
4709 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4710
4711 if (mmc->max_segs == 1)
/* This may alter mmc->*_blk_* parameters */
4713 sdhci_allocate_bounce_buffer(host);
4714
4715 return 0;
4716
4717 unreg:
4718 if (host->sdhci_core_to_disable_vqmmc)
4719 regulator_disable(mmc->supply.vqmmc);
4720 undma:
4721 if (host->align_buffer)
4722 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4723 host->adma_table_sz, host->align_buffer,
4724 host->align_addr);
4725 host->adma_table = NULL;
4726 host->align_buffer = NULL;
4727
4728 return ret;
4729 }
4730 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4731
4732 void sdhci_cleanup_host(struct sdhci_host *host)
4733 {
4734 struct mmc_host *mmc = host->mmc;
4735
4736 if (host->sdhci_core_to_disable_vqmmc)
4737 regulator_disable(mmc->supply.vqmmc);
4738
4739 if (host->align_buffer)
4740 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4741 host->adma_table_sz, host->align_buffer,
4742 host->align_addr);
4743
4744 if (host->use_external_dma)
4745 sdhci_external_dma_release(host);
4746
4747 host->adma_table = NULL;
4748 host->align_buffer = NULL;
4749 }
4750 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4751
4752 int __sdhci_add_host(struct sdhci_host *host)
4753 {
4754 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4755 struct mmc_host *mmc = host->mmc;
4756 int ret;
4757
4758 if ((mmc->caps2 & MMC_CAP2_CQE) &&
4759 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4760 mmc->caps2 &= ~MMC_CAP2_CQE;
4761 mmc->cqe_ops = NULL;
4762 }
4763
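/*
 * Completion work may run in the I/O path, so the workqueue needs
 * WQ_MEM_RECLAIM to guarantee forward progress under memory
 * pressure; WQ_HIGHPRI/WQ_UNBOUND keep completion latency low.
 */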
4764 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4765 if (!host->complete_wq)
4766 return -ENOMEM;
4767
4768 INIT_WORK(&host->complete_work, sdhci_complete_work);
4769
4770 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4771 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4772
4773 init_waitqueue_head(&host->buf_ready_int);
4774
4775 sdhci_init(host, 0);
4776
4777 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4778 IRQF_SHARED, mmc_hostname(mmc), host);
4779 if (ret) {
4780 pr_err("%s: Failed to request IRQ %d: %d\n",
4781 mmc_hostname(mmc), host->irq, ret);
4782 goto unwq;
4783 }
4784
4785 ret = sdhci_led_register(host);
4786 if (ret) {
4787 pr_err("%s: Failed to register LED device: %d\n",
4788 mmc_hostname(mmc), ret);
4789 goto unirq;
4790 }
4791
4792 ret = mmc_add_host(mmc);
4793 if (ret)
4794 goto unled;
4795
4796 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4797 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4798 host->use_external_dma ? "External DMA" :
4799 (host->flags & SDHCI_USE_ADMA) ?
4800 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4801 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4802
4803 sdhci_enable_card_detection(host);
4804
4805 return 0;
4806
4807 unled:
4808 sdhci_led_unregister(host);
4809 unirq:
4810 sdhci_do_reset(host, SDHCI_RESET_ALL);
4811 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4812 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4813 free_irq(host->irq, host);
4814 unwq:
4815 destroy_workqueue(host->complete_wq);
4816
4817 return ret;
4818 }
4819 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4820
4821 int sdhci_add_host(struct sdhci_host *host)
4822 {
4823 int ret;
4824
4825 ret = sdhci_setup_host(host);
4826 if (ret)
4827 return ret;
4828
4829 ret = __sdhci_add_host(host);
4830 if (ret)
4831 goto cleanup;
4832
4833 return 0;
4834
4835 cleanup:
4836 sdhci_cleanup_host(host);
4837
4838 return ret;
4839 }
4840 EXPORT_SYMBOL_GPL(sdhci_add_host);
4841
4842 void sdhci_remove_host(struct sdhci_host *host, int dead)
4843 {
4844 struct mmc_host *mmc = host->mmc;
4845 unsigned long flags;
4846
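/*
 * A dead controller can no longer be touched: just mark it as such
 * and error out any requests that are still in flight.
 */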
4847 if (dead) {
4848 spin_lock_irqsave(&host->lock, flags);
4849
4850 host->flags |= SDHCI_DEVICE_DEAD;
4851
4852 if (sdhci_has_requests(host)) {
pr_err("%s: Controller removed during transfer!\n",
mmc_hostname(mmc));
4855 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4856 }
4857
4858 spin_unlock_irqrestore(&host->lock, flags);
4859 }
4860
4861 sdhci_disable_card_detection(host);
4862
4863 mmc_remove_host(mmc);
4864
4865 sdhci_led_unregister(host);
4866
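/* Only reset the controller if it is still responding */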
4867 if (!dead)
4868 sdhci_do_reset(host, SDHCI_RESET_ALL);
4869
4870 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4871 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4872 free_irq(host->irq, host);
4873
4874 del_timer_sync(&host->timer);
4875 del_timer_sync(&host->data_timer);
4876
4877 destroy_workqueue(host->complete_wq);
4878
4879 if (host->sdhci_core_to_disable_vqmmc)
4880 regulator_disable(mmc->supply.vqmmc);
4881
4882 if (host->align_buffer)
4883 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4884 host->adma_table_sz, host->align_buffer,
4885 host->align_addr);
4886
4887 if (host->use_external_dma)
4888 sdhci_external_dma_release(host);
4889
4890 host->adma_table = NULL;
4891 host->align_buffer = NULL;
4892 }
4893
4894 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4895
4896 void sdhci_free_host(struct sdhci_host *host)
4897 {
4898 mmc_free_host(host->mmc);
4899 }
4900
4901 EXPORT_SYMBOL_GPL(sdhci_free_host);
4902
/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

4909 static int __init sdhci_drv_init(void)
4910 {
4911 pr_info(DRIVER_NAME
4912 ": Secure Digital Host Controller Interface driver\n");
4913 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4914
4915 return 0;
4916 }
4917
4918 static void __exit sdhci_drv_exit(void)
4919 {
4920 }
4921
4922 module_init(sdhci_drv_init);
4923 module_exit(sdhci_drv_exit);
4924
4925 module_param(debug_quirks, uint, 0444);
4926 module_param(debug_quirks2, uint, 0444);
4927
4928 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4929 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4930 MODULE_LICENSE("GPL");
4931
4932 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4933 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");