0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/dmaengine.h>
0009 #include <linux/iopoll.h>
0010 #include <linux/pm_runtime.h>
0011 #include <linux/spi/spi.h>
0012 #include <linux/spi/spi-mem.h>
0013 #include <linux/sched/task_stack.h>
0014
0015 #include "internals.h"
0016
0017 #define SPI_MEM_MAX_BUSWIDTH 8
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036 int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
0037 const struct spi_mem_op *op,
0038 struct sg_table *sgt)
0039 {
0040 struct device *dmadev;
0041
0042 if (!op->data.nbytes)
0043 return -EINVAL;
0044
0045 if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
0046 dmadev = ctlr->dma_tx->device->dev;
0047 else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
0048 dmadev = ctlr->dma_rx->device->dev;
0049 else
0050 dmadev = ctlr->dev.parent;
0051
0052 if (!dmadev)
0053 return -EINVAL;
0054
0055 return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
0056 op->data.dir == SPI_MEM_DATA_IN ?
0057 DMA_FROM_DEVICE : DMA_TO_DEVICE);
0058 }
0059 EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082 void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
0083 const struct spi_mem_op *op,
0084 struct sg_table *sgt)
0085 {
0086 struct device *dmadev;
0087
0088 if (!op->data.nbytes)
0089 return;
0090
0091 if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
0092 dmadev = ctlr->dma_tx->device->dev;
0093 else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
0094 dmadev = ctlr->dma_rx->device->dev;
0095 else
0096 dmadev = ctlr->dev.parent;
0097
0098 spi_unmap_buf(ctlr, dmadev, sgt,
0099 op->data.dir == SPI_MEM_DATA_IN ?
0100 DMA_FROM_DEVICE : DMA_TO_DEVICE);
0101 }
0102 EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
0103
0104 static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
0105 {
0106 u32 mode = mem->spi->mode;
0107
0108 switch (buswidth) {
0109 case 1:
0110 return 0;
0111
0112 case 2:
0113 if ((tx &&
0114 (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
0115 (!tx &&
0116 (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
0117 return 0;
0118
0119 break;
0120
0121 case 4:
0122 if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
0123 (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
0124 return 0;
0125
0126 break;
0127
0128 case 8:
0129 if ((tx && (mode & SPI_TX_OCTAL)) ||
0130 (!tx && (mode & SPI_RX_OCTAL)))
0131 return 0;
0132
0133 break;
0134
0135 default:
0136 break;
0137 }
0138
0139 return -ENOTSUPP;
0140 }
0141
0142 static bool spi_mem_check_buswidth(struct spi_mem *mem,
0143 const struct spi_mem_op *op)
0144 {
0145 if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
0146 return false;
0147
0148 if (op->addr.nbytes &&
0149 spi_check_buswidth_req(mem, op->addr.buswidth, true))
0150 return false;
0151
0152 if (op->dummy.nbytes &&
0153 spi_check_buswidth_req(mem, op->dummy.buswidth, true))
0154 return false;
0155
0156 if (op->data.dir != SPI_MEM_NO_DATA &&
0157 spi_check_buswidth_req(mem, op->data.buswidth,
0158 op->data.dir == SPI_MEM_DATA_OUT))
0159 return false;
0160
0161 return true;
0162 }
0163
0164 bool spi_mem_default_supports_op(struct spi_mem *mem,
0165 const struct spi_mem_op *op)
0166 {
0167 struct spi_controller *ctlr = mem->spi->controller;
0168 bool op_is_dtr =
0169 op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;
0170
0171 if (op_is_dtr) {
0172 if (!spi_mem_controller_is_capable(ctlr, dtr))
0173 return false;
0174
0175 if (op->cmd.nbytes != 2)
0176 return false;
0177 } else {
0178 if (op->cmd.nbytes != 1)
0179 return false;
0180 }
0181
0182 if (op->data.ecc) {
0183 if (!spi_mem_controller_is_capable(ctlr, ecc))
0184 return false;
0185 }
0186
0187 return spi_mem_check_buswidth(mem, op);
0188 }
0189 EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
0190
0191 static bool spi_mem_buswidth_is_valid(u8 buswidth)
0192 {
0193 if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
0194 return false;
0195
0196 return true;
0197 }
0198
/*
 * Basic sanity checks on @op, independent of any controller:
 * - every phase with a byte count must declare a bus width, and the command
 *   phase is mandatory;
 * - all declared bus widths must be valid (power of two, <= 8);
 * - data buffers must be DMA-able, which rules out on-stack buffers.
 */
static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	/* Buffers must be DMA-able. */
	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
			 object_is_on_stack(op->data.buf.in)))
		return -EINVAL;

	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
			 object_is_on_stack(op->data.buf.out)))
		return -EINVAL;

	return 0;
}
0226
0227 static bool spi_mem_internal_supports_op(struct spi_mem *mem,
0228 const struct spi_mem_op *op)
0229 {
0230 struct spi_controller *ctlr = mem->spi->controller;
0231
0232 if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
0233 return ctlr->mem_ops->supports_op(mem, op);
0234
0235 return spi_mem_default_supports_op(mem, op);
0236 }
0237
0238
0239
0240
0241
0242
0243
0244
0245
0246
0247
0248
0249
0250
0251
0252
/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers are only supporting Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using it because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
0261
/*
 * Prepare exclusive access to the bus for a memory operation: flush queued
 * regular transfers, power up the controller if it is runtime-PM managed,
 * then take the bus and IO locks (in that order — spi_mem_access_end()
 * releases them in reverse).
 */
static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}
0288
0289 static void spi_mem_access_end(struct spi_mem *mem)
0290 {
0291 struct spi_controller *ctlr = mem->spi->controller;
0292
0293 mutex_unlock(&ctlr->io_mutex);
0294 mutex_unlock(&ctlr->bus_lock_mutex);
0295
0296 if (ctlr->auto_runtime_pm)
0297 pm_runtime_put(ctlr->dev.parent);
0298 }
0299
0300
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311
0312 int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
0313 {
0314 unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
0315 struct spi_controller *ctlr = mem->spi->controller;
0316 struct spi_transfer xfers[4] = { };
0317 struct spi_message msg;
0318 u8 *tmpbuf;
0319 int ret;
0320
0321 ret = spi_mem_check_op(op);
0322 if (ret)
0323 return ret;
0324
0325 if (!spi_mem_internal_supports_op(mem, op))
0326 return -ENOTSUPP;
0327
0328 if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
0329 ret = spi_mem_access_start(mem);
0330 if (ret)
0331 return ret;
0332
0333 ret = ctlr->mem_ops->exec_op(mem, op);
0334
0335 spi_mem_access_end(mem);
0336
0337
0338
0339
0340
0341
0342 if (!ret || ret != -ENOTSUPP)
0343 return ret;
0344 }
0345
0346 tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
0347
0348
0349
0350
0351
0352
0353 tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
0354 if (!tmpbuf)
0355 return -ENOMEM;
0356
0357 spi_message_init(&msg);
0358
0359 tmpbuf[0] = op->cmd.opcode;
0360 xfers[xferpos].tx_buf = tmpbuf;
0361 xfers[xferpos].len = op->cmd.nbytes;
0362 xfers[xferpos].tx_nbits = op->cmd.buswidth;
0363 spi_message_add_tail(&xfers[xferpos], &msg);
0364 xferpos++;
0365 totalxferlen++;
0366
0367 if (op->addr.nbytes) {
0368 int i;
0369
0370 for (i = 0; i < op->addr.nbytes; i++)
0371 tmpbuf[i + 1] = op->addr.val >>
0372 (8 * (op->addr.nbytes - i - 1));
0373
0374 xfers[xferpos].tx_buf = tmpbuf + 1;
0375 xfers[xferpos].len = op->addr.nbytes;
0376 xfers[xferpos].tx_nbits = op->addr.buswidth;
0377 spi_message_add_tail(&xfers[xferpos], &msg);
0378 xferpos++;
0379 totalxferlen += op->addr.nbytes;
0380 }
0381
0382 if (op->dummy.nbytes) {
0383 memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
0384 xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
0385 xfers[xferpos].len = op->dummy.nbytes;
0386 xfers[xferpos].tx_nbits = op->dummy.buswidth;
0387 xfers[xferpos].dummy_data = 1;
0388 spi_message_add_tail(&xfers[xferpos], &msg);
0389 xferpos++;
0390 totalxferlen += op->dummy.nbytes;
0391 }
0392
0393 if (op->data.nbytes) {
0394 if (op->data.dir == SPI_MEM_DATA_IN) {
0395 xfers[xferpos].rx_buf = op->data.buf.in;
0396 xfers[xferpos].rx_nbits = op->data.buswidth;
0397 } else {
0398 xfers[xferpos].tx_buf = op->data.buf.out;
0399 xfers[xferpos].tx_nbits = op->data.buswidth;
0400 }
0401
0402 xfers[xferpos].len = op->data.nbytes;
0403 spi_message_add_tail(&xfers[xferpos], &msg);
0404 xferpos++;
0405 totalxferlen += op->data.nbytes;
0406 }
0407
0408 ret = spi_sync(mem->spi, &msg);
0409
0410 kfree(tmpbuf);
0411
0412 if (ret)
0413 return ret;
0414
0415 if (msg.actual_length != totalxferlen)
0416 return -EIO;
0417
0418 return 0;
0419 }
0420 EXPORT_SYMBOL_GPL(spi_mem_exec_op);
0421
0422
0423
0424
0425
0426
0427
0428
0429
0430
0431
0432
0433
/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   was split during this call.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	/*
	 * Without ->exec_op() the op is emulated with regular SPI transfers,
	 * so clamp the data size to the transfer/message limits, accounting
	 * for the cmd/addr/dummy bytes that share the same message.
	 */
	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
0480
0481 static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
0482 u64 offs, size_t len, void *buf)
0483 {
0484 struct spi_mem_op op = desc->info.op_tmpl;
0485 int ret;
0486
0487 op.addr.val = desc->info.offset + offs;
0488 op.data.buf.in = buf;
0489 op.data.nbytes = len;
0490 ret = spi_mem_adjust_op_size(desc->mem, &op);
0491 if (ret)
0492 return ret;
0493
0494 ret = spi_mem_exec_op(desc->mem, &op);
0495 if (ret)
0496 return ret;
0497
0498 return op.data.nbytes;
0499 }
0500
0501 static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
0502 u64 offs, size_t len, const void *buf)
0503 {
0504 struct spi_mem_op op = desc->info.op_tmpl;
0505 int ret;
0506
0507 op.addr.val = desc->info.offset + offs;
0508 op.data.buf.out = buf;
0509 op.data.nbytes = len;
0510 ret = spi_mem_adjust_op_size(desc->mem, &op);
0511 if (ret)
0512 return ret;
0513
0514 ret = spi_mem_exec_op(desc->mem, &op);
0515 if (ret)
0516 return ret;
0517
0518 return op.data.nbytes;
0519 }
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
0532
0533
/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function is creating a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on his own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address cycles is between 1 and 8 bytes. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	/* No (working) dirmap support: fall back to exec_op() emulation. */
	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
0575
0576
0577
0578
0579
0580
0581
0582
/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	/* Only call the controller hook if a real dirmap was created. */
	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
0593
/* devres release callback: the payload is a pointer to the dirmap descriptor. */
static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	spi_mem_dirmap_destroy(*(struct spi_mem_dirmap_desc **)res);
}
0600
0601
0602
0603
0604
0605
0606
0607
0608
0609
0610
0611
0612
0613 struct spi_mem_dirmap_desc *
0614 devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
0615 const struct spi_mem_dirmap_info *info)
0616 {
0617 struct spi_mem_dirmap_desc **ptr, *desc;
0618
0619 ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
0620 GFP_KERNEL);
0621 if (!ptr)
0622 return ERR_PTR(-ENOMEM);
0623
0624 desc = spi_mem_dirmap_create(mem, info);
0625 if (IS_ERR(desc)) {
0626 devres_free(ptr);
0627 } else {
0628 *ptr = desc;
0629 devres_add(dev, ptr);
0630 }
0631
0632 return desc;
0633 }
0634 EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
0635
/* devres match callback: compare the stored descriptor pointer with @data. */
static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **dr = res;

	if (WARN_ON(!dr || !*dr))
		return 0;

	return *dr == data;
}
0645
0646
0647
0648
0649
0650
0651
0652
0653
0654
/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See its
 * documentation for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677
0678
/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		/* No dirmap support: emulate with regular operations. */
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		/* No dirmap support: emulate with regular operations. */
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
0754
/* Convert a generic device_driver pointer back to its enclosing spi_mem_driver. */
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
0759
0760 static int spi_mem_read_status(struct spi_mem *mem,
0761 const struct spi_mem_op *op,
0762 u16 *status)
0763 {
0764 const u8 *bytes = (u8 *)op->data.buf.in;
0765 int ret;
0766
0767 ret = spi_mem_exec_op(mem, op);
0768 if (ret)
0769 return ret;
0770
0771 if (op->data.nbytes > 1)
0772 *status = ((u16)bytes[0] << 8) | bytes[1];
0773 else
0774 *status = bytes[0];
0775
0776 return 0;
0777 }
0778
0779
0780
0781
0782
0783
0784
0785
0786
0787
0788
0789
0790
0791
0792
0793
0794
/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 * -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	/* Only 1- or 2-byte input status operations can be polled. */
	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	/* Try hardware-assisted polling first, when the controller offers it. */
	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	/* Software fallback: repeatedly read the status op until it matches. */
	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		/* Busy-wait for very short initial delays, sleep otherwise. */
		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		/*
		 * Stop polling as soon as spi_mem_read_status() fails or the
		 * masked status matches; a read failure takes precedence over
		 * the -ETIMEDOUT read_poll_timeout() would return.
		 */
		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
0844
0845 static int spi_mem_probe(struct spi_device *spi)
0846 {
0847 struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
0848 struct spi_controller *ctlr = spi->controller;
0849 struct spi_mem *mem;
0850
0851 mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
0852 if (!mem)
0853 return -ENOMEM;
0854
0855 mem->spi = spi;
0856
0857 if (ctlr->mem_ops && ctlr->mem_ops->get_name)
0858 mem->name = ctlr->mem_ops->get_name(mem);
0859 else
0860 mem->name = dev_name(&spi->dev);
0861
0862 if (IS_ERR_OR_NULL(mem->name))
0863 return PTR_ERR_OR_ZERO(mem->name);
0864
0865 spi_set_drvdata(spi, mem);
0866
0867 return memdrv->probe(mem);
0868 }
0869
0870 static void spi_mem_remove(struct spi_device *spi)
0871 {
0872 struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
0873 struct spi_mem *mem = spi_get_drvdata(spi);
0874
0875 if (memdrv->remove)
0876 memdrv->remove(mem);
0877 }
0878
0879 static void spi_mem_shutdown(struct spi_device *spi)
0880 {
0881 struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
0882 struct spi_mem *mem = spi_get_drvdata(spi);
0883
0884 if (memdrv->shutdown)
0885 memdrv->shutdown(mem);
0886 }
0887
0888
0889
0890
0891
0892
0893
0894
0895
0896
0897
/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver. The spi_mem layer installs its own
 * probe/remove/shutdown wrappers around the driver's hooks.
 *
 * Return: 0 in case of success, a negative error core otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
0908
0909
0910
0911
0912
0913
0914
/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);