Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0+
0002 /*
0003  * Copyright (C) 2018 Exceet Electronics GmbH
0004  * Copyright (C) 2018 Bootlin
0005  *
0006  * Author: Boris Brezillon <boris.brezillon@bootlin.com>
0007  */
0008 #include <linux/dmaengine.h>
0009 #include <linux/iopoll.h>
0010 #include <linux/pm_runtime.h>
0011 #include <linux/spi/spi.h>
0012 #include <linux/spi/spi-mem.h>
0013 #include <linux/sched/task_stack.h>
0014 
0015 #include "internals.h"
0016 
0017 #define SPI_MEM_MAX_BUSWIDTH        8
0018 
0019 /**
0020  * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
0021  *                    memory operation
0022  * @ctlr: the SPI controller requesting this dma_map()
0023  * @op: the memory operation containing the buffer to map
0024  * @sgt: a pointer to a non-initialized sg_table that will be filled by this
0025  *   function
0026  *
0027  * Some controllers might want to do DMA on the data buffer embedded in @op.
0028  * This helper prepares everything for you and provides a ready-to-use
0029  * sg_table. This function is not intended to be called from spi drivers.
0030  * Only SPI controller drivers should use it.
0031  * Note that the caller must ensure the memory region pointed by
0032  * op->data.buf.{in,out} is DMA-able before calling this function.
0033  *
0034  * Return: 0 in case of success, a negative error code otherwise.
0035  */
0036 int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
0037                        const struct spi_mem_op *op,
0038                        struct sg_table *sgt)
0039 {
0040     struct device *dmadev;
0041 
0042     if (!op->data.nbytes)
0043         return -EINVAL;
0044 
0045     if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
0046         dmadev = ctlr->dma_tx->device->dev;
0047     else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
0048         dmadev = ctlr->dma_rx->device->dev;
0049     else
0050         dmadev = ctlr->dev.parent;
0051 
0052     if (!dmadev)
0053         return -EINVAL;
0054 
0055     return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
0056                op->data.dir == SPI_MEM_DATA_IN ?
0057                DMA_FROM_DEVICE : DMA_TO_DEVICE);
0058 }
0059 EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
0060 
0061 /**
0062  * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
0063  *                      memory operation
0064  * @ctlr: the SPI controller requesting this dma_unmap()
0065  * @op: the memory operation containing the buffer to unmap
0066  * @sgt: a pointer to an sg_table previously initialized by
0067  *   spi_controller_dma_map_mem_op_data()
0068  *
0069  * Some controllers might want to do DMA on the data buffer embedded in @op.
0070  * This helper prepares things so that the CPU can access the
0071  * op->data.buf.{in,out} buffer again.
0072  *
0073  * This function is not intended to be called from SPI drivers. Only SPI
0074  * controller drivers should use it.
0075  *
0076  * This function should be called after the DMA operation has finished and is
0077  * only valid if the previous spi_controller_dma_map_mem_op_data() call
0078  * returned 0.
0079  *
0080  * Return: 0 in case of success, a negative error code otherwise.
0081  */
0082 void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
0083                       const struct spi_mem_op *op,
0084                       struct sg_table *sgt)
0085 {
0086     struct device *dmadev;
0087 
0088     if (!op->data.nbytes)
0089         return;
0090 
0091     if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
0092         dmadev = ctlr->dma_tx->device->dev;
0093     else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
0094         dmadev = ctlr->dma_rx->device->dev;
0095     else
0096         dmadev = ctlr->dev.parent;
0097 
0098     spi_unmap_buf(ctlr, dmadev, sgt,
0099               op->data.dir == SPI_MEM_DATA_IN ?
0100               DMA_FROM_DEVICE : DMA_TO_DEVICE);
0101 }
0102 EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
0103 
0104 static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
0105 {
0106     u32 mode = mem->spi->mode;
0107 
0108     switch (buswidth) {
0109     case 1:
0110         return 0;
0111 
0112     case 2:
0113         if ((tx &&
0114              (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
0115             (!tx &&
0116              (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
0117             return 0;
0118 
0119         break;
0120 
0121     case 4:
0122         if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
0123             (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
0124             return 0;
0125 
0126         break;
0127 
0128     case 8:
0129         if ((tx && (mode & SPI_TX_OCTAL)) ||
0130             (!tx && (mode & SPI_RX_OCTAL)))
0131             return 0;
0132 
0133         break;
0134 
0135     default:
0136         break;
0137     }
0138 
0139     return -ENOTSUPP;
0140 }
0141 
0142 static bool spi_mem_check_buswidth(struct spi_mem *mem,
0143                    const struct spi_mem_op *op)
0144 {
0145     if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
0146         return false;
0147 
0148     if (op->addr.nbytes &&
0149         spi_check_buswidth_req(mem, op->addr.buswidth, true))
0150         return false;
0151 
0152     if (op->dummy.nbytes &&
0153         spi_check_buswidth_req(mem, op->dummy.buswidth, true))
0154         return false;
0155 
0156     if (op->data.dir != SPI_MEM_NO_DATA &&
0157         spi_check_buswidth_req(mem, op->data.buswidth,
0158                    op->data.dir == SPI_MEM_DATA_OUT))
0159         return false;
0160 
0161     return true;
0162 }
0163 
0164 bool spi_mem_default_supports_op(struct spi_mem *mem,
0165                  const struct spi_mem_op *op)
0166 {
0167     struct spi_controller *ctlr = mem->spi->controller;
0168     bool op_is_dtr =
0169         op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;
0170 
0171     if (op_is_dtr) {
0172         if (!spi_mem_controller_is_capable(ctlr, dtr))
0173             return false;
0174 
0175         if (op->cmd.nbytes != 2)
0176             return false;
0177     } else {
0178         if (op->cmd.nbytes != 1)
0179             return false;
0180     }
0181 
0182     if (op->data.ecc) {
0183         if (!spi_mem_controller_is_capable(ctlr, ecc))
0184             return false;
0185     }
0186 
0187     return spi_mem_check_buswidth(mem, op);
0188 }
0189 EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
0190 
0191 static bool spi_mem_buswidth_is_valid(u8 buswidth)
0192 {
0193     if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
0194         return false;
0195 
0196     return true;
0197 }
0198 
0199 static int spi_mem_check_op(const struct spi_mem_op *op)
0200 {
0201     if (!op->cmd.buswidth || !op->cmd.nbytes)
0202         return -EINVAL;
0203 
0204     if ((op->addr.nbytes && !op->addr.buswidth) ||
0205         (op->dummy.nbytes && !op->dummy.buswidth) ||
0206         (op->data.nbytes && !op->data.buswidth))
0207         return -EINVAL;
0208 
0209     if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
0210         !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
0211         !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
0212         !spi_mem_buswidth_is_valid(op->data.buswidth))
0213         return -EINVAL;
0214 
0215     /* Buffers must be DMA-able. */
0216     if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
0217              object_is_on_stack(op->data.buf.in)))
0218         return -EINVAL;
0219 
0220     if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
0221              object_is_on_stack(op->data.buf.out)))
0222         return -EINVAL;
0223 
0224     return 0;
0225 }
0226 
0227 static bool spi_mem_internal_supports_op(struct spi_mem *mem,
0228                      const struct spi_mem_op *op)
0229 {
0230     struct spi_controller *ctlr = mem->spi->controller;
0231 
0232     if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
0233         return ctlr->mem_ops->supports_op(mem, op);
0234 
0235     return spi_mem_default_supports_op(mem, op);
0236 }
0237 
0238 /**
0239  * spi_mem_supports_op() - Check if a memory device and the controller it is
0240  *             connected to support a specific memory operation
0241  * @mem: the SPI memory
0242  * @op: the memory operation to check
0243  *
0244  * Some controllers are only supporting Single or Dual IOs, others might only
0245  * support specific opcodes, or it can even be that the controller and device
0246  * both support Quad IOs but the hardware prevents you from using it because
0247  * only 2 IO lines are connected.
0248  *
0249  * This function checks whether a specific operation is supported.
0250  *
0251  * Return: true if @op is supported, false otherwise.
0252  */
0253 bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
0254 {
0255     if (spi_mem_check_op(op))
0256         return false;
0257 
0258     return spi_mem_internal_supports_op(mem, op);
0259 }
0260 EXPORT_SYMBOL_GPL(spi_mem_supports_op);
0261 
0262 static int spi_mem_access_start(struct spi_mem *mem)
0263 {
0264     struct spi_controller *ctlr = mem->spi->controller;
0265 
0266     /*
0267      * Flush the message queue before executing our SPI memory
0268      * operation to prevent preemption of regular SPI transfers.
0269      */
0270     spi_flush_queue(ctlr);
0271 
0272     if (ctlr->auto_runtime_pm) {
0273         int ret;
0274 
0275         ret = pm_runtime_resume_and_get(ctlr->dev.parent);
0276         if (ret < 0) {
0277             dev_err(&ctlr->dev, "Failed to power device: %d\n",
0278                 ret);
0279             return ret;
0280         }
0281     }
0282 
0283     mutex_lock(&ctlr->bus_lock_mutex);
0284     mutex_lock(&ctlr->io_mutex);
0285 
0286     return 0;
0287 }
0288 
0289 static void spi_mem_access_end(struct spi_mem *mem)
0290 {
0291     struct spi_controller *ctlr = mem->spi->controller;
0292 
0293     mutex_unlock(&ctlr->io_mutex);
0294     mutex_unlock(&ctlr->bus_lock_mutex);
0295 
0296     if (ctlr->auto_runtime_pm)
0297         pm_runtime_put(ctlr->dev.parent);
0298 }
0299 
0300 /**
0301  * spi_mem_exec_op() - Execute a memory operation
0302  * @mem: the SPI memory
0303  * @op: the memory operation to execute
0304  *
0305  * Executes a memory operation.
0306  *
0307  * This function first checks that @op is supported and then tries to execute
0308  * it.
0309  *
0310  * Return: 0 in case of success, a negative error code otherwise.
0311  */
0312 int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
0313 {
0314     unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
0315     struct spi_controller *ctlr = mem->spi->controller;
0316     struct spi_transfer xfers[4] = { };
0317     struct spi_message msg;
0318     u8 *tmpbuf;
0319     int ret;
0320 
0321     ret = spi_mem_check_op(op);
0322     if (ret)
0323         return ret;
0324 
0325     if (!spi_mem_internal_supports_op(mem, op))
0326         return -ENOTSUPP;
0327 
0328     if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
0329         ret = spi_mem_access_start(mem);
0330         if (ret)
0331             return ret;
0332 
0333         ret = ctlr->mem_ops->exec_op(mem, op);
0334 
0335         spi_mem_access_end(mem);
0336 
0337         /*
0338          * Some controllers only optimize specific paths (typically the
0339          * read path) and expect the core to use the regular SPI
0340          * interface in other cases.
0341          */
0342         if (!ret || ret != -ENOTSUPP)
0343             return ret;
0344     }
0345 
0346     tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
0347 
0348     /*
0349      * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
0350      * we're guaranteed that this buffer is DMA-able, as required by the
0351      * SPI layer.
0352      */
0353     tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
0354     if (!tmpbuf)
0355         return -ENOMEM;
0356 
0357     spi_message_init(&msg);
0358 
0359     tmpbuf[0] = op->cmd.opcode;
0360     xfers[xferpos].tx_buf = tmpbuf;
0361     xfers[xferpos].len = op->cmd.nbytes;
0362     xfers[xferpos].tx_nbits = op->cmd.buswidth;
0363     spi_message_add_tail(&xfers[xferpos], &msg);
0364     xferpos++;
0365     totalxferlen++;
0366 
0367     if (op->addr.nbytes) {
0368         int i;
0369 
0370         for (i = 0; i < op->addr.nbytes; i++)
0371             tmpbuf[i + 1] = op->addr.val >>
0372                     (8 * (op->addr.nbytes - i - 1));
0373 
0374         xfers[xferpos].tx_buf = tmpbuf + 1;
0375         xfers[xferpos].len = op->addr.nbytes;
0376         xfers[xferpos].tx_nbits = op->addr.buswidth;
0377         spi_message_add_tail(&xfers[xferpos], &msg);
0378         xferpos++;
0379         totalxferlen += op->addr.nbytes;
0380     }
0381 
0382     if (op->dummy.nbytes) {
0383         memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
0384         xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
0385         xfers[xferpos].len = op->dummy.nbytes;
0386         xfers[xferpos].tx_nbits = op->dummy.buswidth;
0387         xfers[xferpos].dummy_data = 1;
0388         spi_message_add_tail(&xfers[xferpos], &msg);
0389         xferpos++;
0390         totalxferlen += op->dummy.nbytes;
0391     }
0392 
0393     if (op->data.nbytes) {
0394         if (op->data.dir == SPI_MEM_DATA_IN) {
0395             xfers[xferpos].rx_buf = op->data.buf.in;
0396             xfers[xferpos].rx_nbits = op->data.buswidth;
0397         } else {
0398             xfers[xferpos].tx_buf = op->data.buf.out;
0399             xfers[xferpos].tx_nbits = op->data.buswidth;
0400         }
0401 
0402         xfers[xferpos].len = op->data.nbytes;
0403         spi_message_add_tail(&xfers[xferpos], &msg);
0404         xferpos++;
0405         totalxferlen += op->data.nbytes;
0406     }
0407 
0408     ret = spi_sync(mem->spi, &msg);
0409 
0410     kfree(tmpbuf);
0411 
0412     if (ret)
0413         return ret;
0414 
0415     if (msg.actual_length != totalxferlen)
0416         return -EIO;
0417 
0418     return 0;
0419 }
0420 EXPORT_SYMBOL_GPL(spi_mem_exec_op);
0421 
0422 /**
0423  * spi_mem_get_name() - Return the SPI mem device name to be used by the
0424  *          upper layer if necessary
0425  * @mem: the SPI memory
0426  *
0427  * This function allows SPI mem users to retrieve the SPI mem device name.
0428  * It is useful if the upper layer needs to expose a custom name for
0429  * compatibility reasons.
0430  *
0431  * Return: a string containing the name of the memory device to be used
0432  *     by the SPI mem user
0433  */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	/*
	 * The name is assigned once at probe time — either by the
	 * controller's ->get_name() hook or from the spi_device's name
	 * (see spi_mem_probe()) — so it is safe to return directly.
	 */
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);
0439 
0440 /**
0441  * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
0442  *                match controller limitations
0443  * @mem: the SPI memory
0444  * @op: the operation to adjust
0445  *
0446  * Some controllers have FIFO limitations and must split a data transfer
0447  * operation into multiple ones, others require a specific alignment for
0448  * optimized accesses. This function allows SPI mem drivers to split a single
0449  * operation into multiple sub-operations when required.
0450  *
0451  * Return: a negative error code if the controller can't properly adjust @op,
0452  *     0 otherwise. Note that @op->data.nbytes will be updated if @op
0453  *     can't be handled in a single step.
0454  */
0455 int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
0456 {
0457     struct spi_controller *ctlr = mem->spi->controller;
0458     size_t len;
0459 
0460     if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
0461         return ctlr->mem_ops->adjust_op_size(mem, op);
0462 
0463     if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
0464         len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
0465 
0466         if (len > spi_max_transfer_size(mem->spi))
0467             return -EINVAL;
0468 
0469         op->data.nbytes = min3((size_t)op->data.nbytes,
0470                        spi_max_transfer_size(mem->spi),
0471                        spi_max_message_size(mem->spi) -
0472                        len);
0473         if (!op->data.nbytes)
0474             return -EINVAL;
0475     }
0476 
0477     return 0;
0478 }
0479 EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
0480 
0481 static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
0482                       u64 offs, size_t len, void *buf)
0483 {
0484     struct spi_mem_op op = desc->info.op_tmpl;
0485     int ret;
0486 
0487     op.addr.val = desc->info.offset + offs;
0488     op.data.buf.in = buf;
0489     op.data.nbytes = len;
0490     ret = spi_mem_adjust_op_size(desc->mem, &op);
0491     if (ret)
0492         return ret;
0493 
0494     ret = spi_mem_exec_op(desc->mem, &op);
0495     if (ret)
0496         return ret;
0497 
0498     return op.data.nbytes;
0499 }
0500 
0501 static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
0502                        u64 offs, size_t len, const void *buf)
0503 {
0504     struct spi_mem_op op = desc->info.op_tmpl;
0505     int ret;
0506 
0507     op.addr.val = desc->info.offset + offs;
0508     op.data.buf.out = buf;
0509     op.data.nbytes = len;
0510     ret = spi_mem_adjust_op_size(desc->mem, &op);
0511     if (ret)
0512         return ret;
0513 
0514     ret = spi_mem_exec_op(desc->mem, &op);
0515     if (ret)
0516         return ret;
0517 
0518     return op.data.nbytes;
0519 }
0520 
0521 /**
0522  * spi_mem_dirmap_create() - Create a direct mapping descriptor
0523  * @mem: SPI mem device this direct mapping should be created for
0524  * @info: direct mapping information
0525  *
0526  * This function is creating a direct mapping descriptor which can then be used
0527  * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
0528  * If the SPI controller driver does not support direct mapping, this function
0529  * falls back to an implementation using spi_mem_exec_op(), so that the caller
0530  * doesn't have to bother implementing a fallback on his own.
0531  *
0532  * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
0533  */
0534 struct spi_mem_dirmap_desc *
0535 spi_mem_dirmap_create(struct spi_mem *mem,
0536               const struct spi_mem_dirmap_info *info)
0537 {
0538     struct spi_controller *ctlr = mem->spi->controller;
0539     struct spi_mem_dirmap_desc *desc;
0540     int ret = -ENOTSUPP;
0541 
0542     /* Make sure the number of address cycles is between 1 and 8 bytes. */
0543     if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
0544         return ERR_PTR(-EINVAL);
0545 
0546     /* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
0547     if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
0548         return ERR_PTR(-EINVAL);
0549 
0550     desc = kzalloc(sizeof(*desc), GFP_KERNEL);
0551     if (!desc)
0552         return ERR_PTR(-ENOMEM);
0553 
0554     desc->mem = mem;
0555     desc->info = *info;
0556     if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
0557         ret = ctlr->mem_ops->dirmap_create(desc);
0558 
0559     if (ret) {
0560         desc->nodirmap = true;
0561         if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
0562             ret = -ENOTSUPP;
0563         else
0564             ret = 0;
0565     }
0566 
0567     if (ret) {
0568         kfree(desc);
0569         return ERR_PTR(ret);
0570     }
0571 
0572     return desc;
0573 }
0574 EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
0575 
0576 /**
0577  * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
0578  * @desc: the direct mapping descriptor to destroy
0579  *
0580  * This function destroys a direct mapping descriptor previously created by
0581  * spi_mem_dirmap_create().
0582  */
0583 void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
0584 {
0585     struct spi_controller *ctlr = desc->mem->spi->controller;
0586 
0587     if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
0588         ctlr->mem_ops->dirmap_destroy(desc);
0589 
0590     kfree(desc);
0591 }
0592 EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
0593 
/* devres release callback: @res holds a pointer to the descriptor. */
static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	spi_mem_dirmap_destroy(*(struct spi_mem_dirmap_desc **)res);
}
0600 
0601 /**
0602  * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
0603  *                it to a device
0604  * @dev: device the dirmap desc will be attached to
0605  * @mem: SPI mem device this direct mapping should be created for
0606  * @info: direct mapping information
0607  *
0608  * devm_ variant of the spi_mem_dirmap_create() function. See
0609  * spi_mem_dirmap_create() for more details.
0610  *
0611  * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
0612  */
0613 struct spi_mem_dirmap_desc *
0614 devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
0615                const struct spi_mem_dirmap_info *info)
0616 {
0617     struct spi_mem_dirmap_desc **ptr, *desc;
0618 
0619     ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
0620                GFP_KERNEL);
0621     if (!ptr)
0622         return ERR_PTR(-ENOMEM);
0623 
0624     desc = spi_mem_dirmap_create(mem, info);
0625     if (IS_ERR(desc)) {
0626         devres_free(ptr);
0627     } else {
0628         *ptr = desc;
0629         devres_add(dev, ptr);
0630     }
0631 
0632     return desc;
0633 }
0634 EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
0635 
/* devres match callback: does this node wrap the descriptor in @data? */
static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	/* A node without a descriptor would be a bookkeeping bug. */
	return !WARN_ON(!ptr || !*ptr) && *ptr == data;
}
0645 
0646 /**
0647  * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
0648  *                 to a device
0649  * @dev: device the dirmap desc is attached to
0650  * @desc: the direct mapping descriptor to destroy
0651  *
0652  * devm_ variant of the spi_mem_dirmap_destroy() function. See
0653  * spi_mem_dirmap_destroy() for more details.
0654  */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	/*
	 * Drop the matching devres node; its release callback
	 * (devm_spi_mem_dirmap_release) destroys @desc.
	 */
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
0662 
0663 /**
0664  * spi_mem_dirmap_read() - Read data through a direct mapping
0665  * @desc: direct mapping descriptor
0666  * @offs: offset to start reading from. Note that this is not an absolute
0667  *    offset, but the offset within the direct mapping which already has
0668  *    its own offset
0669  * @len: length in bytes
0670  * @buf: destination buffer. This buffer must be DMA-able
0671  *
0672  * This function reads data from a memory device using a direct mapping
0673  * previously instantiated with spi_mem_dirmap_create().
0674  *
0675  * Return: the amount of data read from the memory device or a negative error
0676  * code. Note that the returned size might be smaller than @len, and the caller
0677  * is responsible for calling spi_mem_dirmap_read() again when that happens.
0678  */
0679 ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
0680                 u64 offs, size_t len, void *buf)
0681 {
0682     struct spi_controller *ctlr = desc->mem->spi->controller;
0683     ssize_t ret;
0684 
0685     if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
0686         return -EINVAL;
0687 
0688     if (!len)
0689         return 0;
0690 
0691     if (desc->nodirmap) {
0692         ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
0693     } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
0694         ret = spi_mem_access_start(desc->mem);
0695         if (ret)
0696             return ret;
0697 
0698         ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);
0699 
0700         spi_mem_access_end(desc->mem);
0701     } else {
0702         ret = -ENOTSUPP;
0703     }
0704 
0705     return ret;
0706 }
0707 EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
0708 
0709 /**
0710  * spi_mem_dirmap_write() - Write data through a direct mapping
0711  * @desc: direct mapping descriptor
0712  * @offs: offset to start writing from. Note that this is not an absolute
0713  *    offset, but the offset within the direct mapping which already has
0714  *    its own offset
0715  * @len: length in bytes
0716  * @buf: source buffer. This buffer must be DMA-able
0717  *
0718  * This function writes data to a memory device using a direct mapping
0719  * previously instantiated with spi_mem_dirmap_create().
0720  *
0721  * Return: the amount of data written to the memory device or a negative error
0722  * code. Note that the returned size might be smaller than @len, and the caller
0723  * is responsible for calling spi_mem_dirmap_write() again when that happens.
0724  */
0725 ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
0726                  u64 offs, size_t len, const void *buf)
0727 {
0728     struct spi_controller *ctlr = desc->mem->spi->controller;
0729     ssize_t ret;
0730 
0731     if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
0732         return -EINVAL;
0733 
0734     if (!len)
0735         return 0;
0736 
0737     if (desc->nodirmap) {
0738         ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
0739     } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
0740         ret = spi_mem_access_start(desc->mem);
0741         if (ret)
0742             return ret;
0743 
0744         ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);
0745 
0746         spi_mem_access_end(desc->mem);
0747     } else {
0748         ret = -ENOTSUPP;
0749     }
0750 
0751     return ret;
0752 }
0753 EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
0754 
/* Map a generic device_driver back to its enclosing spi_mem_driver. */
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
0759 
0760 static int spi_mem_read_status(struct spi_mem *mem,
0761                    const struct spi_mem_op *op,
0762                    u16 *status)
0763 {
0764     const u8 *bytes = (u8 *)op->data.buf.in;
0765     int ret;
0766 
0767     ret = spi_mem_exec_op(mem, op);
0768     if (ret)
0769         return ret;
0770 
0771     if (op->data.nbytes > 1)
0772         *status = ((u16)bytes[0] << 8) | bytes[1];
0773     else
0774         *status = bytes[0];
0775 
0776     return 0;
0777 }
0778 
0779 /**
0780  * spi_mem_poll_status() - Poll memory device status
0781  * @mem: SPI memory device
0782  * @op: the memory operation to execute
 * @mask: status bitmask to check
0784  * @match: (status & mask) expected value
0785  * @initial_delay_us: delay in us before starting to poll
0786  * @polling_delay_us: time to sleep between reads in us
0787  * @timeout_ms: timeout in milliseconds
0788  *
0789  * This function polls a status register and returns when
0790  * (status & mask) == match or when the timeout has expired.
0791  *
0792  * Return: 0 in case of success, -ETIMEDOUT in case of error,
0793  *         -EOPNOTSUPP if not supported.
0794  */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	/* -EOPNOTSUPP doubles as "hardware path not attempted/declined". */
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	/* Only 1- or 2-byte status reads are supported. */
	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	/* Try the controller's hardware status-polling engine first. */
	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	/* Software fallback: poll by repeatedly reading the status op. */
	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		/* Busy-wait for very short initial delays, sleep otherwise. */
		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		/*
		 * Stop early when a read fails (read_status_ret != 0) so a
		 * transport error is not misreported as -ETIMEDOUT.
		 */
		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
0844 
0845 static int spi_mem_probe(struct spi_device *spi)
0846 {
0847     struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
0848     struct spi_controller *ctlr = spi->controller;
0849     struct spi_mem *mem;
0850 
0851     mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
0852     if (!mem)
0853         return -ENOMEM;
0854 
0855     mem->spi = spi;
0856 
0857     if (ctlr->mem_ops && ctlr->mem_ops->get_name)
0858         mem->name = ctlr->mem_ops->get_name(mem);
0859     else
0860         mem->name = dev_name(&spi->dev);
0861 
0862     if (IS_ERR_OR_NULL(mem->name))
0863         return PTR_ERR_OR_ZERO(mem->name);
0864 
0865     spi_set_drvdata(spi, mem);
0866 
0867     return memdrv->probe(mem);
0868 }
0869 
0870 static void spi_mem_remove(struct spi_device *spi)
0871 {
0872     struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
0873     struct spi_mem *mem = spi_get_drvdata(spi);
0874 
0875     if (memdrv->remove)
0876         memdrv->remove(mem);
0877 }
0878 
0879 static void spi_mem_shutdown(struct spi_device *spi)
0880 {
0881     struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
0882     struct spi_mem *mem = spi_get_drvdata(spi);
0883 
0884     if (memdrv->shutdown)
0885         memdrv->shutdown(mem);
0886 }
0887 
0888 /**
0889  * spi_mem_driver_register_with_owner() - Register a SPI memory driver
0890  * @memdrv: the SPI memory driver to register
0891  * @owner: the owner of this driver
0892  *
0893  * Registers a SPI memory driver.
0894  *
 * Return: 0 in case of success, a negative error code otherwise.
0896  */
0897 
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	/* Route the generic spi_driver callbacks through the spi_mem shims. */
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
0908 
0909 /**
0910  * spi_mem_driver_unregister() - Unregister a SPI memory driver
0911  * @memdrv: the SPI memory driver to unregister
0912  *
0913  * Unregisters a SPI memory driver.
0914  */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	/*
	 * Unregister the embedded spi_driver that was registered by
	 * spi_mem_driver_register_with_owner().
	 */
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);