// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2017 Linaro Ltd.
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

#define spi_pcpu_stats_totalize(ret, in, field)				\
do {									\
	int i;								\
	ret = 0;							\
	for_each_possible_cpu(i) {					\
		const struct spi_statistics *pcpu_stats;		\
		u64 inc;						\
		unsigned int start;					\
		pcpu_stats = per_cpu_ptr(in, i);			\
		do {							\
			start = u64_stats_fetch_begin_irq(		\
					&pcpu_stats->syncp);		\
			inc = u64_stats_read(&pcpu_stats->field);	\
		} while (u64_stats_fetch_retry_irq(			\
					&pcpu_stats->syncp, start));	\
		ret += inc;						\
	}								\
} while (0)

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,		\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	ssize_t len;							\
	u64 val;							\
	spi_pcpu_stats_totalize(val, stat, field);			\
	len = sysfs_emit(buf, "%llu\n", val);				\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
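
/*
 * Illustrative sketch (not part of this file): a driver serving several chip
 * variants can recover the matched table entry's driver_data from its
 * probe() callback. The "foo" names and id table below are hypothetical.
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo",   FOO_TYPE_A },
 *		{ "foo-b", FOO_TYPE_B },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct spi_device_id *id = spi_get_device_id(spi);
 *		unsigned long variant = id->driver_data;
 *		...
 *	}
 */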

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
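
/*
 * Illustrative sketch (not part of this file): drivers normally reach
 * __spi_register_driver() through the spi_register_driver() wrapper or the
 * module_spi_driver() convenience macro. The "foo" driver below is
 * hypothetical.
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */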

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process also used
 * to protect object of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
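
/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * spi_alloc_device() and spi_add_device(). Field values are hypothetical.
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// drop the reference taken at alloc
 *		...
 *	}
 */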

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */
	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
0807
0808 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
0809 struct spi_board_info *bi)
0810 {
0811 struct spi_device *dev;
0812
0813 if (ctlr->bus_num != bi->bus_num)
0814 return;
0815
0816 dev = spi_new_device(ctlr, bi);
0817 if (!dev)
0818 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
0819 bi->modalias);
0820 }

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of any
 * embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
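
/*
 * Illustrative sketch (not part of this file): a board file would typically
 * declare a table like the one below and register it at arch_initcall time.
 * All names and values here are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-sensor",
 *			.bus_num	= 0,
 *			.chip_select	= 2,
 *			.max_speed_hz	= 500000,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */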

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res:     the SPI resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
		       (!enable && spi->controller->last_cs != spi->chip_select)) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi->chip_select : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
		spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of the GPIO polarity and
			 * thus the SPISerialBus() resource defines it on the per-chip
			 * basis. In order to avoid a chain of negations, the GPIO
			 * polarity is considered being Active High. Even for the cases
			 * when _DSD() is involved (in the updated versions of ACPI)
			 * the GPIO CS polarity must be defined Active High to avoid
			 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
			 * into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
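
/*
 * Worked example (illustrative only): a delay of { .value = 4,
 * .unit = SPI_DELAY_UNIT_SCK } on a transfer whose effective_speed_hz is
 * 10 MHz converts to 4 * DIV_ROUND_UP(1000000000, 10000000) = 4 * 100 ns
 * = 400 ns. If no effective speed is known and speed_hz is 10 MHz, the
 * clock is underestimated as 5 MHz and the result doubles to 800 ns.
 */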

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
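
/*
 * Illustrative sketch (not part of this file): a controller driver whose
 * transfer_one() returns a positive value to signal an in-flight transfer
 * typically calls spi_finalize_current_transfer() from its completion
 * interrupt. The "foo" handler below is hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		... // drain FIFOs, check status registers ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */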

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * Drivers implementation of transfer_one_message() must arrange for
	 * spi_finalize_current_message() to get called. Most drivers will do
	 * this in the calling context, but some don't. For those cases, a
	 * completion is used to guarantee that this function does not return
	 * until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * Use of the following two flags enable to opportunistically skip the
	 * use of the completion since its use involves expensive spin locks.
	 * In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the IO mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer. The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;
	ctlr->queue_empty = true;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* Get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	int ret;

	mesg = ctlr->cur_msg;

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/*
	 * In the prepare_messages callback the SPI bus has the opportunity
	 * to split a transfer to smaller chunks.
	 *
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
	 */
	spi_res_release(ctlr, mesg);

	if (mesg->prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	mesg->prepared = false;

	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
	smp_mb(); /* See __spi_pump_transfer_message()... */
	if (READ_ONCE(ctlr->cur_msg_need_completion))
		complete(&ctlr->cur_msg_completion);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_destroy_worker(ctlr->kworker);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	ctlr->queue_empty = false;
	if (!ctlr->busy && need_pump)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: SPI device which is requesting transfer
 * @msg: SPI message which is to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/**
 * spi_flush_queue - Send all pending messages in the queue from the callers'
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. Is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.) */
	if (of_property_read_bool(nc, "spi-cpha"))
		spi->mode |= SPI_CPHA;
	if (of_property_read_bool(nc, "spi-cpol"))
		spi->mode |= SPI_CPOL;
	if (of_property_read_bool(nc, "spi-3wire"))
		spi->mode |= SPI_3WIRE;
	if (of_property_read_bool(nc, "spi-lsb-first"))
		spi->mode |= SPI_LSB_FIRST;
	if (of_property_read_bool(nc, "spi-cs-high"))
		spi->mode |= SPI_CS_HIGH;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 0:
			spi->mode |= SPI_NO_TX;
			break;
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_TX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 0:
			spi->mode |= SPI_NO_RX;
			break;
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		case 8:
			spi->mode |= SPI_RX_OCTAL;
			break;
		default:
			dev_warn(&ctlr->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (spi_controller_is_slave(ctlr)) {
		if (!of_node_name_eq(nc, "slave")) {
			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
				nc);
			return -EINVAL;
		}
		return 0;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
			nc, rc);
		return rc;
	}
	spi->chip_select = value;

	/* Device speed */
	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
		spi->max_speed_hz = value;

	return 0;
}

static struct spi_device *
of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;

	/* Alloc an spi_device */
	spi = spi_alloc_device(ctlr);
	if (!spi) {
		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
		goto err_out;
	}

	rc = of_spi_parse_dt(ctlr, spi, nc);
	if (rc)
		goto err_out;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;
	spi->dev.fwnode = of_fwnode_handle(nc);

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @ctlr:	Pointer to spi_controller device
 *
 * Registers an spi_device for each child node of controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

/**
 * spi_new_ancillary_device() - Register ancillary SPI device
 * @spi:	 Pointer to the main SPI device registering the ancillary device
 * @chip_select: Chip Select of the ancillary device
 *
 * Register an ancillary SPI device; for example some chips have a chip-select
 * for normal device usage and another one for setup/firmware access. As this
 * device is sharing the main SPI device's characteristics, this function
 * takes over the SPI mode and speed of the main device.
 *
 * Return: the new device, or an ERR_PTR on error.
 */
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
					    u8 chip_select)
{
	struct spi_device *ancillary;
	int rc = 0;

	/* Alloc an spi_device */
	ancillary = spi_alloc_device(spi->controller);
	if (!ancillary) {
		rc = -ENOMEM;
		goto err_out;
	}

	strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

	/* Use provided chip-select for ancillary device */
	ancillary->chip_select = chip_select;

	/* Take over SPI mode/speed from SPI main device */
	ancillary->max_speed_hz = spi->max_speed_hz;
	ancillary->mode = spi->mode;

	/* Register the new device */
	rc = spi_add_device_locked(ancillary);
	if (rc) {
		dev_err(&spi->dev, "failed to register ancillary device\n");
		goto err_out;
	}

	return ancillary;

err_out:
	spi_dev_put(ancillary);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
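
/*
 * Illustrative sketch (not part of this file): a driver for a chip with a
 * separate firmware-access chip-select might do, from its probe() (where the
 * controller's add_lock is already held by the core):
 *
 *	struct spi_device *fw_spi;
 *
 *	fw_spi = spi_new_ancillary_device(spi, spi->chip_select + 1);
 *	if (IS_ERR(fw_spi))
 *		return PTR_ERR(fw_spi);
 *
 * The "+ 1" chip-select is hypothetical; the ancillary CS is entirely
 * board-specific.
 */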

#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller 	*ctlr;
	u32			max_speed_hz;
	u32			mode;
	int			irq;
	u8			bits_per_word;
	u8			chip_select;
	int			n;
	int			index;
};

static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
	struct acpi_resource_spi_serialbus *sb;
	int *count = data;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;

	sb = &ares->data.spi_serial_bus;
	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
		return 1;

	*count = *count + 1;

	return 1;
}

/**
 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
 * @adev:	ACPI device
 *
 * Return: the number of SpiSerialBus resources in the ACPI-device's
 * resource-list; or a negative error code.
 */
int acpi_spi_count_resources(struct acpi_device *adev)
{
	LIST_HEAD(r);
	int count = 0;
	int ret;

	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
	if (ret < 0)
		return ret;

	acpi_dev_free_resource_list(&r);

	return count;
}
EXPORT_SYMBOL_GPL(acpi_spi_count_resources);

static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
					    struct acpi_spi_lookup *lookup)
{
	const union acpi_object *obj;

	if (!x86_apple_machine)
		return;

	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length >= 4)
		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8)
		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;

	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_LSB_FIRST;

	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPOL;

	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
	    && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
		lookup->mode |= SPI_CPHA;
}
2436
2437 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2438
2439 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2440 {
2441 struct acpi_spi_lookup *lookup = data;
2442 struct spi_controller *ctlr = lookup->ctlr;
2443
2444 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2445 struct acpi_resource_spi_serialbus *sb;
2446 acpi_handle parent_handle;
2447 acpi_status status;
2448
2449 sb = &ares->data.spi_serial_bus;
2450 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2451
2452 if (lookup->index != -1 && lookup->n++ != lookup->index)
2453 return 1;
2454
2455 status = acpi_get_handle(NULL,
2456 sb->resource_source.string_ptr,
2457 &parent_handle);
2458
2459 if (ACPI_FAILURE(status))
2460 return -ENODEV;
2461
2462 if (ctlr) {
2463 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2464 return -ENODEV;
2465 } else {
2466 struct acpi_device *adev;
2467
2468 adev = acpi_fetch_acpi_dev(parent_handle);
2469 if (!adev)
2470 return -ENODEV;
2471
2472 ctlr = acpi_spi_find_controller_by_adev(adev);
2473 if (!ctlr)
2474 return -EPROBE_DEFER;
2475
2476 lookup->ctlr = ctlr;
2477 }
2478
/*
 * ACPI DeviceSelection numbering is handled by the
 * host controller driver in Windows and can vary
 * from driver to driver. In Linux we always expect
 * 0 .. max - 1 so we need to ask the driver to
 * translate between the two schemes.
 */
2486 if (ctlr->fw_translate_cs) {
2487 int cs = ctlr->fw_translate_cs(ctlr,
2488 sb->device_selection);
2489 if (cs < 0)
2490 return cs;
2491 lookup->chip_select = cs;
2492 } else {
2493 lookup->chip_select = sb->device_selection;
2494 }
2495
2496 lookup->max_speed_hz = sb->connection_speed;
2497 lookup->bits_per_word = sb->data_bit_length;
2498
2499 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2500 lookup->mode |= SPI_CPHA;
2501 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2502 lookup->mode |= SPI_CPOL;
2503 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2504 lookup->mode |= SPI_CS_HIGH;
2505 }
2506 } else if (lookup->irq < 0) {
2507 struct resource r;
2508
2509 if (acpi_dev_resource_interrupt(ares, 0, &r))
2510 lookup->irq = r.start;
2511 }
2512
/* Always tell the ACPI core to skip this resource */
2514 return 1;
2515 }
2516
/**
 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
 * @ctlr: controller to which the spi device belongs
 * @adev: ACPI Device for the spi device
 * @index: Index of the spi resource inside the ACPI Node
 *
 * This should be used to allocate a new spi device from an ACPI Device node.
 * The caller is responsible for calling spi_add_device() to register the spi device.
 *
 * If ctlr is set to NULL, the Controller for the spi device will be looked up
 * using the resource.
 * If index is set to -1, index is not used.
 * Note: If index is -1, ctlr must be set.
 *
 * Return: a pointer to the new device, or ERR_PTR on error.
 */
2533 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2534 struct acpi_device *adev,
2535 int index)
2536 {
2537 acpi_handle parent_handle = NULL;
2538 struct list_head resource_list;
2539 struct acpi_spi_lookup lookup = {};
2540 struct spi_device *spi;
2541 int ret;
2542
2543 if (!ctlr && index == -1)
2544 return ERR_PTR(-EINVAL);
2545
2546 lookup.ctlr = ctlr;
2547 lookup.irq = -1;
2548 lookup.index = index;
2549 lookup.n = 0;
2550
2551 INIT_LIST_HEAD(&resource_list);
2552 ret = acpi_dev_get_resources(adev, &resource_list,
2553 acpi_spi_add_resource, &lookup);
2554 acpi_dev_free_resource_list(&resource_list);
2555
2556 if (ret < 0)
/* Found SPI in _CRS but it points to another controller */
2558 return ERR_PTR(ret);
2559
2560 if (!lookup.max_speed_hz &&
2561 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2562 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
/* Apple does not use _CRS but nested devices for SPI slaves */
2564 acpi_spi_parse_apple_properties(adev, &lookup);
2565 }
2566
2567 if (!lookup.max_speed_hz)
2568 return ERR_PTR(-ENODEV);
2569
2570 spi = spi_alloc_device(lookup.ctlr);
2571 if (!spi) {
2572 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2573 dev_name(&adev->dev));
2574 return ERR_PTR(-ENOMEM);
2575 }
2576
2577 ACPI_COMPANION_SET(&spi->dev, adev);
2578 spi->max_speed_hz = lookup.max_speed_hz;
2579 spi->mode |= lookup.mode;
2580 spi->irq = lookup.irq;
2581 spi->bits_per_word = lookup.bits_per_word;
2582 spi->chip_select = lookup.chip_select;
2583
2584 return spi;
2585 }
2586 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2587
2588 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2589 struct acpi_device *adev)
2590 {
2591 struct spi_device *spi;
2592
2593 if (acpi_bus_get_status(adev) || !adev->status.present ||
2594 acpi_device_enumerated(adev))
2595 return AE_OK;
2596
2597 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2598 if (IS_ERR(spi)) {
2599 if (PTR_ERR(spi) == -ENOMEM)
2600 return AE_NO_MEMORY;
2601 else
2602 return AE_OK;
2603 }
2604
2605 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2606 sizeof(spi->modalias));
2607
2608 if (spi->irq < 0)
2609 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2610
2611 acpi_device_set_enumerated(adev);
2612
2613 adev->power.flags.ignore_parent = true;
2614 if (spi_add_device(spi)) {
2615 adev->power.flags.ignore_parent = false;
2616 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2617 dev_name(&adev->dev));
2618 spi_dev_put(spi);
2619 }
2620
2621 return AE_OK;
2622 }
2623
2624 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2625 void *data, void **return_value)
2626 {
2627 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2628 struct spi_controller *ctlr = data;
2629
2630 if (!adev)
2631 return AE_OK;
2632
2633 return acpi_register_spi_device(ctlr, adev);
2634 }
2635
2636 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2637
2638 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2639 {
2640 acpi_status status;
2641 acpi_handle handle;
2642
2643 handle = ACPI_HANDLE(ctlr->dev.parent);
2644 if (!handle)
2645 return;
2646
2647 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2648 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2649 acpi_spi_add_device, NULL, ctlr, NULL);
2650 if (ACPI_FAILURE(status))
2651 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2652 }
2653 #else
2654 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2655 #endif
2656
2657 static void spi_controller_release(struct device *dev)
2658 {
2659 struct spi_controller *ctlr;
2660
2661 ctlr = container_of(dev, struct spi_controller, dev);
2662 kfree(ctlr);
2663 }
2664
2665 static struct class spi_master_class = {
2666 .name = "spi_master",
2667 .owner = THIS_MODULE,
2668 .dev_release = spi_controller_release,
2669 .dev_groups = spi_master_groups,
2670 };
2671
2672 #ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
2678 int spi_slave_abort(struct spi_device *spi)
2679 {
2680 struct spi_controller *ctlr = spi->controller;
2681
2682 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2683 return ctlr->slave_abort(ctlr);
2684
2685 return -ENOTSUPP;
2686 }
2687 EXPORT_SYMBOL_GPL(spi_slave_abort);
2688
2689 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2690 char *buf)
2691 {
2692 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2693 dev);
2694 struct device *child;
2695
2696 child = device_find_any_child(&ctlr->dev);
2697 return sprintf(buf, "%s\n",
2698 child ? to_spi_device(child)->modalias : NULL);
2699 }
2700
2701 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2702 const char *buf, size_t count)
2703 {
2704 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2705 dev);
2706 struct spi_device *spi;
2707 struct device *child;
2708 char name[32];
2709 int rc;
2710
2711 rc = sscanf(buf, "%31s", name);
2712 if (rc != 1 || !name[0])
2713 return -EINVAL;
2714
2715 child = device_find_any_child(&ctlr->dev);
2716 if (child) {
/* Remove registered slave */
2718 device_unregister(child);
2719 put_device(child);
2720 }
2721
2722 if (strcmp(name, "(null)")) {
/* Register new slave */
2724 spi = spi_alloc_device(ctlr);
2725 if (!spi)
2726 return -ENOMEM;
2727
2728 strlcpy(spi->modalias, name, sizeof(spi->modalias));
2729
2730 rc = spi_add_device(spi);
2731 if (rc) {
2732 spi_dev_put(spi);
2733 return rc;
2734 }
2735 }
2736
2737 return count;
2738 }
2739
2740 static DEVICE_ATTR_RW(slave);
2741
2742 static struct attribute *spi_slave_attrs[] = {
2743 &dev_attr_slave.attr,
2744 NULL,
2745 };
2746
2747 static const struct attribute_group spi_slave_group = {
2748 .attrs = spi_slave_attrs,
2749 };
2750
2751 static const struct attribute_group *spi_slave_groups[] = {
2752 &spi_controller_statistics_group,
2753 &spi_slave_group,
2754 NULL,
2755 };
2756
2757 static struct class spi_slave_class = {
2758 .name = "spi_slave",
2759 .owner = THIS_MODULE,
2760 .dev_release = spi_controller_release,
2761 .dev_groups = spi_slave_groups,
2762 };
2763 #else
2764 extern struct class spi_slave_class;
2765 #endif
2766
/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata(); the memory is cacheline aligned;
 *	drivers granting DMA access to portions of their private data need to
 *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
 */
2792 struct spi_controller *__spi_alloc_controller(struct device *dev,
2793 unsigned int size, bool slave)
2794 {
2795 struct spi_controller *ctlr;
2796 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2797
2798 if (!dev)
2799 return NULL;
2800
2801 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2802 if (!ctlr)
2803 return NULL;
2804
2805 device_initialize(&ctlr->dev);
2806 INIT_LIST_HEAD(&ctlr->queue);
2807 spin_lock_init(&ctlr->queue_lock);
2808 spin_lock_init(&ctlr->bus_lock_spinlock);
2809 mutex_init(&ctlr->bus_lock_mutex);
2810 mutex_init(&ctlr->io_mutex);
2811 mutex_init(&ctlr->add_lock);
2812 ctlr->bus_num = -1;
2813 ctlr->num_chipselect = 1;
2814 ctlr->slave = slave;
2815 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2816 ctlr->dev.class = &spi_slave_class;
2817 else
2818 ctlr->dev.class = &spi_master_class;
2819 ctlr->dev.parent = dev;
2820 pm_suspend_ignore_children(&ctlr->dev, true);
2821 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2822
2823 return ctlr;
2824 }
2825 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
2826
2827 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2828 {
2829 spi_controller_put(*(struct spi_controller **)ctlr);
2830 }
2831
/**
 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
 * @dev: physical device of SPI controller
 * @size: how much zeroed driver-private data to allocate
 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
 * Context: can sleep
 *
 * Allocate an SPI controller and automatically release a reference on it
 * when @dev is unbound from its driver.  Drivers are thus relieved from
 * needing to call spi_controller_put().
 *
 * The arguments to this function are identical to __spi_alloc_controller().
 *
 * Return: the SPI controller structure on success, else NULL.
 */
2847 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2848 unsigned int size,
2849 bool slave)
2850 {
2851 struct spi_controller **ptr, *ctlr;
2852
2853 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2854 GFP_KERNEL);
2855 if (!ptr)
2856 return NULL;
2857
2858 ctlr = __spi_alloc_controller(dev, size, slave);
2859 if (ctlr) {
2860 ctlr->devm_allocated = true;
2861 *ptr = ctlr;
2862 devres_add(dev, ptr);
2863 } else {
2864 devres_free(ptr);
2865 }
2866
2867 return ctlr;
2868 }
2869 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2870
/**
 * spi_get_gpio_descs() - grab chip select GPIOs for the master
 * @ctlr: The SPI master to grab GPIO descriptors for
 */
2875 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2876 {
2877 int nb, i;
2878 struct gpio_desc **cs;
2879 struct device *dev = &ctlr->dev;
2880 unsigned long native_cs_mask = 0;
2881 unsigned int num_cs_gpios = 0;
2882
2883 nb = gpiod_count(dev, "cs");
2884 if (nb < 0) {
/* No GPIOs at all is fine, else return the error */
2886 if (nb == -ENOENT)
2887 return 0;
2888 return nb;
2889 }
2890
2891 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2892
2893 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2894 GFP_KERNEL);
2895 if (!cs)
2896 return -ENOMEM;
2897 ctlr->cs_gpiods = cs;
2898
2899 for (i = 0; i < nb; i++) {
/*
 * Most chipselects are active low, the inverted
 * semantics are handled by special quirks in gpiolib,
 * so initializing them GPIOD_OUT_LOW here means
 * "unasserted", in most cases this will drive the physical
 * line high.
 */
2907 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2908 GPIOD_OUT_LOW);
2909 if (IS_ERR(cs[i]))
2910 return PTR_ERR(cs[i]);
2911
2912 if (cs[i]) {
/*
 * If we find a CS GPIO, name it after the device and
 * chip select line.
 */
2917 char *gpioname;
2918
2919 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2920 dev_name(dev), i);
2921 if (!gpioname)
2922 return -ENOMEM;
2923 gpiod_set_consumer_name(cs[i], gpioname);
2924 num_cs_gpios++;
2925 continue;
2926 }
2927
2928 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
2929 dev_err(dev, "Invalid native chip select %d\n", i);
2930 return -EINVAL;
2931 }
2932 native_cs_mask |= BIT(i);
2933 }
2934
2935 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
2936
2937 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios &&
2938 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
2939 dev_err(dev, "No unused native chip select available\n");
2940 return -EINVAL;
2941 }
2942
2943 return 0;
2944 }
2945
2946 static int spi_controller_check_ops(struct spi_controller *ctlr)
2947 {
/*
 * The controller may implement only the high-level SPI-memory like
 * operations if it does not support regular SPI transfers, and this
 * is a valid use case.
 * If ->mem_ops is NULL, we request that at least one of the
 * ->transfer_xxx() methods be implemented.
 */
2955 if (ctlr->mem_ops) {
2956 if (!ctlr->mem_ops->exec_op)
2957 return -EINVAL;
2958 } else if (!ctlr->transfer && !ctlr->transfer_one &&
2959 !ctlr->transfer_one_message) {
2960 return -EINVAL;
2961 }
2962
2963 return 0;
2964 }
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989 int spi_register_controller(struct spi_controller *ctlr)
2990 {
2991 struct device *dev = ctlr->dev.parent;
2992 struct boardinfo *bi;
2993 int status;
2994 int id, first_dynamic;
2995
2996 if (!dev)
2997 return -ENODEV;
2998
/*
 * Make sure all necessary hooks are implemented before registering
 * the SPI controller.
 */
3003 status = spi_controller_check_ops(ctlr);
3004 if (status)
3005 return status;
3006
3007 if (ctlr->bus_num >= 0) {
/* Devices with a fixed bus num must check-in with the num */
3009 mutex_lock(&board_lock);
3010 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3011 ctlr->bus_num + 1, GFP_KERNEL);
3012 mutex_unlock(&board_lock);
3013 if (WARN(id < 0, "couldn't get idr"))
3014 return id == -ENOSPC ? -EBUSY : id;
3015 ctlr->bus_num = id;
3016 } else if (ctlr->dev.of_node) {
/* Allocate dynamic bus number using Linux idr */
3018 id = of_alias_get_id(ctlr->dev.of_node, "spi");
3019 if (id >= 0) {
3020 ctlr->bus_num = id;
3021 mutex_lock(&board_lock);
3022 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
3023 ctlr->bus_num + 1, GFP_KERNEL);
3024 mutex_unlock(&board_lock);
3025 if (WARN(id < 0, "couldn't get idr"))
3026 return id == -ENOSPC ? -EBUSY : id;
3027 }
3028 }
3029 if (ctlr->bus_num < 0) {
3030 first_dynamic = of_alias_get_highest_id("spi");
3031 if (first_dynamic < 0)
3032 first_dynamic = 0;
3033 else
3034 first_dynamic++;
3035
3036 mutex_lock(&board_lock);
3037 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
3038 0, GFP_KERNEL);
3039 mutex_unlock(&board_lock);
3040 if (WARN(id < 0, "couldn't get idr"))
3041 return id;
3042 ctlr->bus_num = id;
3043 }
3044 ctlr->bus_lock_flag = 0;
3045 init_completion(&ctlr->xfer_completion);
3046 init_completion(&ctlr->cur_msg_completion);
3047 if (!ctlr->max_dma_len)
3048 ctlr->max_dma_len = INT_MAX;
3049
/*
 * Register the device, then userspace will see it.
 * Registration fails if the bus ID is in use.
 */
3054 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3055
3056 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3057 status = spi_get_gpio_descs(ctlr);
3058 if (status)
3059 goto free_bus_id;
/*
 * A controller using GPIO descriptors always
 * supports SPI_CS_HIGH if need be.
 */
3064 ctlr->mode_bits |= SPI_CS_HIGH;
3065 }
3066
/*
 * Even if it's just one always-selected device, there must
 * be at least one chipselect.
 */
3071 if (!ctlr->num_chipselect) {
3072 status = -EINVAL;
3073 goto free_bus_id;
3074 }
3075
/* Setting last_cs to -1 means no chip selected */
3077 ctlr->last_cs = -1;
3078
3079 status = device_add(&ctlr->dev);
3080 if (status < 0)
3081 goto free_bus_id;
3082 dev_dbg(dev, "registered %s %s\n",
3083 spi_controller_is_slave(ctlr) ? "slave" : "master",
3084 dev_name(&ctlr->dev));
3085
/*
 * If we're using a queued driver, start the queue. Note that we don't
 * need the queueing logic if the driver is only supporting high-level
 * memory operations.
 */
3091 if (ctlr->transfer) {
3092 dev_info(dev, "controller is unqueued, this is deprecated\n");
3093 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3094 status = spi_controller_initialize_queue(ctlr);
3095 if (status) {
3096 device_del(&ctlr->dev);
3097 goto free_bus_id;
3098 }
3099 }
3100
3101 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3102 if (!ctlr->pcpu_statistics) {
3103 dev_err(dev, "Error allocating per-cpu statistics\n");
3104 status = -ENOMEM;
3105 goto destroy_queue;
3106 }
3107
3108 mutex_lock(&board_lock);
3109 list_add_tail(&ctlr->list, &spi_controller_list);
3110 list_for_each_entry(bi, &board_list, list)
3111 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3112 mutex_unlock(&board_lock);
3113
/* Register devices from the device tree and ACPI */
3115 of_register_spi_devices(ctlr);
3116 acpi_register_spi_devices(ctlr);
3117 return status;
3118
3119 destroy_queue:
3120 spi_destroy_queue(ctlr);
3121 free_bus_id:
3122 mutex_lock(&board_lock);
3123 idr_remove(&spi_master_idr, ctlr->bus_num);
3124 mutex_unlock(&board_lock);
3125 return status;
3126 }
3127 EXPORT_SYMBOL_GPL(spi_register_controller);
3128
3129 static void devm_spi_unregister(struct device *dev, void *res)
3130 {
3131 spi_unregister_controller(*(struct spi_controller **)res);
3132 }
3133
/**
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev: device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_controller() which will
 * automatically be unregistered and freed.
 *
 * Return: zero on success, else a negative error code.
 */
3147 int devm_spi_register_controller(struct device *dev,
3148 struct spi_controller *ctlr)
3149 {
3150 struct spi_controller **ptr;
3151 int ret;
3152
3153 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3154 if (!ptr)
3155 return -ENOMEM;
3156
3157 ret = spi_register_controller(ctlr);
3158 if (!ret) {
3159 *ptr = ctlr;
3160 devres_add(dev, ptr);
3161 } else {
3162 devres_free(ptr);
3163 }
3164
3165 return ret;
3166 }
3167 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
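
/*
 * Usage sketch (illustrative): the usual managed probe sequence of a
 * controller driver. With devm_spi_alloc_master() plus
 * devm_spi_register_controller(), no explicit spi_controller_put() or
 * spi_unregister_controller() is needed on the error or removal paths.
 * struct mydrv and mydrv_transfer_one are hypothetical.
 */
#if 0
struct mydrv {
	void __iomem *regs;
};

static int mydrv_transfer_one(struct spi_controller *ctlr,
			      struct spi_device *spi,
			      struct spi_transfer *xfer)
{
	/* Program the FIFO/DMA here; return 0 once the transfer is done */
	return 0;
}

static int mydrv_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct mydrv));
	if (!ctlr)
		return -ENOMEM;

	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->num_chipselect = 4;		/* hypothetical */
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	ctlr->transfer_one = mydrv_transfer_one;

	return devm_spi_register_controller(&pdev->dev, ctlr);
}
#endif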
3168
3169 static int __unregister(struct device *dev, void *null)
3170 {
3171 spi_unregister_device(to_spi_device(dev));
3172 return 0;
3173 }
3174
/**
 * spi_unregister_controller - unregister SPI master or slave controller
 * @ctlr: the controller being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 *
 * Note that this function also drops a reference to the controller.
 */
3187 void spi_unregister_controller(struct spi_controller *ctlr)
3188 {
3189 struct spi_controller *found;
3190 int id = ctlr->bus_num;
3191
/* Prevent addition of new devices, unregister existing ones */
3193 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3194 mutex_lock(&ctlr->add_lock);
3195
3196 device_for_each_child(&ctlr->dev, NULL, __unregister);
3197
/* First make sure that this controller was ever added */
3199 mutex_lock(&board_lock);
3200 found = idr_find(&spi_master_idr, id);
3201 mutex_unlock(&board_lock);
3202 if (ctlr->queued) {
3203 if (spi_destroy_queue(ctlr))
3204 dev_err(&ctlr->dev, "queue remove failed\n");
3205 }
3206 mutex_lock(&board_lock);
3207 list_del(&ctlr->list);
3208 mutex_unlock(&board_lock);
3209
3210 device_del(&ctlr->dev);
3211
/* Free bus id */
3213 mutex_lock(&board_lock);
3214 if (found == ctlr)
3215 idr_remove(&spi_master_idr, id);
3216 mutex_unlock(&board_lock);
3217
3218 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3219 mutex_unlock(&ctlr->add_lock);
3220
/*
 * Release the last reference on the controller if its driver
 * has not yet been converted to devm_spi_alloc_master/slave().
 */
3224 if (!ctlr->devm_allocated)
3225 put_device(&ctlr->dev);
3226 }
3227 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3228
3229 int spi_controller_suspend(struct spi_controller *ctlr)
3230 {
3231 int ret;
3232
/* Basically no-ops for non-queued controllers */
3234 if (!ctlr->queued)
3235 return 0;
3236
3237 ret = spi_stop_queue(ctlr);
3238 if (ret)
3239 dev_err(&ctlr->dev, "queue stop failed\n");
3240
3241 return ret;
3242 }
3243 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3244
3245 int spi_controller_resume(struct spi_controller *ctlr)
3246 {
3247 int ret;
3248
3249 if (!ctlr->queued)
3250 return 0;
3251
3252 ret = spi_start_queue(ctlr);
3253 if (ret)
3254 dev_err(&ctlr->dev, "queue restart failed\n");
3255
3256 return ret;
3257 }
3258 EXPORT_SYMBOL_GPL(spi_controller_resume);
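
/*
 * Usage sketch (illustrative): controller drivers typically call
 * spi_controller_suspend()/spi_controller_resume() from their system PM
 * hooks so the message queue is quiesced across suspend. This assumes the
 * driver stored the controller as drvdata; the mydrv_* names are
 * hypothetical.
 */
#if 0
static int __maybe_unused mydrv_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	return spi_controller_suspend(ctlr);
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	return spi_controller_resume(ctlr);
}
#endif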
3259
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */
3263
3264 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3265 struct spi_message *msg,
3266 void *res)
3267 {
3268 struct spi_replaced_transfers *rxfer = res;
3269 size_t i;
3270
/* Call extra callback if requested */
3272 if (rxfer->release)
3273 rxfer->release(ctlr, msg, res);
3274
/* Insert replaced transfers back into the message */
3276 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3277
/* Remove the formerly inserted entries */
3279 for (i = 0; i < rxfer->inserted; i++)
3280 list_del(&rxfer->inserted_transfers[i].transfer_list);
3281 }
3282
/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Return: pointer to @spi_replaced_transfers,
 *         PTR_ERR(...) in case of errors.
 */
3298 static struct spi_replaced_transfers *spi_replace_transfers(
3299 struct spi_message *msg,
3300 struct spi_transfer *xfer_first,
3301 size_t remove,
3302 size_t insert,
3303 spi_replaced_release_t release,
3304 size_t extradatasize,
3305 gfp_t gfp)
3306 {
3307 struct spi_replaced_transfers *rxfer;
3308 struct spi_transfer *xfer;
3309 size_t i;
3310
/* Allocate the structure using spi_res */
3312 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3313 struct_size(rxfer, inserted_transfers, insert)
3314 + extradatasize,
3315 gfp);
3316 if (!rxfer)
3317 return ERR_PTR(-ENOMEM);
3318
/* The release code to invoke before running the generic release */
3320 rxfer->release = release;
3321
/* Assign extradata */
3323 if (extradatasize)
3324 rxfer->extradata =
3325 &rxfer->inserted_transfers[insert];
3326
/* Init the replaced_transfers list */
3328 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3329
/*
 * Assign the list_entry after which we should reinsert
 * the @replaced_transfers - it may be spi_message.messages!
 */
3334 rxfer->replaced_after = xfer_first->transfer_list.prev;
3335
/* Remove the requested number of transfers */
3337 for (i = 0; i < remove; i++) {
/*
 * If the entry after replaced_after is msg->transfers,
 * then we have been requested to remove more transfers
 * than are in the list.
 */
3343 if (rxfer->replaced_after->next == &msg->transfers) {
3344 dev_err(&msg->spi->dev,
3345 "requested to remove more spi_transfers than are available\n");
3346
/* Insert replaced transfers back into the message */
3348 rxfer->replaced_after);
3349
/* Free the spi_replaced_transfers structure */
3351 spi_res_free(rxfer);
3352
/* And return with an error */
3354 return ERR_PTR(-EINVAL);
3355 }
3356
/*
 * Remove the entry after replaced_after from the list of
 * transfers and add it to the list of replaced_transfers.
 */
3361 list_move_tail(rxfer->replaced_after->next,
3362 &rxfer->replaced_transfers);
3363 }
3364
/*
 * Create copies of the given xfer with identical settings,
 * based on the first transfer to get removed.
 */
3369 for (i = 0; i < insert; i++) {
/* We need to run in reverse order */
3371 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3372
/* Copy all spi_transfer data */
3374 memcpy(xfer, xfer_first, sizeof(*xfer));
3375
/* Add to list */
3377 list_add(&xfer->transfer_list, rxfer->replaced_after);
3378
/* Clear cs_change and delay for all but the last inserted transfer */
3380 if (i) {
3381 xfer->cs_change = false;
3382 xfer->delay.value = 0;
3383 }
3384 }
3385
/* Set up inserted... */
3387 rxfer->inserted = insert;
3388
/* ...and register it with spi_res/spi_message */
3390 spi_res_add(msg, rxfer);
3391
3392 return rxfer;
3393 }
3394
3395 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3396 struct spi_message *msg,
3397 struct spi_transfer **xferp,
3398 size_t maxsize,
3399 gfp_t gfp)
3400 {
3401 struct spi_transfer *xfer = *xferp, *xfers;
3402 struct spi_replaced_transfers *srt;
3403 size_t offset;
3404 size_t count, i;
3405
/* Calculate how many we have to replace */
3407 count = DIV_ROUND_UP(xfer->len, maxsize);
3408
/* Create replacement */
3410 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3411 if (IS_ERR(srt))
3412 return PTR_ERR(srt);
3413 xfers = srt->inserted_transfers;
3414
/*
 * Now handle each of those newly inserted spi_transfers.
 * Note that the replacement spi_transfers are all preset
 * to the same values as *xferp, so tx_buf, rx_buf and len
 * are all identical (as well as most others),
 * so we just have to fix up len and the pointers.
 *
 * This also includes support for the deprecated
 * spi_message.is_dma_mapped interface.
 */

/*
 * The first transfer just needs the length modified, so we
 * run it outside the loop.
 */
3430 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3431
/* All the others need rx_buf/tx_buf also set */
3433 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
/* Update rx_buf, tx_buf and DMA addresses */
3435 if (xfers[i].rx_buf)
3436 xfers[i].rx_buf += offset;
3437 if (xfers[i].rx_dma)
3438 xfers[i].rx_dma += offset;
3439 if (xfers[i].tx_buf)
3440 xfers[i].tx_buf += offset;
3441 if (xfers[i].tx_dma)
3442 xfers[i].tx_dma += offset;
3443
/* Update length */
3445 xfers[i].len = min(maxsize, xfers[i].len - offset);
3446 }
3447
/*
 * We set up xferp to the last entry we have inserted,
 * so that we skip those already split transfers.
 */
3452 *xferp = &xfers[count - 1];
3453
/* Increment statistics counters */
3455 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3456 transfers_split_maxsize);
3457 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3458 transfers_split_maxsize);
3459
3460 return 0;
3461 }
3462
/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr:    the @spi_controller for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum length a single transfer may have
 * @gfp:     GFP_KERNEL if the call should block, GFP_ATOMIC if the call
 *           is in IRQ-context
 *
 * Return: status of transformation
 */
3474 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3475 struct spi_message *msg,
3476 size_t maxsize,
3477 gfp_t gfp)
3478 {
3479 struct spi_transfer *xfer;
3480 int ret;
3481
/*
 * Iterate over the transfer_list,
 * but note that xfer is advanced to the last transfer inserted
 * to avoid checking sizes again unnecessarily (also xfer does
 * potentially belong to a different list by the time the
 * replacement has happened).
 */
3489 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3490 if (xfer->len > maxsize) {
3491 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3492 maxsize, gfp);
3493 if (ret)
3494 return ret;
3495 }
3496 }
3497
3498 return 0;
3499 }
3500 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
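
/*
 * Usage sketch (illustrative): a controller whose DMA engine is limited to
 * 64 KiB per descriptor could split oversized transfers from its
 * prepare_message() hook. mydrv_prepare_message is hypothetical.
 */
#if 0
static int mydrv_prepare_message(struct spi_controller *ctlr,
				 struct spi_message *msg)
{
	return spi_split_transfers_maxsize(ctlr, msg, SZ_64K, GFP_KERNEL);
}
#endif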
3501
/*-------------------------------------------------------------------------*/

/*
 * Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */
3508 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3509 u8 bits_per_word)
3510 {
3511 if (ctlr->bits_per_word_mask) {
/* Only 32 bits fit in the mask */
3513 if (bits_per_word > 32)
3514 return -EINVAL;
3515 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3516 return -EINVAL;
3517 }
3518
3519 return 0;
3520 }
3521
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
3542 int spi_setup(struct spi_device *spi)
3543 {
3544 unsigned bad_bits, ugly_bits;
3545 int status = 0;
3546
/*
 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
 * are set at the same time.
 */
3551 if ((hweight_long(spi->mode &
3552 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3553 (hweight_long(spi->mode &
3554 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3555 dev_err(&spi->dev,
3556 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3557 return -EINVAL;
3558 }
3559
3560 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3561 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3562 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3563 return -EINVAL;
3564
/*
 * Help drivers fail *cleanly* when they need options
 * that aren't supported with their current controller.
 * SPI_CS_WORD has a fallback software implementation,
 * so it is ignored here.
 */
3570 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3571 SPI_NO_TX | SPI_NO_RX);
3572 ugly_bits = bad_bits &
3573 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3574 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3575 if (ugly_bits) {
3576 dev_warn(&spi->dev,
3577 "setup: ignoring unsupported mode bits %x\n",
3578 ugly_bits);
3579 spi->mode &= ~ugly_bits;
3580 bad_bits &= ~ugly_bits;
3581 }
3582 if (bad_bits) {
3583 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3584 bad_bits);
3585 return -EINVAL;
3586 }
3587
3588 if (!spi->bits_per_word) {
3589 spi->bits_per_word = 8;
3590 } else {
/*
 * Some controllers may not support the default 8 bits-per-word,
 * so only perform the check when this is explicitly provided.
 */
3595 status = __spi_validate_bits_per_word(spi->controller,
3596 spi->bits_per_word);
3597 if (status)
3598 return status;
3599 }
3600
3601 if (spi->controller->max_speed_hz &&
3602 (!spi->max_speed_hz ||
3603 spi->max_speed_hz > spi->controller->max_speed_hz))
3604 spi->max_speed_hz = spi->controller->max_speed_hz;
3605
3606 mutex_lock(&spi->controller->io_mutex);
3607
3608 if (spi->controller->setup) {
3609 status = spi->controller->setup(spi);
3610 if (status) {
3611 mutex_unlock(&spi->controller->io_mutex);
3612 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3613 status);
3614 return status;
3615 }
3616 }
3617
3618 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3619 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3620 if (status < 0) {
3621 mutex_unlock(&spi->controller->io_mutex);
3622 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3623 status);
3624 return status;
3625 }
3626
/*
 * We do not want to return a positive value from pm_runtime_get:
 * there are many instances of devices calling spi_setup() and
 * checking for a non-zero return value instead of a negative
 * one.
 */
3633 status = 0;
3634
3635 spi_set_cs(spi, false, true);
3636 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3637 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3638 } else {
3639 spi_set_cs(spi, false, true);
3640 }
3641
3642 mutex_unlock(&spi->controller->io_mutex);
3643
3644 if (spi->rt && !spi->controller->rt) {
3645 spi->controller->rt = true;
3646 spi_set_thread_rt(spi->controller);
3647 }
3648
3649 trace_spi_setup(spi, status);
3650
3651 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3652 spi->mode & SPI_MODE_X_MASK,
3653 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3654 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3655 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3656 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3657 spi->bits_per_word, spi->max_speed_hz,
3658 status);
3659
3660 return status;
3661 }
3662 EXPORT_SYMBOL_GPL(spi_setup);
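
/*
 * Usage sketch (illustrative): a protocol driver adjusting the device
 * settings before its first transfer; the mode, word size and speed values
 * below are hypothetical.
 */
#if 0
static int mydev_init(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;	/* 1 MHz */

	return spi_setup(spi);
}
#endif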
3663
3664 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3665 struct spi_device *spi)
3666 {
3667 int delay1, delay2;
3668
3669 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3670 if (delay1 < 0)
3671 return delay1;
3672
3673 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3674 if (delay2 < 0)
3675 return delay2;
3676
3677 if (delay1 < delay2)
3678 memcpy(&xfer->word_delay, &spi->word_delay,
3679 sizeof(xfer->word_delay));
3680
3681 return 0;
3682 }
3683
3684 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3685 {
3686 struct spi_controller *ctlr = spi->controller;
3687 struct spi_transfer *xfer;
3688 int w_size;
3689
3690 if (list_empty(&message->transfers))
3691 return -EINVAL;
3692
/*
 * If an SPI controller does not support toggling the CS line on each
 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
 * for the CS line, we can emulate the CS-per-word hardware function by
 * splitting transfers into one-word transfers and ensuring that
 * cs_change is set for each transfer.
 */
3700 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3701 spi->cs_gpiod)) {
3702 size_t maxsize;
3703 int ret;
3704
3705 maxsize = (spi->bits_per_word + 7) / 8;
3706
/* spi_split_transfers_maxsize() requires message->spi */
3708 message->spi = spi;
3709
3710 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3711 GFP_KERNEL);
3712 if (ret)
3713 return ret;
3714
3715 list_for_each_entry(xfer, &message->transfers, transfer_list) {
/* Don't change cs_change on the last entry in the list */
3717 if (list_is_last(&xfer->transfer_list, &message->transfers))
3718 break;
3719 xfer->cs_change = 1;
3720 }
3721 }
3722
/*
 * Half-duplex links include original MicroWire, and ones with
 * only one data pin like SPI_3WIRE (switches direction) or where
 * either MOSI or MISO is missing.  They can also be caused by
 * software limitations.
 */
3729 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3730 (spi->mode & SPI_3WIRE)) {
3731 unsigned flags = ctlr->flags;
3732
3733 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3734 if (xfer->rx_buf && xfer->tx_buf)
3735 return -EINVAL;
3736 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3737 return -EINVAL;
3738 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3739 return -EINVAL;
3740 }
3741 }
3742
/*
 * Set transfer bits_per_word and max speed as spi device default if
 * it is not set for this transfer.
 * Set transfer tx_nbits and rx_nbits as single transfer default
 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
 * Ensure transfer speed_hz is not zero: default to the device's
 * max_speed_hz, then clamp to the controller's maximum.
 */
3751 message->frame_length = 0;
3752 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3753 xfer->effective_speed_hz = 0;
3754 message->frame_length += xfer->len;
3755 if (!xfer->bits_per_word)
3756 xfer->bits_per_word = spi->bits_per_word;
3757
3758 if (!xfer->speed_hz)
3759 xfer->speed_hz = spi->max_speed_hz;
3760
3761 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3762 xfer->speed_hz = ctlr->max_speed_hz;
3763
3764 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3765 return -EINVAL;
3766
/*
 * SPI transfer length should be multiple of SPI word size
 * where SPI word size should be power-of-two multiple.
 */
3771 if (xfer->bits_per_word <= 8)
3772 w_size = 1;
3773 else if (xfer->bits_per_word <= 16)
3774 w_size = 2;
3775 else
3776 w_size = 4;
3777
/* No partial transfers accepted */
3779 if (xfer->len % w_size)
3780 return -EINVAL;
3781
3782 if (xfer->speed_hz && ctlr->min_speed_hz &&
3783 xfer->speed_hz < ctlr->min_speed_hz)
3784 return -EINVAL;
3785
3786 if (xfer->tx_buf && !xfer->tx_nbits)
3787 xfer->tx_nbits = SPI_NBITS_SINGLE;
3788 if (xfer->rx_buf && !xfer->rx_nbits)
3789 xfer->rx_nbits = SPI_NBITS_SINGLE;
3790
/*
 * Check transfer tx/rx_nbits:
 * 1. check the value matches one of single, dual and quad
 * 2. check tx/rx_nbits match the mode in spi_device
 */
3795 if (xfer->tx_buf) {
3796 if (spi->mode & SPI_NO_TX)
3797 return -EINVAL;
3798 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3799 xfer->tx_nbits != SPI_NBITS_DUAL &&
3800 xfer->tx_nbits != SPI_NBITS_QUAD)
3801 return -EINVAL;
3802 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3803 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3804 return -EINVAL;
3805 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3806 !(spi->mode & SPI_TX_QUAD))
3807 return -EINVAL;
3808 }
3809
3810 if (xfer->rx_buf) {
3811 if (spi->mode & SPI_NO_RX)
3812 return -EINVAL;
3813 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3814 xfer->rx_nbits != SPI_NBITS_DUAL &&
3815 xfer->rx_nbits != SPI_NBITS_QUAD)
3816 return -EINVAL;
3817 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3818 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3819 return -EINVAL;
3820 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3821 !(spi->mode & SPI_RX_QUAD))
3822 return -EINVAL;
3823 }
3824
3825 if (_spi_xfer_word_delay_update(xfer, spi))
3826 return -EINVAL;
3827 }
3828
3829 message->status = -EINPROGRESS;
3830
3831 return 0;
3832 }
3833
3834 static int __spi_async(struct spi_device *spi, struct spi_message *message)
3835 {
3836 struct spi_controller *ctlr = spi->controller;
3837 struct spi_transfer *xfer;
3838
/*
 * Some controllers do not support doing regular SPI transfers. Return
 * ENOTSUPP when this is the case.
 */
3843 if (!ctlr->transfer)
3844 return -ENOTSUPP;
3845
3846 message->spi = spi;
3847
3848 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
3849 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
3850
3851 trace_spi_message_submit(message);
3852
3853 if (!ctlr->ptp_sts_supported) {
3854 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3855 xfer->ptp_sts_word_pre = 0;
3856 ptp_read_system_prets(xfer->ptp_sts);
3857 }
3858 }
3859
3860 return ctlr->transfer(spi, message);
3861 }
3862
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
3894 int spi_async(struct spi_device *spi, struct spi_message *message)
3895 {
3896 struct spi_controller *ctlr = spi->controller;
3897 int ret;
3898 unsigned long flags;
3899
3900 ret = __spi_validate(spi, message);
3901 if (ret != 0)
3902 return ret;
3903
3904 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3905
3906 if (ctlr->bus_lock_flag)
3907 ret = -EBUSY;
3908 else
3909 ret = __spi_async(spi, message);
3910
3911 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3912
3913 return ret;
3914 }
3915 EXPORT_SYMBOL_GPL(spi_async);
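
/*
 * Usage sketch (illustrative): submitting a message asynchronously with a
 * completion callback. struct mydev and its fields are hypothetical; note
 * that the message and buffers must stay valid until the callback has run.
 */
#if 0
struct mydev {
	struct spi_device *spi;
	struct spi_transfer xfer;
	struct spi_message msg;
	struct completion xfer_done;
};

static void mydev_complete(void *context)
{
	struct mydev *md = context;

	/* Runs in a context that cannot sleep; msg.status holds the result */
	complete(&md->xfer_done);
}

static int mydev_kick(struct mydev *md)
{
	spi_message_init_with_transfers(&md->msg, &md->xfer, 1);
	md->msg.complete = mydev_complete;
	md->msg.context = md;

	return spi_async(md->spi, &md->msg);
}
#endif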
3916
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * Like spi_async(), but used when the caller already holds the bus lock
 * taken by spi_bus_lock(); see spi_async() for the full semantics of
 * asynchronous submission and of the completion callback.
 *
 * Return: zero on success, else a negative error code.
 */
3948 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3949 {
3950 struct spi_controller *ctlr = spi->controller;
3951 int ret;
3952 unsigned long flags;
3953
3954 ret = __spi_validate(spi, message);
3955 if (ret != 0)
3956 return ret;
3957
3958 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3959
3960 ret = __spi_async(spi, message);
3961
3962 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3963
3964 return ret;
3966 }
3967
3968 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
3969 {
3970 bool was_busy;
3971 int ret;
3972
3973 mutex_lock(&ctlr->io_mutex);
3974
3975 was_busy = ctlr->busy;
3976
3977 ctlr->cur_msg = msg;
3978 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
3979 if (ret)
3980 goto out;
3981
3982 ctlr->cur_msg = NULL;
3983 ctlr->fallback = false;
3984
3985 if (!was_busy) {
3986 kfree(ctlr->dummy_rx);
3987 ctlr->dummy_rx = NULL;
3988 kfree(ctlr->dummy_tx);
3989 ctlr->dummy_tx = NULL;
3990 if (ctlr->unprepare_transfer_hardware &&
3991 ctlr->unprepare_transfer_hardware(ctlr))
3992 dev_err(&ctlr->dev,
3993 "failed to unprepare transfer hardware\n");
3994 spi_idle_runtime_pm(ctlr);
3995 }
3996
3997 out:
3998 mutex_unlock(&ctlr->io_mutex);
3999 }
4000
/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */
4008
4009 static void spi_complete(void *arg)
4010 {
4011 complete(arg);
4012 }
4013
4014 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4015 {
4016 DECLARE_COMPLETION_ONSTACK(done);
4017 int status;
4018 struct spi_controller *ctlr = spi->controller;
4019
4020 status = __spi_validate(spi, message);
4021 if (status != 0)
4022 return status;
4023
4024 message->spi = spi;
4025
4026 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4027 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4028
/*
 * Checking queue_empty here only guarantees async/sync message
 * ordering when coming from the same context. It does not need to
 * guard against reentrancy from a different context. The io_mutex
 * will catch those cases.
 */
4035 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4036 message->actual_length = 0;
4037 message->status = -EINPROGRESS;
4038
4039 trace_spi_message_submit(message);
4040
4041 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4042 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4043
4044 __spi_transfer_message_noqueue(ctlr, message);
4045
4046 return message->status;
4047 }
4048
/*
 * There are messages in the async queue that could have originated
 * from the same context, so we need to preserve ordering.
 * Therefore we send the message to the async queue and wait until they
 * are completed.
 */
4055 message->complete = spi_complete;
4056 message->context = &done;
4057 status = spi_async_locked(spi, message);
4058 if (status == 0) {
4059 wait_for_completion(&done);
4060 status = message->status;
4061 }
4062 message->context = NULL;
4063
4064 return status;
4065 }
4066
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
4088 int spi_sync(struct spi_device *spi, struct spi_message *message)
4089 {
4090 int ret;
4091
4092 mutex_lock(&spi->controller->bus_lock_mutex);
4093 ret = __spi_sync(spi, message);
4094 mutex_unlock(&spi->controller->bus_lock_mutex);
4095
4096 return ret;
4097 }
4098 EXPORT_SYMBOL_GPL(spi_sync);
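
/*
 * Usage sketch (illustrative): a simple synchronous full-duplex transfer.
 * Because spi_sync() blocks until completion, the transfer and message may
 * live on the caller's stack.
 */
#if 0
static int mydev_xfer(struct spi_device *spi, const void *tx, void *rx,
		      size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = len,
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);

	return spi_sync(spi, &msg);
}
#endif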
4099
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
4116 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4117 {
4118 return __spi_sync(spi, message);
4119 }
4120 EXPORT_SYMBOL_GPL(spi_sync_locked);
4121
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
4137 int spi_bus_lock(struct spi_controller *ctlr)
4138 {
4139 unsigned long flags;
4140
4141 mutex_lock(&ctlr->bus_lock_mutex);
4142
4143 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4144 ctlr->bus_lock_flag = 1;
4145 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4146
/* Mutex remains locked until spi_bus_unlock() is called */
4148
4149 return 0;
4150 }
4151 EXPORT_SYMBOL_GPL(spi_bus_lock);
4152
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an
 * spi_bus_lock call.
 *
 * Return: always zero.
 */
4166 int spi_bus_unlock(struct spi_controller *ctlr)
4167 {
4168 ctlr->bus_lock_flag = 0;
4169
4170 mutex_unlock(&ctlr->bus_lock_mutex);
4171
4172 return 0;
4173 }
4174 EXPORT_SYMBOL_GPL(spi_bus_unlock);
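
/*
 * Usage sketch (illustrative): holding the bus across a multi-message
 * sequence, e.g. a read-modify-write that must not be interleaved with
 * traffic to other devices on the same bus. mydev_rmw and its two pre-built
 * messages are hypothetical.
 */
#if 0
static int mydev_rmw(struct spi_device *spi, struct spi_message *rd,
		     struct spi_message *wr)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);

	ret = spi_sync_locked(spi, rd);
	if (!ret)
		ret = spi_sync_locked(spi, wr);

	spi_bus_unlock(ctlr);

	return ret;
}
#endif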
4175
/* Portable code must never pass more than 32 bytes */
4177 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4178
4179 static u8 *buf;
4180
/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
4201 int spi_write_then_read(struct spi_device *spi,
4202 const void *txbuf, unsigned n_tx,
4203 void *rxbuf, unsigned n_rx)
4204 {
4205 static DEFINE_MUTEX(lock);
4206
4207 int status;
4208 struct spi_message message;
4209 struct spi_transfer x[2];
4210 u8 *local_buf;
4211
/*
 * Use the preallocated DMA-safe buffer if we can. We can't avoid
 * copying here (as a pure convenience thing), but we can
 * keep heap costs out of the hot path unless someone else is
 * using the pre-allocated buffer or the transfer is too large.
 */
4218 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4219 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4220 GFP_KERNEL | GFP_DMA);
4221 if (!local_buf)
4222 return -ENOMEM;
4223 } else {
4224 local_buf = buf;
4225 }
4226
4227 spi_message_init(&message);
4228 memset(x, 0, sizeof(x));
4229 if (n_tx) {
4230 x[0].len = n_tx;
4231 spi_message_add_tail(&x[0], &message);
4232 }
4233 if (n_rx) {
4234 x[1].len = n_rx;
4235 spi_message_add_tail(&x[1], &message);
4236 }
4237
4238 memcpy(local_buf, txbuf, n_tx);
4239 x[0].tx_buf = local_buf;
4240 x[1].rx_buf = local_buf + n_tx;
4241
/* Do the I/O */
4243 status = spi_sync(spi, &message);
4244 if (status == 0)
4245 memcpy(rxbuf, x[1].rx_buf, n_rx);
4246
4247 if (x[0].tx_buf == buf)
4248 mutex_unlock(&lock);
4249 else
4250 kfree(local_buf);
4251
4252 return status;
4253 }
4254 EXPORT_SYMBOL_GPL(spi_write_then_read);
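
/*
 * Usage sketch (illustrative): reading one register of a device with an
 * 8-bit command phase; MYDEV_REG_ID is hypothetical. The buffers need not
 * be DMA-safe, since the core bounces them through its own buffer.
 */
#if 0
#define MYDEV_REG_ID	0x0f	/* hypothetical register address */

static int mydev_read_id(struct spi_device *spi, u8 *id)
{
	u8 cmd = MYDEV_REG_ID;

	return spi_write_then_read(spi, &cmd, 1, id, 1);
}
#endif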
4255
/*-------------------------------------------------------------------------*/
4257
4258 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device */
4260 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4261 {
4262 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4263
4264 return dev ? to_spi_device(dev) : NULL;
4265 }
4266
/* SPI controllers are not on spi_bus, so we must find them another way */
4268 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4269 {
4270 struct device *dev;
4271
4272 dev = class_find_device_by_of_node(&spi_master_class, node);
4273 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4274 dev = class_find_device_by_of_node(&spi_slave_class, node);
4275 if (!dev)
4276 return NULL;
4277
/* Reference obtained in class_find_device() */
4279 return container_of(dev, struct spi_controller, dev);
4280 }
4281
4282 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4283 void *arg)
4284 {
4285 struct of_reconfig_data *rd = arg;
4286 struct spi_controller *ctlr;
4287 struct spi_device *spi;
4288
4289 switch (of_reconfig_get_state_change(action, arg)) {
4290 case OF_RECONFIG_CHANGE_ADD:
4291 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4292 if (ctlr == NULL)
4293 return NOTIFY_OK;
4294
4295 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4296 put_device(&ctlr->dev);
4297 return NOTIFY_OK;
4298 }
4299
4300 spi = of_register_spi_device(ctlr, rd->dn);
4301 put_device(&ctlr->dev);
4302
4303 if (IS_ERR(spi)) {
4304 pr_err("%s: failed to create for '%pOF'\n",
4305 __func__, rd->dn);
4306 of_node_clear_flag(rd->dn, OF_POPULATED);
4307 return notifier_from_errno(PTR_ERR(spi));
4308 }
4309 break;
4310
4311 case OF_RECONFIG_CHANGE_REMOVE:
/* Already depopulated? */
4313 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4314 return NOTIFY_OK;
4315
/* Find our device by node */
4317 spi = of_find_spi_device_by_node(rd->dn);
4318 if (spi == NULL)
4319 return NOTIFY_OK;
4320
/* Unregistering takes one ref away */
4322 spi_unregister_device(spi);
4323
/* And put the reference of the find */
4325 put_device(&spi->dev);
4326 break;
4327 }
4328
4329 return NOTIFY_OK;
4330 }
4331
4332 static struct notifier_block spi_of_notifier = {
4333 .notifier_call = of_spi_notify,
4334 };
4335 #else
4336 extern struct notifier_block spi_of_notifier;
4337 #endif
4338
4339 #if IS_ENABLED(CONFIG_ACPI)
4340 static int spi_acpi_controller_match(struct device *dev, const void *data)
4341 {
4342 return ACPI_COMPANION(dev->parent) == data;
4343 }
4344
4345 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4346 {
4347 struct device *dev;
4348
4349 dev = class_find_device(&spi_master_class, NULL, adev,
4350 spi_acpi_controller_match);
4351 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4352 dev = class_find_device(&spi_slave_class, NULL, adev,
4353 spi_acpi_controller_match);
4354 if (!dev)
4355 return NULL;
4356
4357 return container_of(dev, struct spi_controller, dev);
4358 }
4359
4360 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4361 {
4362 struct device *dev;
4363
4364 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4365 return to_spi_device(dev);
4366 }
4367
4368 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4369 void *arg)
4370 {
4371 struct acpi_device *adev = arg;
4372 struct spi_controller *ctlr;
4373 struct spi_device *spi;
4374
4375 switch (value) {
4376 case ACPI_RECONFIG_DEVICE_ADD:
4377 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
4378 if (!ctlr)
4379 break;
4380
4381 acpi_register_spi_device(ctlr, adev);
4382 put_device(&ctlr->dev);
4383 break;
4384 case ACPI_RECONFIG_DEVICE_REMOVE:
4385 if (!acpi_device_enumerated(adev))
4386 break;
4387
4388 spi = acpi_spi_find_device_by_adev(adev);
4389 if (!spi)
4390 break;
4391
4392 spi_unregister_device(spi);
4393 put_device(&spi->dev);
4394 break;
4395 }
4396
4397 return NOTIFY_OK;
4398 }
4399
4400 static struct notifier_block spi_acpi_notifier = {
4401 .notifier_call = acpi_spi_notify,
4402 };
4403 #else
4404 extern struct notifier_block spi_acpi_notifier;
4405 #endif
4406
4407 static int __init spi_init(void)
4408 {
4409 int status;
4410
4411 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4412 if (!buf) {
4413 status = -ENOMEM;
4414 goto err0;
4415 }
4416
4417 status = bus_register(&spi_bus_type);
4418 if (status < 0)
4419 goto err1;
4420
4421 status = class_register(&spi_master_class);
4422 if (status < 0)
4423 goto err2;
4424
4425 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4426 status = class_register(&spi_slave_class);
4427 if (status < 0)
4428 goto err3;
4429 }
4430
4431 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4432 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4433 if (IS_ENABLED(CONFIG_ACPI))
4434 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4435
4436 return 0;
4437
4438 err3:
4439 class_unregister(&spi_master_class);
4440 err2:
4441 bus_unregister(&spi_bus_type);
4442 err1:
4443 kfree(buf);
4444 buf = NULL;
4445 err0:
4446 return status;
4447 }
4448
/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
4457 postcore_initcall(spi_init);