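/*
 * DMA engine core: registration of DMA providers and allocation of their
 * channels to clients.  Providers register a struct dma_device with
 * dma_async_device_register(); clients obtain channels through the
 * dma_request_chan()/dma_find_channel() family and release them with
 * dma_release_channel().
 */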
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

#include "dmaengine.h"

/* dma_list_mutex protects dma_device_list and the channel client counts */
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
	return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* CONFIG_DEBUG_FS */

/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

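/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */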
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan:	associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/*
	 * 'interrupt', 'private' and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table.
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

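/**
 * dma_chan_is_local - checks if the channel is in the same NUMA node as the CPU
 * @chan:	DMA channel to test
 * @cpu:	CPU index which the channel should be close to
 *
 * Returns true if the channel is in the same NUMA node as the CPU.
 */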
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

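/**
 * min_chan - finds the channel with the lowest table_count for a capability
 * @cap:	capability to match
 * @cpu:	CPU index which the channel should be close to
 *
 * If some channels are close to the given CPU, the one with the lowest
 * reference count is returned; otherwise the CPU is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */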
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

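/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Fills the per-CPU, per-operation channel_table so that each online CPU
 * gets a preferred channel for every capability, favoring NUMA-local
 * channels and spreading load by table_count.
 *
 * Must be called under dma_list_mutex.
 */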
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

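/**
 * balance_ref_count - catch up the channel reference count
 * @chan:	channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */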
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}

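/**
 * dma_chan_get - try to grab a DMA channel's parent driver module
 * @chan:	channel to grab
 *
 * Must be called under dma_list_mutex.
 */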
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}

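/**
 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 * @chan:	channel to release
 *
 * Must be called under dma_list_mutex.
 */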
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}

/**
 * dma_sync_wait - spin wait for a transaction to complete
 * @chan:	channel to poll
 * @cookie:	transaction identifier to check the status of
 */
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

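/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type:	transaction type
 *
 * Returns the channel (if any) that dma_channel_rebalance() assigned to the
 * calling CPU for this transaction type.
 */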
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

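/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */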
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

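/**
 * dma_get_slave_caps - get the slave DMA capabilities of a channel
 * @chan:	channel to query
 * @caps:	structure filled in with the device's generic capabilities
 *
 * Returns 0 on success, -ENXIO if the device does not support slave or
 * cyclic transfers or does not report its supported directions.
 */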
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities; if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->min_burst = device->min_burst;
	caps->max_burst = device->max_burst;
	caps->max_sg_burst = device->max_sg_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	/*
	 * The device may be configured with non-uniformly distributed
	 * slave capabilities per channel; in that case the driver
	 * provides the device_caps callback to override the generic
	 * capabilities with channel-specific ones.
	 */
	if (device->device_caps)
		device->device_caps(chan, caps);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}

	/*
	 * devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/*
		 * Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as
		 * this channel will not be published in the general-purpose
		 * allocator.
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

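/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan:	target channel
 *
 * Returns the channel on success or NULL if it is already in use.
 */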
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;
		int err;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

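/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask:	capabilities that the channel must satisfy
 * @fn:		optional callback to disposition available channels
 * @fn_param:	opaque parameter to pass to dma_filter_fn()
 * @np:		device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */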
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

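/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 *
 * A typical client does something like (error handling elided):
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */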
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR(chan))
		return chan;
	if (!chan)
		return ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);

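/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask:	capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */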
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

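/**
 * dma_release_channel - free a DMA channel obtained with dma_request_chan()
 * @chan:	channel to free
 */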
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop the PRIVATE cap enabled when the channel was requested */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

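/**
 * dmaengine_get - register interest in dma_channels
 */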
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/*
	 * if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

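/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */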
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/*
	 * A device that satisfies this test has channels that will never
	 * cause an async_tx channel switch event as all possible operation
	 * types can be handled.
	 */
#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
#endif

#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
#endif
#endif

#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
#endif
#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan)
{
	int rc;

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		return -ENOMEM;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		rc = -ENOMEM;
		goto err_free_local;
	}

	/* assign a per-device channel id used in the dma%dchan%d sysfs name */
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		rc = chan->chan_id;
		goto err_free_dev;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

err_out_ida:
	ida_free(&device->chan_ida, chan->chan_id);
err_free_dev:
	kfree(chan->dev);
err_free_local:
	free_percpu(chan->local);
	chan->local = NULL;
	return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);

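/**
 * dma_async_device_register - registers DMA devices found
 * @device:	pointer to &struct dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback, which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */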
int dma_async_device_register(struct dma_device *device)
{
	int rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/*
	 * note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	ida_init(&device->chan_ida);

	/* represent channels in sysfs */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/*
			 * if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/*
				 * note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dmaengine_debug_register(device);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

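/**
 * dma_async_device_unregister - unregister a DMA device
 * @device:	pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */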
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

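/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device:	pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */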
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}

int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->attach)
		return -ENOTSUPP;

	return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);

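/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx:		in-flight transaction to wait on
 */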
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

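/**
 * dma_run_dependencies - process dependent operations on the target channel
 * @tx:		transaction with dependencies
 *
 * Helper routine for DMA drivers to process (start) dependent operations
 * on their target channel.
 */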
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/*
	 * keep submitting up until a channel switch is detected; in that
	 * case we will be called again as a result of processing the
	 * dependency list
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;

	err = class_register(&dma_devclass);
	if (!err)
		dmaengine_debugfs_init();

	return err;
}
arch_initcall(dma_bus_init);