// SPDX-License-Identifier: GPL-2.0
/*
 * DMA-API debugging facility: tracks every active DMA mapping per device
 * and reports API misuse such as leaked mappings, double frees, wrong
 * directions and syncs of memory that was never mapped.
 */
#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/sections.h>
#include "debug.h"

#define HASH_SIZE	16384ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))

enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5
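
/*
 * struct dma_debug_entry - track one active DMA mapping
 * @list:           node in the free list or in a hash bucket
 * @dev:            device that created the mapping
 * @dev_addr:       DMA address handed to the device
 * @size:           size of the mapping in bytes
 * @type:           dma_debug_single/sg/coherent/resource
 * @direction:      DMA_TO_DEVICE, DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
 * @sg_call_ents:   nents passed to dma_map_sg()
 * @sg_mapped_ents: mapped entries returned by dma_map_sg()
 * @pfn:            page frame of the mapped memory
 * @offset:         offset of the mapping within that page
 * @map_err_type:   whether the driver checked for a mapping error
 * @stack_len:      number of saved stack trace entries
 * @stack_entries:  call chain recorded when the mapping was created
 */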
struct dma_debug_entry {
	struct list_head	list;
	struct device		*dev;
	u64			dev_addr;
	u64			size;
	int			type;
	int			direction;
	int			sg_call_ents;
	int			sg_mapped_ents;
	unsigned long		pfn;
	size_t			offset;
	enum map_err_types	map_err_type;
#ifdef CONFIG_STACKTRACE
	unsigned int		stack_len;
	unsigned long		stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
} ____cacheline_aligned_in_smp;

typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
};

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - set when dma-debug is turned off at runtime */
static bool global_disable __read_mostly;

/* Not set until dma_debug_init() has run; all checks are skipped before that */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;

/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);

static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};

static const char *type2name[] = {
	[dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
	[dma_debug_coherent] = "coherent",
	[dma_debug_resource] = "resource",
};

static const char *dir2name[] = {
	[DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
	[DMA_TO_DEVICE] = "DMA_TO_DEVICE",
	[DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
	[DMA_NONE] = "DMA_NONE",
};
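
/*
 * If the mapping was recorded with a stack trace, print where the
 * offending mapping was created.
 */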
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warn("Mapped at:\n");
		stack_trace_print(entry->stack_entries, entry->stack_len, 0);
	}
#endif
}

static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}
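
/*
 * error_count and show_num_errors are updated without locking; they are
 * statistics only and slight races are acceptable.  The driver filter
 * decides whether a warning is printed at all.
 */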
#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);
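
/*
 * Hash related functions.
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To have
 * quick access to these structs they are stored into a hash keyed by the
 * high bits of the DMA address: dev_addr is shifted right by
 * HASH_FN_SHIFT (13) and masked with HASH_FN_MASK, so all addresses
 * within the same 8 KiB window land in the same bucket.  For example
 * (illustrative values only), dev_addr 0x12345000 yields
 * (0x12345000 >> 13) & 0x3fff = 0x11a2, i.e. bucket 4514.
 */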
static int hash_fn(struct dma_debug_entry *entry)
{
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}

/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}

/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long flags)
	__releases(&bucket->lock)
{
	spin_unlock_irqrestore(&bucket->lock, flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}

static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}
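
/*
 * Search a given entry in the hash bucket list.  If several entries
 * match the reference, prefer the one that also matches size, type,
 * direction and sg_call_ents; a partial match is only returned when it
 * is unambiguous.
 */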
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}

static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}
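
/*
 * For partial syncs we do not know the exact mapping boundaries, so the
 * reference may hash to a different bucket than the original mapping.
 * Walk backwards through neighbouring buckets (one HASH_FN_SHIFT-sized
 * step of the address per iteration) until a containing entry is found
 * or the address space is exhausted.
 */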
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	struct dma_debug_entry *entry, index = *ref;
	int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);

	for (int i = 0; i < limit; i++) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, *flags);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}

/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}

/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
		cond_resched();
	}
}
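
/*
 * Active cacheline tracking: for each mapping (the initial cacheline of
 * a dma_map_page/dma_alloc_coherent region, or of each page of a
 * scatterlist) an entry is inserted into this radix tree keyed by the
 * cacheline number.  If an insert collides with an existing entry the
 * overlap count is stored in the radix tree tags (RADIX_TREE_MAX_TAGS
 * bits per cacheline, so at most ACTIVE_CACHELINE_MAX_OVERLAP extra
 * overlaps can be tracked).  An overlap means two active mappings share
 * a cacheline, which the DMA API forbids because it can corrupt data.
 */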
static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)

static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}

static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}

static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}

static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/*
	 * If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/*
	 * A DMA_TO_DEVICE mapping is only read by the device, so
	 * concurrent mappings of the cacheline cannot corrupt data;
	 * skip overlap tracking for them.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/*
	 * Since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case.
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}
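
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */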
static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		err_printk(entry->dev, entry,
			"cacheline tracking EEXIST, overlapping mappings aren't supported\n");
	}
}

static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}

/*
 * Report whenever the dynamically grown pool crosses another multiple
 * of the preallocated size; a steadily growing pool usually means the
 * driver is leaking mappings.
 */
static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}
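
/*
 * struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */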
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stack_len = stack_trace_save(entry->stack_entries,
					    ARRAY_SIZE(entry->stack_entries),
					    1);
#endif
	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
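
/*
 * DMA-API debugfs directory
 *
 * This code puts a debugfs directory into the kernel with the name
 * dma-api/.  In that directory the user can read and manipulate the
 * debug state, most importantly the driver name filter handled by the
 * two functions below.
 */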
static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because
	 * current_driver_name can only be read under the
	 * driver_name_lock with irqs disabled.  So create a temporary
	 * copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to a temporary buffer first.
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}

static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};

static int dump_show(struct seq_file *seq, void *v)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);
		list_for_each_entry(entry, &bucket->list, list) {
			seq_printf(seq,
				   "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
				   dev_name(entry->dev),
				   dev_driver_string(entry->dev),
				   type2name[entry->type], idx,
				   phys_addr(entry), entry->pfn,
				   entry->dev_addr, entry->size,
				   dir2name[entry->direction],
				   maperr2str[entry->map_err_type]);
		}
		spin_unlock_irqrestore(&bucket->lock, flags);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dump);

static int __init dma_debug_fs_init(void)
{
	struct dentry *dentry = debugfs_create_dir("dma-api", NULL);

	debugfs_create_bool("disabled", 0444, dentry, &global_disable);
	debugfs_create_u32("error_count", 0444, dentry, &error_count);
	debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
	debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
	debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
	debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
	debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
	debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
	debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);

	return 0;
}
core_initcall_sync(dma_debug_fs_init);
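
/*
 * Count how many mappings a device still holds and hand back one of
 * them so the unbind notifier can print an example of a leaked entry.
 */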
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}

static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *entry;
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}

void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}

static int dma_debug_init(void)
{
	int i, nr_pages;

	/*
	 * Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);

static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 1;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 1;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
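
/*
 * check_unmap() looks up the mapping that is about to be released and
 * complains about size, type, direction or address mismatches as well
 * as frees of memory that was never mapped, then drops the entry.
 */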
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error() */
		put_hash_bucket(bucket, flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error"
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, flags);
}
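
/*
 * Warn when a driver maps memory that lives on a task's kernel stack;
 * with a vmalloc'ed stack the stack pages have to be compared one by
 * one because they are not physically contiguous.
 */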
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (memory_intersects(_stext, _etext, addr, len) ||
	    memory_intersects(__start_rodata, __end_rodata, addr, len))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
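
/*
 * check_sync() validates dma_sync_*() calls: the region must lie inside
 * an existing mapping and the sync direction must be compatible with
 * the direction the memory was mapped with.
 */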
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, flags);
}
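
/*
 * Only built when CONFIG_DMA_API_DEBUG_SG is enabled: verify that each
 * mapped scatterlist segment respects the device's maximum segment size
 * and does not cross its DMA segment boundary mask.
 */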
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
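
/*
 * The debug_dma_*() functions below are the hooks called by the DMA
 * mapping core (and, for debug_dma_map_single(), inline wrappers) to
 * record and validate mappings.
 */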
void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);

void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev = dev;
	entry->type = dma_debug_single;
	entry->pfn = page_to_pfn(page);
	entry->offset = offset;
	entry->dev_addr = dma_addr;
	entry->size = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry, attrs);
}

void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);

void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_single,
		.dev = dev,
		.dev_addr = addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}

void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction,
		      unsigned long attrs)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nents, i) {
		check_for_stack(dev, sg_page(s), s->offset);
		if (!PageHighMem(sg_page(s)))
			check_for_illegal_area(dev, sg_virt(s), s->length);
	}

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type = dma_debug_sg;
		entry->dev = dev;
		entry->pfn = page_to_pfn(sg_page(s));
		entry->offset = s->offset;
		entry->size = sg_dma_len(s);
		entry->dev_addr = sg_dma_address(s);
		entry->direction = direction;
		entry->sg_call_ents = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_sg_segment(dev, s);

		add_dma_entry(entry, attrs);
	}
}

static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);
	mapped_ents = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, flags);

	return mapped_ents;
}

void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = dir,
			.sg_call_ents = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}

void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt,
			      unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_coherent;
	entry->dev = dev;
	entry->offset = offset_in_page(virt);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry, attrs);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_coherent,
		.dev = dev,
		.offset = offset_in_page(virt),
		.dev_addr = addr,
		.size = size,
		.direction = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr,
			    unsigned long attrs)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type = dma_debug_resource;
	entry->dev = dev;
	entry->pfn = PHYS_PFN(addr);
	entry->offset = offset_in_page(addr);
	entry->size = size;
	entry->dev_addr = dma_addr;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry, attrs);
}

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type = dma_debug_resource,
		.dev = dev,
		.dev_addr = dma_addr,
		.size = size,
		.direction = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}

void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type = dma_debug_single;
	ref.dev = dev;
	ref.dev_addr = dma_handle;
	ref.size = size;
	ref.direction = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}

void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type = dma_debug_sg,
			.dev = dev,
			.pfn = page_to_pfn(sg_page(s)),
			.offset = s->offset,
			.dev_addr = sg_dma_address(s),
			.size = sg_dma_len(s),
			.direction = direction,
			.sg_call_ents = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}

static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);