/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define pr_fmt(fmt)	"DMA-API: " fmt

#include <linux/sched/task_stack.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>
#define HASH_SIZE	1024ULL
#define HASH_FN_SHIFT	13
#define HASH_FN_MASK	(HASH_SIZE - 1)

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* If the pool runs out, add this many new entries at once */
#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
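/*
 * Note: with 4 KiB pages and the entry layout below, one page works out to
 * roughly a few dozen dma_debug_entry structs per dynamic allocation (the
 * exact count depends on the architecture and on CONFIG_STACKTRACE).
 */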
enum {
	dma_debug_single,
	dma_debug_sg,
	dma_debug_coherent,
	dma_debug_resource,
};

enum map_err_types {
	MAP_ERR_CHECK_NOT_APPLICABLE,
	MAP_ERR_NOT_CHECKED,
	MAP_ERR_CHECKED,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5
/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @type: single, page, sg, coherent
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @size: length of the mapping
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stacktrace: support backtraces when a violation is detected
 */
struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	unsigned long	 pfn;
	size_t		 offset;
	u64              dev_addr;
	u64              size;
	int              direction;
	int		 sg_call_ents;
	int		 sg_mapped_ents;
	enum map_err_types  map_err_type;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);

/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;

static inline bool dma_debug_disabled(void)
{
	return global_disable || !dma_debug_initialized;
}

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static struct dentry *nr_total_entries_dent __read_mostly;
static struct dentry *filter_dent           __read_mostly;
/* per-driver filter related state */

#define NAME_MAX_LEN	64

static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
static struct device_driver *current_driver                    __read_mostly;

static DEFINE_RWLOCK(driver_name_lock);
static const char *const maperr2str[] = {
	[MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
	[MAP_ERR_NOT_CHECKED] = "dma map error not checked",
	[MAP_ERR_CHECKED] = "dma map error checked",
};
static const char *type2name[5] = { "single", "page",
				    "scatter-gather", "coherent",
				    "resource" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };
/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen is
 *                that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    worst case this will result in one warning more in the
 *                    system log than the user configured. This variable is
 *                    writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		pr_warning("Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}
static bool driver_filter(struct device *dev)
{
	struct device_driver *drv;
	unsigned long flags;
	bool ret;

	/* driver filter off */
	if (likely(!current_driver_name[0]))
		return true;

	/* driver filter on and initialized */
	if (current_driver && dev && dev->driver == current_driver)
		return true;

	/* driver filter on, but we can't filter on a NULL device... */
	if (!dev)
		return false;

	if (current_driver || !current_driver_name[0])
		return false;

	/* driver filter on but not yet initialized */
	drv = dev->driver;
	if (!drv)
		return false;

	/* lock to protect against change of current_driver_name */
	read_lock_irqsave(&driver_name_lock, flags);

	ret = false;
	if (drv->name &&
	    strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
		current_driver = drv;
		ret = true;
	}

	read_unlock_irqrestore(&driver_name_lock, flags);

	return ret;
}
#define err_printk(dev, entry, format, arg...) do {			\
		error_count += 1;					\
		if (driver_filter(dev) &&				\
		    (show_all_errors || show_num_errors > 0)) {		\
			WARN(1, pr_fmt("%s %s: ") format,		\
			     dev ? dev_driver_string(dev) : "NULL",	\
			     dev ? dev_name(dev) : "NULL", ## arg);	\
			dump_entry_trace(entry);			\
		}							\
		if (!show_all_errors && show_num_errors > 0)		\
			show_num_errors -= 1;				\
	} while (0);
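/*
 * With the pr_fmt() prefix above, a report produced by err_printk() looks
 * roughly like this (driver and device names are only illustrative):
 *
 *   DMA-API: e1000e 0000:00:19.0: device driver frees DMA memory with
 *   different size [device address=0x...] [map size=256 bytes] ...
 */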
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored into a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * Hash function is based on the dma address.
	 * We use bits 13-22 here as the index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
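/*
 * Worked example (illustrative): dev_addr 0xfff00000 hashes to bucket
 * (0xfff00000 >> 13) & 0x3ff = 0x380, i.e. bucket 896 of the 1024 buckets.
 */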
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
	__acquires(&dma_entry_hash[idx].lock)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;
	return &dma_entry_hash[idx];
}
/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
	__releases(&bucket->lock)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}

static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
	return ((a->dev_addr == b->dev_addr) &&
		(a->dev == b->dev)) ? true : false;
}
static bool containing_match(struct dma_debug_entry *a,
			     struct dma_debug_entry *b)
{
	if (a->dev != b->dev)
		return false;

	if ((b->dev_addr <= a->dev_addr) &&
	    ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
		return true;

	return false;
}
/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
						  struct dma_debug_entry *ref,
						  match_fn match)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = -1;

	list_for_each_entry(entry, &bucket->list, list) {
		if (!match(ref, entry))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size         == ref->size         ? ++match_lvl : 0;
		entry->type         == ref->type         ? ++match_lvl : 0;
		entry->direction    == ref->direction    ? ++match_lvl : 0;
		entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

		if (match_lvl == 4) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one or it is the 1st match.
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}
static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
						 struct dma_debug_entry *ref)
{
	return __hash_bucket_find(bucket, ref, exact_match);
}
static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
						   struct dma_debug_entry *ref,
						   unsigned long *flags)
{
	unsigned int max_range = dma_get_max_seg_size(ref->dev);
	struct dma_debug_entry *entry, index = *ref;
	unsigned int range = 0;

	while (range <= max_range) {
		entry = __hash_bucket_find(*bucket, ref, containing_match);

		if (entry)
			return entry;

		/*
		 * Nothing found, go back a hash bucket
		 */
		put_hash_bucket(*bucket, flags);
		range          += (1 << HASH_FN_SHIFT);
		index.dev_addr -= (1 << HASH_FN_SHIFT);
		*bucket = get_hash_bucket(&index, flags);
	}

	return NULL;
}
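/*
 * Rationale: a mapping that contains ref->dev_addr may start at a lower
 * device address and therefore hash into an earlier bucket.  Each loop
 * iteration above steps the reference address back by one bucket's worth of
 * address space (1 << HASH_FN_SHIFT) and retries, up to the device's maximum
 * segment size, since no single mapping should be larger than that.
 */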
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}

static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
	if (entry->type == dma_debug_resource)
		return __pfn_to_phys(entry->pfn) + entry->offset;

	return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}
/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
					 type2name[entry->type], idx,
					 phys_addr(entry), entry->pfn,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction],
					 maperr2str[entry->map_err_type]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
/*
 * For each mapping (initial cacheline in the case of
 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
 * scatterlist, or the cacheline specified in dma_map_single) insert
 * into this tree using the cacheline as the key. At
 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
 * the entry already exists at insertion time add a tag as a reference
 * count for the overlapping mappings. For now, the overlap tracking
 * just ensures that 'unmaps' balance 'maps' before marking the
 * cacheline idle, but we should also be flagging overlaps as an API
 * violation.
 *
 * Memory usage is mostly constrained by the maximum number of available
 * dma-debug entries in that we need a free dma_debug_entry before
 * inserting into the tree. In the case of dma_map_page and
 * dma_alloc_coherent there is only one dma_debug_entry and one
 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
 * entries into the tree.
 *
 * At any time debug_dma_assert_idle() can be called to trigger a
 * warning if any cachelines in the given page are in the active set.
 */
static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
{
	return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
		(entry->offset >> L1_CACHE_SHIFT);
}
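/*
 * Example (assuming PAGE_SHIFT == 12 and L1_CACHE_SHIFT == 6): a mapping at
 * pfn 0x1234 with offset 0x80 starts at cacheline number
 * (0x1234 << 6) + (0x80 >> 6) = 0x48d02.
 */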
static int active_cacheline_read_overlap(phys_addr_t cln)
{
	int overlap = 0, i;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
			overlap |= 1 << i;
	return overlap;
}
static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
	int i;

	if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
		return overlap;

	for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
		if (overlap & 1 << i)
			radix_tree_tag_set(&dma_active_cacheline, cln, i);
		else
			radix_tree_tag_clear(&dma_active_cacheline, cln, i);

	return overlap;
}
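/*
 * The overlap count for a cacheline is stored as a small binary number spread
 * across the radix tree tags of its slot: tag i holds bit i of the count.
 * With RADIX_TREE_MAX_TAGS tags this allows counts up to
 * ACTIVE_CACHELINE_MAX_OVERLAP before the counter saturates.
 */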
static void active_cacheline_inc_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	overlap = active_cacheline_set_overlap(cln, ++overlap);

	/* If we overflowed the overlap counter then we're potentially
	 * leaking dma-mappings.  Otherwise, if maps and unmaps are
	 * balanced then this overflow may cause false negatives in
	 * debug_dma_assert_idle() as the cacheline may be marked idle
	 * prematurely.
	 */
	WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
		  pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
		  ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}

static int active_cacheline_dec_overlap(phys_addr_t cln)
{
	int overlap = active_cacheline_read_overlap(cln);

	return active_cacheline_set_overlap(cln, --overlap);
}
static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* If the device is not writing memory then we don't have any
	 * concerns about the cpu consuming stale data.  This mitigates
	 * legitimate usages of overlapping mappings.
	 */
	if (entry->direction == DMA_TO_DEVICE)
		return 0;

	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	if (rc == -EEXIST)
		active_cacheline_inc_overlap(cln);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}

static void active_cacheline_remove(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;

	/* ...mirror the insert case */
	if (entry->direction == DMA_TO_DEVICE)
		return;

	spin_lock_irqsave(&radix_lock, flags);
	/* since we are counting overlaps the final put of the
	 * cacheline will occur when the overlap count is 0.
	 * active_cacheline_dec_overlap() returns -1 in that case
	 */
	if (active_cacheline_dec_overlap(cln) < 0)
		radix_tree_delete(&dma_active_cacheline, cln);
	spin_unlock_irqrestore(&radix_lock, flags);
}
/**
 * debug_dma_assert_idle() - assert that a page is not undergoing dma
 * @page: page to lookup in the dma_active_cacheline tree
 *
 * Place a call to this routine in cases where the cpu touching the page
 * before the dma completes (page is dma_unmapped) will lead to data
 * corruption.
 */
void debug_dma_assert_idle(struct page *page)
{
	static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
	struct dma_debug_entry *entry = NULL;
	void **results = (void **) &ents;
	unsigned int nents, i;
	unsigned long flags;
	phys_addr_t cln;

	if (dma_debug_disabled())
		return;

	if (!page)
		return;

	cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
	spin_lock_irqsave(&radix_lock, flags);
	nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
				       CACHELINES_PER_PAGE);
	for (i = 0; i < nents; i++) {
		phys_addr_t ent_cln = to_cacheline_number(ents[i]);

		if (ent_cln == cln) {
			entry = ents[i];
			break;
		} else if (ent_cln >= cln + CACHELINES_PER_PAGE)
			break;
	}
	spin_unlock_irqrestore(&radix_lock, flags);

	if (!entry)
		return;

	cln = to_cacheline_number(entry);
	err_printk(entry->dev, entry,
		   "cpu touching an active dma mapped cacheline [cln=%pa]\n",
		   &cln);
}
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;
	int rc;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);

	rc = active_cacheline_insert(entry);
	if (rc == -ENOMEM) {
		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
		global_disable = true;
	}

	/* TODO: report -EEXIST errors here as overlapping mappings are
	 * not supported by the DMA API
	 */
}
static int dma_debug_create_entries(gfp_t gfp)
{
	struct dma_debug_entry *entry;
	int i;

	entry = (void *)get_zeroed_page(gfp);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
		list_add_tail(&entry[i].list, &free_entries);

	num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
	nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;

	return 0;
}

static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}
static void __dma_entry_alloc_check_leak(void)
{
	u32 tmp = nr_total_entries % nr_prealloc_entries;

	/* Shout each time we tick over some multiple of the initial pool */
	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
			nr_total_entries,
			(nr_total_entries / nr_prealloc_entries));
	}
}
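/*
 * The "%u00%%" format above prints the growth as a percentage of the initial
 * pool: e.g. once nr_total_entries reaches twice nr_prealloc_entries the
 * message reads "... pool grown to 131072 (200%)" (numbers illustrative,
 * assuming the default preallocation of 1 << 16 entries).
 */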
/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);
	if (num_free_entries == 0) {
		if (dma_debug_create_entries(GFP_ATOMIC)) {
			global_disable = true;
			spin_unlock_irqrestore(&free_entries_lock, flags);
			pr_err("debugging out of memory - disabling\n");
			return NULL;
		}
		__dma_entry_alloc_check_leak();
	}

	entry = __dma_entry_alloc();

	spin_unlock_irqrestore(&free_entries_lock, flags);

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

	return entry;
}

static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	active_cacheline_remove(entry);

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */

static ssize_t filter_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN + 1];
	unsigned long flags;
	int len;

	if (!current_driver_name[0])
		return 0;

	/*
	 * We can't copy to userspace directly because current_driver_name can
	 * only be read under the driver_name_lock with irqs disabled. So
	 * create a temporary copy first.
	 */
	read_lock_irqsave(&driver_name_lock, flags);
	len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
	read_unlock_irqrestore(&driver_name_lock, flags);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t filter_write(struct file *file, const char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[NAME_MAX_LEN];
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * We can't copy from userspace directly. Access to
	 * current_driver_name is protected with a write_lock with irqs
	 * disabled. Since copy_from_user can fault and may sleep we
	 * need to copy to temporary buffer first
	 */
	len = min(count, (size_t)(NAME_MAX_LEN - 1));
	if (copy_from_user(buf, userbuf, len))
		return -EFAULT;

	buf[len] = 0;

	write_lock_irqsave(&driver_name_lock, flags);

	/*
	 * Now handle the string we got from userspace very carefully.
	 * The rules are:
	 *         - only use the first token we got
	 *         - token delimiter is everything looking like a space
	 *           character (' ', '\n', '\t' ...)
	 */
	if (!isalnum(buf[0])) {
		/*
		 * If the first character userspace gave us is not
		 * alphanumerical then assume the filter should be
		 * switched off.
		 */
		if (current_driver_name[0])
			pr_info("switching off dma-debug driver filter\n");
		current_driver_name[0] = 0;
		current_driver = NULL;
		goto out_unlock;
	}

	/*
	 * Now parse out the first token and use it as the name for the
	 * driver to filter for.
	 */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		current_driver_name[i] = buf[i];
		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
			break;
	}
	current_driver_name[i] = 0;
	current_driver = NULL;

	pr_info("enable driver filter for driver [%s]\n",
		current_driver_name);

out_unlock:
	write_unlock_irqrestore(&driver_name_lock, flags);

	return count;
}
static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
	.llseek = default_llseek,
};
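/*
 * Typical usage of the filter file from userspace (path assumes debugfs is
 * mounted at /sys/kernel/debug; the driver name is only an example):
 *
 *   echo mydriver > /sys/kernel/debug/dma-api/driver_filter
 *   echo ""       > /sys/kernel/debug/dma-api/driver_filter   (filter off)
 */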
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		pr_err("can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
			dma_debug_dent,
			&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
			dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
			dma_debug_dent,
			&show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
			dma_debug_dent,
			&show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
			dma_debug_dent,
			&num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
			dma_debug_dent,
			&min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	nr_total_entries_dent = debugfs_create_u32("nr_total_entries", 0444,
			dma_debug_dent,
			&nr_total_entries);
	if (!nr_total_entries_dent)
		goto out_err;

	filter_dent = debugfs_create_file("driver_filter", 0644,
					  dma_debug_dent, NULL, &filter_fops);
	if (!filter_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}
static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	int count = 0, i;

	for (i = 0; i < HASH_SIZE; ++i) {
		spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
		list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
			if (entry->dev == dev) {
				count += 1;
				*out_entry = entry;
			}
		}
		spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
	}

	return count;
}
static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct dma_debug_entry *uninitialized_var(entry);
	int count;

	if (dma_debug_disabled())
		return 0;

	switch (action) {
	case BUS_NOTIFY_UNBOUND_DRIVER:
		count = device_dma_allocations(dev, &entry);
		if (count == 0)
			break;
		err_printk(dev, entry, "device driver has pending "
				"DMA allocations while released from device "
				"[count=%d]\n"
				"One of leaked entries details: "
				"[device address=0x%016llx] [size=%llu bytes] "
				"[mapped with %s] [mapped as %s]\n",
			count, entry->dev_addr, entry->size,
			dir2name[entry->direction], type2name[entry->type]);
		break;
	default:
		break;
	}

	return 0;
}
void dma_debug_add_bus(struct bus_type *bus)
{
	struct notifier_block *nb;

	if (dma_debug_disabled())
		return;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (nb == NULL) {
		pr_err("dma_debug_add_bus: out of memory\n");
		return;
	}

	nb->notifier_call = dma_debug_device_change;

	bus_register_notifier(bus, nb);
}
static int dma_debug_init(void)
{
	int i, nr_pages;

	/* Do not use dma_debug_initialized here, since we really want to be
	 * called to set dma_debug_initialized even on error.
	 */
	if (global_disable)
		return 0;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
		pr_err("error creating debugfs entries - disabling\n");
		global_disable = true;

		return 0;
	}

	nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
	for (i = 0; i < nr_pages; ++i)
		dma_debug_create_entries(GFP_KERNEL);
	if (num_free_entries >= nr_prealloc_entries) {
		pr_info("preallocated %d debug entries\n", nr_total_entries);
	} else if (num_free_entries > 0) {
		pr_warn("%d debug entries requested but only %d allocated\n",
			nr_prealloc_entries, nr_total_entries);
	} else {
		pr_err("debugging out of memory error - disabled\n");
		global_disable = true;

		return 0;
	}
	min_free_entries = num_free_entries;

	dma_debug_initialized = true;

	pr_info("debugging enabled by kernel config\n");
	return 0;
}
core_initcall(dma_debug_init);
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		pr_info("debugging disabled on kernel command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	if (!str)
		return -EINVAL;
	if (!get_option(&str, &nr_prealloc_entries))
		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
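/*
 * Example kernel command line usage (values illustrative):
 *
 *   dma_debug=off            disable the checks entirely
 *   dma_debug_entries=65536  request a larger preallocated entry pool
 */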
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);
	entry = bucket_find_exact(bucket, ref);

	if (!entry) {
		/* must drop lock before calling dma_mapping_error */
		put_hash_bucket(bucket, &flags);

		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
			err_printk(ref->dev, NULL,
				   "device driver tries to free an "
				   "invalid DMA memory address\n");
		} else {
			err_printk(ref->dev, NULL,
				   "device driver tries to free DMA "
				   "memory it has not allocated [device "
				   "address=0x%016llx] [size=%llu bytes]\n",
				   ref->dev_addr, ref->size);
		}
		return;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (phys_addr(ref) != phys_addr(entry))) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=0x%016llx] "
			   "[cpu free address=0x%016llx]",
			   ref->dev_addr, ref->size,
			   phys_addr(entry),
			   phys_addr(ref));
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	/*
	 * Drivers should use dma_mapping_error() to check the returned
	 * addresses of dma_map_single() and dma_map_page().
	 * If not, print this warning message. See Documentation/DMA-API.txt.
	 */
	if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
		err_printk(ref->dev, entry,
			   "device driver failed to check map error "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

	put_hash_bucket(bucket, &flags);
}
static void check_for_stack(struct device *dev,
			    struct page *page, size_t offset)
{
	void *addr;
	struct vm_struct *stack_vm_area = task_stack_vm_area(current);

	if (!stack_vm_area) {
		/* Stack is direct-mapped. */
		if (PageHighMem(page))
			return;
		addr = page_address(page) + offset;
		if (object_is_on_stack(addr))
			err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
	} else {
		/* Stack is vmalloced. */
		int i;

		for (i = 0; i < stack_vm_area->nr_pages; i++) {
			if (page != stack_vm_area->pages[i])
				continue;

			addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
			err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
			break;
		}
	}
}
static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
{
	unsigned long a1 = (unsigned long)addr;
	unsigned long b1 = a1 + len;
	unsigned long a2 = (unsigned long)start;
	unsigned long b2 = (unsigned long)end;

	return !(b1 <= a2 || a1 >= b2);
}

static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
	if (overlap(addr, len, _stext, _etext) ||
	    overlap(addr, len, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
static void check_sync(struct device *dev,
		       struct dma_debug_entry *ref,
		       bool to_cpu)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(ref, &flags);

	entry = bucket_find_contain(&bucket, ref, &flags);

	if (!entry) {
		err_printk(dev, NULL, "device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size > entry->size) {
		err_printk(dev, entry, "device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] "
			   "[sync offset+size=%llu]\n",
			   entry->dev_addr, entry->size,
			   ref->size);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (ref->direction != entry->direction) {
		err_printk(dev, entry, "device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
		      !(ref->direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
		       !(ref->direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)ref->dev_addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "device driver syncs "
			   "DMA sg list with different entry count "
			   "[map count=%d] [sync count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

out:
	put_hash_bucket(bucket, &flags);
}
static void check_sg_segment(struct device *dev, struct scatterlist *sg)
{
#ifdef CONFIG_DMA_API_DEBUG_SG
	unsigned int max_seg = dma_get_max_seg_size(dev);
	u64 start, end, boundary = dma_get_seg_boundary(dev);

	/*
	 * Either the driver forgot to set dma_parms appropriately, or
	 * whoever generated the list forgot to check them.
	 */
	if (sg->length > max_seg)
		err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
			   sg->length, max_seg);
	/*
	 * In some cases this could potentially be the DMA API
	 * implementation's fault, but it would usually imply that
	 * the scatterlist was built inappropriately to begin with.
	 */
	start = sg_dma_address(sg);
	end = start + sg_dma_len(sg) - 1;
	if ((start ^ end) & ~boundary)
		err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
			   start, end, boundary);
#endif
}
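/*
 * Example for the boundary check (illustrative): with a 64 KiB segment
 * boundary the mask is 0xffff; a segment with start=0xfffc and end=0x10003
 * gives (start ^ end) & ~0xffff = 0x10000, which is non-zero, so the segment
 * crosses the boundary and is reported.
 */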
void debug_dma_map_single(struct device *dev, const void *addr,
			  unsigned long len)
{
	if (unlikely(dma_debug_disabled()))
		return;

	if (!virt_addr_valid(addr))
		err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
			   addr, len);

	if (is_vmalloc_addr(addr))
		err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
			   addr, len);
}
EXPORT_SYMBOL(debug_dma_map_single);
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (dma_mapping_error(dev, dma_addr))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_single;
	entry->pfn	 = page_to_pfn(page);
	entry->offset	 = offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;
	entry->map_err_type = MAP_ERR_NOT_CHECKED;

	check_for_stack(dev, page, offset);

	if (!PageHighMem(page)) {
		void *addr = page_address(page) + offset;

		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_debug_entry ref;
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.dev = dev;
	ref.dev_addr = dma_addr;
	bucket = get_hash_bucket(&ref, &flags);

	list_for_each_entry(entry, &bucket->list, list) {
		if (!exact_match(&ref, entry))
			continue;

		/*
		 * The same physical address can be mapped multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which updates the first entry
		 * from the hash which fits the reference value and is
		 * not currently listed as being checked.
		 */
		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
			entry->map_err_type = MAP_ERR_CHECKED;
			break;
		}
	}

	put_hash_bucket(bucket, &flags);
}
EXPORT_SYMBOL(debug_dma_mapping_error);
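/*
 * The check above expects the usual driver pattern (sketch, not taken from a
 * particular driver):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * Calling dma_mapping_error() flips the entry to MAP_ERR_CHECKED, so
 * check_unmap() will not warn at unmap time.
 */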
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_single,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;
	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->pfn	      = page_to_pfn(sg_page(s));
		entry->offset	      = s->offset;
		entry->size           = sg_dma_len(s);
		entry->dev_addr       = sg_dma_address(s);
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		check_for_stack(dev, sg_page(s), s->offset);

		if (!PageHighMem(sg_page(s))) {
			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
		}

		check_sg_segment(dev, s);

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
static int get_nr_mapped_entries(struct device *dev,
				 struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;
	int mapped_ents;

	bucket       = get_hash_bucket(ref, &flags);
	entry        = bucket_find_exact(bucket, ref);
	mapped_ents  = 0;

	if (entry)
		mapped_ents = entry->sg_mapped_ents;
	put_hash_bucket(bucket, &flags);

	return mapped_ents;
}
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = dir,
			.sg_call_ents   = nelems,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	if (unlikely(virt == NULL))
		return;

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->offset	 = offset_in_page(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	if (is_vmalloc_addr(virt))
		entry->pfn = vmalloc_to_pfn(virt);
	else
		entry->pfn = page_to_pfn(virt_to_page(virt));

	add_dma_entry(entry);
}

void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.offset		= offset_in_page(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	/* handle vmalloc and linear addresses */
	if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
		return;

	if (is_vmalloc_addr(virt))
		ref.pfn = vmalloc_to_pfn(virt);
	else
		ref.pfn = page_to_pfn(virt_to_page(virt));

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
			    int direction, dma_addr_t dma_addr)
{
	struct dma_debug_entry *entry;

	if (unlikely(dma_debug_disabled()))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type		= dma_debug_resource;
	entry->dev		= dev;
	entry->pfn		= PHYS_PFN(addr);
	entry->offset		= offset_in_page(addr);
	entry->size		= size;
	entry->dev_addr		= dma_addr;
	entry->direction	= direction;
	entry->map_err_type	= MAP_ERR_NOT_CHECKED;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_resource);

void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_resource,
		.dev            = dev,
		.dev_addr       = dma_addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(dma_debug_disabled()))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_resource);
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);

void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	struct dma_debug_entry ref;

	if (unlikely(dma_debug_disabled()))
		return;

	ref.type         = dma_debug_single;
	ref.dev          = dev;
	ref.dev_addr     = dma_handle;
	ref.size         = size;
	ref.direction    = direction;
	ref.sg_call_ents = 0;

	check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);

void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int mapped_ents = 0, i;

	if (unlikely(dma_debug_disabled()))
		return;

	for_each_sg(sg, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.pfn		= page_to_pfn(sg_page(s)),
			.offset		= s->offset,
			.dev_addr       = sg_dma_address(s),
			.size           = sg_dma_len(s),
			.direction      = direction,
			.sg_call_ents   = nelems,
		};

		if (!i)
			mapped_ents = get_nr_mapped_entries(dev, &ref);

		if (i >= mapped_ents)
			break;

		check_sync(dev, &ref, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
static int __init dma_debug_driver_setup(char *str)
{
	int i;

	for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
		current_driver_name[i] = *str;
		if (*str == 0)
			break;
	}

	if (current_driver_name[0])
		pr_info("enable driver filter for driver [%s]\n",
			current_driver_name);

	return 1;
}
__setup("dma_debug_driver=", dma_debug_driver_setup);