// SPDX-License-Identifier: GPL-2.0-only
/*
 * PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 * Copyright (C) 2000 Russell King
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/tlbflush.h>

/*
 * make an area consistent.
 */
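/*
 * DMA_TO_DEVICE writes dirty cache lines back, DMA_FROM_DEVICE invalidates
 * the range (falling back to a full flush when it is not cache-line
 * aligned), and DMA_BIDIRECTIONAL writes back and invalidates.
 */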
static void __dma_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
		/*
		 * invalidate only when cache-line aligned otherwise there is
		 * the potential for discarding uncommitted data from the cache
		 */
		if ((start | end) & (L1_CACHE_BYTES - 1))
			flush_dcache_range(start, end);
		else
			invalidate_dcache_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
		unsigned long offset, size_t size, int direction)
{
	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
	size_t cur_size = seg_size;
	unsigned long flags, start, seg_offset = offset;
	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
	int seg_nr = 0;
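
	/*
	 * seg_size covers the (possibly partial) first page; nr_segs is that
	 * segment plus enough whole pages to cover the remaining bytes.
	 */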
	local_irq_save(flags);

	do {
		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;

		/* Sync this buffer segment */
		__dma_sync((void *)start, seg_size, direction);
		kunmap_atomic((void *)start);
		seg_nr++;

		/* Calculate next buffer segment size */
		seg_size = min((size_t)PAGE_SIZE, size - cur_size);

		/* Add the segment size to our running total */
		cur_size += seg_size;
		seg_offset = 0;
	} while (seg_nr < nr_segs);

	local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */

/*
 * __dma_sync_page makes memory consistent. Identical to __dma_sync, but
 * takes a struct page instead of a virtual address.
 */
static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;
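
	/* Highmem pages have no permanent kernel mapping; sync via kmap_atomic(). */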
#ifdef CONFIG_HIGHMEM
	__dma_sync_page_highmem(page, offset, size, dir);
#else
	unsigned long start = (unsigned long)page_address(page) + offset;
	__dma_sync((void *)start, size, dir);
#endif
}
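
/*
 * Called by the generic DMA-mapping code before the device accesses a
 * streaming DMA buffer.
 */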
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}
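
/*
 * Called by the generic DMA-mapping code after the device has finished with
 * a streaming DMA buffer, before the CPU touches it again.
 */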
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_sync_page(paddr, size, dir);
}
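
/*
 * Flush a page range from the data cache before it is used for a coherent
 * (uncached) DMA allocation, so no dirty lines can be written back over the
 * buffer later.
 */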
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long kaddr = (unsigned long)page_address(page);

	flush_dcache_range(kaddr, kaddr + size);
}