1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 */
9 #include <linux/kernel.h>
10 #include <linux/string.h>
11 #include <linux/types.h>
13 #include <linux/init.h>
14 #include <linux/dma-noncoherent.h>
15 #include <asm/cpuinfo.h>
16 #include <asm/cacheflush.h>
18 void arch_dma_prep_coherent(struct page
*page
, size_t size
)
20 phys_addr_t paddr
= page_to_phys(page
);
22 flush_dcache_range(paddr
, paddr
+ size
);
/*
 * Consistent memory allocators. Used for DMA devices that want to share
 * uncached memory with the processor core. My crufty no-MMU approach is
 * simple. In the HW platform we can optionally mirror the DDR up above the
 * processor cacheable region. So, memory accessed in this mirror region will
 * not be cached. It's alloced from the same pool as normal memory, but the
 * handle we return is shifted up into the uncached region. This will no doubt
 * cause big problems if memory allocated here is not also freed properly. -- JW
 *
 * I have to use dcache values because I can't rely on the RAM size:
 */
/*
 * Offset of the uncached shadow mirror above the cached region; the mask
 * spans exactly the dcache-covered range. Zero when no shadow is configured
 * (missing #else restored: without it the second #define unconditionally
 * redefined the mask to 0).
 */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#else
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
43 void *uncached_kernel_address(void *ptr
)
45 unsigned long addr
= (unsigned long)ptr
;
47 addr
|= UNCACHED_SHADOW_MASK
;
48 if (addr
> cpuinfo
.dcache_base
&& addr
< cpuinfo
.dcache_high
)
49 pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
53 void *cached_kernel_address(void *ptr
)
55 unsigned long addr
= (unsigned long)ptr
;
57 return (void *)(addr
& ~UNCACHED_SHADOW_MASK
);
59 #endif /* CONFIG_MMU */