arch/openrisc/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */
#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
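
/*
 * Page-walk callback: walk_page_range() invokes this once per PTE in the
 * requested range.  It sets the cache-inhibit bit, drops the now-stale
 * TLB entry, and writes any cached lines for the page back to memory.
 */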
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry	= page_set_nocache,
};
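
/*
 * Inverse of page_set_nocache(): clear the cache-inhibit bit again and
 * flush the TLB entry.  No dcache maintenance is needed here, since no
 * lines were cached while the bit was set.
 */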
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry	= page_clear_nocache,
};
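
/*
 * Hook used by the generic DMA code (on architectures selecting
 * ARCH_HAS_DMA_SET_UNCACHED) to make a freshly allocated coherent
 * buffer uncacheable.  Returns cpu_addr on success or an ERR_PTR().
 */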
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_read_lock(&init_mm);
	error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
			NULL);
	mmap_read_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}
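
/*
 * Counterpart hook, called when a coherent buffer is freed: make the
 * kernel mapping cacheable again.
 */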
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_read_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));
	mmap_read_unlock(&init_mm);
}
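
/*
 * Streaming-DMA sync, called before the device accesses the buffer
 * (e.g. from dma_map_single()).  DMA_TO_DEVICE writes dirty cache
 * lines back so the device sees the CPU's data; DMA_FROM_DEVICE
 * invalidates the range so stale lines can't mask or overwrite what
 * the device is about to write.
 */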
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
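
/*
 * Usage sketch (not part of the original file): drivers never call the
 * hooks above directly; they go through the generic DMA API, which lands
 * here on OpenRISC.  A minimal, hypothetical example, assuming "dev" is
 * the driver's struct device and "buf"/"size" a kmalloc'ed buffer and
 * its length:
 *
 *	void *vaddr;
 *	dma_addr_t coh_handle, map_handle;
 *
 *	vaddr = dma_alloc_coherent(dev, size, &coh_handle, GFP_KERNEL);
 *				(allocation path ends up calling
 *				 arch_dma_set_uncached() above)
 *
 *	map_handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
 *				(calls arch_sync_dma_for_device())
 *
 *	dma_unmap_single(dev, map_handle, size, DMA_TO_DEVICE);
 *	dma_free_coherent(dev, size, vaddr, coh_handle);
 *				(teardown calls arch_dma_clear_uncached())
 */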