arch/openrisc/kernel/dma.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */
#include <linux/dma-noncoherent.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>
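
/*
 * The pte_entry callbacks below are invoked by walk_page_range() once
 * per PTE in the requested range of init_mm; they set or clear the
 * cache-inhibit (_PAGE_CI) bit on pages already present in the kernel
 * mapping.
 */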
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}
static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};
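
/*
 * Inverse of page_set_nocache(): make the page cachable again and
 * flush the now-stale TLB entry.
 */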
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}
static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};
/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;

	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
			NULL)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}
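
/*
 * Illustrative sketch only, not part of this file: drivers do not call
 * arch_dma_alloc() directly; they go through the generic DMA API, which
 * routes the request here on OpenRISC. For example:
 *
 *	void *cpu_addr;
 *	dma_addr_t dma;
 *
 *	cpu_addr = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 */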
void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));

	free_pages_exact(vaddr, size);
}
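
/*
 * Illustrative sketch only: the matching driver-side release is
 * dma_free_coherent(dev, size, cpu_addr, dma), which ends up here and
 * makes the pages cachable again before returning them.
 */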
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
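
/*
 * Illustrative sketch only: arch_sync_dma_for_device() backs the
 * streaming DMA API. A driver handing a buffer to a device would use,
 * for example:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * which flushes the buffer out of the dcache (the DMA_TO_DEVICE case
 * above) before the device reads it; dma_sync_single_for_device()
 * performs the same maintenance when a mapping is reused.
 */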