// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr)	(iommu_use[dvma_index(baddr)])
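
/*
 * Free DVMA space is tracked as a list of "holes" (free bus-address
 * ranges) on hole_list, kept ordered so contiguous ranges can be
 * coalesced.  Hole descriptors themselves are recycled through
 * hole_cache, seeded from the static initholes[] array, so the
 * allocator never has to call a memory allocator at runtime.
 */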
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{
	int i;
	int j = 0;

	pr_info("dvma entry usage:\n");

	for (i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if (!iommu_use[i])
			continue;

		j++;

		pr_info("dvma entry: %08x len %08lx\n",
			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
	}

	pr_info("%d entries in use total\n", j);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{
	struct list_head *cur;
	struct hole *hole;

	pr_info("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */
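
/*
 * Replenish the hole cache by merging pairs of contiguous holes on
 * hole_list; each merge returns one descriptor to hole_cache.
 * Returns the number of descriptors recovered.
 */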
static inline int refill(void)
{
	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (!prev) {
			prev = hole;
			continue;
		}

		if (hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}
	}

	return ret;
}
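
/*
 * Take a hole descriptor from hole_cache.  If the cache is empty,
 * try to refill it by coalescing holes; if that recovers nothing,
 * BUG() out.
 */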
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if (list_empty(&hole_cache)) {
		if (!refill()) {
			pr_crit("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;
}
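
/*
 * Allocate a bus-address range of the given length and alignment by
 * carving it off the tail (high end) of the first hole that can
 * satisfy it.  The allocated length is recorded via dvma_entry_use()
 * so that free_baddr() can recover it later.
 */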
static inline unsigned long get_baddr(int len, unsigned long align)
{
	struct list_head *cur;
	struct hole *hole;

	if (list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if (align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align - 1));
		else
			newlen = len;

		if (hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if (hole->size == newlen) {
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}
	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}
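
/*
 * Return a bus-address range to the free list.  The length is looked
 * up via dvma_entry_use(), the IOMMU mapping is torn down, and the
 * range is merged into an adjoining hole when possible; otherwise a
 * fresh descriptor is taken from the cache.
 */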
static inline int free_baddr(unsigned long baddr)
{
	unsigned long len;
	struct hole *hole;
	struct list_head *cur;
	unsigned long orig_baddr;

	orig_baddr = baddr;
	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if (hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}
	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	// list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;
}
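
/*
 * Set up the hole lists, seed the descriptor cache from initholes[],
 * publish the whole DVMA region as a single free hole, and allocate
 * the per-entry usage table.
 */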
void __init dvma_init(void)
{
	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for (i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
				   SMP_CACHE_BYTES);
	if (!iommu_use)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif
}
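
/*
 * Map an existing kernel buffer into DVMA space and return the
 * corresponding bus address, preserving the buffer's offset within
 * the page.  A zero len defaults to 0x800 bytes.
 */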
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{
	unsigned long baddr;
	unsigned long off;

	if (!len)
		len = 0x800;

	if (!kaddr || !len) {
		// pr_err("error: kaddr %lx len %x\n", kaddr, len);
		// *(int *)4 = 0;
		return 0;
	}

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	if (align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
	// pr_info("using baddr %lx\n", baddr);

	if (!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
		len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);
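
/*
 * Undo a dvma_map_align() mapping.  Bus addresses handed out through
 * the VME alias (0x00f00000 bits clear) are first folded back into
 * the native DVMA window before the range is freed.
 */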
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);
}
EXPORT_SYMBOL(dvma_unmap);
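
/*
 * Allocate fresh pages, map them for both device (IOMMU) and CPU
 * access, and return the CPU-visible DVMA virtual address.  On any
 * failure the partial work is unwound and NULL is returned.
 */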
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if (!len)
		return NULL;

	pr_debug("dvma_malloc request %lx bytes\n", len);
	len = ((len + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	if ((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if ((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
		 baddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(dvma_malloc_align);
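
/*
 * Typical driver usage (an illustrative sketch, not part of this
 * file; it assumes the dvma_malloc() and dvma_vtob() convenience
 * wrappers from <asm/dvma.h>):
 *
 *	void *buf = dvma_malloc(PAGE_SIZE);	// CPU-visible address
 *	if (buf) {
 *		unsigned long busaddr = dvma_vtob(buf);
 *		// hand busaddr to the device for DMA ...
 *	}
 */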

void dvma_free(void *vaddr)
{
	/* intentionally a no-op: DVMA allocations are never reclaimed */
}
EXPORT_SYMBOL(dvma_free);