drivers/scsi/sym53c8xx_2/sym_malloc.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
 * of PCI-SCSI IO processors.
 *
 * Copyright (C) 1999-2001  Gerard Roudier <groudier@free.fr>
 *
 * This driver is derived from the Linux sym53c8xx driver.
 * Copyright (C) 1998-2000  Gerard Roudier
 *
 * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
 * a port of the FreeBSD ncr driver to Linux-1.2.13.
 *
 * The original ncr driver has been written for 386bsd and FreeBSD by
 *         Wolfgang Stanglmeier        <wolf@cologne.de>
 *         Stefan Esser                <se@mi.Uni-Koeln.de>
 * Copyright (C) 1994  Wolfgang Stanglmeier
 *
 * Other major contributions:
 *
 * NVRAM detection and reading.
 * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
 *
 *-----------------------------------------------------------------------------
 */

#include "sym_glue.h"

/*
 *  Simple power of two buddy-like generic allocator.
 *  Provides naturally aligned memory chunks.
 *
 *  This simple code is not intended to be fast, but to
 *  provide power of 2 aligned memory allocations.
 *  Since the SCRIPTS processor only supplies 8 bit arithmetic,
 *  this allocator allows simple and fast address calculations
 *  from the SCRIPTS code. In addition, cache line alignment
 *  is guaranteed for power of 2 cache line size.
 *
 *  This allocator has been developed for the Linux sym53c8xx
 *  driver, since this O/S does not provide naturally aligned
 *  allocations.
 *  It has the advantage of allowing the driver to use private
 *  pages of memory that will be useful if we ever need to deal
 *  with IO MMUs for PCI.
 */
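/*
 *  Chunk sizes range from (1 << SYM_MEM_SHIFT) bytes up to
 *  SYM_MEM_CLUSTER_SIZE bytes.  mp->h[] keeps one free list per
 *  power-of-2 size, indexed from the smallest chunk size upward.
 */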
static void *___sym_malloc(m_pool_p mp, int size)
{
	int i = 0;
	int s = (1 << SYM_MEM_SHIFT);
	int j;
	void *a;
	m_link_p h = mp->h;

	if (size > SYM_MEM_CLUSTER_SIZE)
		return NULL;

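	/* Round the request up to a power of 2: i indexes the free list
	 * and s is the corresponding chunk size. */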
	while (size > s) {
		s <<= 1;
		++i;
	}

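	/* Scan the free lists upward for an available block.  If even the
	 * top-level (cluster-sized) list is empty, get a fresh memory
	 * cluster from the pool. */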
	j = i;
	while (!h[j].next) {
		if (s == SYM_MEM_CLUSTER_SIZE) {
			h[j].next = (m_link_p) M_GET_MEM_CLUSTER();
			if (h[j].next)
				h[j].next->next = NULL;
			break;
		}
		++j;
		s <<= 1;
	}
	a = h[j].next;
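	/* Unlink the block found and split it down to the requested size,
	 * returning each unused upper half to the matching free list. */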
	if (a) {
		h[j].next = h[j].next->next;
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (m_link_p) (a+s);
			h[j].next->next = NULL;
		}
	}
#ifdef DEBUG
	printf("___sym_malloc(%d) = %p\n", size, (void *) a);
#endif
	return a;
}

/*
 *  Counter-part of the generic allocator.
 */
static void ___sym_mfree(m_pool_p mp, void *ptr, int size)
{
	int i = 0;
	int s = (1 << SYM_MEM_SHIFT);
	m_link_p q;
	unsigned long a, b;
	m_link_p h = mp->h;

#ifdef DEBUG
	printf("___sym_mfree(%p, %d)\n", ptr, size);
#endif

	if (size > SYM_MEM_CLUSTER_SIZE)
		return;

	while (size > s) {
		s <<= 1;
		++i;
	}

	a = (unsigned long)ptr;

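	/* Coalesce with the buddy block at address (a ^ s) whenever that
	 * buddy is found on the free list for the current size, then retry
	 * one level up; otherwise just chain the chunk on its free list.
	 * Whole clusters are either kept cached or handed back, depending
	 * on SYM_MEM_FREE_UNUSED. */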
	while (1) {
		if (s == SYM_MEM_CLUSTER_SIZE) {
#ifdef SYM_MEM_FREE_UNUSED
			M_FREE_MEM_CLUSTER((void *)a);
#else
			((m_link_p) a)->next = h[i].next;
			h[i].next = (m_link_p) a;
#endif
			break;
		}
		b = a ^ s;
		q = &h[i];
		while (q->next && q->next != (m_link_p) b) {
			q = q->next;
		}
		if (!q->next) {
			((m_link_p) a)->next = h[i].next;
			h[i].next = (m_link_p) a;
			break;
		}
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		++i;
	}
}

/*
 *  Verbose and zeroing allocator that wraps the generic allocator.
 */
static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
{
	void *p;

	p = ___sym_malloc(mp, size);

	if (DEBUG_FLAGS & DEBUG_ALLOC) {
		printf ("new %-10s[%4d] @%p.\n", name, size, p);
	}

	if (p)
		memset(p, 0, size);
	else if (uflags & SYM_MEM_WARN)
		printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
	return p;
}
#define __sym_calloc(mp, s, n)	__sym_calloc2(mp, s, n, SYM_MEM_WARN)

/*
 *  Its counter-part.
 */
static void __sym_mfree(m_pool_p mp, void *ptr, int size, char *name)
{
	if (DEBUG_FLAGS & DEBUG_ALLOC)
		printf ("freeing %-10s[%4d] @%p.\n", name, size, ptr);

	___sym_mfree(mp, ptr, size);
}

/*
 *  Default memory pool we do not need to involve in DMA.
 *
 *  With DMA abstraction, we use functions (methods) to
 *  distinguish between non DMAable memory and DMAable memory.
 */
static void *___mp0_get_mem_cluster(m_pool_p mp)
{
	void *m = sym_get_mem_cluster();
	if (m)
		++mp->nump;
	return m;
}

#ifdef	SYM_MEM_FREE_UNUSED
static void ___mp0_free_mem_cluster(m_pool_p mp, void *m)
{
	sym_free_mem_cluster(m);
	--mp->nump;
}
#else
#define ___mp0_free_mem_cluster NULL
#endif

static struct sym_m_pool mp0 = {
	NULL,
	___mp0_get_mem_cluster,
	___mp0_free_mem_cluster
};

/*
 *  Methods that maintain DMAable pools according to user allocations.
 *  New pools are created on the fly when a new pool id is provided.
 *  They are deleted on the fly when they get emptied.
 */

/* Get a memory cluster that matches the DMA constraints of a given pool */
static void * ___get_dma_mem_cluster(m_pool_p mp)
{
	m_vtob_p vbp;
	void *vaddr;

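	/* Record the virtual/bus address pair of the new cluster in a VTOB
	 * entry hashed by virtual address, so __vtobus() can translate
	 * pointers that fall inside this cluster. */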
	vbp = __sym_calloc(&mp0, sizeof(*vbp), "VTOB");
	if (!vbp)
		goto out_err;

	vaddr = sym_m_get_dma_mem_cluster(mp, vbp);
	if (vaddr) {
		int hc = VTOB_HASH_CODE(vaddr);
		vbp->next = mp->vtob[hc];
		mp->vtob[hc] = vbp;
		++mp->nump;
	}
	return vaddr;
out_err:
	return NULL;
}

#ifdef	SYM_MEM_FREE_UNUSED
/* Free a memory cluster and associated resources for DMA */
static void ___free_dma_mem_cluster(m_pool_p mp, void *m)
{
	m_vtob_p *vbpp, vbp;
	int hc = VTOB_HASH_CODE(m);

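	/* Unlink the VTOB entry for this cluster from its hash chain, then
	 * release the DMA memory and the entry itself. */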
	vbpp = &mp->vtob[hc];
	while (*vbpp && (*vbpp)->vaddr != m)
		vbpp = &(*vbpp)->next;
	if (*vbpp) {
		vbp = *vbpp;
		*vbpp = (*vbpp)->next;
		sym_m_free_dma_mem_cluster(mp, vbp);
		__sym_mfree(&mp0, vbp, sizeof(*vbp), "VTOB");
		--mp->nump;
	}
}
#endif

/* Fetch the memory pool for a given pool id (i.e. DMA constraints) */
static inline m_pool_p ___get_dma_pool(m_pool_ident_t dev_dmat)
{
	m_pool_p mp;
	for (mp = mp0.next;
		mp && !sym_m_pool_match(mp->dev_dmat, dev_dmat);
			mp = mp->next);
	return mp;
}

/* Create a new DMAable memory pool (when the fetch failed) */
static m_pool_p ___cre_dma_pool(m_pool_ident_t dev_dmat)
{
	m_pool_p mp = __sym_calloc(&mp0, sizeof(*mp), "MPOOL");
	if (mp) {
		mp->dev_dmat = dev_dmat;
		mp->get_mem_cluster = ___get_dma_mem_cluster;
#ifdef	SYM_MEM_FREE_UNUSED
		mp->free_mem_cluster = ___free_dma_mem_cluster;
#endif
		mp->next = mp0.next;
		mp0.next = mp;
		return mp;
	}
	return NULL;
}

#ifdef	SYM_MEM_FREE_UNUSED
/* Destroy a DMAable memory pool (when it gets emptied) */
static void ___del_dma_pool(m_pool_p p)
{
	m_pool_p *pp = &mp0.next;

	while (*pp && *pp != p)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = (*pp)->next;
		__sym_mfree(&mp0, p, sizeof(*p), "MPOOL");
	}
}
#endif

/* This lock protects only the memory allocation/free. */
static DEFINE_SPINLOCK(sym53c8xx_lock);

/*
 *  Actual allocator for DMAable memory.
 */
void *__sym_calloc_dma(m_pool_ident_t dev_dmat, int size, char *name)
{
	unsigned long flags;
	m_pool_p mp;
	void *m = NULL;

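	/* Find the pool that matches these DMA constraints, creating it on
	 * first use.  If, after the allocation attempt, the pool still owns
	 * no memory cluster, it is deleted again (SYM_MEM_FREE_UNUSED). */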
	spin_lock_irqsave(&sym53c8xx_lock, flags);
	mp = ___get_dma_pool(dev_dmat);
	if (!mp)
		mp = ___cre_dma_pool(dev_dmat);
	if (!mp)
		goto out;
	m = __sym_calloc(mp, size, name);
#ifdef	SYM_MEM_FREE_UNUSED
	if (!mp->nump)
		___del_dma_pool(mp);
#endif

out:
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
	return m;
}

void __sym_mfree_dma(m_pool_ident_t dev_dmat, void *m, int size, char *name)
{
	unsigned long flags;
	m_pool_p mp;

	spin_lock_irqsave(&sym53c8xx_lock, flags);
	mp = ___get_dma_pool(dev_dmat);
	if (!mp)
		goto out;
	__sym_mfree(mp, m, size, name);
#ifdef	SYM_MEM_FREE_UNUSED
	if (!mp->nump)
		___del_dma_pool(mp);
#endif
out:
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
}

/*
 *  Actual virtual to bus physical address translator
 *  for 32 bit addressable DMAable memory.
 */
dma_addr_t __vtobus(m_pool_ident_t dev_dmat, void *m)
{
	unsigned long flags;
	m_pool_p mp;
	int hc = VTOB_HASH_CODE(m);
	m_vtob_p vp = NULL;
	void *a = (void *)((unsigned long)m & ~SYM_MEM_CLUSTER_MASK);
	dma_addr_t b;

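	/* a is the base address of the cluster containing m.  Look up the
	 * VTOB entry for that cluster and add back m's offset within it to
	 * the recorded bus address. */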
	spin_lock_irqsave(&sym53c8xx_lock, flags);
	mp = ___get_dma_pool(dev_dmat);
	if (mp) {
		vp = mp->vtob[hc];
		while (vp && vp->vaddr != a)
			vp = vp->next;
	}
	if (!vp)
		panic("sym: VTOBUS FAILED!\n");
	b = vp->baddr + (m - a);
	spin_unlock_irqrestore(&sym53c8xx_lock, flags);
	return b;
}