/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include "iw_cxgb4.h"

#define RANDOM_SIZE 16
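
/*
 * Shared initializer for the resource kfifos.  Ids in the range
 * [skip_low, nr - skip_high) are pushed into the fifo; when @random is
 * set they are pushed in a shuffled order (driven by random32()) so
 * that ids are not handed out sequentially.  This reading of the
 * shuffle's purpose is inferred from the code below, not from an
 * original comment.
 */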
static int __c4iw_init_resource_fifo(struct kfifo *fifo,
				     spinlock_t *fifo_lock,
				     u32 nr, u32 skip_low,
				     u32 skip_high,
				     int random)
{
	u32 i, j, entry = 0, idx;
	u32 random_bytes;
	u32 rarray[16];
	spin_lock_init(fifo_lock);

	if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	/* Reserve space in the fifo for the skipped ids. */
	for (i = 0; i < skip_low + skip_high; i++)
		kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
	if (random) {
		j = 0;
		random_bytes = random32();
		for (i = 0; i < RANDOM_SIZE; i++)
			rarray[i] = i + skip_low;
		for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
			if (j >= RANDOM_SIZE) {
				j = 0;
				random_bytes = random32();
			}
			idx = (random_bytes >> (j * 2)) & 0xF;
			kfifo_in(fifo,
				 (unsigned char *) &rarray[idx],
				 sizeof(u32));
			rarray[idx] = i;
			j++;
		}
		for (i = 0; i < RANDOM_SIZE; i++)
			kfifo_in(fifo,
				 (unsigned char *) &rarray[i],
				 sizeof(u32));
	} else
		for (i = skip_low; i < nr - skip_high; i++)
			kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));

	/* Drain the placeholder entries pushed for the skipped ids. */
	for (i = 0; i < skip_low + skip_high; i++)
		if (kfifo_out_locked(fifo, (unsigned char *) &entry,
				     sizeof(u32), fifo_lock))
			break;
	return 0;
}
static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock,
				   u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					 skip_high, 0);
}

static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
					  spinlock_t *fifo_lock,
					  u32 nr, u32 skip_low, u32 skip_high)
{
	return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
					 skip_high, 1);
}
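
/*
 * Note: qids come in blocks that share one doorbell/GTS page;
 * rdev->qpmask masks off the within-page index, so only the first qid
 * of each block is seeded into the fifo below.  (This description of
 * qpmask is inferred from how it is used here and in
 * c4iw_get_cqid()/c4iw_get_qpid(), not from an original comment.)
 */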
static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
{
	u32 i;

	spin_lock_init(&rdev->resource.qid_fifo_lock);

	if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
			sizeof(u32), GFP_KERNEL))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			kfifo_in(&rdev->resource.qid_fifo,
				 (unsigned char *) &i, sizeof(u32));
	return 0;
}
/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;

	err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
					     &rdev->resource.tpt_fifo_lock,
					     nr_tpt, 1, 0);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_fifo(rdev);
	if (err)
		goto qid_err;
	err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
				      &rdev->resource.pdid_fifo_lock,
				      nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
 pdid_err:
	kfifo_free(&rdev->resource.qid_fifo);
 qid_err:
	kfifo_free(&rdev->resource.tpt_fifo);
 tpt_err:
	return -ENOMEM;
}
/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
{
	u32 entry;

	if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
		return entry;
	else
		return 0;
}
void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
{
	PDBG("%s entry 0x%x\n", __func__, entry);
	kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
}
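
/*
 * Qids are pulled from the fifo a whole db/gts page at a time; the
 * sibling ids sharing that page are cached on the per-ucontext
 * cqids/qpids lists for later reuse.  A hypothetical caller
 * (illustration only, not code from this file) would pair the calls
 * like so:
 *
 *	qid = c4iw_get_cqid(rdev, uctx);
 *	if (!qid)
 *		return -ENOMEM;
 *	...
 *	c4iw_put_cqid(rdev, qid, uctx);
 */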
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		/*
		 * Start at qid + 1: qid itself was queued just above, and
		 * starting at qid would terminate the loop immediately
		 * since qid has no qpmask bits set.
		 */
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	kfifo_free(&rscp->tpt_fifo);
	kfifo_free(&rscp->qid_fifo);
	kfifo_free(&rscp->pdid_fifo);
}
/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	if (!addr && printk_ratelimit())
		printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
}
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}
/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10		/* 1KB == min RQT size (16 entries) */
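
/*
 * RQT sizes are passed around in units of queue entries, and each
 * entry occupies 64 bytes, hence the "size << 6" conversions to bytes
 * below.  (The 64B entry size is inferred from the shift and from
 * MIN_RQT_SHIFT's "16 entries" comment: 1KB / 16 = 64B.)
 */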
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
	if (!addr && printk_ratelimit())
		printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
		       pci_name(rdev->lldi.pdev));
	return (u32)addr;
}
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}
/*
 * On-Chip QP Memory.
 */

#define MIN_OCQP_SHIFT 12		/* 4KB == min ocqp size */
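
/*
 * OCQP memory is a region of adapter (on-chip) memory that can back
 * QP queues directly instead of host memory.  (This one-line
 * description is added for context and is not from the original
 * file.)
 */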
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	return (u32)addr;
}
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			PDBG("%s failed to add OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all OCQP chunks (%x/%x)\n",
				       start, top - start);
				return 0;
			}
			chunk >>= 1;
		} else {
			PDBG("%s added OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			start += chunk;
		}
	}
	return 0;
}
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->ocqp_pool);
}