/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"
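
/*
 * This file tracks the resources the driver hands out: queue IDs (shared
 * by CQs and QPs), TPT/STag entries, PDIDs, and the adapter PBL, RQT and
 * on-chip QP memory regions.  IDs come from c4iw_id_table allocators;
 * the memory regions are carved up with the Linux generic allocator
 * (lib/genalloc.c).
 */
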
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
				rdev->lldi.vr->qp.start,
				rdev->lldi.vr->qp.size,
				rdev->lldi.vr->qp.size, 0))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
	int err = 0;

	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
				  C4IW_ID_TABLE_F_RANDOM);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
				  nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	return 0;
 pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
	c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 entry;

	entry = c4iw_id_alloc(id_table);
	if (entry == (u32)(-1))
		return 0;
	return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	PDBG("%s entry 0x%x\n", __func__, entry);
	c4iw_id_free(id_table, entry);
}

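/*
 * QIDs are handed out in blocks of (rdev->qpmask + 1) because every qid
 * in a block maps to the same doorbell/GTS page.  When a fresh block is
 * pulled from the qid table, the first id goes to the caller and the
 * remaining ids in the block are cached on the per-ucontext cqid/qpid
 * lists so later allocations for that context reuse the same page.
 */
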
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qid 0x%x\n", __func__, qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->tpt_table);
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */
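
/*
 * The PBL (physical buffer list) region is a range of adapter memory
 * described by rdev->lldi.vr->pbl.  It is handed out via a gen_pool
 * whose minimum allocation order is MIN_PBL_SHIFT, so every allocation
 * is accounted in multiples of 256 bytes (32 8-byte PBL entries).
 */
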
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
			rdev->stats.pbl.max = rdev->stats.pbl.cur;
	} else
		rdev->stats.pbl.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}

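/*
 * The *_pool_create() helpers below all follow the same pattern: try to
 * add the whole hardware region to the gen_pool in one chunk; if
 * gen_pool_add() fails (it kmallocs bookkeeping for each chunk), halve
 * the chunk size and retry, giving up once the chunk drops to 1024 times
 * the minimum allocation size.
 */
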
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->pbl_pool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10	/* 1KB == min RQT size (16 entries) */
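
/*
 * Note that the RQT allocator takes its size argument in RQ entries and
 * shifts it by 6 before touching the gen_pool: each RQT entry occupies
 * 64 bytes of adapter memory, so gen_pool_alloc()/gen_pool_free() always
 * see a byte count (the 1KB minimum above thus covers 16 entries).
 */
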
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
	if (!addr)
		printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
				   pci_name(rdev->lldi.pdev));
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
			rdev->stats.rqt.max = rdev->stats.rqt.cur;
	} else
		rdev->stats.rqt.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->rqt_pool);
}

/* On-Chip QP Memory. */
#define MIN_OCQP_SHIFT 12	/* 4KB == min ocqp size */
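
/*
 * The on-chip QP (OCQP) pool carves up the rdev->lldi.vr->ocq region in
 * 4KB-aligned pieces; it backs queues placed in adapter memory rather
 * than host DMA memory.  Allocation and accounting mirror the PBL pool
 * above.
 */
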
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
	if (addr) {
		mutex_lock(&rdev->stats.lock);
		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
		if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
			rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
		mutex_unlock(&rdev->stats.lock);
	}
	return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			PDBG("%s failed to add OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all OCQP chunks (%x/%x)\n",
				       start, top - start);
				return 0;
			}
			chunk >>= 1;
		} else {
			PDBG("%s added OCQP chunk (%x/%x)\n",
			     __func__, start, chunk);
			start += chunk;
		}
	}
	return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->ocqp_pool);
}