/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"

static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
        u32 i;

        if (c4iw_id_table_alloc(&rdev->resource.qid_table,
                                rdev->lldi.vr->qp.start,
                                rdev->lldi.vr->qp.size,
                                rdev->lldi.vr->qp.size, 0))
                return -ENOMEM;

        /* Seed the table with the base qid of each db/gts page. */
        for (i = rdev->lldi.vr->qp.start;
             i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
                if (!(i & rdev->qpmask))
                        c4iw_id_free(&rdev->resource.qid_table, i);
        return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
        int err = 0;

        err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
                                  C4IW_ID_TABLE_F_RANDOM);
        if (err)
                goto tpt_err;
        err = c4iw_init_qid_table(rdev);
        if (err)
                goto qid_err;
        err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
                                  nr_pdid, 1, 0);
        if (err)
                goto pdid_err;
        return 0;
 pdid_err:
        c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
        c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
        return -ENOMEM;
}

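/*
 * Usage sketch (illustrative, not defined in this file): the rdev-open
 * path is expected to size the tables from the adapter configuration,
 * along the lines of
 *
 *      err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
 *
 * The helper and constant names above are assumptions about the caller;
 * whatever is passed, nr_tpt and nr_pdid must be powers of 2 as the
 * comment above requires.
 */
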
/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
        u32 entry;

        entry = c4iw_id_alloc(id_table);
        if (entry == (u32)(-1))
                return 0;
        return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
        pr_debug("entry 0x%x\n", entry);
        c4iw_id_free(id_table, entry);
}

u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->cqids)) {
                entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid)
                        goto out;
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }

                /*
                 * now put the same ids on the qp list since they all
                 * map to the same db/gts page (see the worked example
                 * after this function).
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->qpids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        pr_debug("qid 0x%x\n", qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

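/*
 * Worked example (the qpmask value is illustrative, not taken from this
 * file): with rdev->qpmask == 3 there are four qids per db/gts page.  A
 * fresh c4iw_get_cqid() that pulls qid 8 from the id table hands 8 to
 * the caller, caches qids 9, 10 and 11 on uctx->cqids, and caches qids
 * 8..11 on uctx->qpids, so later CQ/QP creates in the same ucontext
 * reuse the page without going back to the shared qid table.
 */
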
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        pr_debug("qid 0x%x\n", qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->cqids);
        mutex_unlock(&uctx->lock);
}

u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_table);
                if (!qid) {
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.qid.fail++;
                        mutex_unlock(&rdev->stats.lock);
                        goto out;
                }
                mutex_lock(&rdev->stats.lock);
                rdev->stats.qid.cur += rdev->qpmask + 1;
                mutex_unlock(&rdev->stats.lock);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }

                /*
                 * now put the same ids on the cq list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->cqids);
                for (i = qid; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        pr_debug("qid 0x%x\n", qid);
        mutex_lock(&rdev->stats.lock);
        if (rdev->stats.qid.cur > rdev->stats.qid.max)
                rdev->stats.qid.max = rdev->stats.qid.cur;
        mutex_unlock(&rdev->stats.lock);
        return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        pr_debug("qid 0x%x\n", qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
        c4iw_id_table_free(&rscp->tpt_table);
        c4iw_id_table_free(&rscp->qid_table);
        c4iw_id_table_free(&rscp->pdid_table);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

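/*
 * Granularity note (added for clarity): the pool hands out PBL memory in
 * (1 << MIN_PBL_SHIFT) = 256-byte granules, i.e. 32 eight-byte page-list
 * entries.  The stats below account in the same granules, e.g. a
 * 3000-byte request is charged as roundup(3000, 256) = 3072 bytes.
 */
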
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size);
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
        } else
                rdev->stats.pbl.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}

int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
        unsigned pbl_start, pbl_chunk, pbl_top;

        rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
        if (!rdev->pbl_pool)
                return -ENOMEM;

        pbl_start = rdev->lldi.vr->pbl.start;
        pbl_chunk = rdev->lldi.vr->pbl.size;
        pbl_top = pbl_start + pbl_chunk;

        while (pbl_start < pbl_top) {
                pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
                if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
                        pr_debug("failed to add PBL chunk (%x/%x)\n",
                                 pbl_start, pbl_chunk);
                        if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
                                pr_warn("Failed to add all PBL chunks (%x/%x)\n",
                                        pbl_start, pbl_top - pbl_start);
                                return 0;
                        }
                        pbl_chunk >>= 1;
                } else {
                        pr_debug("added PBL chunk (%x/%x)\n",
                                 pbl_start, pbl_chunk);
                        pbl_start += pbl_chunk;
                }
        }

        return 0;
}

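/*
 * Note (added commentary): gen_pool_add() needs kernel memory for its
 * per-chunk bookkeeping, so the loop above retries with progressively
 * halved chunk sizes when an add fails, and only gives up once chunks
 * shrink to 1024 minimum-sized granules.  The RQT and OCQP pool
 * creators below follow the same pattern.
 */
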
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->pbl_pool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10	/* 1KB == min RQT size (16 entries) */

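/*
 * Granularity note (added for clarity): RQT sizes are passed to this
 * pool in entries and each entry is 64 bytes, hence the "size << 6"
 * conversions below.  The minimum allocation is (1 << MIN_RQT_SHIFT) =
 * 1KB, i.e. 16 entries.
 */
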
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
        if (!addr)
                pr_warn_ratelimited("%s: Out of RQT memory\n",
                                    pci_name(rdev->lldi.pdev));
        mutex_lock(&rdev->stats.lock);
        if (addr) {
                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
        } else
                rdev->stats.rqt.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size << 6);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
        unsigned rqt_start, rqt_chunk, rqt_top;

        rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
        if (!rdev->rqt_pool)
                return -ENOMEM;

        rqt_start = rdev->lldi.vr->rq.start;
        rqt_chunk = rdev->lldi.vr->rq.size;
        rqt_top = rqt_start + rqt_chunk;

        while (rqt_start < rqt_top) {
                rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
                if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
                        pr_debug("failed to add RQT chunk (%x/%x)\n",
                                 rqt_start, rqt_chunk);
                        if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
                                pr_warn("Failed to add all RQT chunks (%x/%x)\n",
                                        rqt_start, rqt_top - rqt_start);
                                return 0;
                        }
                        rqt_chunk >>= 1;
                } else {
                        pr_debug("added RQT chunk (%x/%x)\n",
                                 rqt_start, rqt_chunk);
                        rqt_start += rqt_chunk;
                }
        }

        return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->rqt_pool);
}

/*
 * On-Chip QP Memory.
 */

#define MIN_OCQP_SHIFT 12	/* 4KB == min ocqp size */

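/*
 * Granularity note (added for clarity): on-chip QP memory is handed out
 * in (1 << MIN_OCQP_SHIFT) = 4KB granules, and the stats accounting
 * below rounds every request up to that granule, mirroring the PBL pool
 * above.
 */
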
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

        pr_debug("addr 0x%x size %d\n", (u32)addr, size);
        if (addr) {
                mutex_lock(&rdev->stats.lock);
                rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
                if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
                        rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
                mutex_unlock(&rdev->stats.lock);
        }
        return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        pr_debug("addr 0x%x size %d\n", addr, size);
        mutex_lock(&rdev->stats.lock);
        rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
        unsigned start, chunk, top;

        rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
        if (!rdev->ocqp_pool)
                return -ENOMEM;

        start = rdev->lldi.vr->ocq.start;
        chunk = rdev->lldi.vr->ocq.size;
        top = start + chunk;

        while (start < top) {
                chunk = min(top - start + 1, chunk);
                if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
                        pr_debug("failed to add OCQP chunk (%x/%x)\n",
                                 start, chunk);
                        if (chunk <= 1024 << MIN_OCQP_SHIFT) {
                                pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
                                        start, top - start);
                                return 0;
                        }
                        chunk >>= 1;
                } else {
                        pr_debug("added OCQP chunk (%x/%x)\n",
                                 start, chunk);
                        start += chunk;
                }
        }

        return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->ocqp_pool);
}