/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
 */
#include <linux/slab.h>
#include <linux/string.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
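/*
 * Note: queues up to MTHCA_MAX_DIRECT_SRQ_SIZE (16 KB assuming 4 KB
 * pages) live in one physically contiguous ("direct") buffer; larger
 * queues are split across a page list.  get_wqe() below handles both
 * layouts.
 */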
struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};
struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}
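/*
 * Example of get_wqe()'s indexing: with wqe_shift == 5 (32-byte WQEs)
 * and 4 KB pages, WQE 200 sits at byte offset 200 << 5 == 6400, which
 * in the indirect case is page_list[1].buf + 2304.
 */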
/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}
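/*
 * On the encoding above: the low 6 bits of wqe_base_ds hold the
 * descriptor size in 16-byte units, so 1 << (srq->wqe_shift - 4) is
 * the descriptor size divided by 16.  For example, wqe_shift == 7
 * (128-byte WQEs) is encoded as 8.
 */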
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize;

	memset(context, 0, sizeof *context);

	logsize = ilog2(srq->max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}
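/*
 * logstride_usrpage above packs two fields: the top three bits carry
 * the log of the WQE stride in 16-byte units ((wqe_shift - 4) << 29),
 * and the low bits carry the UAR page index the SRQ's doorbells are
 * rung from.
 */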
static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}
static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		wqe = get_wqe(srq, i);

		*wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}
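/*
 * After the initialization loop above the free list is threaded
 * through the imm fields: with srq->max == 4, for instance, the links
 * read 1, 2, 3, -1, so walking from first_free == 0 visits the whole
 * queue and -1 marks the tail.
 */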
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	u8 status;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	/*
	 * Allocate one extra WQE: the post routines treat a single
	 * remaining free WQE as a full SRQ, so one slot is always
	 * left unused.
	 */
	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);
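	/*
	 * Example of the descriptor sizing: with max_gs == 4 a WQE
	 * needs 16 bytes of next segment plus 4 * 16 bytes of data
	 * segments == 80 bytes, which rounds up to ds == 128 and
	 * wqe_shift == 7 (assuming the usual 16-byte mthca_next_seg
	 * and mthca_data_seg layouts).
	 */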
	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;
	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}
	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}
	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;
	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);
	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_buf;
	}
	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);
	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;
err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);
err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));
	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret;
	u8 status;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
		if (status)
			return -EINVAL;
	}

	return 0;
}
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	u8 status;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}
/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	if (likely(srq->first_free >= 0))
		*wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
	else
		srq->first_free = ind;

	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}
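/*
 * mthca_free_srq_wqe() is what recycles a WQE after the CQ poll path
 * consumes its completion: the WQE is appended at last_free and
 * becomes the new tail (link == -1), so the free list cycles through
 * the buffer in completion order rather than queue order.
 */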
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;
	for (nreq = 0; wr; wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}
		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		/* Terminate a short scatter list with an invalid L_Key */
		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << srq->wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
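		/*
		 * Tavor receive doorbells carry the address of the
		 * first WQE of the chain in word 0 and
		 * (srqn << 8 | count) in word 1.  Since the count
		 * field is only 8 bits wide, we ring a doorbell every
		 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB (256) WQEs so it
		 * never overflows; a full batch is rung with the
		 * count field wrapped to 0.
		 */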
		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
			doorbell[1] = cpu_to_be32(srq->srqn << 8);

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
		doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);

		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;

		if (ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (next_ind < 0) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		((struct mthca_next_seg *) wqe)->nda_op =
			cpu_to_be32((next_ind << srq->wqe_shift) | 1);
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		/* Terminate a short scatter list with an invalid L_Key */
		if (i < srq->max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}
		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}
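	/*
	 * Unlike Tavor, mem-free (Arbel) hardware picks up new
	 * receive WQEs by reading a doorbell record in memory, so
	 * posting only needs the counter store below rather than an
	 * MMIO doorbell per batch.
	 */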
	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}
int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
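	/*
	 * Worked example: if max_desc_sz were 1008, the largest power
	 * of 2 that fits is 512, leaving (512 - 16) / 16 == 31 SGEs
	 * (assuming the usual 16-byte segment sizes), even though a
	 * multiples-of-16 layout could have fit 62.
	 */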
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}
int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}
void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}