/*
 * Copyright(c) 2016 - 2019 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10240,  /* 14: 10.24 */
        15360,  /* 15: 15.36 */
        20480,  /* 16: 20.48 */
        30720,  /* 17: 30.72 */
        40960,  /* 18: 40.96 */
        61440,  /* 19: 61.44 */
        81920,  /* 1A: 81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};
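
/*
 * Illustrative note (not from the original source): the table is indexed
 * by the 5-bit RNR NAK timeout code carried in the AETH, so a code of
 * 0x1F maps to 491520 us (491.52 ms).  A minimal sketch of how a caller
 * might turn a code into a jiffies delay, assuming a helper name of our
 * own choosing:
 *
 *      static unsigned long rnr_code_to_jiffies(u8 code)
 *      {
 *              return usecs_to_jiffies(ib_rvt_rnr_table[code & 0x1f]);
 *      }
 */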
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_INIT] = RVT_POST_RECV_OK,
        [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
        [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
            RVT_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
        [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
        [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
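
/*
 * Illustrative note (not from the original source): posting paths gate on
 * this table before touching a queue.  For example, a send post is
 * rejected unless the current state allows it:
 *
 *      if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *              return -EINVAL;
 */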
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
        /* assume that the boot CPU value is universal for all CPUs */
        return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
        /*
         * Use the only available X64 cacheless copy.  Add a __user cast
         * to quiet sparse.  The src argument is already in the kernel so
         * there are no security issues.  The extra fault recovery machinery
         * is not invoked.
         */
        __copy_user_nocache(dst, (void __user *)src, n, 0);
}
void rvt_wss_exit(struct rvt_dev_info *rdi)
{
        struct rvt_wss *wss = rdi->wss;

        /* coded to handle partially initialized and repeat callers */
/**
 * rvt_wss_init - Init wss data structures
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
        unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
        unsigned int wss_threshold = rdi->dparms.wss_threshold;
        unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
        int node = rdi->dparms.node;

        if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {

        rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);

        /* check for a valid percent range - default to 80 if none or invalid */
        if (wss_threshold < 1 || wss_threshold > 100)
                wss_threshold = 80;

        /* reject a wildly large period */
        if (wss_clean_period > 1000000)
                wss_clean_period = 256;

        /* reject a zero period */
        if (wss_clean_period == 0)
                wss_clean_period = 1;

        /*
         * Calculate the table size - the next power of 2 larger than the
         * LLC size.  LLC size is in KiB.
         */
        llc_size = rvt_wss_llc_size() * 1024;
        table_size = roundup_pow_of_two(llc_size);

        /* one bit per page in rounded up table */
        llc_bits = llc_size / PAGE_SIZE;
        table_bits = table_size / PAGE_SIZE;
        wss->pages_mask = table_bits - 1;
        wss->num_entries = table_bits / BITS_PER_LONG;

        wss->threshold = (llc_bits * wss_threshold) / 100;
        if (wss->threshold == 0)

        wss->clean_period = wss_clean_period;
        atomic_set(&wss->clean_counter, wss_clean_period);

        wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
                                    GFP_KERNEL, node);
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
        /* become the cleaner if we decrement the counter to zero */
        if (atomic_dec_and_test(&wss->clean_counter)) {
                /*
                 * Set, not add, the clean period.  This avoids an issue
                 * where the counter could decrement below the clean period.
                 * Doing a set can result in lost decrements, slowing the
                 * clean advance.  Since this is a heuristic, this possible
                 * slowdown is OK.
                 *
                 * An alternative is to loop, advancing the counter by a
                 * clean period until the result is > 0. However, this could
                 * lead to several threads keeping another in the clean loop.
                 * This could be mitigated by limiting the number of times
                 * we stay in the loop.
                 */
                atomic_set(&wss->clean_counter, wss->clean_period);

                /*
                 * Uniquely grab the entry to clean and move to next.
                 * The current entry is always the lower bits of
                 * wss.clean_entry.  The table size, wss.num_entries,
                 * is always a power-of-2.
                 */
                entry = (atomic_inc_return(&wss->clean_entry) - 1)
                        & (wss->num_entries - 1);

                /* clear the entry and count the bits */
                bits = xchg(&wss->entries[entry], 0);
                weight = hweight64((u64)bits);
                /* only adjust the contended total count if needed */
                if (weight)
                        atomic_sub(weight, &wss->total_count);
        }
}
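
/*
 * Illustrative note (not from the original source): wss_insert() below
 * calls this once per copied page, so with a clean period of, say, 256,
 * roughly one bitmap word (BITS_PER_LONG page bits) is cleared for every
 * 256 insertions, letting the working-set estimate decay instead of
 * growing without bound.
 */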
/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
        u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
        u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
        u32 nr = page & (BITS_PER_LONG - 1);

        if (!test_and_set_bit(nr, &wss->entries[entry]))
                atomic_inc(&wss->total_count);

        wss_advance_clean_counter(wss);
}
/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
        return atomic_read(&wss->total_count) >= wss->threshold;
}
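
/*
 * Illustrative worked example (not from the original source): with a
 * 32 MiB LLC and 4 KiB pages, llc_bits is 8192; at an 80% threshold,
 * wss->threshold is (8192 * 80) / 100 = 6553, so the adaptive SGE copy
 * path treats the cache as saturated once roughly 6553 distinct pages
 * are marked in the working-set bitmap.
 */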
static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */
        spin_lock(&qpt->lock);
        map->page = (void *)page;
        spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        struct rvt_qpn_map *map;

        if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs let them use
         * our qpn table. No need for two. Lets go ahead and mark the bitmaps
         * for those. The reserved range must be *after* the range which verbs
         * uses.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                get_map_page(qpt, map);
                set_bit(offset, map->page);
                if (offset == RVT_BITS_PER_PAGE) {
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps ||
            !rdi->driver_f.qp_priv_alloc ||
            !rdi->driver_f.qp_priv_free ||
            !rdi->driver_f.notify_qp_reset ||
            !rdi->driver_f.notify_restart_rc)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
                                   rdi->dparms.node);

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc_array_node(rdi->qp_dev->qp_table_size,
                                   sizeof(*rdi->qp_dev->qp_table),
                                   GFP_KERNEL, rdi->dparms.node);
        if (!rdi->qp_dev->qp_table)

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))

        spin_lock_init(&rdi->n_qps_lock);

        return 0;

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
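
/*
 * Illustrative note (not from the original source): qp_table_bits is
 * ilog2(qp_table_size), so a QP number maps to its hash bucket the same
 * way the insert and remove paths below compute it:
 *
 *      u32 bucket = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
 *
 * e.g. with qp_table_size = 256 the bucket index is always in [0, 255].
 */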
/**
 * free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned long flags;
        struct rvt_qp *qp;
        unsigned n, qp_inuse = 0;
        spinlock_t *ql; /* work around too long line below */

        if (rdi->driver_f.free_all_qps)
                qp_inuse = rdi->driver_f.free_all_qps(rdi);

        qp_inuse += rvt_mcast_tree_empty(rdi);

        ql = &rdi->qp_dev->qpt_lock;
        spin_lock_irqsave(ql, flags);
        for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
                                               lockdep_is_held(ql));
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

                for (; qp; qp = rcu_dereference_protected(qp->next,
                                                          lockdep_is_held(ql)))
                        qp_inuse++;
        }
        spin_unlock_irqrestore(ql, flags);

        return qp_inuse;
}
/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = rvt_free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
}
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
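
/*
 * Illustrative worked example (not from the original source): assuming
 * RVT_BITS_PER_PAGE works out to 32768 (one bit per byte of a 4 KiB
 * bitmap page), bit 7 of the third bitmap page (map index 2) corresponds
 * to QPN 2 * 32768 + 7 = 65543.
 */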
520 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
521 * IB_QPT_SMI/IB_QPT_GSI
522 * @rdi: rvt device info structure
523 * @qpt: queue pair number table pointer
524 * @port_num: IB port number, 1 based, comes from core
526 * Return: The queue pair number
528 static int alloc_qpn(struct rvt_dev_info
*rdi
, struct rvt_qpn_table
*qpt
,
529 enum ib_qp_type type
, u8 port_num
)
531 u32 i
, offset
, max_scan
, qpn
;
532 struct rvt_qpn_map
*map
;
535 if (rdi
->driver_f
.alloc_qpn
)
536 return rdi
->driver_f
.alloc_qpn(rdi
, qpt
, type
, port_num
);
538 if (type
== IB_QPT_SMI
|| type
== IB_QPT_GSI
) {
541 ret
= type
== IB_QPT_GSI
;
542 n
= 1 << (ret
+ 2 * (port_num
- 1));
543 spin_lock(&qpt
->lock
);
548 spin_unlock(&qpt
->lock
);
552 qpn
= qpt
->last
+ qpt
->incr
;
553 if (qpn
>= RVT_QPN_MAX
)
554 qpn
= qpt
->incr
| ((qpt
->last
& 1) ^ 1);
555 /* offset carries bit 0 */
556 offset
= qpn
& RVT_BITS_PER_PAGE_MASK
;
557 map
= &qpt
->map
[qpn
/ RVT_BITS_PER_PAGE
];
558 max_scan
= qpt
->nmaps
- !offset
;
560 if (unlikely(!map
->page
)) {
561 get_map_page(qpt
, map
);
562 if (unlikely(!map
->page
))
566 if (!test_and_set_bit(offset
, map
->page
)) {
573 * This qpn might be bogus if offset >= BITS_PER_PAGE.
574 * That is OK. It gets re-assigned below
576 qpn
= mk_qpn(qpt
, map
, offset
);
577 } while (offset
< RVT_BITS_PER_PAGE
&& qpn
< RVT_QPN_MAX
);
579 * In order to keep the number of pages allocated to a
580 * minimum, we scan the all existing pages before increasing
581 * the size of the bitmap table.
583 if (++i
> max_scan
) {
584 if (qpt
->nmaps
== RVT_QPNMAP_ENTRIES
)
586 map
= &qpt
->map
[qpt
->nmaps
++];
587 /* start at incr with current bit 0 */
588 offset
= qpt
->incr
| (offset
& 1);
589 } else if (map
< &qpt
->map
[qpt
->nmaps
]) {
591 /* start at incr with current bit 0 */
592 offset
= qpt
->incr
| (offset
& 1);
595 /* wrap to first map page, invert bit 0 */
596 offset
= qpt
->incr
| ((offset
& 1) ^ 1);
598 /* there can be no set bits in low-order QoS bits */
599 WARN_ON(rdi
->dparms
.qos_shift
> 1 &&
600 offset
& ((BIT(rdi
->dparms
.qos_shift
- 1) - 1) << 1));
601 qpn
= mk_qpn(qpt
, map
, offset
);
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: If should clear send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
                rvt_put_ss(&qp->s_rdma_read_sge);

        rvt_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

                        rvt_put_qp_swqe(qp, wqe);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                        smp_wmb(); /* see qp_set_savail */
                }
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}
/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                if (rvt_mr_has_lkey(sge->mr, lkey))
                        return true;
        }
        return false;
}
/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        u32 s_last = qp->s_last;

        while (s_last != qp->s_head) {
                struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

                if (rvt_swqe_has_lkey(wqe, lkey))
                        return true;

                if (++s_last >= qp->s_size)
                        s_last = 0;
        }
        if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
                return true;
        return false;
}
/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        int i;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[i];

                if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
                        return true;
        }
        return false;
}
/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @lkey - the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
        bool lastwqe = false;

        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                /* avoid special QPs */
                return;
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)

        if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
            rvt_qp_sends_has_lkey(qp, lkey) ||
            rvt_qp_acks_has_lkey(qp, lkey))
                lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);

        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}
/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
        unsigned long flags;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (rcu_dereference_protected(rvp->qp[0],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[0], NULL);
        } else if (rcu_dereference_protected(rvp->qp[1],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[1], NULL);
        } else {
                struct rvt_qp *q;
                struct rvt_qp __rcu **qpp;

                qpp = &rdi->qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                                lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
                     qpp = &q->next) {
                        if (q == qp) {
                                RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                                lockdep_is_held(&rdi->qp_dev->qpt_lock)));
                                trace_rvt_qpremove(qp, n);
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: number of request queue entries
 * @node: The NUMA node
 * @udata: True if user data is available, false if not
 *
 * Return: If memory allocation failed, return -ENOMEM
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * their memory.
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
                 struct ib_udata *udata)
{
        if (udata) {
                rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
                /* need kwq with no buffers */
                rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
                rq->kwq->curr_wq = rq->wq->wq;
        } else {
                /* need kwq with buffers */
                rq->kwq =
                        vzalloc_node(sizeof(struct rvt_krwq) + size, node);
                rq->kwq->curr_wq = rq->kwq->wq;
        }

        spin_lock_init(&rq->kwq->p_lock);
        spin_lock_init(&rq->kwq->c_lock);
        return 0;
}
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @qp: the QP to init or reinit
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset path takes
 * the necessary locks to protect against concurrent access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                        enum ib_qp_type type)
{
        qp->qp_access_flags = 0;
        qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_acked_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        qp->r_rq.kwq->count = qp->r_rq.size;
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
}
/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type)
        __must_hold(&qp->s_lock)
        __must_hold(&qp->s_hlock)
        __must_hold(&qp->r_lock)
{
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;

                /* Let drivers flush their waitlist */
                rdi->driver_f.flush_qp_waiters(qp);
                rvt_stop_rc_timers(qp);
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock(&qp->s_hlock);
                spin_unlock_irq(&qp->r_lock);

                /* Stop the send queue and the retry timer */
                rdi->driver_f.stop_send_queue(qp);
                rvt_del_timers_sync(qp);
                /* Wait for things to stop */
                rdi->driver_f.quiesce_qp(qp);

                /* take qp out the hash and wait for it to be unused */
                rvt_remove_qp(rdi, qp);

                /* grab the lock b/c it was locked at call time */
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_hlock);
                spin_lock(&qp->s_lock);

                rvt_clear_mr_refs(qp, 1);
                /*
                 * Let the driver do any tear down or re-init it needs to for
                 * a qp that has been reset
                 */
                rdi->driver_f.notify_qp_reset(qp);
        }
        rvt_init_qp(rdi, qp, type);
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
}
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;

        clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
        return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
                IB_OPCODE_UC : IB_OPCODE_UD;
}
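
/*
 * Illustrative note (not from the original source): IB_OPCODE_RC,
 * IB_OPCODE_UC and IB_OPCODE_UD are the transport prefixes of the wire
 * opcode space, so caching the result in qp->allowed_ops lets later code
 * identify the QP's transport with a simple compare, e.g.:
 *
 *      if (qp->allowed_ops == IB_OPCODE_UD)
 *              // UD-only handling such as the per-WQE AH attribute cache
 */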
/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                kfree(wqe->ud_wr.attr);
                wqe->ud_wr.attr = NULL;
        }
}
/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
        struct rvt_swqe *wqe;
        int i;

        for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
                wqe = rvt_get_swqe_ptr(qp, i);
                wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
                                               GFP_KERNEL, node);
                if (!wqe->ud_wr.attr) {
                        free_ud_wq_attr(qp);
                        return -ENOMEM;
                }
        }

        return 0;
}
1027 * rvt_create_qp - create a queue pair for a device
1028 * @ibpd: the protection domain who's device we create the queue pair for
1029 * @init_attr: the attributes of the queue pair
1030 * @udata: user data for libibverbs.so
1032 * Queue pair creation is mostly an rvt issue. However, drivers have their own
1033 * unique idea of what queue pair numbers mean. For instance there is a reserved
1036 * Return: the queue pair on success, otherwise returns an errno.
1038 * Called by the ib_create_qp() core verbs function.
1040 struct ib_qp
*rvt_create_qp(struct ib_pd
*ibpd
,
1041 struct ib_qp_init_attr
*init_attr
,
1042 struct ib_udata
*udata
)
1046 struct rvt_swqe
*swq
= NULL
;
1049 struct ib_qp
*ret
= ERR_PTR(-ENOMEM
);
1050 struct rvt_dev_info
*rdi
= ib_to_rvt(ibpd
->device
);
1055 return ERR_PTR(-EINVAL
);
1057 if (init_attr
->cap
.max_send_sge
> rdi
->dparms
.props
.max_send_sge
||
1058 init_attr
->cap
.max_send_wr
> rdi
->dparms
.props
.max_qp_wr
||
1059 init_attr
->create_flags
)
1060 return ERR_PTR(-EINVAL
);
1062 /* Check receive queue parameters if no SRQ is specified. */
1063 if (!init_attr
->srq
) {
1064 if (init_attr
->cap
.max_recv_sge
>
1065 rdi
->dparms
.props
.max_recv_sge
||
1066 init_attr
->cap
.max_recv_wr
> rdi
->dparms
.props
.max_qp_wr
)
1067 return ERR_PTR(-EINVAL
);
1069 if (init_attr
->cap
.max_send_sge
+
1070 init_attr
->cap
.max_send_wr
+
1071 init_attr
->cap
.max_recv_sge
+
1072 init_attr
->cap
.max_recv_wr
== 0)
1073 return ERR_PTR(-EINVAL
);
1076 init_attr
->cap
.max_send_wr
+ 1 +
1077 rdi
->dparms
.reserved_operations
;
1078 switch (init_attr
->qp_type
) {
1081 if (init_attr
->port_num
== 0 ||
1082 init_attr
->port_num
> ibpd
->device
->phys_port_cnt
)
1083 return ERR_PTR(-EINVAL
);
1088 sz
= struct_size(swq
, sg_list
, init_attr
->cap
.max_send_sge
);
1089 swq
= vzalloc_node(array_size(sz
, sqsize
), rdi
->dparms
.node
);
1091 return ERR_PTR(-ENOMEM
);
1095 if (init_attr
->srq
) {
1096 struct rvt_srq
*srq
= ibsrq_to_rvtsrq(init_attr
->srq
);
1098 if (srq
->rq
.max_sge
> 1)
1099 sg_list_sz
= sizeof(*qp
->r_sg_list
) *
1100 (srq
->rq
.max_sge
- 1);
1101 } else if (init_attr
->cap
.max_recv_sge
> 1)
1102 sg_list_sz
= sizeof(*qp
->r_sg_list
) *
1103 (init_attr
->cap
.max_recv_sge
- 1);
1104 qp
= kzalloc_node(sz
+ sg_list_sz
, GFP_KERNEL
,
1108 qp
->allowed_ops
= get_allowed_ops(init_attr
->qp_type
);
1110 RCU_INIT_POINTER(qp
->next
, NULL
);
1111 if (init_attr
->qp_type
== IB_QPT_RC
) {
1113 kcalloc_node(rvt_max_atomic(rdi
),
1114 sizeof(*qp
->s_ack_queue
),
1117 if (!qp
->s_ack_queue
)
1120 /* initialize timers needed for rc qp */
1121 timer_setup(&qp
->s_timer
, rvt_rc_timeout
, 0);
1122 hrtimer_init(&qp
->s_rnr_timer
, CLOCK_MONOTONIC
,
1124 qp
->s_rnr_timer
.function
= rvt_rc_rnr_retry
;
1127 * Driver needs to set up it's private QP structure and do any
1128 * initialization that is needed.
1130 priv
= rdi
->driver_f
.qp_priv_alloc(rdi
, qp
);
1136 qp
->timeout_jiffies
=
1137 usecs_to_jiffies((4096UL * (1UL << qp
->timeout
)) /
1139 if (init_attr
->srq
) {
1142 qp
->r_rq
.size
= init_attr
->cap
.max_recv_wr
+ 1;
1143 qp
->r_rq
.max_sge
= init_attr
->cap
.max_recv_sge
;
1144 sz
= (sizeof(struct ib_sge
) * qp
->r_rq
.max_sge
) +
1145 sizeof(struct rvt_rwqe
);
1146 err
= rvt_alloc_rq(&qp
->r_rq
, qp
->r_rq
.size
* sz
,
1147 rdi
->dparms
.node
, udata
);
1150 goto bail_driver_priv
;
1155 * ib_create_qp() will initialize qp->ibqp
1156 * except for qp->ibqp.qp_num.
1158 spin_lock_init(&qp
->r_lock
);
1159 spin_lock_init(&qp
->s_hlock
);
1160 spin_lock_init(&qp
->s_lock
);
1161 atomic_set(&qp
->refcount
, 0);
1162 atomic_set(&qp
->local_ops_pending
, 0);
1163 init_waitqueue_head(&qp
->wait
);
1164 INIT_LIST_HEAD(&qp
->rspwait
);
1165 qp
->state
= IB_QPS_RESET
;
1167 qp
->s_size
= sqsize
;
1168 qp
->s_avail
= init_attr
->cap
.max_send_wr
;
1169 qp
->s_max_sge
= init_attr
->cap
.max_send_sge
;
1170 if (init_attr
->sq_sig_type
== IB_SIGNAL_REQ_WR
)
1171 qp
->s_flags
= RVT_S_SIGNAL_REQ_WR
;
1172 err
= alloc_ud_wq_attr(qp
, rdi
->dparms
.node
);
1174 ret
= (ERR_PTR(err
));
1175 goto bail_driver_priv
;
1178 err
= alloc_qpn(rdi
, &rdi
->qp_dev
->qpn_table
,
1180 init_attr
->port_num
);
1185 qp
->ibqp
.qp_num
= err
;
1186 qp
->port_num
= init_attr
->port_num
;
1187 rvt_init_qp(rdi
, qp
, init_attr
->qp_type
);
1188 if (rdi
->driver_f
.qp_priv_init
) {
1189 err
= rdi
->driver_f
.qp_priv_init(rdi
, qp
, init_attr
);
1198 /* Don't support raw QPs */
1199 return ERR_PTR(-EINVAL
);
1202 init_attr
->cap
.max_inline_data
= 0;
1205 * Return the address of the RWQ as the offset to mmap.
1206 * See rvt_mmap() for details.
1208 if (udata
&& udata
->outlen
>= sizeof(__u64
)) {
1212 err
= ib_copy_to_udata(udata
, &offset
,
1219 u32 s
= sizeof(struct rvt_rwq
) + qp
->r_rq
.size
* sz
;
1221 qp
->ip
= rvt_create_mmap_info(rdi
, s
, udata
,
1224 ret
= ERR_PTR(-ENOMEM
);
1228 err
= ib_copy_to_udata(udata
, &qp
->ip
->offset
,
1229 sizeof(qp
->ip
->offset
));
1235 qp
->pid
= current
->pid
;
1238 spin_lock(&rdi
->n_qps_lock
);
1239 if (rdi
->n_qps_allocated
== rdi
->dparms
.props
.max_qp
) {
1240 spin_unlock(&rdi
->n_qps_lock
);
1241 ret
= ERR_PTR(-ENOMEM
);
1245 rdi
->n_qps_allocated
++;
1247 * Maintain a busy_jiffies variable that will be added to the timeout
1248 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1249 * is scaled by the number of rc qps created for the device to reduce
1250 * the number of timeouts occurring when there is a large number of
1251 * qps. busy_jiffies is incremented every rc qp scaling interval.
1252 * The scaling interval is selected based on extensive performance
1253 * evaluation of targeted workloads.
1255 if (init_attr
->qp_type
== IB_QPT_RC
) {
1257 rdi
->busy_jiffies
= rdi
->n_rc_qps
/ RC_QP_SCALING_INTERVAL
;
1259 spin_unlock(&rdi
->n_qps_lock
);
1262 spin_lock_irq(&rdi
->pending_lock
);
1263 list_add(&qp
->ip
->pending_mmaps
, &rdi
->pending_mmaps
);
1264 spin_unlock_irq(&rdi
->pending_lock
);
1273 kref_put(&qp
->ip
->ref
, rvt_release_mmap_info
);
1276 rvt_free_qpn(&rdi
->qp_dev
->qpn_table
, qp
->ibqp
.qp_num
);
1279 rvt_free_rq(&qp
->r_rq
);
1280 free_ud_wq_attr(qp
);
1283 rdi
->driver_f
.qp_priv_free(rdi
, qp
);
1286 kfree(qp
->s_ack_queue
);
1296 * rvt_error_qp - put a QP into the error state
1297 * @qp: the QP to put into the error state
1298 * @err: the receive completion error to signal if a RWQE is active
1300 * Flushes both send and receive work queues.
1302 * Return: true if last WQE event should be generated.
1303 * The QP r_lock and s_lock should be held and interrupts disabled.
1304 * If we are already in error state, just return.
1306 int rvt_error_qp(struct rvt_qp
*qp
, enum ib_wc_status err
)
1310 struct rvt_dev_info
*rdi
= ib_to_rvt(qp
->ibqp
.device
);
1312 lockdep_assert_held(&qp
->r_lock
);
1313 lockdep_assert_held(&qp
->s_lock
);
1314 if (qp
->state
== IB_QPS_ERR
|| qp
->state
== IB_QPS_RESET
)
1317 qp
->state
= IB_QPS_ERR
;
1319 if (qp
->s_flags
& (RVT_S_TIMER
| RVT_S_WAIT_RNR
)) {
1320 qp
->s_flags
&= ~(RVT_S_TIMER
| RVT_S_WAIT_RNR
);
1321 del_timer(&qp
->s_timer
);
1324 if (qp
->s_flags
& RVT_S_ANY_WAIT_SEND
)
1325 qp
->s_flags
&= ~RVT_S_ANY_WAIT_SEND
;
1327 rdi
->driver_f
.notify_error_qp(qp
);
1329 /* Schedule the sending tasklet to drain the send work queue. */
1330 if (READ_ONCE(qp
->s_last
) != qp
->s_head
)
1331 rdi
->driver_f
.schedule_send(qp
);
1333 rvt_clear_mr_refs(qp
, 0);
1335 memset(&wc
, 0, sizeof(wc
));
1337 wc
.opcode
= IB_WC_RECV
;
1339 if (test_and_clear_bit(RVT_R_WRID_VALID
, &qp
->r_aflags
)) {
1340 wc
.wr_id
= qp
->r_wr_id
;
1342 rvt_cq_enter(ibcq_to_rvtcq(qp
->ibqp
.recv_cq
), &wc
, 1);
1344 wc
.status
= IB_WC_WR_FLUSH_ERR
;
1349 struct rvt_rwq
*wq
= NULL
;
1350 struct rvt_krwq
*kwq
= NULL
;
1352 spin_lock(&qp
->r_rq
.kwq
->c_lock
);
1353 /* qp->ip used to validate if there is a user buffer mmaped */
1356 head
= RDMA_READ_UAPI_ATOMIC(wq
->head
);
1357 tail
= RDMA_READ_UAPI_ATOMIC(wq
->tail
);
1363 /* sanity check pointers before trusting them */
1364 if (head
>= qp
->r_rq
.size
)
1366 if (tail
>= qp
->r_rq
.size
)
1368 while (tail
!= head
) {
1369 wc
.wr_id
= rvt_get_rwqe_ptr(&qp
->r_rq
, tail
)->wr_id
;
1370 if (++tail
>= qp
->r_rq
.size
)
1372 rvt_cq_enter(ibcq_to_rvtcq(qp
->ibqp
.recv_cq
), &wc
, 1);
1375 RDMA_WRITE_UAPI_ATOMIC(wq
->tail
, tail
);
1378 spin_unlock(&qp
->r_rq
.kwq
->c_lock
);
1379 } else if (qp
->ibqp
.event_handler
) {
1386 EXPORT_SYMBOL(rvt_error_qp
);
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        unsigned long flags;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (qp->ibqp.qp_num <= 1) {
                rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
        } else {
                u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

                qp->next = rdi->qp_dev->qp_table[n];
                rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
                trace_rvt_qpinsert(qp, n);
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
1414 * rvt_modify_qp - modify the attributes of a queue pair
1415 * @ibqp: the queue pair who's attributes we're modifying
1416 * @attr: the new attributes
1417 * @attr_mask: the mask of attributes to modify
1418 * @udata: user data for libibverbs.so
1420 * Return: 0 on success, otherwise returns an errno.
1422 int rvt_modify_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
1423 int attr_mask
, struct ib_udata
*udata
)
1425 struct rvt_dev_info
*rdi
= ib_to_rvt(ibqp
->device
);
1426 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
1427 enum ib_qp_state cur_state
, new_state
;
1431 int pmtu
= 0; /* for gcc warning only */
1434 spin_lock_irq(&qp
->r_lock
);
1435 spin_lock(&qp
->s_hlock
);
1436 spin_lock(&qp
->s_lock
);
1438 cur_state
= attr_mask
& IB_QP_CUR_STATE
?
1439 attr
->cur_qp_state
: qp
->state
;
1440 new_state
= attr_mask
& IB_QP_STATE
? attr
->qp_state
: cur_state
;
1441 opa_ah
= rdma_cap_opa_ah(ibqp
->device
, qp
->port_num
);
1443 if (!ib_modify_qp_is_ok(cur_state
, new_state
, ibqp
->qp_type
,
1447 if (rdi
->driver_f
.check_modify_qp
&&
1448 rdi
->driver_f
.check_modify_qp(qp
, attr
, attr_mask
, udata
))
1451 if (attr_mask
& IB_QP_AV
) {
1453 if (rdma_ah_get_dlid(&attr
->ah_attr
) >=
1454 opa_get_mcast_base(OPA_MCAST_NR
))
1457 if (rdma_ah_get_dlid(&attr
->ah_attr
) >=
1458 be16_to_cpu(IB_MULTICAST_LID_BASE
))
1462 if (rvt_check_ah(qp
->ibqp
.device
, &attr
->ah_attr
))
1466 if (attr_mask
& IB_QP_ALT_PATH
) {
1468 if (rdma_ah_get_dlid(&attr
->alt_ah_attr
) >=
1469 opa_get_mcast_base(OPA_MCAST_NR
))
1472 if (rdma_ah_get_dlid(&attr
->alt_ah_attr
) >=
1473 be16_to_cpu(IB_MULTICAST_LID_BASE
))
1477 if (rvt_check_ah(qp
->ibqp
.device
, &attr
->alt_ah_attr
))
1479 if (attr
->alt_pkey_index
>= rvt_get_npkeys(rdi
))
1483 if (attr_mask
& IB_QP_PKEY_INDEX
)
1484 if (attr
->pkey_index
>= rvt_get_npkeys(rdi
))
1487 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
1488 if (attr
->min_rnr_timer
> 31)
1491 if (attr_mask
& IB_QP_PORT
)
1492 if (qp
->ibqp
.qp_type
== IB_QPT_SMI
||
1493 qp
->ibqp
.qp_type
== IB_QPT_GSI
||
1494 attr
->port_num
== 0 ||
1495 attr
->port_num
> ibqp
->device
->phys_port_cnt
)
1498 if (attr_mask
& IB_QP_DEST_QPN
)
1499 if (attr
->dest_qp_num
> RVT_QPN_MASK
)
1502 if (attr_mask
& IB_QP_RETRY_CNT
)
1503 if (attr
->retry_cnt
> 7)
1506 if (attr_mask
& IB_QP_RNR_RETRY
)
1507 if (attr
->rnr_retry
> 7)
1511 * Don't allow invalid path_mtu values. OK to set greater
1512 * than the active mtu (or even the max_cap, if we have tuned
1513 * that to a small mtu. We'll set qp->path_mtu
1514 * to the lesser of requested attribute mtu and active,
1515 * for packetizing messages.
1516 * Note that the QP port has to be set in INIT and MTU in RTR.
1518 if (attr_mask
& IB_QP_PATH_MTU
) {
1519 pmtu
= rdi
->driver_f
.get_pmtu_from_attr(rdi
, qp
, attr
);
1524 if (attr_mask
& IB_QP_PATH_MIG_STATE
) {
1525 if (attr
->path_mig_state
== IB_MIG_REARM
) {
1526 if (qp
->s_mig_state
== IB_MIG_ARMED
)
1528 if (new_state
!= IB_QPS_RTS
)
1530 } else if (attr
->path_mig_state
== IB_MIG_MIGRATED
) {
1531 if (qp
->s_mig_state
== IB_MIG_REARM
)
1533 if (new_state
!= IB_QPS_RTS
&& new_state
!= IB_QPS_SQD
)
1535 if (qp
->s_mig_state
== IB_MIG_ARMED
)
1542 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1543 if (attr
->max_dest_rd_atomic
> rdi
->dparms
.max_rdma_atomic
)
1546 switch (new_state
) {
1548 if (qp
->state
!= IB_QPS_RESET
)
1549 rvt_reset_qp(rdi
, qp
, ibqp
->qp_type
);
1553 /* Allow event to re-trigger if QP set to RTR more than once */
1554 qp
->r_flags
&= ~RVT_R_COMM_EST
;
1555 qp
->state
= new_state
;
1559 qp
->s_draining
= qp
->s_last
!= qp
->s_cur
;
1560 qp
->state
= new_state
;
1564 if (qp
->ibqp
.qp_type
== IB_QPT_RC
)
1566 qp
->state
= new_state
;
1570 lastwqe
= rvt_error_qp(qp
, IB_WC_WR_FLUSH_ERR
);
1574 qp
->state
= new_state
;
1578 if (attr_mask
& IB_QP_PKEY_INDEX
)
1579 qp
->s_pkey_index
= attr
->pkey_index
;
1581 if (attr_mask
& IB_QP_PORT
)
1582 qp
->port_num
= attr
->port_num
;
1584 if (attr_mask
& IB_QP_DEST_QPN
)
1585 qp
->remote_qpn
= attr
->dest_qp_num
;
1587 if (attr_mask
& IB_QP_SQ_PSN
) {
1588 qp
->s_next_psn
= attr
->sq_psn
& rdi
->dparms
.psn_modify_mask
;
1589 qp
->s_psn
= qp
->s_next_psn
;
1590 qp
->s_sending_psn
= qp
->s_next_psn
;
1591 qp
->s_last_psn
= qp
->s_next_psn
- 1;
1592 qp
->s_sending_hpsn
= qp
->s_last_psn
;
1595 if (attr_mask
& IB_QP_RQ_PSN
)
1596 qp
->r_psn
= attr
->rq_psn
& rdi
->dparms
.psn_modify_mask
;
1598 if (attr_mask
& IB_QP_ACCESS_FLAGS
)
1599 qp
->qp_access_flags
= attr
->qp_access_flags
;
1601 if (attr_mask
& IB_QP_AV
) {
1602 rdma_replace_ah_attr(&qp
->remote_ah_attr
, &attr
->ah_attr
);
1603 qp
->s_srate
= rdma_ah_get_static_rate(&attr
->ah_attr
);
1604 qp
->srate_mbps
= ib_rate_to_mbps(qp
->s_srate
);
1607 if (attr_mask
& IB_QP_ALT_PATH
) {
1608 rdma_replace_ah_attr(&qp
->alt_ah_attr
, &attr
->alt_ah_attr
);
1609 qp
->s_alt_pkey_index
= attr
->alt_pkey_index
;
1612 if (attr_mask
& IB_QP_PATH_MIG_STATE
) {
1613 qp
->s_mig_state
= attr
->path_mig_state
;
1615 qp
->remote_ah_attr
= qp
->alt_ah_attr
;
1616 qp
->port_num
= rdma_ah_get_port_num(&qp
->alt_ah_attr
);
1617 qp
->s_pkey_index
= qp
->s_alt_pkey_index
;
1621 if (attr_mask
& IB_QP_PATH_MTU
) {
1622 qp
->pmtu
= rdi
->driver_f
.mtu_from_qp(rdi
, qp
, pmtu
);
1623 qp
->log_pmtu
= ilog2(qp
->pmtu
);
1626 if (attr_mask
& IB_QP_RETRY_CNT
) {
1627 qp
->s_retry_cnt
= attr
->retry_cnt
;
1628 qp
->s_retry
= attr
->retry_cnt
;
1631 if (attr_mask
& IB_QP_RNR_RETRY
) {
1632 qp
->s_rnr_retry_cnt
= attr
->rnr_retry
;
1633 qp
->s_rnr_retry
= attr
->rnr_retry
;
1636 if (attr_mask
& IB_QP_MIN_RNR_TIMER
)
1637 qp
->r_min_rnr_timer
= attr
->min_rnr_timer
;
1639 if (attr_mask
& IB_QP_TIMEOUT
) {
1640 qp
->timeout
= attr
->timeout
;
1641 qp
->timeout_jiffies
= rvt_timeout_to_jiffies(qp
->timeout
);
1644 if (attr_mask
& IB_QP_QKEY
)
1645 qp
->qkey
= attr
->qkey
;
1647 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
)
1648 qp
->r_max_rd_atomic
= attr
->max_dest_rd_atomic
;
1650 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
)
1651 qp
->s_max_rd_atomic
= attr
->max_rd_atomic
;
1653 if (rdi
->driver_f
.modify_qp
)
1654 rdi
->driver_f
.modify_qp(qp
, attr
, attr_mask
, udata
);
1656 spin_unlock(&qp
->s_lock
);
1657 spin_unlock(&qp
->s_hlock
);
1658 spin_unlock_irq(&qp
->r_lock
);
1660 if (cur_state
== IB_QPS_RESET
&& new_state
== IB_QPS_INIT
)
1661 rvt_insert_qp(rdi
, qp
);
1664 ev
.device
= qp
->ibqp
.device
;
1665 ev
.element
.qp
= &qp
->ibqp
;
1666 ev
.event
= IB_EVENT_QP_LAST_WQE_REACHED
;
1667 qp
->ibqp
.event_handler(&ev
, qp
->ibqp
.qp_context
);
1670 ev
.device
= qp
->ibqp
.device
;
1671 ev
.element
.qp
= &qp
->ibqp
;
1672 ev
.event
= IB_EVENT_PATH_MIG
;
1673 qp
->ibqp
.event_handler(&ev
, qp
->ibqp
.qp_context
);
1678 spin_unlock(&qp
->s_lock
);
1679 spin_unlock(&qp
->s_hlock
);
1680 spin_unlock_irq(&qp
->r_lock
);
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        rvt_reset_qp(rdi, qp, ibqp->qp_type);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);

        wait_event(qp->wait, !atomic_read(&qp->refcount));
        /* qpn is now available for use again */
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

        spin_lock(&rdi->n_qps_lock);
        rdi->n_qps_allocated--;
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                rdi->n_rc_qps--;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);
        kvfree(qp->r_rq.kwq);
        rdi->driver_f.qp_priv_free(rdi, qp);
        kfree(qp->s_ack_queue);
        rdma_destroy_ah_attr(&qp->remote_ah_attr);
        rdma_destroy_ah_attr(&qp->alt_ah_attr);
        free_ud_wq_attr(qp);
}
1732 * rvt_query_qp - query an ipbq
1733 * @ibqp: IB qp to query
1734 * @attr: attr struct to fill in
1735 * @attr_mask: attr mask ignored
1736 * @init_attr: struct to fill in
1740 int rvt_query_qp(struct ib_qp
*ibqp
, struct ib_qp_attr
*attr
,
1741 int attr_mask
, struct ib_qp_init_attr
*init_attr
)
1743 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
1744 struct rvt_dev_info
*rdi
= ib_to_rvt(ibqp
->device
);
1746 attr
->qp_state
= qp
->state
;
1747 attr
->cur_qp_state
= attr
->qp_state
;
1748 attr
->path_mtu
= rdi
->driver_f
.mtu_to_path_mtu(qp
->pmtu
);
1749 attr
->path_mig_state
= qp
->s_mig_state
;
1750 attr
->qkey
= qp
->qkey
;
1751 attr
->rq_psn
= qp
->r_psn
& rdi
->dparms
.psn_mask
;
1752 attr
->sq_psn
= qp
->s_next_psn
& rdi
->dparms
.psn_mask
;
1753 attr
->dest_qp_num
= qp
->remote_qpn
;
1754 attr
->qp_access_flags
= qp
->qp_access_flags
;
1755 attr
->cap
.max_send_wr
= qp
->s_size
- 1 -
1756 rdi
->dparms
.reserved_operations
;
1757 attr
->cap
.max_recv_wr
= qp
->ibqp
.srq
? 0 : qp
->r_rq
.size
- 1;
1758 attr
->cap
.max_send_sge
= qp
->s_max_sge
;
1759 attr
->cap
.max_recv_sge
= qp
->r_rq
.max_sge
;
1760 attr
->cap
.max_inline_data
= 0;
1761 attr
->ah_attr
= qp
->remote_ah_attr
;
1762 attr
->alt_ah_attr
= qp
->alt_ah_attr
;
1763 attr
->pkey_index
= qp
->s_pkey_index
;
1764 attr
->alt_pkey_index
= qp
->s_alt_pkey_index
;
1765 attr
->en_sqd_async_notify
= 0;
1766 attr
->sq_draining
= qp
->s_draining
;
1767 attr
->max_rd_atomic
= qp
->s_max_rd_atomic
;
1768 attr
->max_dest_rd_atomic
= qp
->r_max_rd_atomic
;
1769 attr
->min_rnr_timer
= qp
->r_min_rnr_timer
;
1770 attr
->port_num
= qp
->port_num
;
1771 attr
->timeout
= qp
->timeout
;
1772 attr
->retry_cnt
= qp
->s_retry_cnt
;
1773 attr
->rnr_retry
= qp
->s_rnr_retry_cnt
;
1774 attr
->alt_port_num
=
1775 rdma_ah_get_port_num(&qp
->alt_ah_attr
);
1776 attr
->alt_timeout
= qp
->alt_timeout
;
1778 init_attr
->event_handler
= qp
->ibqp
.event_handler
;
1779 init_attr
->qp_context
= qp
->ibqp
.qp_context
;
1780 init_attr
->send_cq
= qp
->ibqp
.send_cq
;
1781 init_attr
->recv_cq
= qp
->ibqp
.recv_cq
;
1782 init_attr
->srq
= qp
->ibqp
.srq
;
1783 init_attr
->cap
= attr
->cap
;
1784 if (qp
->s_flags
& RVT_S_SIGNAL_REQ_WR
)
1785 init_attr
->sq_sig_type
= IB_SIGNAL_REQ_WR
;
1787 init_attr
->sq_sig_type
= IB_SIGNAL_ALL_WR
;
1788 init_attr
->qp_type
= qp
->ibqp
.qp_type
;
1789 init_attr
->port_num
= qp
->port_num
;
1794 * rvt_post_receive - post a receive on a QP
1795 * @ibqp: the QP to post the receive on
1796 * @wr: the WR to post
1797 * @bad_wr: the first bad WR is put here
1799 * This may be called from interrupt context.
1801 * Return: 0 on success otherwise errno
1803 int rvt_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
1804 const struct ib_recv_wr
**bad_wr
)
1806 struct rvt_qp
*qp
= ibqp_to_rvtqp(ibqp
);
1807 struct rvt_krwq
*wq
= qp
->r_rq
.kwq
;
1808 unsigned long flags
;
1809 int qp_err_flush
= (ib_rvt_state_ops
[qp
->state
] & RVT_FLUSH_RECV
) &&
1812 /* Check that state is OK to post receive. */
1813 if (!(ib_rvt_state_ops
[qp
->state
] & RVT_POST_RECV_OK
) || !wq
) {
1818 for (; wr
; wr
= wr
->next
) {
1819 struct rvt_rwqe
*wqe
;
1823 if ((unsigned)wr
->num_sge
> qp
->r_rq
.max_sge
) {
1828 spin_lock_irqsave(&qp
->r_rq
.kwq
->p_lock
, flags
);
1829 next
= wq
->head
+ 1;
1830 if (next
>= qp
->r_rq
.size
)
1832 if (next
== READ_ONCE(wq
->tail
)) {
1833 spin_unlock_irqrestore(&qp
->r_rq
.kwq
->p_lock
, flags
);
1837 if (unlikely(qp_err_flush
)) {
1840 memset(&wc
, 0, sizeof(wc
));
1842 wc
.opcode
= IB_WC_RECV
;
1843 wc
.wr_id
= wr
->wr_id
;
1844 wc
.status
= IB_WC_WR_FLUSH_ERR
;
1845 rvt_cq_enter(ibcq_to_rvtcq(qp
->ibqp
.recv_cq
), &wc
, 1);
1847 wqe
= rvt_get_rwqe_ptr(&qp
->r_rq
, wq
->head
);
1848 wqe
->wr_id
= wr
->wr_id
;
1849 wqe
->num_sge
= wr
->num_sge
;
1850 for (i
= 0; i
< wr
->num_sge
; i
++) {
1851 wqe
->sg_list
[i
].addr
= wr
->sg_list
[i
].addr
;
1852 wqe
->sg_list
[i
].length
= wr
->sg_list
[i
].length
;
1853 wqe
->sg_list
[i
].lkey
= wr
->sg_list
[i
].lkey
;
1856 * Make sure queue entry is written
1857 * before the head index.
1859 smp_store_release(&wq
->head
, next
);
1861 spin_unlock_irqrestore(&qp
->r_rq
.kwq
->p_lock
, flags
);
1867 * rvt_qp_valid_operation - validate post send wr request
1869 * @post-parms - the post send table for the driver
1870 * @wr - the work request
1872 * The routine validates the operation based on the
1873 * validation table an returns the length of the operation
1874 * which can extend beyond the ib_send_bw. Operation
1875 * dependent flags key atomic operation validation.
1877 * There is an exception for UD qps that validates the pd and
1878 * overrides the length to include the additional UD specific
1881 * Returns a negative error or the length of the work request
1882 * for building the swqe.
1884 static inline int rvt_qp_valid_operation(
1886 const struct rvt_operation_params
*post_parms
,
1887 const struct ib_send_wr
*wr
)
1891 if (wr
->opcode
>= RVT_OPERATION_MAX
|| !post_parms
[wr
->opcode
].length
)
1893 if (!(post_parms
[wr
->opcode
].qpt_support
& BIT(qp
->ibqp
.qp_type
)))
1895 if ((post_parms
[wr
->opcode
].flags
& RVT_OPERATION_PRIV
) &&
1896 ibpd_to_rvtpd(qp
->ibqp
.pd
)->user
)
1898 if (post_parms
[wr
->opcode
].flags
& RVT_OPERATION_ATOMIC_SGE
&&
1899 (wr
->num_sge
== 0 ||
1900 wr
->sg_list
[0].length
< sizeof(u64
) ||
1901 wr
->sg_list
[0].addr
& (sizeof(u64
) - 1)))
1903 if (post_parms
[wr
->opcode
].flags
& RVT_OPERATION_ATOMIC
&&
1904 !qp
->s_max_rd_atomic
)
1906 len
= post_parms
[wr
->opcode
].length
;
1908 if (qp
->ibqp
.qp_type
!= IB_QPT_UC
&&
1909 qp
->ibqp
.qp_type
!= IB_QPT_RC
) {
1910 if (qp
->ibqp
.pd
!= ud_wr(wr
)->ah
->pd
)
1912 len
= sizeof(struct ib_ud_wr
);
1918 * rvt_qp_is_avail - determine queue capacity
1920 * @rdi: the rdmavt device
1921 * @reserved_op: is reserved operation
1923 * This assumes the s_hlock is held but the s_last
1924 * qp variable is uncontrolled.
1926 * For non reserved operations, the qp->s_avail
1929 * The return value is zero or a -ENOMEM.
1931 static inline int rvt_qp_is_avail(
1933 struct rvt_dev_info
*rdi
,
1940 /* see rvt_qp_wqe_unreserve() */
1941 smp_mb__before_atomic();
1942 if (unlikely(reserved_op
)) {
1943 /* see rvt_qp_wqe_unreserve() */
1944 reserved_used
= atomic_read(&qp
->s_reserved_used
);
1945 if (reserved_used
>= rdi
->dparms
.reserved_operations
)
1949 /* non-reserved operations */
1950 if (likely(qp
->s_avail
))
1952 /* See rvt_qp_complete_swqe() */
1953 slast
= smp_load_acquire(&qp
->s_last
);
1954 if (qp
->s_head
>= slast
)
1955 avail
= qp
->s_size
- (qp
->s_head
- slast
);
1957 avail
= slast
- qp
->s_head
;
1959 reserved_used
= atomic_read(&qp
->s_reserved_used
);
1961 (rdi
->dparms
.reserved_operations
- reserved_used
);
1962 /* insure we don't assign a negative s_avail */
1963 if ((s32
)avail
<= 0)
1965 qp
->s_avail
= avail
;
1966 if (WARN_ON(qp
->s_avail
>
1967 (qp
->s_size
- 1 - rdi
->dparms
.reserved_operations
)))
1969 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1970 qp
->ibqp
.qp_num
, qp
->s_size
, qp
->s_avail
,
1971 qp
->s_head
, qp
->s_tail
, qp
->s_cur
,
1972 qp
->s_acked
, qp
->s_last
);
1977 * rvt_post_one_wr - post one RC, UC, or UD send work request
1978 * @qp: the QP to post on
1979 * @wr: the work request to send
1981 static int rvt_post_one_wr(struct rvt_qp
*qp
,
1982 const struct ib_send_wr
*wr
,
1985 struct rvt_swqe
*wqe
;
1990 struct rvt_lkey_table
*rkt
;
1992 struct rvt_dev_info
*rdi
= ib_to_rvt(qp
->ibqp
.device
);
1997 int local_ops_delayed
= 0;
1999 BUILD_BUG_ON(IB_QPT_MAX
>= (sizeof(u32
) * BITS_PER_BYTE
));
2001 /* IB spec says that num_sge == 0 is OK. */
2002 if (unlikely(wr
->num_sge
> qp
->s_max_sge
))
2005 ret
= rvt_qp_valid_operation(qp
, rdi
->post_parms
, wr
);
2011 * Local operations include fast register and local invalidate.
2012 * Fast register needs to be processed immediately because the
2013 * registered lkey may be used by following work requests and the
2014 * lkey needs to be valid at the time those requests are posted.
2015 * Local invalidate can be processed immediately if fencing is
2016 * not required and no previous local invalidate ops are pending.
2017 * Signaled local operations that have been processed immediately
2018 * need to have requests with "completion only" flags set posted
2019 * to the send queue in order to generate completions.
2021 if ((rdi
->post_parms
[wr
->opcode
].flags
& RVT_OPERATION_LOCAL
)) {
2022 switch (wr
->opcode
) {
2024 ret
= rvt_fast_reg_mr(qp
,
2027 reg_wr(wr
)->access
);
2028 if (ret
|| !(wr
->send_flags
& IB_SEND_SIGNALED
))
2031 case IB_WR_LOCAL_INV
:
2032 if ((wr
->send_flags
& IB_SEND_FENCE
) ||
2033 atomic_read(&qp
->local_ops_pending
)) {
2034 local_ops_delayed
= 1;
2036 ret
= rvt_invalidate_rkey(
2037 qp
, wr
->ex
.invalidate_rkey
);
2038 if (ret
|| !(wr
->send_flags
& IB_SEND_SIGNALED
))
2047 reserved_op
= rdi
->post_parms
[wr
->opcode
].flags
&
2048 RVT_OPERATION_USE_RESERVE
;
2049 /* check for avail */
2050 ret
= rvt_qp_is_avail(qp
, rdi
, reserved_op
);
2053 next
= qp
->s_head
+ 1;
2054 if (next
>= qp
->s_size
)
2057 rkt
= &rdi
->lkey_table
;
2058 pd
= ibpd_to_rvtpd(qp
->ibqp
.pd
);
2059 wqe
= rvt_get_swqe_ptr(qp
, qp
->s_head
);
2061 /* cplen has length from above */
2062 memcpy(&wqe
->wr
, wr
, cplen
);
2067 struct rvt_sge
*last_sge
= NULL
;
2069 acc
= wr
->opcode
>= IB_WR_RDMA_READ
?
2070 IB_ACCESS_LOCAL_WRITE
: 0;
2071 for (i
= 0; i
< wr
->num_sge
; i
++) {
2072 u32 length
= wr
->sg_list
[i
].length
;
2076 ret
= rvt_lkey_ok(rkt
, pd
, &wqe
->sg_list
[j
], last_sge
,
2077 &wr
->sg_list
[i
], acc
);
2078 if (unlikely(ret
< 0))
2079 goto bail_inval_free
;
2080 wqe
->length
+= length
;
2082 last_sge
= &wqe
->sg_list
[j
];
2085 wqe
->wr
.num_sge
= j
;
2089 * Calculate and set SWQE PSN values prior to handing it off
2090 * to the driver's check routine. This give the driver the
2091 * opportunity to adjust PSN values based on internal checks.
2093 log_pmtu
= qp
->log_pmtu
;
2094 if (qp
->allowed_ops
== IB_OPCODE_UD
) {
2095 struct rvt_ah
*ah
= rvt_get_swqe_ah(wqe
);
2097 log_pmtu
= ah
->log_pmtu
;
2098 rdma_copy_ah_attr(wqe
->ud_wr
.attr
, &ah
->attr
);
2101 if (rdi
->post_parms
[wr
->opcode
].flags
& RVT_OPERATION_LOCAL
) {
2102 if (local_ops_delayed
)
2103 atomic_inc(&qp
->local_ops_pending
);
2105 wqe
->wr
.send_flags
|= RVT_SEND_COMPLETION_ONLY
;
2110 wqe
->ssn
= qp
->s_ssn
++;
2111 wqe
->psn
= qp
->s_next_psn
;
2112 wqe
->lpsn
= wqe
->psn
+
2114 ((wqe
->length
- 1) >> log_pmtu
) :
2118 /* general part of wqe valid - allow for driver checks */
2119 if (rdi
->driver_f
.setup_wqe
) {
2120 ret
= rdi
->driver_f
.setup_wqe(qp
, wqe
, call_send
);
2122 goto bail_inval_free_ref
;
2125 if (!(rdi
->post_parms
[wr
->opcode
].flags
& RVT_OPERATION_LOCAL
))
2126 qp
->s_next_psn
= wqe
->lpsn
+ 1;
2128 if (unlikely(reserved_op
)) {
2129 wqe
->wr
.send_flags
|= RVT_SEND_RESERVE_USED
;
2130 rvt_qp_wqe_reserve(qp
, wqe
);
2132 wqe
->wr
.send_flags
&= ~RVT_SEND_RESERVE_USED
;
2135 trace_rvt_post_one_wr(qp
, wqe
, wr
->num_sge
);
2136 smp_wmb(); /* see request builders */
2141 bail_inval_free_ref
:
2142 if (qp
->allowed_ops
== IB_OPCODE_UD
)
2143 rdma_destroy_ah_attr(wqe
->ud_wr
.attr
);
2145 /* release mr holds */
2147 struct rvt_sge
*sge
= &wqe
->sg_list
[--j
];
2149 rvt_put_mr(sge
->mr
);
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                  const struct ib_send_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        unsigned long flags = 0;

        spin_lock_irqsave(&qp->s_hlock, flags);

        /*
         * Ensure QP state is such that we can send. If not bail out early,
         * there is no need to do this every time we post a send.
         */
        if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
                spin_unlock_irqrestore(&qp->s_hlock, flags);
                *bad_wr = wr;
                return -EINVAL;
        }

        /*
         * If the send queue is empty, and we only have a single WR then just go
         * ahead and kick the send engine into gear. Otherwise we will always
         * just schedule the send to happen later.
         */
        call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

        for (; wr; wr = wr->next) {
                err = rvt_post_one_wr(qp, wr, &call_send);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        break;
                }
                nreq++;
        }

        spin_unlock_irqrestore(&qp->s_hlock, flags);

        /*
         * Only call do_send if there is exactly one packet, and the
         * driver said it was ok.
         */
        if (nreq == 1 && call_send)
                rdi->driver_f.do_send(qp);
        else
                rdi->driver_f.schedule_send_no_lock(qp);

        return err;
}
2216 * rvt_post_srq_receive - post a receive on a shared receive queue
2217 * @ibsrq: the SRQ to post the receive on
2218 * @wr: the list of work requests to post
2219 * @bad_wr: A pointer to the first WR to cause a problem is put here
2221 * This may be called from interrupt context.
2223 * Return: 0 on success else errno
2225 int rvt_post_srq_recv(struct ib_srq
*ibsrq
, const struct ib_recv_wr
*wr
,
2226 const struct ib_recv_wr
**bad_wr
)
2228 struct rvt_srq
*srq
= ibsrq_to_rvtsrq(ibsrq
);
2229 struct rvt_krwq
*wq
;
2230 unsigned long flags
;
2232 for (; wr
; wr
= wr
->next
) {
2233 struct rvt_rwqe
*wqe
;
2237 if ((unsigned)wr
->num_sge
> srq
->rq
.max_sge
) {
2242 spin_lock_irqsave(&srq
->rq
.kwq
->p_lock
, flags
);
2244 next
= wq
->head
+ 1;
2245 if (next
>= srq
->rq
.size
)
2247 if (next
== READ_ONCE(wq
->tail
)) {
2248 spin_unlock_irqrestore(&srq
->rq
.kwq
->p_lock
, flags
);
2253 wqe
= rvt_get_rwqe_ptr(&srq
->rq
, wq
->head
);
2254 wqe
->wr_id
= wr
->wr_id
;
2255 wqe
->num_sge
= wr
->num_sge
;
2256 for (i
= 0; i
< wr
->num_sge
; i
++) {
2257 wqe
->sg_list
[i
].addr
= wr
->sg_list
[i
].addr
;
2258 wqe
->sg_list
[i
].length
= wr
->sg_list
[i
].length
;
2259 wqe
->sg_list
[i
].lkey
= wr
->sg_list
[i
].lkey
;
2261 /* Make sure queue entry is written before the head index. */
2262 smp_store_release(&wq
->head
, next
);
2263 spin_unlock_irqrestore(&srq
->rq
.kwq
->p_lock
, flags
);
/*
 * rvt used the internal kernel struct as part of its ABI, for now make sure
 * the kernel struct does not change layout. FIXME: rvt should never cast the
 * user struct to a kernel struct.
 */
static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
{
        BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
                     offsetof(struct rvt_wqe_sge, addr));
        BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
                     offsetof(struct rvt_wqe_sge, length));
        BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
                     offsetof(struct rvt_wqe_sge, lkey));
        return (struct ib_sge *)sge;
}
/*
 * Validate a RWQE and fill in the SGE state.
 *
 * Return: 1 on success, 0 if a protection error completion was generated.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, rvt_cast_sge(&wqe->sg_list[i]),
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	return 1;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	return 0;
}
/**
 * get_count - count the number of request work queue entries
 * in the circular buffer
 * @rq: data structure for the request queue entry
 * @tail: tail index of the circular buffer
 * @head: head index of the circular buffer
 *
 * Return: total number of entries in the circular buffer
 */
static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
{
	u32 count;

	count = head;

	if (count >= rq->size)
		count = 0;
	if (count < tail)
		count += rq->size - tail;
	else
		count -= tail;

	return count;
}
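
/*
 * Worked example (for illustration only): with rq->size = 8, tail = 6 and
 * head = 2, the occupied slots are 6, 7, 0 and 1, so get_count() returns
 * 2 + (8 - 6) = 4.  With tail = 2 and head = 6 it simply returns 6 - 2 = 4.
 */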
/**
 * get_rvt_head - get the head index of the circular buffer
 * @rq: data structure for the request queue entry
 * @ip: mmap info pointer; non-NULL for a user-mapped queue
 *
 * Return: head index value
 */
static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
{
	u32 head;

	if (ip)
		head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
	else
		head = rq->kwq->head;

	return head;
}
/**
 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_krwq *kwq = NULL;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	u32 head;
	int ret;
	void *ip = NULL;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
		ip = srq->ip;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
		ip = qp->ip;
	}

	spin_lock_irqsave(&rq->kwq->c_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}
	kwq = rq->kwq;
	if (ip) {
		wq = rq->wq;
		tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
	} else {
		tail = kwq->tail;
	}

	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;

	if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
		head = get_rvt_head(rq, ip);
		kwq->count = get_count(rq, tail, head);
	}
	if (unlikely(kwq->count == 0)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after the count is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	if (ip)
		RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
	else
		kwq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	kwq->count--;
	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		if (kwq->count < srq->limit) {
			kwq->count =
				get_count(rq, tail, get_rvt_head(rq, ip));
			if (kwq->count < srq->limit) {
				struct ib_event ev;

				srq->limit = 0;
				spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
				ev.device = qp->ibqp.device;
				ev.element.srq = qp->ibqp.srq;
				ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
				handler(&ev, srq->ibsrq.srq_context);
				goto bail;
			}
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
bail:
	return ret;
}
EXPORT_SYMBOL(rvt_get_rwqe);
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);
/**
 * rvt_rnr_tbl_to_usec - convert an AETH RNR code to microseconds
 * @index: the index into ib_rvt_rnr_table
 *
 * Return: the RNR timeout in usec for the given index
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);

static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}
/**
 * rvt_add_retry_timer_ext - add/start a retry timer
 * @qp: the QP
 * @shift: timeout shift to wait for multiple packets
 *
 * add a retry timer on the QP
 */
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + rdi->busy_jiffies +
			      (qp->timeout_jiffies << shift);
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
 * rvt_add_rnr_timer - add/start an rnr timer on the QP
 * @qp: the QP
 * @aeth: aeth of RNR timeout, simulated aeth for loopback
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
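
/*
 * Illustrative sketch (not part of rdmavt): how a driver's RC ACK-processing
 * path might arm the RNR timer from a received AETH.  The function name and
 * the surrounding context are hypothetical; rvt_add_rnr_timer() itself
 * requires qp->s_lock to be held.
 */
#if 0
static void example_handle_rnr_nak(struct rvt_qp *qp, u32 aeth)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_add_rnr_timer(qp, aeth);	/* waits rvt_aeth_to_usec(aeth) usec */
	spin_unlock_irqrestore(&qp->s_lock, flags);
}
#endif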
/**
 * rvt_stop_rc_timers - stop all timers
 * @qp: the QP
 *
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp: the QP
 *
 * stop a pending rnr timer and clear RVT_S_WAIT_RNR
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}
/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);
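
/*
 * Illustrative sketch (not part of rdmavt): quiescing a QP's timers during
 * teardown.  Stopping under s_lock prevents re-arming, and
 * rvt_del_timers_sync() then waits for any handler still running.  The
 * function name is hypothetical.
 */
#if 0
static void example_quiesce_qp_timers(struct rvt_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rc_timers(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	rvt_del_timers_sync(qp);
}
#endif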
/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_rnr_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
 * rvt_qp_iter_init - initialize for QP iteration
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * @cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);
/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter: the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);
/**
 * rvt_qp_iter - iterate all QPs
 * @rdi: rvt devinfo
 * @v: a 64-bit value
 * @cb: a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
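
/*
 * Illustrative sketch (not part of rdmavt): counting the QPs of a device
 * with rvt_qp_iter().  The callback and the counter it updates are
 * hypothetical; @v is simply forwarded to the callback and unused here.
 */
#if 0
static atomic_t example_qp_count;

static void example_count_qp(struct rvt_qp *qp, u64 v)
{
	atomic_inc(&example_qp_count);
}

static void example_count_all_qps(struct rvt_dev_info *rdi)
{
	atomic_set(&example_qp_count, 0);
	rvt_qp_iter(rdi, 0, example_count_qp);
}
#endif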
/*
 * This should be called with s_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;
	rdi = ib_to_rvt(qp->ibqp.device);

	old_last = qp->s_last;
	trace_rvt_qp_send_completion(qp, wqe, old_last);
	last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
				    status);
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);
/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);
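
/*
 * Illustrative sketch (not part of rdmavt): a driver receive path copying
 * packet payload into the SGEs of the current RWQE.  The function name,
 * payload pointer and length are hypothetical; qp->r_sge must have been set
 * up by rvt_get_rwqe()/init_sge() beforehand.
 */
#if 0
static void example_copy_payload(struct rvt_qp *qp, void *payload, u32 len)
{
	/* release each MR as the last byte of its SGE is consumed */
	rvt_copy_sge(qp, &qp->r_sge, payload, len, true, false);
}
#endif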
static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
					  struct rvt_qp *sqp)
{
	rvp->n_pkt_drops++;
	/*
	 * For RC, the requester would timeout and retry so
	 * shortcut the timeouts and just signal too many retries.
	 */
	return sqp->ibqp.qp_type == IB_QPT_RC ?
		IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
}
/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same
 * HFI. Note that although we are single threaded due to the send engine, we
 * still have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp = NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr_no_r_lock;
	}
	spin_lock_irqsave(&qp->r_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_INV:
			if (!rvt_invalidate_rkey(qp,
						 wqe->wr.ex.invalidate_rkey)) {
				wc.wc_flags = IB_WC_WITH_INVALIDATE;
				wc.ex.invalidate_rkey =
					wqe->wr.ex.invalidate_rkey;
			}
			break;
		case IB_WR_SEND_WITH_IMM:
			wc.wc_flags = IB_WC_WITH_IMM;
			wc.ex.imm_data = wqe->wr.ex.imm_data;
			break;
		default:
			break;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = rvt_get_sge_length(sge, sqp->s_len);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		rvt_update_sge(&sqp->s_sge, len, !release);
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	rvt_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
			       IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvt_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);