/*
 * Copyright(c) 2016 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <rdma/uverbs_ioctl.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

#define RVT_RWQ_COUNT_THRESHOLD 16

static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360, /* 00: 655.36 */
	10,     /* 01: .01 */
	20,     /* 02: .02 */
	30,     /* 03: .03 */
	40,     /* 04: .04 */
	60,     /* 05: .06 */
	80,     /* 06: .08 */
	120,    /* 07: .12 */
	160,    /* 08: .16 */
	240,    /* 09: .24 */
	320,    /* 0A: .32 */
	480,    /* 0B: .48 */
	640,    /* 0C: .64 */
	960,    /* 0D: .96 */
	1280,   /* 0E: 1.28 */
	1920,   /* 0F: 1.92 */
	2560,   /* 10: 2.56 */
	3840,   /* 11: 3.84 */
	5120,   /* 12: 5.12 */
	7680,   /* 13: 7.68 */
	10240,  /* 14: 10.24 */
	15360,  /* 15: 15.36 */
	20480,  /* 16: 20.48 */
	30720,  /* 17: 30.72 */
	40960,  /* 18: 40.96 */
	61440,  /* 19: 61.44 */
	81920,  /* 1A: 81.92 */
	122880, /* 1B: 122.88 */
	163840, /* 1C: 163.84 */
	245760, /* 1D: 245.76 */
	327680, /* 1E: 327.68 */
	491520  /* 1F: 491.52 */
};

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
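
/*
 * Note: the table above is what gates posting and processing throughout
 * this file - e.g. rvt_post_send() tests RVT_POST_SEND_OK, rvt_post_recv()
 * tests RVT_POST_RECV_OK/RVT_FLUSH_RECV, and rvt_get_rwqe() tests
 * RVT_PROCESS_RECV_OK before touching a queue.
 */
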
/* platform specific: return the last level cache (llc) size, in KiB */
static int rvt_wss_llc_size(void)
{
	/* assume that the boot CPU value is universal for all CPUs */
	return boot_cpu_data.x86_cache_size;
}

/* platform specific: cacheless copy */
static void cacheless_memcpy(void *dst, void *src, size_t n)
{
	/*
	 * Use the only available X64 cacheless copy.  Add a __user cast
	 * to quiet sparse.  The src argument is already in the kernel so
	 * there are no security issues.  The extra fault recovery machinery
	 * is not invoked.
	 */
	__copy_user_nocache(dst, (void __user *)src, n, 0);
}

void rvt_wss_exit(struct rvt_dev_info *rdi)
{
	struct rvt_wss *wss = rdi->wss;

	if (!wss)
		return;

	/* coded to handle partially initialized and repeat callers */
	kfree(wss->entries);
	wss->entries = NULL;
	kfree(rdi->wss);
	rdi->wss = NULL;
}

/*
 * rvt_wss_init - Init wss data structures
 *
 * Return: 0 on success
 */
int rvt_wss_init(struct rvt_dev_info *rdi)
{
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
	unsigned int wss_threshold = rdi->dparms.wss_threshold;
	unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;
	struct rvt_wss *wss;
	int node = rdi->dparms.node;

	if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
		rdi->wss = NULL;
		return 0;
	}

	rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
	if (!rdi->wss)
		return -ENOMEM;
	wss = rdi->wss;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;

	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;

	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = rvt_wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss->pages_mask = table_bits - 1;
	wss->num_entries = table_bits / BITS_PER_LONG;

	wss->threshold = (llc_bits * wss_threshold) / 100;
	if (wss->threshold == 0)
		wss->threshold = 1;

	wss->clean_period = wss_clean_period;
	atomic_set(&wss->clean_counter, wss_clean_period);

	wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
				    GFP_KERNEL, node);
	if (!wss->entries) {
		rvt_wss_exit(rdi);
		return -ENOMEM;
	}

	return 0;
}
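
/*
 * Sizing sketch (illustrative numbers, not taken from the code): with a
 * 32 MiB LLC and 4 KiB pages, llc_bits = table_bits = 8192, so the bitmap
 * needs 8192 / 64 = 128 unsigned longs and an 80% threshold works out to
 * 6553 pages resident before wss_exceeds_threshold() fires.
 */
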
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
static void wss_advance_clean_counter(struct rvt_wss *wss)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss->clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slowdown is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss->clean_counter, wss->clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss->clean_entry) - 1)
			& (wss->num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss->entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss->total_count);
	}
}

/*
 * Insert the given address into the working set array.
 */
static void wss_insert(struct rvt_wss *wss, void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss->entries[entry]))
		atomic_inc(&wss->total_count);

	wss_advance_clean_counter(wss);
}
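
/*
 * Illustrative mapping (assuming PAGE_SHIFT == 12 and the 8192-slot table
 * from the sketch above): bits 12..24 of the address pick one of 8192 page
 * slots; entry = page / 64 selects the unsigned long and nr = page % 64
 * the bit within it, so an insert costs one test_and_set_bit plus at most
 * one total_count increment.
 */
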
/*
 * Is the working set larger than the threshold?
 */
static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
{
	return atomic_read(&wss->total_count) >= wss->threshold;
}

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our qpn table.  No need for two.  Let's go ahead and mark the
	 * bitmaps for those.  The reserved range must be *after* the range
	 * which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
	unsigned int *qp_inuse = (unsigned int *)v;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	/* Reset the qp and remove it from the qp hash list */
	rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

	/* Increment the qp_inuse count */
	(*qp_inuse)++;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 * Return the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned int qp_inuse = 0;

	qp_inuse += rvt_mcast_tree_empty(rdi);

	rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
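
/*
 * mk_qpn() is the inverse of the (map, offset) decomposition used by
 * alloc_qpn() and rvt_free_qpn() below: a QPN is its bitmap page index
 * times RVT_BITS_PER_PAGE plus the bit offset within that page.
 */
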
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 * @exclude_prefix: prefix of special queue pair number being allocated
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
		RVT_AIP_QPN_MAX : RVT_QPN_MAX;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= max_qpn)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.   It gets re-assigned below
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(rdi->dparms.qos_shift > 1 &&
			offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
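
/*
 * (Stride note, inferred from init_qpn_table(): qpt->incr is qpn_inc
 * shifted left by qos_shift, so the scan above advances in steps that
 * keep the low-order QoS bits - other than the alternating bit 0 -
 * clear; the WARN_ON just before mk_qpn() checks that invariant.)
 */
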
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: Whether to clear the send side or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_qp_swqe(qp, wqe);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 * @lkey - the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp - the rvt_qp
 * @lkey - the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
		return true;
	return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp - the qp
 * @lkey - the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp - the qp
 * @lkey - the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}

/**
 * rvt_alloc_rq - allocate memory for user or kernel buffer
 * @rq: receive queue data structure
 * @size: number of request queue entries
 * @node: The NUMA node
 * @udata: true if user data is available, false otherwise
 *
 * Return: If memory allocation failed, return -ENOMEM
 * This function is used by both shared receive
 * queues and non-shared receive queues to allocate
 * their memory.
 */
int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
		 struct ib_udata *udata)
{
	if (udata) {
		rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
		if (!rq->wq)
			goto bail;
		/* need kwq with no buffers */
		rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->wq->wq;
	} else {
		/* need kwq with buffers */
		rq->kwq =
			vzalloc_node(sizeof(struct rvt_krwq) + size, node);
		if (!rq->kwq)
			goto bail;
		rq->kwq->curr_wq = rq->kwq->wq;
	}

	spin_lock_init(&rq->kwq->p_lock);
	spin_lock_init(&rq->kwq->c_lock);
	return 0;
bail:
	rvt_free_rq(rq);
	return -ENOMEM;
}
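
/*
 * Layout note: for user QPs the RWQ lives in a vmalloc_user() buffer
 * (struct rvt_rwq) that can be mmapped, while the kernel-side bookkeeping
 * (struct rvt_krwq) carries the locks; kwq->curr_wq points at whichever
 * buffer actually holds the WQEs, so later code is layout-agnostic.
 */
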
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset path
 * holds the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_acked_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}

/**
 * _rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			  enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}
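
/*
 * (The drop-and-reacquire of all three locks above appears to exist
 * because stop_send_queue(), rvt_del_timers_sync(), quiesce_qp(), and
 * rvt_remove_qp() can block - an inference from the "Wait for things to
 * stop" step, not a statement from the original authors.)
 */
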
/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
 * before calling _rvt_reset_qp().
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
{
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	_rvt_reset_qp(rdi, qp, type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
		qpn &= RVT_AIP_QP_SUFFIX;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * get_allowed_ops - Given a QP type return the appropriate allowed OP
 * @type: valid, supported, QP type
 */
static u8 get_allowed_ops(enum ib_qp_type type)
{
	return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
		IB_OPCODE_UC : IB_OPCODE_UD;
}

/**
 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static void free_ud_wq_attr(struct rvt_qp *qp)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		kfree(wqe->ud_wr.attr);
		wqe->ud_wr.attr = NULL;
	}
}

/**
 * alloc_ud_wq_attr - AH attribute cache for UD QPs
 * @qp: Valid QP with allowed_ops set
 * @node: Numa node for allocation
 *
 * The rvt_swqe data structure being used is a union, so this is
 * only valid for UD QPs.
 */
static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
{
	struct rvt_swqe *wqe;
	int i;

	for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
		wqe = rvt_get_swqe_ptr(qp, i);
		wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
					       GFP_KERNEL, node);
		if (!wqe->ud_wr.attr) {
			free_ud_wq_attr(qp);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;
	u8 exclude_prefix = 0;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
		return ERR_PTR(-EOPNOTSUPP);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge >
		    rdi->dparms.props.max_recv_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		fallthrough;
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;
		qp->allowed_ops = get_allowed_ops(init_attr->qp_type);

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
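		/*
		 * (This matches the IBTA local ACK timeout formula of
		 * 4.096 usec * 2^timeout: 4096 * 2^timeout is nanoseconds,
		 * so dividing by 1000 yields microseconds.  qp->timeout is
		 * still 0 here from kzalloc_node(); rvt_modify_qp() sets
		 * the real value later.)
		 */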
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			err = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
					   rdi->dparms.node, udata);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_driver_priv;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;
		err = alloc_ud_wq_attr(qp, rdi->dparms.node);
		if (err) {
			ret = (ERR_PTR(err));
			goto bail_rq_rvt;
		}

		if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
			exclude_prefix = RVT_AIP_QP_PREFIX;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num,
				exclude_prefix);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
			qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
		qp->port_num = init_attr->port_num;
		rvt_init_qp(rdi, qp, init_attr->qp_type);
		if (rdi->driver_f.qp_priv_init) {
			err = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
			if (err) {
				ret = ERR_PTR(err);
				goto bail_rq_wq;
			}
		}
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EOPNOTSUPP);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s, udata,
						      qp->r_rq.wq);
			if (IS_ERR(qp->ip)) {
				ret = ERR_CAST(qp->ip);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return &qp->ibqp;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	free_ud_wq_attr(qp);

bail_rq_rvt:
	rvt_free_rq(&qp->r_rq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.kwq) {
		u32 head;
		u32 tail;
		struct rvt_rwq *wq = NULL;
		struct rvt_krwq *kwq = NULL;

		spin_lock(&qp->r_rq.kwq->c_lock);
		/* qp->ip used to validate if there is a user buffer mmaped */
		if (qp->ip) {
			wq = qp->r_rq.wq;
			head = RDMA_READ_UAPI_ATOMIC(wq->head);
			tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
		} else {
			kwq = qp->r_rq.kwq;
			head = kwq->head;
			tail = kwq->tail;
		}
		/* sanity check pointers before trusting them */
		if (head >= qp->r_rq.size)
			head = 0;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		if (qp->ip)
			RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
		else
			kwq->tail = tail;
		spin_unlock(&qp->r_rq.kwq->c_lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	int opa_ah;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			_rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}
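
/*
 * Lock ordering note: rvt_qp_mr_clean(), rvt_reset_qp(), and
 * rvt_modify_qp() all take r_lock (irqs off), then s_hlock, then s_lock,
 * and release in reverse order; _rvt_reset_qp() depends on being entered
 * with all three held.
 */
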
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 * @udata: unused by the driver
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	rvt_reset_qp(rdi, qp, ibqp->qp_type);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	kvfree(qp->r_rq.kwq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	rdma_destroy_ah_attr(&qp->remote_ah_attr);
	rdma_destroy_ah_attr(&qp->alt_ah_attr);
	free_ud_wq_attr(qp);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1 -
		rdi->dparms.reserved_operations;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num =
		rdma_ah_get_port_num(&qp->alt_ah_attr);
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		  const struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_krwq *wq = qp->r_rq.kwq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == READ_ONCE(wq->tail)) {
			spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				wqe->sg_list[i].addr = wr->sg_list[i].addr;
				wqe->sg_list[i].length = wr->sg_list[i].length;
				wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
			}
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_store_release(&wq->head, next);
		}
		spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
	}
	return 0;
}
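
/*
 * The kwq carries two locks by design: p_lock serializes producers
 * (rvt_post_recv() and rvt_post_srq_recv() advancing head), while c_lock
 * serializes consumers (rvt_get_rwqe() and the flush loop in
 * rvt_error_qp() advancing tail), so posting and completing receive work
 * never contend on a single lock.
 */
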
/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp - the qp
 * @post_parms - the post send table for the driver
 * @wr - the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr.  Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	const struct ib_send_wr *wr)
{
	int len;

	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
		return -EINVAL;
	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;
	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;
	len = post_parms[wr->opcode].length;

	/* UD specific */
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
		len = sizeof(struct ib_ud_wr);
	}
	return len;
}

/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * The return value is zero or a -ENOMEM.
 */
static inline int rvt_qp_is_avail(
	struct rvt_qp *qp,
	struct rvt_dev_info *rdi,
	bool reserved_op)
{
	u32 slast;
	u32 avail;
	u32 reserved_used;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	if (unlikely(reserved_op)) {
		/* see rvt_qp_wqe_unreserve() */
		reserved_used = atomic_read(&qp->s_reserved_used);
		if (reserved_used >= rdi->dparms.reserved_operations)
			return -ENOMEM;
		return 0;
	}
	/* non-reserved operations */
	if (likely(qp->s_avail))
		return 0;
	/* See rvt_qp_complete_swqe() */
	slast = smp_load_acquire(&qp->s_last);
	if (qp->s_head >= slast)
		avail = qp->s_size - (qp->s_head - slast);
	else
		avail = slast - qp->s_head;

	reserved_used = atomic_read(&qp->s_reserved_used);
	avail = avail - 1 -
		(rdi->dparms.reserved_operations - reserved_used);
	/* ensure we don't assign a negative s_avail */
	if ((s32)avail <= 0)
		return -ENOMEM;
	qp->s_avail = avail;
	if (WARN_ON(qp->s_avail >
		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
		rvt_pr_err(rdi,
			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
			   qp->s_head, qp->s_tail, qp->s_cur,
			   qp->s_acked, qp->s_last);
	return 0;
}
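
/*
 * Ring arithmetic sketch (illustrative): with s_size = 16, s_head = 14,
 * and slast = 4, head >= slast gives avail = 16 - (14 - 4) = 6; one slot
 * is then burned to distinguish full from empty, and any unconsumed
 * reserved operations are subtracted before s_avail is republished.
 */
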
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: kick the send engine into gear
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   const struct ib_send_wr *wr,
			   bool *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;
	size_t cplen;
	bool reserved_op;
	int local_ops_delayed = 0;

	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
	if (ret < 0)
		return ret;
	cplen = ret;

	/*
	 * Local operations include fast register and local invalidate.
	 * Fast register needs to be processed immediately because the
	 * registered lkey may be used by following work requests and the
	 * lkey needs to be valid at the time those requests are posted.
	 * Local invalidate can be processed immediately if fencing is
	 * not required and no previous local invalidate ops are pending.
	 * Signaled local operations that have been processed immediately
	 * need to have requests with "completion only" flags set posted
	 * to the send queue in order to generate completions.
	 */
	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
		switch (wr->opcode) {
		case IB_WR_REG_MR:
			ret = rvt_fast_reg_mr(qp,
					      reg_wr(wr)->mr,
					      reg_wr(wr)->key,
					      reg_wr(wr)->access);
			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
				return ret;
			break;
		case IB_WR_LOCAL_INV:
			if ((wr->send_flags & IB_SEND_FENCE) ||
			    atomic_read(&qp->local_ops_pending)) {
				local_ops_delayed = 1;
			} else {
				ret = rvt_invalidate_rkey(
					qp, wr->ex.invalidate_rkey);
				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
					return ret;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	reserved_op = rdi->post_parms[wr->opcode].flags &
			RVT_OPERATION_USE_RESERVE;
	/* check for avail */
	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
	if (ret)
		return ret;
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	/* cplen has length from above */
	memcpy(&wqe->wr, wr, cplen);

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		struct rvt_sge *last_sge = NULL;

		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;

			if (length == 0)
				continue;
			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
					  &wr->sg_list[i], acc);
			if (unlikely(ret < 0))
				goto bail_inval_free;
			wqe->length += length;
			if (ret)
				last_sge = &wqe->sg_list[j];
			j += ret;
		}
		wqe->wr.num_sge = j;
	}

	/*
	 * Calculate and set SWQE PSN values prior to handing it off
	 * to the driver's check routine. This gives the driver the
	 * opportunity to adjust PSN values based on internal checks.
	 */
	log_pmtu = qp->log_pmtu;
	if (qp->allowed_ops == IB_OPCODE_UD) {
		struct rvt_ah *ah = rvt_get_swqe_ah(wqe);

		log_pmtu = ah->log_pmtu;
		rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
	}

	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
		if (local_ops_delayed)
			atomic_inc(&qp->local_ops_pending);
		else
			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
		wqe->ssn = 0;
		wqe->psn = 0;
		wqe->lpsn = 0;
	} else {
		wqe->ssn = qp->s_ssn++;
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
				(wqe->length ?
					((wqe->length - 1) >> log_pmtu) :
					0);
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.setup_wqe) {
		ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
		if (ret < 0)
			goto bail_inval_free_ref;
	}

	if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
		qp->s_next_psn = wqe->lpsn + 1;

	if (unlikely(reserved_op)) {
		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
		rvt_qp_wqe_reserve(qp, wqe);
	} else {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		qp->s_avail--;
	}
	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
	smp_wmb(); /* see request builders */
	qp->s_head = next;

	return 0;

bail_inval_free_ref:
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}

/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	bool call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty, and we only have a single WR then just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		nreq++;
	}
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		/*
		 * Only call do_send if there is exactly one packet, and the
		 * driver said it was ok.
		 */
		if (nreq == 1 && call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_krwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
		wq = srq->rq.kwq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == READ_ONCE(wq->tail)) {
			spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++) {
			wqe->sg_list[i].addr = wr->sg_list[i].addr;
			wqe->sg_list[i].length = wr->sg_list[i].length;
			wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
		}
		/* Make sure queue entry is written before the head index. */
		smp_store_release(&wq->head, next);
		spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
	}
	return 0;
}
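/*
 * Usage sketch (illustrative, hypothetical names): replenishing an SRQ
 * with ib_post_srq_recv(), which resolves to the function above for
 * rvt devices.
 *
 *	struct ib_recv_wr rwr = {};
 *	const struct ib_recv_wr *bad_wr;
 *
 *	rwr.wr_id = MY_RX_WRID;
 *	rwr.sg_list = &my_sge;
 *	rwr.num_sge = 1;
 *	if (ib_post_srq_recv(my_srq, &rwr, &bad_wr) == -ENOMEM)
 *		;	/* ring full: head + 1 caught up with tail */
 *
 * The full test above deliberately sacrifices one slot so that
 * head == tail can always mean "empty".
 */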
/*
 * rvt used the internal kernel struct as part of its ABI, for now make sure
 * the kernel struct does not change layout. FIXME: rvt should never cast the
 * user struct to a kernel struct.
 */
static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
{
	BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
		     offsetof(struct rvt_wqe_sge, addr));
	BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
		     offsetof(struct rvt_wqe_sge, length));
	BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
		     offsetof(struct rvt_wqe_sge, lkey));
	return (struct ib_sge *)sge;
}
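/*
 * The BUILD_BUG_ON()s above pin the field layout at compile time: if a
 * future change reorders either struct, the build breaks instead of
 * silently corrupting SGEs.  A minimal sketch of the same technique for
 * a hypothetical pair of structs:
 *
 *	BUILD_BUG_ON(offsetof(struct foo_uapi, len) !=
 *		     offsetof(struct foo_kern, len));
 *
 * Only the offsets actually checked are guaranteed; fields not covered
 * by a BUILD_BUG_ON() can still drift apart.
 */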
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_sge_state *ss;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				  NULL, rvt_cast_sge(&wqe->sg_list[i]),
				  IB_ACCESS_LOCAL_WRITE);
		if (unlikely(ret <= 0))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	return 1;

bad_lkey:
	while (j) {
		struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		rvt_put_mr(sge->mr);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	return 0;
}
/**
 * get_rvt_head - get head indices of the circular buffer
 * @rq: data structure for request queue entry
 * @ip: the QP
 *
 * Return - head index value
 */
static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
{
	u32 head;

	if (ip)
		head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
	else
		head = rq->kwq->head;

	return head;
}
/**
 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
{
	unsigned long flags;
	struct rvt_rq *rq;
	struct rvt_krwq *kwq = NULL;
	struct rvt_rwq *wq;
	struct rvt_srq *srq;
	struct rvt_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	u32 head;
	int ret;
	void *ip = NULL;

	if (qp->ibqp.srq) {
		srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
		ip = srq->ip;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
		ip = qp->ip;
	}

	spin_lock_irqsave(&rq->kwq->c_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}
	kwq = rq->kwq;
	if (ip) {
		wq = rq->wq;
		tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
	} else {
		tail = kwq->tail;
	}

	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;

	if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
		head = get_rvt_head(rq, ip);
		kwq->count = rvt_get_rq_count(rq, head, tail);
	}
	if (unlikely(kwq->count == 0)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after the count is read. */
	smp_rmb();
	wqe = rvt_get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	if (ip)
		RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
	else
		kwq->tail = tail;
	if (!wr_id_only && !init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	kwq->count--;
	ret = 1;
	set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
	if (handler) {
		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		if (kwq->count < srq->limit) {
			kwq->count =
				rvt_get_rq_count(rq,
						 get_rvt_head(rq, ip), tail);
			if (kwq->count < srq->limit) {
				struct ib_event ev;

				srq->limit = 0;
				spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
				ev.device = qp->ibqp.device;
				ev.element.srq = qp->ibqp.srq;
				ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
				handler(&ev, srq->ibsrq.srq_context);
				goto bail;
			}
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
bail:
	return ret;
}
EXPORT_SYMBOL(rvt_get_rwqe);
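/*
 * Callers must handle all three return values.  A sketch of the usual
 * pattern, mirroring how rvt_ruc_loopback() below consumes this
 * function:
 *
 *	ret = rvt_get_rwqe(qp, false);
 *	if (ret < 0)
 *		goto op_err;	// local error, responder errors out
 *	if (!ret)
 *		goto rnr_nak;	// no RWQE posted yet: RNR NAK the peer
 *	// ret == 1: qp->r_wr_id (and qp->r_sge) are now valid
 */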
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);
/*
 * rvt_rnr_tbl_to_usec - return index into ib_rvt_rnr_table
 * @index - the index
 * return usec from an index into ib_rvt_rnr_table
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
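/*
 * Worked example: only the low 5 bits of @index select the RNR timeout
 * code, so rvt_rnr_tbl_to_usec(0x14) and rvt_rnr_tbl_to_usec(0x34)
 * both map to ib_rvt_rnr_table[0x14] == 10240 usec (10.24 ms).
 */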
static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}
/*
 * rvt_add_retry_timer_ext - add/start a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 * add a retry timer on the QP
 */
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + rdi->busy_jiffies +
			      (qp->timeout_jiffies << shift);
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
 * rvt_add_rnr_timer - add/start an rnr timer on the QP
 * @qp: the QP
 * @aeth: aeth of RNR timeout, simulated aeth for loopback
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
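/*
 * For loopback there is no AETH on the wire, so callers synthesize one
 * by shifting the responder's RNR timer code into the credit field, as
 * rvt_ruc_loopback() below does:
 *
 *	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << IB_AETH_CREDIT_SHIFT);
 *
 * rvt_aeth_to_usec() then undoes that shift to select the table entry.
 */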
/**
 * rvt_stop_rc_timers - stop all timers
 * @qp: the QP
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer, if one is pending
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}
/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);
/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
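/*
 * Pairing note: QP setup wires this handler up roughly as
 *
 *	hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	qp->s_rnr_timer.function = rvt_rc_rnr_retry;
 *
 * so each rvt_add_rnr_timer() arms a one-shot that fires here exactly
 * once (HRTIMER_NORESTART) and hands the QP back to the send engine.
 */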
/**
 * rvt_qp_iter_init - initial for QP iteration
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * @cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);
/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter: the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);
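/*
 * Sketch of the intended debugfs pairing (hypothetical seq_file op;
 * hfi1 and qib carry real versions of this).  The iterator from
 * rvt_qp_iter_init() serves as the seq_file cursor, and per the
 * __must_hold() annotation RCU must be held across _next() calls:
 *
 *	static void *my_qp_seq_start(struct seq_file *s, loff_t *pos)
 *		__acquires(RCU)
 *	{
 *		struct rvt_qp_iter *iter = s->private;
 *		loff_t n = *pos;
 *
 *		rcu_read_lock();
 *		while (n--) {
 *			if (rvt_qp_iter_next(iter))
 *				return NULL;
 *		}
 *		return iter;
 *	}
 */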
/**
 * rvt_qp_iter - iterate all QPs
 * @rdi: rvt devinfo
 * @v: a 64-bit value
 * @cb: a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
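/*
 * Example (hypothetical callback): count QPs in the RTS state, using a
 * driver-private atomic rather than @v:
 *
 *	static atomic_t my_rts_count;
 *
 *	static void my_count_cb(struct rvt_qp *qp, u64 v)
 *	{
 *		if (qp->state == IB_QPS_RTS)
 *			atomic_inc(&my_rts_count);
 *	}
 *
 *	atomic_set(&my_rts_count, 0);
 *	rvt_qp_iter(rdi, 0, my_count_cb);
 *
 * The iterator holds a QP reference and drops RCU around each
 * invocation, so @cb is allowed to sleep.
 */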
/*
 * This should be called with s_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;
	rdi = ib_to_rvt(qp->ibqp.device);

	old_last = qp->s_last;
	trace_rvt_qp_send_completion(qp, wqe, old_last);
	last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
				    status);
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);
/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);
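/*
 * Which copy strategy runs above is driver policy: rdi->dparms.sge_copy_mode
 * is typically wired to a module parameter by the driver (hfi1's
 * sge_copy_mode, for instance).  As a rough sketch of the decision:
 *
 *	RVT_SGE_COPY_MEMCPY:    always memcpy()
 *	RVT_SGE_COPY_CACHELESS: cacheless for transfers >= PAGE_SIZE
 *	RVT_SGE_COPY_ADAPTIVE:  cacheless only while the working-set
 *	                        tracker (wss) says caching won't pay off
 */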
static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
					  struct rvt_qp *sqp)
{
	rvp->n_pkt_drops++;
	/*
	 * For RC, the requester would timeout and retry so
	 * shortcut the timeouts and just signal too many retries.
	 */
	return sqp->ibqp.qp_type == IB_QPT_RC ?
		IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
}
/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp = NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr_no_r_lock;
	}
	spin_lock_irqsave(&qp->r_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_INV:
			if (!rvt_invalidate_rkey(qp,
						 wqe->wr.ex.invalidate_rkey)) {
				wc.wc_flags = IB_WC_WITH_INVALIDATE;
				wc.ex.invalidate_rkey =
					wqe->wr.ex.invalidate_rkey;
			}
			break;
		case IB_WR_SEND_WITH_IMM:
			wc.wc_flags = IB_WC_WITH_IMM;
			wc.ex.imm_data = wqe->wr.ex.imm_data;
			break;
		default:
			break;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = rvt_get_sge_length(sge, sqp->s_len);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		rvt_update_sge(&sqp->s_sge, len, !release);
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	rvt_send_complete(sqp, wqe, send_status);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvt_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);
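/*
 * Worked example for the atomic leg above: with *vaddr == 5,
 * compare_add == 5 and swap == 9,
 *
 *   IB_WR_ATOMIC_FETCH_AND_ADD: *vaddr becomes 10;
 *       atomic64_add_return() yields 10, so 10 - 5 == 5 (the old
 *       value) is handed back to the requester;
 *   IB_WR_ATOMIC_CMP_AND_SWP:   cmpxchg() sees *vaddr == compare_add,
 *       stores 9 and returns the old value 5.
 *
 * Either way the requester's SGE receives the pre-operation contents,
 * matching IBTA atomic semantics.
 */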
);