/*
 * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
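
/*
 * A quick sketch of the QPN <-> (map, offset) arithmetic above,
 * assuming a 4 KiB PAGE_SIZE so RVT_BITS_PER_PAGE == 32768
 * (illustrative only, not compiled into the driver):
 *
 *	unsigned qpn = 70000;
 *	unsigned idx = qpn / RVT_BITS_PER_PAGE;		// 2
 *	unsigned off = qpn & RVT_BITS_PER_PAGE_MASK;	// 4464
 *	mk_qpn(qpt, &qpt->map[idx], off);		// 70000 again
 */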
static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};
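
/*
 * For context: rdmavt consults this table when a work request is
 * posted. A simplified sketch of the kind of check it applies
 * (paraphrased, not the exact rdmavt source):
 *
 *	if (!(rdi->post_parms[wr->opcode].qpt_support &
 *	      BIT(qp->ibqp.qp_type)))
 *		return -EINVAL;		// opcode invalid on this QP type
 */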
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
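
/*
 * Note on the pattern above: the page is allocated outside the lock
 * (get_zeroed_page() with GFP_KERNEL may sleep), so two CPUs can both
 * allocate; whoever takes qpt->lock second simply frees its copy.
 * free_page(0) is a no-op, so an allocation failure here is also
 * safe -- callers recheck map->page afterwards.
 */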
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
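
/*
 * The qpt_mask stepping above keeps the low QPN bits aligned with a
 * usable kernel receive context. A worked example with hypothetical
 * values (qpt_mask = 0xe, n_krcv_queues = 4): qpn = 10 gives
 * ((10 & 0xe) >> 1) = 5, which is out of range, so qpn becomes
 * (10 | 0xe) + 2 = 16, and ((16 & 0xe) >> 1) = 0 is valid again.
 */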
/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}
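
/*
 * rvp.qp[0] and rvp.qp[1] above are the per-port special QPs
 * (QP0/SMI and QP1/GSI); they are published under RCU, hence the
 * rcu_dereference() inside the read-side critical section. Any
 * non-NULL slot counts as a QP still in use at teardown.
 */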
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}
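
/*
 * Round-trip sketch (illustrative): mtu_to_enum(1024) yields
 * IB_MTU_1024 and ib_mtu_enum_to_int(IB_MTU_1024) yields 1024 again,
 * while any byte count that is not one of the five IB MTU values
 * (say 9000) falls through to the IB_MTU_2048 default.
 */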
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;

	return pmtu;
}
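
/*
 * Example (hypothetical numbers): with the port configured for
 * ibmtu = 2048 and a caller requesting attr->path_mtu = IB_MTU_4096
 * (4096 bytes), the request exceeds the port limit, so the function
 * returns mtu_to_enum(2048) = IB_MTU_2048 instead.
 */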
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}
u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}
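
/*
 * Error-path note: the second allocation failure must unwind the
 * first (kfree(priv) before returning ERR_PTR(-ENOMEM)), because
 * rdmavt treats an ERR_PTR return as an allocation failure and does
 * not call qib_qp_priv_free() for such a QP.
 */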
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}
/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled
 *
 * Returns 0 on success, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe, bool *call_send)
{
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		if (wqe->length > qp->pmtu)
			*call_send = false;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = rvt_get_swqe_ah(wqe);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		*call_send = true;
		break;
	default:
		break;
	}
	return 0;
}
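
/*
 * On the *call_send out-parameter: callers use it to decide whether
 * the send engine may run directly from the posting context (true)
 * or must be deferred to the send workqueue (false). Marking RC/UC
 * requests larger than the path MTU as "schedule" keeps long
 * multi-packet sends out of the caller's context.
 */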
#ifdef CONFIG_DEBUG_FS

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

/**
 * qib_qp_iter_print - print information to seq_file
 * @s: the seq_file
 * @iter: the iterator
 */
void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr));
}

#endif
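
/*
 * A hypothetical output line, to make the dense format string above
 * easier to read (all values invented for illustration):
 *
 *   N 0 QP4 RC 3 0 11 f=0 0 0 14 123 122 PSN 7a 7b 7c 7a 7b (3 3 4 4 5 32) QP6 LID 2
 *
 * i.e. iterator slot, QP number, type, state, last opcode, header
 * words, s_flags, DMA-busy count, iowait status, timeout, SSN, LSN,
 * the PSN window, the send-queue indices and size, then the remote
 * QP number and destination LID.
 */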