/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0

const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;
	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the chunk of RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

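/*
 * Program every queue in this group's RQ chunk via the forwarding
 * device. On a mid-loop failure, unwind by disabling the queues
 * enabled so far, so the group is left fully disabled. Caller must
 * hold qp_grp->lock.
 */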
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}

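/*
 * Disable every queue in the group's RQ chunk. Unlike enable_qp_grp(),
 * this keeps going on error and returns the last non-zero status, so a
 * partial failure still disables as many queues as possible.
 */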
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
			PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}

static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}

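/*
 * usNIC "custom RoCE" flows key off a transport port number: reserve
 * the port, build a usnic filter on it, and allocate the forwarding
 * flow. Failures unwind in reverse: dealloc the flow, then unreserve
 * the port.
 */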
static struct usnic_ib_qp_grp_flow *
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;

	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}

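/*
 * UDP flows borrow an already-bound UDP socket from userspace: take a
 * reference on the fd, read back its address and port, verify it
 * really is UDP, and build the filter from that address/port pair.
 * The socket reference is held for the life of the flow.
 */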
static struct usnic_ib_qp_grp_flow *
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;

	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}

static struct usnic_ib_qp_grp_flow *
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}

int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
				qp_grp->grp_id,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}

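/*
 * Walk the EOL-terminated resource spec and grab each requested chunk
 * from the vNIC. The returned array is itself NULL-terminated (hence
 * the +1 in the kcalloc) so free_qp_grp_res() can walk it without a
 * stored length.
 */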
static struct usnic_vnic_res_chunk **
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
					PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}

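/*
 * Bind/unbind a QP group to a VF. The first group on a VF attaches the
 * VF's PCI device to the PD's IOMMU domain and pins vf->pd; later
 * groups just take a reference. The last unbind detaches the device
 * and clears vf->pd.
 */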
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}

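/*
 * Derive the group id (and hence the exposed QP number) from the
 * default flow: the reserved port for custom RoCE, or the bound UDP
 * source port for IPv4/UDP.
 */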
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short-to-int cast works for both little-
		 * and big-endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}

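/*
 * Create a QP group: check the resource spec against the per-transport
 * minimum, grab vNIC resources, bind to the VF/PD, install the initial
 * flow, and derive the QP number from it. Called with vf->lock held;
 * on failure every step is unwound in reverse order.
 */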
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp)
		return NULL;

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}

void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}

struct usnic_vnic_res_chunk *
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}