/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX	0
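
/* Map an ib_qp_state value to a short name for logs and debugfs output. */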
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}

int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;
	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the chunk of RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}
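
/*
 * Enable every QP resource in the group's RQ chunk on the forwarding
 * device, unwinding already-enabled QPs if any enable fails.
 * Caller must hold qp_grp->lock.
 */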
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}
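
/*
 * Disable every QP resource in the group's RQ chunk. Failures are
 * logged but do not stop the loop; the last error (if any) is returned.
 */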
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx,
					qp_grp->ufdev->name,
					vnic_idx, status);
		}
	}

	return status;
}
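
/*
 * Fill in a filter action that steers matching packets to this
 * group's default RQ.
 */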
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}
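
/*
 * Build a flow for the usNIC custom RoCE transport: reserve the
 * requested port, program a usnic filter for it, and wrap the
 * resulting forwarding flow in a qp_grp_flow handle.
 */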
static struct usnic_ib_qp_grp_flow *
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}
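
/*
 * Build a flow for the UDP transport: take a reference on the
 * caller-supplied socket, verify it is bound to UDP, and steer its
 * address/port to this group's default RQ.
 */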
static struct usnic_ib_qp_grp_flow *
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create qp_flow */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}
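
/*
 * Dispatch flow creation by transport type and, on success, link the
 * new flow into the group's flow list and expose it in debugfs.
 */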
static struct usnic_ib_qp_grp_flow *
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}
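
/*
 * QP group state machine. Valid transitions handled below:
 * RESET->INIT creates flows when a transport spec is supplied;
 * INIT->INIT adds further flows; INIT->RTR enables the QPs;
 * RTR/RTS->INIT disables them; any state ->RESET tears down flows
 * (disabling first where needed); ->ERR disables/releases as required
 * and raises IB_EVENT_QP_FATAL to the consumer. Everything else is
 * rejected with -EINVAL.
 */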
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
				qp_grp->ibqp.qp_num,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
				qp_grp->ibqp.qp_num,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}
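
/*
 * Allocate a NULL-terminated array of resource chunks matching
 * res_spec, pulling each chunk from the vNIC; on failure, return
 * already-acquired chunks before bailing out.
 */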
static struct usnic_vnic_res_chunk **
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
				PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}
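
/*
 * Bind a QP group to a VF and PD. The first group bound to a VF also
 * attaches the VF's PCI device to the PD's uiom domain; a reference
 * count tracks how many groups share the binding.
 */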
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];
	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}
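
/*
 * Derive the group id (and thus the QP number) from the default flow:
 * the reserved port for custom RoCE, or the bound UDP port for UDP.
 */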
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}
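
/*
 * Create a QP group: validate the resource spec against the
 * transport's minimum, acquire vNIC resources, bind to the VF/PD,
 * and install the initial flow whose port becomes the QP number.
 * Called with vf->lock held.
 */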
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
				qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}
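
/* Tear down a QP group; the QP must already be in RESET. */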
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}

struct usnic_vnic_res_chunk *
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}