/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"
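/*
 * Queue pair group management for the usNIC IB driver: a qp_grp bundles the
 * vNIC resource chunks backing one ib_qp, the packet-steering flows attached
 * to it, and its binding to a VF/PD pair.
 */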
#define DFLT_RQ_IDX	0

const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return "Rst";
	case IB_QPS_INIT:
		return "Init";
	case IB_QPS_RTR:
		return "RTR";
	case IB_QPS_RTS:
		return "RTS";
	case IB_QPS_SQD:
		return "SQD";
	case IB_QPS_SQE:
		return "SQE";
	case IB_QPS_ERR:
		return "ERR";
	default:
		return "UNKNOWN STATE";
	}
}
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;

	if (obj) {
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}
static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * is just the chunk of RQ indices.
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}
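/*
 * enable_qp_grp()/disable_qp_grp() toggle every RQ in the group on the
 * forwarding device. Enabling is all-or-nothing: if any queue fails, the
 * queues enabled so far are walked back so the group never ends up
 * half-enabled.
 */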
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int status;
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;

	lockdep_assert_held(&qp_grp->lock);

	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status) {
			usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (i--; i >= 0; i--) {
		res = res_chunk->res[i];
		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
					res->vnic_idx);
	}

	return status;
}
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
	int i, vnic_idx;
	struct usnic_vnic_res_chunk *res_chunk;
	struct usnic_vnic_res *res;
	int status = 0;

	lockdep_assert_held(&qp_grp->lock);
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

	res_chunk = get_qp_res_chunk(qp_grp);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get qp res with err %ld\n",
				PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	for (i = 0; i < res_chunk->cnt; i++) {
		res = res_chunk->res[i];
		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
						res->vnic_idx);
		if (status)
			usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
					res->vnic_idx, qp_grp->ufdev->name,
					vnic_idx, status);
	}

	return status;
}
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}
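/*
 * Flow construction for the custom RoCE transport: reserve an L4 port,
 * build a usnic filter that matches it, program the flow into the
 * forwarding device, then wrap everything in a qp_grp_flow handle. The
 * error path releases these in exactly the reverse order.
 */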
static struct usnic_ib_qp_grp_flow *
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/* Create Flow Handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;

	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}
static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}
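/*
 * The UDP transport variant borrows an already-bound socket from userspace
 * instead of reserving a port itself: the fd is resolved to a kernel socket,
 * checked to be UDP, and its address/port become the filter match. The
 * socket reference is held for the lifetime of the flow.
 */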
static struct usnic_ib_qp_grp_flow *
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		usnic_err("Unable to alloc flow with err %ld\n",
				PTR_ERR(flow));
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/* Create flow handle */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;

	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}
static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}
static struct usnic_ib_qp_grp_flow *
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	struct usnic_ib_qp_grp_flow *qp_flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		qp_flow = create_udp_flow(qp_grp, trans_spec);
		break;
	default:
		usnic_err("Unsupported transport %u\n",
				trans_spec->trans_type);
		return ERR_PTR(-EINVAL);
	}

	if (!IS_ERR_OR_NULL(qp_flow)) {
		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
		usnic_debugfs_flow_add(qp_flow);
	}

	return qp_flow;
}
static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}
static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;

	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow)
							: -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow)
							: -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
				qp_grp->ibqp.qp_num,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
				qp_grp->ibqp.qp_num,
				usnic_ib_qp_grp_state_to_string(old_state),
				usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}
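/*
 * The resource spec is a USNIC_VNIC_RES_TYPE_EOL-terminated array, and the
 * chunk list built from it is sized with one extra slot so the list itself
 * stays NULL-terminated; free_qp_grp_res() and usnic_ib_qp_grp_get_chunk()
 * both rely on that sentinel instead of carrying an explicit count.
 */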
static struct usnic_vnic_res_chunk **
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++)
		/* Do Nothing */;

	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
				PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	/* release everything acquired so far, including index 0 */
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int i;

	for (i = 0; res_chunk_list[i]; i++)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
}
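/*
 * A VF can serve several QP groups, but they must all share one PD because
 * the IOMMU domain is attached per device: the first bind attaches the VF's
 * PCI device to the PD's uiom domain, the last unbind detaches it, and
 * qp_grp_ref_cnt tracks the population in between.
 */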
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}
static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
}
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}
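/*
 * The group id doubles as the QP number reported to the IB core, and it is
 * derived from the flow's L4 port: directly from the reserved port for the
 * custom RoCE transport, or by asking the transport layer for the socket's
 * bound port in the UDP case. Since ports are reserved per transport, this
 * presumably keeps the resulting id unique.
 */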
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to the stack first and then to *id,
		 * so that the short-to-int cast works for both little-
		 * and big-endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}
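/*
 * Creation order: validate the resource spec against the transport's
 * minimum, grab vNIC resource chunks, bind the group to its VF/PD, then
 * program the initial flow and take the QP number from it. Each out_* label
 * unwinds exactly the steps completed before the failure.
 */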
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			struct usnic_ib_pd *pd,
			struct usnic_vnic_res_spec *res_spec,
			struct usnic_transport_spec *transport_spec)
{
	struct usnic_ib_qp_grp *qp_grp;
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return ERR_PTR(err);
	}

	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
	if (!qp_grp) {
		usnic_err("Unable to alloc qp_grp - Out of memory\n");
		return NULL;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
		err = qp_grp->res_chunk_list ?
				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
		usnic_err("Unable to alloc res for %d with err %d\n",
				qp_grp->grp_id, err);
		goto out_free_qp_grp;
	}

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return qp_grp;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
	kfree(qp_grp);

	return ERR_PTR(err);
}
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}
struct usnic_vnic_res_chunk *
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
				enum usnic_vnic_res_type res_type)
{
	int i;

	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
		if (qp_grp->res_chunk_list[i]->type == res_type)
			return qp_grp->res_chunk_list[i];
	}

	return ERR_PTR(-EINVAL);
}