treewide: remove redundant IS_ERR() before error code check
drivers/infiniband/hw/qedr/qedr_iw_cm.c
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/flow.h>
#include "qedr.h"
#include "qedr_iw_cm.h"
static inline void
qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;

	laddr->sin_family = AF_INET;
	raddr->sin_family = AF_INET;

	laddr->sin_port = htons(cm_info->local_port);
	raddr->sin_port = htons(cm_info->remote_port);

	laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
	raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
}

static inline void
qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *raddr6 =
	    (struct sockaddr_in6 *)&event->remote_addr;
	int i;

	laddr6->sin6_family = AF_INET6;
	raddr6->sin6_family = AF_INET6;

	laddr6->sin6_port = htons(cm_info->local_port);
	raddr6->sin6_port = htons(cm_info->remote_port);

	for (i = 0; i < 4; i++) {
		laddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->local_ip[i]);
		raddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->remote_ip[i]);
	}
}
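/* kref release callbacks: qedr_iw_free_qp() frees the QP once its last
 * reference is dropped, and qedr_iw_free_ep() drops the endpoint's QP and
 * cm_id references before freeing the endpoint itself.
 */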
static void qedr_iw_free_qp(struct kref *ref)
{
	struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);

	kfree(qp);
}

static void
qedr_iw_free_ep(struct kref *ref)
{
	struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);

	if (ep->qp)
		kref_put(&ep->qp->refcnt, qedr_iw_free_qp);

	if (ep->cm_id)
		ep->cm_id->rem_ref(ep->cm_id);

	kfree(ep);
}
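/* Handle an incoming MPA connect request on a listener: allocate an endpoint
 * for the new connection and deliver IW_CM_EVENT_CONNECT_REQUEST to the
 * listener's cm_id.
 */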
static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
	struct qedr_dev *dev = listener->dev;
	struct iw_cm_event event;
	struct qedr_iw_ep *ep;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return;

	ep->dev = dev;
	ep->qed_context = params->ep_context;
	kref_init(&ep->refcnt);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.status = params->status;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    params->cm_info->ip_version == QED_TCP_IPV4)
		qedr_fill_sockaddr4(params->cm_info, &event);
	else
		qedr_fill_sockaddr6(params->cm_info, &event);

	event.provider_data = (void *)ep;
	event.private_data = (void *)params->cm_info->private_data;
	event.private_data_len = (u8)params->cm_info->private_data_len;
	event.ord = params->cm_info->ord;
	event.ird = params->cm_info->ird;

	listener->cm_id->event_handler(listener->cm_id, &event);
}
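/* Translate a qed CM event into an iw_cm_event of the given type and deliver
 * it to the endpoint's cm_id event handler, if one is attached.
 */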
static void
qedr_iw_issue_event(void *context,
		    struct qed_iwarp_cm_event_params *params,
		    enum iw_cm_event_type event_type)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.status = params->status;
	event.event = event_type;

	if (params->cm_info) {
		event.ird = params->cm_info->ird;
		event.ord = params->cm_info->ord;
		event.private_data_len = params->cm_info->private_data_len;
		event.private_data = (void *)params->cm_info->private_data;
	}

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);
}

static void
qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	if (ep->cm_id)
		qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);

	kref_put(&ep->refcnt, qedr_iw_free_ep);
}

static void
qedr_iw_qp_event(void *context,
		 struct qed_iwarp_cm_event_params *params,
		 enum ib_event_type ib_event, char *str)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct ib_qp *ibqp = &ep->qp->ibqp;
	struct ib_event event;

	DP_NOTICE(dev, "QP error received: %s\n", str);

	if (ibqp->event_handler) {
		event.event = ib_event;
		event.device = ibqp->device;
		event.element.qp = ibqp;
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
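/* Disconnect events are handled asynchronously: a qedr_discon_work item
 * carries the endpoint and event details to qedr_iw_disconnect_worker(),
 * which runs on the device's iwarp_wq workqueue.
 */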
struct qedr_discon_work {
	struct work_struct work;
	struct qedr_iw_ep *ep;
	enum qed_iwarp_event_type event;
	int status;
};

static void qedr_iw_disconnect_worker(struct work_struct *work)
{
	struct qedr_discon_work *dwork =
	    container_of(work, struct qedr_discon_work, work);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_iw_ep *ep = dwork->ep;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp = ep->qp;
	struct iw_cm_event event;

	/* The qp won't be released until we release the ep.
	 * The ep's refcnt was increased before calling this
	 * function, therefore it is safe to access the qp.
	 */
	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
			     &qp->iwarp_cm_flags))
		goto out;

	memset(&event, 0, sizeof(event));
	event.status = dwork->status;
	event.event = IW_CM_EVENT_DISCONNECT;

	/* Success means a graceful disconnect was requested; modifying
	 * to SQD is translated to graceful disconnect. Otherwise a reset
	 * is sent.
	 */
	if (dwork->status)
		qp_params.new_state = QED_ROCE_QP_STATE_ERR;
	else
		qp_params.new_state = QED_ROCE_QP_STATE_SQD;

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);

	SET_FIELD(qp_params.modify_flags,
		  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);

	dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);

	complete(&ep->qp->iwarp_cm_comp);
out:
	kfree(dwork);
	kref_put(&ep->refcnt, qedr_iw_free_ep);
}
static void
qedr_iw_disconnect_event(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_discon_work *work;
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	/* We can't get a close event before disconnect, but since we're
	 * scheduling work on a workqueue we need to make sure close
	 * won't delete the ep, so we increase the refcnt.
	 */
	kref_get(&ep->refcnt);

	work->ep = ep;
	work->event = params->event;
	work->status = params->status;

	INIT_WORK(&work->work, qedr_iw_disconnect_worker);
	queue_work(dev->iwarp_wq, &work->work);
}

static void
qedr_iw_passive_complete(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	/* We will only reach the following state if MPA_REJECT was called on
	 * the passive side. In this case there will be no associated QP.
	 */
	if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
		DP_DEBUG(dev, QEDR_MSG_IWARP,
			 "PASSIVE connection refused releasing ep...\n");
		kref_put(&ep->refcnt, qedr_iw_free_ep);
		return;
	}

	complete(&ep->qp->iwarp_cm_comp);
	qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);

	if (params->status < 0)
		qedr_iw_close_event(context, params);
}

static void
qedr_iw_active_complete(void *context,
			struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	complete(&ep->qp->iwarp_cm_comp);
	qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);

	if (params->status < 0)
		kref_put(&ep->refcnt, qedr_iw_free_ep);
}

static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_send_rtr_in rtr_in;

	rtr_in.ep_context = params->ep_context;

	return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
}
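/* Central dispatcher for qed iWARP CM events: connection-management events
 * are routed to the helpers above, while QP error events are reported via
 * qedr_iw_qp_event().
 */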
static int
qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	switch (params->event) {
	case QED_IWARP_EVENT_MPA_REQUEST:
		qedr_iw_mpa_request(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
		qedr_iw_mpa_reply(context, params);
		break;
	case QED_IWARP_EVENT_PASSIVE_COMPLETE:
		qedr_iw_passive_complete(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_COMPLETE:
		qedr_iw_active_complete(context, params);
		break;
	case QED_IWARP_EVENT_DISCONNECT:
		qedr_iw_disconnect_event(context, params);
		break;
	case QED_IWARP_EVENT_CLOSE:
		qedr_iw_close_event(context, params);
		break;
	case QED_IWARP_EVENT_RQ_EMPTY:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_RQ_EMPTY");
		break;
	case QED_IWARP_EVENT_IRQ_FULL:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_IRQ_FULL");
		break;
	case QED_IWARP_EVENT_LLP_TIMEOUT:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_LLP_TIMEOUT");
		break;
	case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
		break;
	case QED_IWARP_EVENT_CQ_OVERFLOW:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_CQ_OVERFLOW");
		break;
	case QED_IWARP_EVENT_QP_CATASTROPHIC:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_QP_CATASTROPHIC");
		break;
	case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
		break;
	case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
		break;
	case QED_IWARP_EVENT_TERMINATE_RECEIVED:
		DP_NOTICE(dev, "Got terminate message\n");
		break;
	default:
		DP_NOTICE(dev, "Unknown event received %d\n", params->event);
		break;
	}
	return 0;
}
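/* VLAN lookup helpers: map a local IPv4/IPv6 address to the net_device that
 * owns it and return that device's VLAN id, or 0 when the address is not on
 * a VLAN device.
 */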
static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
{
	struct net_device *ndev;
	u16 vlan_id = 0;

	ndev = ip_dev_find(&init_net, htonl(addr[0]));

	if (ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(ndev);
		dev_put(ndev);
	}
	if (vlan_id == 0xffff)
		vlan_id = 0;
	return vlan_id;
}

static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
{
	struct net_device *ndev = NULL;
	struct in6_addr laddr6;
	u16 vlan_id = 0;
	int i;

	if (!IS_ENABLED(CONFIG_IPV6))
		return vlan_id;

	for (i = 0; i < 4; i++)
		laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ndev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
			vlan_id = rdma_vlan_dev_vlan_id(ndev);
			break;
		}
	}

	rcu_read_unlock();
	if (vlan_id == 0xffff)
		vlan_id = 0;

	return vlan_id;
}
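/* Resolve the destination MAC address for a connection: perform a route
 * lookup, and if the neighbour entry is valid copy its hardware address,
 * otherwise trigger neighbour resolution.
 */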
static int
qedr_addr4_resolve(struct qedr_dev *dev,
		   struct sockaddr_in *src_in,
		   struct sockaddr_in *dst_in, u8 *dst_mac)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	int rc = 0;

	rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
	if (IS_ERR(rt)) {
		DP_ERR(dev, "ip_route_output returned error\n");
		return -EINVAL;
	}

	neigh = dst_neigh_lookup(&rt->dst, &dst_ip);

	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	ip_rt_put(rt);

	return rc;
}

static int
qedr_addr6_resolve(struct qedr_dev *dev,
		   struct sockaddr_in6 *src_in,
		   struct sockaddr_in6 *dst_in, u8 *dst_mac)
{
	struct neighbour *neigh = NULL;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int rc = 0;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;

	dst = ip6_route_output(&init_net, NULL, &fl6);

	if ((!dst) || dst->error) {
		if (dst) {
			DP_ERR(dev,
			       "ip6_route_output returned dst->error = %d\n",
			       dst->error);
			dst_release(dst);
		}
		return -EINVAL;
	}
	neigh = dst_neigh_lookup(dst, &fl6.daddr);
	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	dst_release(dst);

	return rc;
}
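/* Look up a QP by number in the device's XArray and take a reference on it.
 * Returns NULL if no such QP exists; the caller is responsible for dropping
 * the reference.
 */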
static struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
{
	struct qedr_qp *qp;

	xa_lock(&dev->qps);
	qp = xa_load(&dev->qps, qpn);
	if (qp)
		kref_get(&qp->refcnt);
	xa_unlock(&dev->qps);

	return qp;
}
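/* iw_cm "connect" verb (active side): build the qed connection parameters
 * from the cm_id addresses, resolve the remote MAC address and hand the
 * connection off to the qed core.
 */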
int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qed_iwarp_connect_out out_params;
	struct qed_iwarp_connect_in in_params;
	struct qed_iwarp_cm_info *cm_info;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct qedr_iw_ep *ep;
	struct qedr_qp *qp;
	int rc = 0;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
		 ntohs(raddr->sin_port));

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Connect source address: %pISpc, remote address: %pISpc\n",
		 &cm_id->local_addr, &cm_id->remote_addr);

	if (!laddr->sin_port || !raddr->sin_port)
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->dev = dev;
	kref_init(&ep->refcnt);

	qp = qedr_iw_load_qp(dev, conn_param->qpn);
	if (!qp) {
		rc = -EINVAL;
		goto err;
	}

	ep->qp = qp;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	in_params.event_cb = qedr_iw_event_handler;
	in_params.cb_context = ep;

	cm_info = &in_params.cm_info;
	memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
	memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->remote_addr.ss_family == AF_INET) {
		cm_info->ip_version = QED_TCP_IPV4;

		cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
		cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info->remote_port = ntohs(raddr->sin_port);
		cm_info->local_port = ntohs(laddr->sin_port);
		cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);

		rc = qedr_addr4_resolve(dev, laddr, raddr,
					(u8 *)in_params.remote_mac_addr);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct iphdr) + sizeof(struct tcphdr));

	} else {
		in_params.cm_info.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			cm_info->remote_ip[i] =
			    ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
			cm_info->local_ip[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		cm_info->local_port = ntohs(laddr6->sin6_port);
		cm_info->remote_port = ntohs(raddr6->sin6_port);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

		cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);

		rc = qedr_addr6_resolve(dev, laddr6, raddr6,
					(u8 *)in_params.remote_mac_addr);
	}
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
		 conn_param->ord, conn_param->ird, conn_param->private_data,
		 conn_param->private_data_len, qp->rq_psn);

	cm_info->ord = conn_param->ord;
	cm_info->ird = conn_param->ird;
	cm_info->private_data = conn_param->private_data;
	cm_info->private_data_len = conn_param->private_data_len;
	in_params.qp = qp->qed_qp;
	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);

	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
			     &qp->iwarp_cm_flags))
		goto err; /* QP already being destroyed */

	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
	if (rc) {
		complete(&qp->iwarp_cm_comp);
		goto err;
	}

	return rc;

err:
	kref_put(&ep->refcnt, qedr_iw_free_ep);
	return rc;
}
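/* iw_cm "create listen" verb: register a listener with the qed core for the
 * cm_id's local address, port and VLAN.
 */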
int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qedr_iw_listener *listener;
	struct qed_iwarp_listen_in iparams;
	struct qed_iwarp_listen_out oparams;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	int rc;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Create Listener address: %pISpc\n", &cm_id->local_addr);

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->dev = dev;
	cm_id->add_ref(cm_id);
	listener->cm_id = cm_id;
	listener->backlog = backlog;

	iparams.cb_context = listener;
	iparams.event_cb = qedr_iw_event_handler;
	iparams.max_backlog = backlog;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->local_addr.ss_family == AF_INET) {
		iparams.ip_version = QED_TCP_IPV4;
		memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));

		iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
		iparams.port = ntohs(laddr->sin_port);
		iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
	} else {
		iparams.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			iparams.ip_addr[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		iparams.port = ntohs(laddr6->sin6_port);

		iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
	}
	rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		goto err;

	listener->qed_handle = oparams.handle;
	cm_id->provider_data = listener;
	return rc;

err:
	cm_id->rem_ref(cm_id);
	kfree(listener);
	return rc;
}

int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct qedr_iw_listener *listener = cm_id->provider_data;
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	int rc = 0;

	if (listener->qed_handle)
		rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
						    listener->qed_handle);

	cm_id->rem_ref(cm_id);
	return rc;
}
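/* iw_cm "accept" verb (passive side): attach the chosen QP to the endpoint
 * and tell the qed core to accept the pending MPA request.
 */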
int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp;
	struct qed_iwarp_accept_in params;
	int rc = 0;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);

	qp = qedr_iw_load_qp(dev, conn_param->qpn);
	if (!qp) {
		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
		return -EINVAL;
	}

	ep->qp = qp;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.qp = ep->qp->qed_qp;
	params.private_data = conn_param->private_data;
	params.private_data_len = conn_param->private_data_len;
	params.ird = conn_param->ird;
	params.ord = conn_param->ord;

	if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
			     &qp->iwarp_cm_flags))
		goto err; /* QP already destroyed */

	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
	if (rc) {
		complete(&qp->iwarp_cm_comp);
		goto err;
	}

	return rc;

err:
	kref_put(&ep->refcnt, qedr_iw_free_ep);

	return rc;
}
int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_reject_in params;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.private_data = pdata;
	params.private_data_len = pdata_len;
	ep->qp = NULL;

	return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
}
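/* QP reference-count helpers exported to the iw_cm layer; the QP structure
 * is freed only after the last reference is dropped.
 */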
void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	kref_get(&qp->refcnt);
}

void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	kref_put(&qp->refcnt, qedr_iw_free_qp);
}

struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	return xa_load(&dev->qps, qpn);
}