1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
46 #include <linux/qed/qed_if.h>
47 #include <linux/qed/qed_rdma_if.h>
50 #include <rdma/qedr-abi.h>
51 #include "qedr_roce_cm.h"
53 void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info
*info
)
55 info
->gsi_cons
= (info
->gsi_cons
+ 1) % info
->max_wr
;
58 void qedr_store_gsi_qp_cq(struct qedr_dev
*dev
, struct qedr_qp
*qp
,
59 struct ib_qp_init_attr
*attrs
)
61 dev
->gsi_qp_created
= 1;
62 dev
->gsi_sqcq
= get_qedr_cq(attrs
->send_cq
);
63 dev
->gsi_rqcq
= get_qedr_cq(attrs
->recv_cq
);
67 static void qedr_ll2_complete_tx_packet(void *cxt
, u8 connection_handle
,
69 dma_addr_t first_frag_addr
,
73 struct qedr_dev
*dev
= (struct qedr_dev
*)cxt
;
74 struct qed_roce_ll2_packet
*pkt
= cookie
;
75 struct qedr_cq
*cq
= dev
->gsi_sqcq
;
76 struct qedr_qp
*qp
= dev
->gsi_qp
;
79 DP_DEBUG(dev
, QEDR_MSG_GSI
,
80 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
81 dev
->gsi_sqcq
, dev
->gsi_rqcq
, qp
->sq
.gsi_cons
,
82 cq
->ibcq
.comp_handler
? "Yes" : "No");
84 dma_free_coherent(&dev
->pdev
->dev
, pkt
->header
.len
, pkt
->header
.vaddr
,
88 spin_lock_irqsave(&qp
->q_lock
, flags
);
89 qedr_inc_sw_gsi_cons(&qp
->sq
);
90 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
92 if (cq
->ibcq
.comp_handler
)
93 (*cq
->ibcq
.comp_handler
) (&cq
->ibcq
, cq
->ibcq
.cq_context
);
96 static void qedr_ll2_complete_rx_packet(void *cxt
,
97 struct qed_ll2_comp_rx_data
*data
)
99 struct qedr_dev
*dev
= (struct qedr_dev
*)cxt
;
100 struct qedr_cq
*cq
= dev
->gsi_rqcq
;
101 struct qedr_qp
*qp
= dev
->gsi_qp
;
104 spin_lock_irqsave(&qp
->q_lock
, flags
);
106 qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].rc
= data
->u
.data_length_error
?
108 qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].vlan
= data
->vlan
;
109 /* note: length stands for data length i.e. GRH is excluded */
110 qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].sg_list
[0].length
=
111 data
->length
.data_length
;
112 *((u32
*)&qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].smac
[0]) =
113 ntohl(data
->opaque_data_0
);
114 *((u16
*)&qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].smac
[4]) =
115 ntohs((u16
)data
->opaque_data_1
);
117 qedr_inc_sw_gsi_cons(&qp
->rq
);
119 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
121 if (cq
->ibcq
.comp_handler
)
122 (*cq
->ibcq
.comp_handler
) (&cq
->ibcq
, cq
->ibcq
.cq_context
);
125 static void qedr_ll2_release_rx_packet(void *cxt
, u8 connection_handle
,
126 void *cookie
, dma_addr_t rx_buf_addr
,
132 static void qedr_destroy_gsi_cq(struct qedr_dev
*dev
,
133 struct ib_qp_init_attr
*attrs
)
135 struct qed_rdma_destroy_cq_in_params iparams
;
136 struct qed_rdma_destroy_cq_out_params oparams
;
139 cq
= get_qedr_cq(attrs
->send_cq
);
140 iparams
.icid
= cq
->icid
;
141 dev
->ops
->rdma_destroy_cq(dev
->rdma_ctx
, &iparams
, &oparams
);
142 dev
->ops
->common
->chain_free(dev
->cdev
, &cq
->pbl
);
144 cq
= get_qedr_cq(attrs
->recv_cq
);
145 /* if a dedicated recv_cq was used, delete it too */
146 if (iparams
.icid
!= cq
->icid
) {
147 iparams
.icid
= cq
->icid
;
148 dev
->ops
->rdma_destroy_cq(dev
->rdma_ctx
, &iparams
, &oparams
);
149 dev
->ops
->common
->chain_free(dev
->cdev
, &cq
->pbl
);
153 static inline int qedr_check_gsi_qp_attrs(struct qedr_dev
*dev
,
154 struct ib_qp_init_attr
*attrs
)
156 if (attrs
->cap
.max_recv_sge
> QEDR_GSI_MAX_RECV_SGE
) {
158 " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
159 attrs
->cap
.max_recv_sge
, QEDR_GSI_MAX_RECV_SGE
);
163 if (attrs
->cap
.max_recv_wr
> QEDR_GSI_MAX_RECV_WR
) {
165 " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
166 attrs
->cap
.max_recv_wr
, QEDR_GSI_MAX_RECV_WR
);
170 if (attrs
->cap
.max_send_wr
> QEDR_GSI_MAX_SEND_WR
) {
172 " create gsi qp: failed. max_send_wr is too large %d>%d\n",
173 attrs
->cap
.max_send_wr
, QEDR_GSI_MAX_SEND_WR
);
180 static int qedr_ll2_post_tx(struct qedr_dev
*dev
,
181 struct qed_roce_ll2_packet
*pkt
)
183 enum qed_ll2_roce_flavor_type roce_flavor
;
184 struct qed_ll2_tx_pkt_info ll2_tx_pkt
;
188 memset(&ll2_tx_pkt
, 0, sizeof(ll2_tx_pkt
));
190 roce_flavor
= (pkt
->roce_mode
== ROCE_V1
) ?
191 QED_LL2_ROCE
: QED_LL2_RROCE
;
193 if (pkt
->roce_mode
== ROCE_V2_IPV4
)
194 ll2_tx_pkt
.enable_ip_cksum
= 1;
196 ll2_tx_pkt
.num_of_bds
= 1 /* hdr */ + pkt
->n_seg
;
198 ll2_tx_pkt
.tx_dest
= pkt
->tx_dest
;
199 ll2_tx_pkt
.qed_roce_flavor
= roce_flavor
;
200 ll2_tx_pkt
.first_frag
= pkt
->header
.baddr
;
201 ll2_tx_pkt
.first_frag_len
= pkt
->header
.len
;
202 ll2_tx_pkt
.cookie
= pkt
;
205 rc
= dev
->ops
->ll2_prepare_tx_packet(dev
->rdma_ctx
,
209 /* TX failed while posting header - release resources */
210 dma_free_coherent(&dev
->pdev
->dev
, pkt
->header
.len
,
211 pkt
->header
.vaddr
, pkt
->header
.baddr
);
214 DP_ERR(dev
, "roce ll2 tx: header failed (rc=%d)\n", rc
);
219 for (i
= 0; i
< pkt
->n_seg
; i
++) {
220 rc
= dev
->ops
->ll2_set_fragment_of_tx_packet(
223 pkt
->payload
[i
].baddr
,
224 pkt
->payload
[i
].len
);
227 /* if failed not much to do here, partial packet has
228 * been posted we can't free memory, will need to wait
231 DP_ERR(dev
, "ll2 tx: payload failed (rc=%d)\n", rc
);
239 static int qedr_ll2_stop(struct qedr_dev
*dev
)
243 if (dev
->gsi_ll2_handle
== QED_LL2_UNUSED_HANDLE
)
246 /* remove LL2 MAC address filter */
247 rc
= dev
->ops
->ll2_set_mac_filter(dev
->cdev
,
248 dev
->gsi_ll2_mac_address
, NULL
);
250 rc
= dev
->ops
->ll2_terminate_connection(dev
->rdma_ctx
,
251 dev
->gsi_ll2_handle
);
253 DP_ERR(dev
, "Failed to terminate LL2 connection (rc=%d)\n", rc
);
255 dev
->ops
->ll2_release_connection(dev
->rdma_ctx
, dev
->gsi_ll2_handle
);
257 dev
->gsi_ll2_handle
= QED_LL2_UNUSED_HANDLE
;
262 static int qedr_ll2_start(struct qedr_dev
*dev
,
263 struct ib_qp_init_attr
*attrs
, struct qedr_qp
*qp
)
265 struct qed_ll2_acquire_data data
;
266 struct qed_ll2_cbs cbs
;
269 /* configure and start LL2 */
270 cbs
.rx_comp_cb
= qedr_ll2_complete_rx_packet
;
271 cbs
.tx_comp_cb
= qedr_ll2_complete_tx_packet
;
272 cbs
.rx_release_cb
= qedr_ll2_release_rx_packet
;
273 cbs
.tx_release_cb
= qedr_ll2_complete_tx_packet
;
276 memset(&data
, 0, sizeof(data
));
277 data
.input
.conn_type
= QED_LL2_TYPE_ROCE
;
278 data
.input
.mtu
= dev
->ndev
->mtu
;
279 data
.input
.rx_num_desc
= attrs
->cap
.max_recv_wr
;
280 data
.input
.rx_drop_ttl0_flg
= true;
281 data
.input
.rx_vlan_removal_en
= false;
282 data
.input
.tx_num_desc
= attrs
->cap
.max_send_wr
;
283 data
.input
.tx_tc
= 0;
284 data
.input
.tx_dest
= QED_LL2_TX_DEST_NW
;
285 data
.input
.ai_err_packet_too_big
= QED_LL2_DROP_PACKET
;
286 data
.input
.ai_err_no_buf
= QED_LL2_DROP_PACKET
;
287 data
.input
.gsi_enable
= 1;
288 data
.p_connection_handle
= &dev
->gsi_ll2_handle
;
291 rc
= dev
->ops
->ll2_acquire_connection(dev
->rdma_ctx
, &data
);
294 "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
299 rc
= dev
->ops
->ll2_establish_connection(dev
->rdma_ctx
,
300 dev
->gsi_ll2_handle
);
303 "ll2 start: failed to establish LL2 connection (rc=%d)\n",
308 rc
= dev
->ops
->ll2_set_mac_filter(dev
->cdev
, NULL
, dev
->ndev
->dev_addr
);
315 dev
->ops
->ll2_terminate_connection(dev
->rdma_ctx
, dev
->gsi_ll2_handle
);
317 dev
->ops
->ll2_release_connection(dev
->rdma_ctx
, dev
->gsi_ll2_handle
);
322 int qedr_create_gsi_qp(struct qedr_dev
*dev
, struct ib_qp_init_attr
*attrs
,
327 rc
= qedr_check_gsi_qp_attrs(dev
, attrs
);
331 rc
= qedr_ll2_start(dev
, attrs
, qp
);
333 DP_ERR(dev
, "create gsi qp: failed on ll2 start. rc=%d\n", rc
);
339 qp
->rq
.max_wr
= attrs
->cap
.max_recv_wr
;
340 qp
->sq
.max_wr
= attrs
->cap
.max_send_wr
;
342 qp
->rqe_wr_id
= kcalloc(qp
->rq
.max_wr
, sizeof(*qp
->rqe_wr_id
),
346 qp
->wqe_wr_id
= kcalloc(qp
->sq
.max_wr
, sizeof(*qp
->wqe_wr_id
),
351 qedr_store_gsi_qp_cq(dev
, qp
, attrs
);
352 ether_addr_copy(dev
->gsi_ll2_mac_address
, dev
->ndev
->dev_addr
);
354 /* the GSI CQ is handled by the driver so remove it from the FW */
355 qedr_destroy_gsi_cq(dev
, attrs
);
356 dev
->gsi_rqcq
->cq_type
= QEDR_CQ_TYPE_GSI
;
358 DP_DEBUG(dev
, QEDR_MSG_GSI
, "created GSI QP %p\n", qp
);
363 kfree(qp
->rqe_wr_id
);
365 rc
= qedr_ll2_stop(dev
);
367 DP_ERR(dev
, "create gsi qp: failed destroy on create\n");
/* Destroy the GSI QP — its only firmware-side resource is the LL2
 * connection, so tearing that down is sufficient.
 */
int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	return qedr_ll2_stop(dev);
}
377 #define QEDR_MAX_UD_HEADER_SIZE (100)
378 #define QEDR_GSI_QPN (1)
379 static inline int qedr_gsi_build_header(struct qedr_dev
*dev
,
381 const struct ib_send_wr
*swr
,
382 struct ib_ud_header
*udh
,
385 bool has_vlan
= false, has_grh_ipv6
= true;
386 struct rdma_ah_attr
*ah_attr
= &get_qedr_ah(ud_wr(swr
)->ah
)->attr
;
387 const struct ib_global_route
*grh
= rdma_ah_read_grh(ah_attr
);
388 const struct ib_gid_attr
*sgid_attr
= grh
->sgid_attr
;
395 bool has_udp
= false;
398 rc
= rdma_read_gid_l2_fields(sgid_attr
, &vlan_id
, NULL
);
402 if (vlan_id
< VLAN_CFI_MASK
)
406 for (i
= 0; i
< swr
->num_sge
; ++i
)
407 send_size
+= swr
->sg_list
[i
].length
;
409 has_udp
= (sgid_attr
->gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
);
412 ether_type
= ETH_P_IBOE
;
413 *roce_mode
= ROCE_V1
;
414 } else if (ipv6_addr_v4mapped((struct in6_addr
*)&sgid_attr
->gid
)) {
417 ether_type
= ETH_P_IP
;
418 has_grh_ipv6
= false;
419 *roce_mode
= ROCE_V2_IPV4
;
423 ether_type
= ETH_P_IPV6
;
424 *roce_mode
= ROCE_V2_IPV6
;
427 rc
= ib_ud_header_init(send_size
, false, true, has_vlan
,
428 has_grh_ipv6
, ip_ver
, has_udp
, 0, udh
);
430 DP_ERR(dev
, "gsi post send: failed to init header\n");
434 /* ENET + VLAN headers */
435 ether_addr_copy(udh
->eth
.dmac_h
, ah_attr
->roce
.dmac
);
436 ether_addr_copy(udh
->eth
.smac_h
, dev
->ndev
->dev_addr
);
438 udh
->eth
.type
= htons(ETH_P_8021Q
);
439 udh
->vlan
.tag
= htons(vlan_id
);
440 udh
->vlan
.type
= htons(ether_type
);
442 udh
->eth
.type
= htons(ether_type
);
446 udh
->bth
.solicited_event
= !!(swr
->send_flags
& IB_SEND_SOLICITED
);
447 udh
->bth
.pkey
= QEDR_ROCE_PKEY_DEFAULT
;
448 udh
->bth
.destination_qpn
= htonl(ud_wr(swr
)->remote_qpn
);
449 udh
->bth
.psn
= htonl((qp
->sq_psn
++) & ((1 << 24) - 1));
450 udh
->bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
453 udh
->deth
.qkey
= htonl(0x80010000);
454 udh
->deth
.source_qpn
= htonl(QEDR_GSI_QPN
);
457 /* GRH / IPv6 header */
458 udh
->grh
.traffic_class
= grh
->traffic_class
;
459 udh
->grh
.flow_label
= grh
->flow_label
;
460 udh
->grh
.hop_limit
= grh
->hop_limit
;
461 udh
->grh
.destination_gid
= grh
->dgid
;
462 memcpy(&udh
->grh
.source_gid
.raw
, sgid_attr
->gid
.raw
,
463 sizeof(udh
->grh
.source_gid
.raw
));
468 udh
->ip4
.protocol
= IPPROTO_UDP
;
469 udh
->ip4
.tos
= htonl(grh
->flow_label
);
470 udh
->ip4
.frag_off
= htons(IP_DF
);
471 udh
->ip4
.ttl
= grh
->hop_limit
;
473 ipv4_addr
= qedr_get_ipv4_from_gid(sgid_attr
->gid
.raw
);
474 udh
->ip4
.saddr
= ipv4_addr
;
475 ipv4_addr
= qedr_get_ipv4_from_gid(grh
->dgid
.raw
);
476 udh
->ip4
.daddr
= ipv4_addr
;
477 /* note: checksum is calculated by the device */
482 udh
->udp
.sport
= htons(QEDR_ROCE_V2_UDP_SPORT
);
483 udh
->udp
.dport
= htons(ROCE_V2_UDP_DPORT
);
485 /* UDP length is untouched hence is zero */
490 static inline int qedr_gsi_build_packet(struct qedr_dev
*dev
,
492 const struct ib_send_wr
*swr
,
493 struct qed_roce_ll2_packet
**p_packet
)
495 u8 ud_header_buffer
[QEDR_MAX_UD_HEADER_SIZE
];
496 struct qed_roce_ll2_packet
*packet
;
497 struct pci_dev
*pdev
= dev
->pdev
;
498 int roce_mode
, header_size
;
499 struct ib_ud_header udh
;
504 rc
= qedr_gsi_build_header(dev
, qp
, swr
, &udh
, &roce_mode
);
508 header_size
= ib_ud_header_pack(&udh
, &ud_header_buffer
);
510 packet
= kzalloc(sizeof(*packet
), GFP_ATOMIC
);
514 packet
->header
.vaddr
= dma_alloc_coherent(&pdev
->dev
, header_size
,
515 &packet
->header
.baddr
,
517 if (!packet
->header
.vaddr
) {
522 if (ether_addr_equal(udh
.eth
.smac_h
, udh
.eth
.dmac_h
))
523 packet
->tx_dest
= QED_LL2_TX_DEST_LB
;
525 packet
->tx_dest
= QED_LL2_TX_DEST_NW
;
527 packet
->roce_mode
= roce_mode
;
528 memcpy(packet
->header
.vaddr
, ud_header_buffer
, header_size
);
529 packet
->header
.len
= header_size
;
530 packet
->n_seg
= swr
->num_sge
;
531 for (i
= 0; i
< packet
->n_seg
; i
++) {
532 packet
->payload
[i
].baddr
= swr
->sg_list
[i
].addr
;
533 packet
->payload
[i
].len
= swr
->sg_list
[i
].length
;
541 int qedr_gsi_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
542 const struct ib_send_wr
**bad_wr
)
544 struct qed_roce_ll2_packet
*pkt
= NULL
;
545 struct qedr_qp
*qp
= get_qedr_qp(ibqp
);
546 struct qedr_dev
*dev
= qp
->dev
;
550 if (qp
->state
!= QED_ROCE_QP_STATE_RTS
) {
553 "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
558 if (wr
->num_sge
> RDMA_MAX_SGE_PER_SQ_WQE
) {
559 DP_ERR(dev
, "gsi post send: num_sge is too large (%d>%d)\n",
560 wr
->num_sge
, RDMA_MAX_SGE_PER_SQ_WQE
);
565 if (wr
->opcode
!= IB_WR_SEND
) {
567 "gsi post send: failed due to unsupported opcode %d\n",
573 spin_lock_irqsave(&qp
->q_lock
, flags
);
575 rc
= qedr_gsi_build_packet(dev
, qp
, wr
, &pkt
);
577 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
581 rc
= qedr_ll2_post_tx(dev
, pkt
);
584 qp
->wqe_wr_id
[qp
->sq
.prod
].wr_id
= wr
->wr_id
;
585 qedr_inc_sw_prod(&qp
->sq
);
586 DP_DEBUG(qp
->dev
, QEDR_MSG_GSI
,
587 "gsi post send: opcode=%d, wr_id=%llx\n", wr
->opcode
,
590 DP_ERR(dev
, "gsi post send: failed to transmit (rc=%d)\n", rc
);
595 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
599 "gsi post send: failed second WR. Only one WR may be passed at a time\n");
611 int qedr_gsi_post_recv(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
612 const struct ib_recv_wr
**bad_wr
)
614 struct qedr_dev
*dev
= get_qedr_dev(ibqp
->device
);
615 struct qedr_qp
*qp
= get_qedr_qp(ibqp
);
619 if ((qp
->state
!= QED_ROCE_QP_STATE_RTR
) &&
620 (qp
->state
!= QED_ROCE_QP_STATE_RTS
)) {
623 "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
628 spin_lock_irqsave(&qp
->q_lock
, flags
);
631 if (wr
->num_sge
> QEDR_GSI_MAX_RECV_SGE
) {
633 "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
634 wr
->num_sge
, QEDR_GSI_MAX_RECV_SGE
);
638 rc
= dev
->ops
->ll2_post_rx_buffer(dev
->rdma_ctx
,
641 wr
->sg_list
[0].length
,
646 "gsi post recv: failed to post rx buffer (rc=%d)\n",
651 memset(&qp
->rqe_wr_id
[qp
->rq
.prod
], 0,
652 sizeof(qp
->rqe_wr_id
[qp
->rq
.prod
]));
653 qp
->rqe_wr_id
[qp
->rq
.prod
].sg_list
[0] = wr
->sg_list
[0];
654 qp
->rqe_wr_id
[qp
->rq
.prod
].wr_id
= wr
->wr_id
;
656 qedr_inc_sw_prod(&qp
->rq
);
661 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
665 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
670 int qedr_gsi_poll_cq(struct ib_cq
*ibcq
, int num_entries
, struct ib_wc
*wc
)
672 struct qedr_dev
*dev
= get_qedr_dev(ibcq
->device
);
673 struct qedr_cq
*cq
= get_qedr_cq(ibcq
);
674 struct qedr_qp
*qp
= dev
->gsi_qp
;
679 spin_lock_irqsave(&cq
->cq_lock
, flags
);
681 while (i
< num_entries
&& qp
->rq
.cons
!= qp
->rq
.gsi_cons
) {
682 memset(&wc
[i
], 0, sizeof(*wc
));
684 wc
[i
].qp
= &qp
->ibqp
;
685 wc
[i
].wr_id
= qp
->rqe_wr_id
[qp
->rq
.cons
].wr_id
;
686 wc
[i
].opcode
= IB_WC_RECV
;
687 wc
[i
].pkey_index
= 0;
688 wc
[i
].status
= (qp
->rqe_wr_id
[qp
->rq
.cons
].rc
) ?
689 IB_WC_GENERAL_ERR
: IB_WC_SUCCESS
;
690 /* 0 - currently only one recv sg is supported */
691 wc
[i
].byte_len
= qp
->rqe_wr_id
[qp
->rq
.cons
].sg_list
[0].length
;
692 wc
[i
].wc_flags
|= IB_WC_GRH
| IB_WC_IP_CSUM_OK
;
693 ether_addr_copy(wc
[i
].smac
, qp
->rqe_wr_id
[qp
->rq
.cons
].smac
);
694 wc
[i
].wc_flags
|= IB_WC_WITH_SMAC
;
696 vlan_id
= qp
->rqe_wr_id
[qp
->rq
.cons
].vlan
&
699 wc
[i
].wc_flags
|= IB_WC_WITH_VLAN
;
700 wc
[i
].vlan_id
= vlan_id
;
701 wc
[i
].sl
= (qp
->rqe_wr_id
[qp
->rq
.cons
].vlan
&
702 VLAN_PRIO_MASK
) >> VLAN_PRIO_SHIFT
;
705 qedr_inc_sw_cons(&qp
->rq
);
709 while (i
< num_entries
&& qp
->sq
.cons
!= qp
->sq
.gsi_cons
) {
710 memset(&wc
[i
], 0, sizeof(*wc
));
712 wc
[i
].qp
= &qp
->ibqp
;
713 wc
[i
].wr_id
= qp
->wqe_wr_id
[qp
->sq
.cons
].wr_id
;
714 wc
[i
].opcode
= IB_WC_SEND
;
715 wc
[i
].status
= IB_WC_SUCCESS
;
717 qedr_inc_sw_cons(&qp
->sq
);
721 spin_unlock_irqrestore(&cq
->cq_lock
, flags
);
723 DP_DEBUG(dev
, QEDR_MSG_GSI
,
724 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
725 num_entries
, i
, qp
->rq
.cons
, qp
->rq
.gsi_cons
, qp
->sq
.cons
,
726 qp
->sq
.gsi_cons
, qp
->ibqp
.qp_num
);