1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <linux/iommu.h>
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
46 #include <linux/qed/qed_if.h>
47 #include <linux/qed/qed_rdma_if.h>
50 #include <rdma/qedr-abi.h>
51 #include "qedr_roce_cm.h"
53 void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info
*info
)
55 info
->gsi_cons
= (info
->gsi_cons
+ 1) % info
->max_wr
;
58 void qedr_store_gsi_qp_cq(struct qedr_dev
*dev
, struct qedr_qp
*qp
,
59 struct ib_qp_init_attr
*attrs
)
61 dev
->gsi_qp_created
= 1;
62 dev
->gsi_sqcq
= get_qedr_cq(attrs
->send_cq
);
63 dev
->gsi_rqcq
= get_qedr_cq(attrs
->recv_cq
);
67 static void qedr_ll2_complete_tx_packet(void *cxt
, u8 connection_handle
,
69 dma_addr_t first_frag_addr
,
73 struct qedr_dev
*dev
= (struct qedr_dev
*)cxt
;
74 struct qed_roce_ll2_packet
*pkt
= cookie
;
75 struct qedr_cq
*cq
= dev
->gsi_sqcq
;
76 struct qedr_qp
*qp
= dev
->gsi_qp
;
79 DP_DEBUG(dev
, QEDR_MSG_GSI
,
80 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
81 dev
->gsi_sqcq
, dev
->gsi_rqcq
, qp
->sq
.gsi_cons
,
82 cq
->ibcq
.comp_handler
? "Yes" : "No");
84 dma_free_coherent(&dev
->pdev
->dev
, pkt
->header
.len
, pkt
->header
.vaddr
,
88 spin_lock_irqsave(&qp
->q_lock
, flags
);
89 qedr_inc_sw_gsi_cons(&qp
->sq
);
90 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
92 if (cq
->ibcq
.comp_handler
)
93 (*cq
->ibcq
.comp_handler
) (&cq
->ibcq
, cq
->ibcq
.cq_context
);
96 static void qedr_ll2_complete_rx_packet(void *cxt
,
97 struct qed_ll2_comp_rx_data
*data
)
99 struct qedr_dev
*dev
= (struct qedr_dev
*)cxt
;
100 struct qedr_cq
*cq
= dev
->gsi_rqcq
;
101 struct qedr_qp
*qp
= dev
->gsi_qp
;
104 spin_lock_irqsave(&qp
->q_lock
, flags
);
106 qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].rc
= data
->u
.data_length_error
?
108 qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].vlan
= data
->vlan
;
109 /* note: length stands for data length i.e. GRH is excluded */
110 qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].sg_list
[0].length
=
111 data
->length
.data_length
;
112 *((u32
*)&qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].smac
[0]) =
113 ntohl(data
->opaque_data_0
);
114 *((u16
*)&qp
->rqe_wr_id
[qp
->rq
.gsi_cons
].smac
[4]) =
115 ntohs((u16
)data
->opaque_data_1
);
117 qedr_inc_sw_gsi_cons(&qp
->rq
);
119 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
121 if (cq
->ibcq
.comp_handler
)
122 (*cq
->ibcq
.comp_handler
) (&cq
->ibcq
, cq
->ibcq
.cq_context
);
125 static void qedr_ll2_release_rx_packet(void *cxt
, u8 connection_handle
,
126 void *cookie
, dma_addr_t rx_buf_addr
,
132 static void qedr_destroy_gsi_cq(struct qedr_dev
*dev
,
133 struct ib_qp_init_attr
*attrs
)
135 struct qed_rdma_destroy_cq_in_params iparams
;
136 struct qed_rdma_destroy_cq_out_params oparams
;
139 cq
= get_qedr_cq(attrs
->send_cq
);
140 iparams
.icid
= cq
->icid
;
141 dev
->ops
->rdma_destroy_cq(dev
->rdma_ctx
, &iparams
, &oparams
);
142 dev
->ops
->common
->chain_free(dev
->cdev
, &cq
->pbl
);
144 cq
= get_qedr_cq(attrs
->recv_cq
);
145 /* if a dedicated recv_cq was used, delete it too */
146 if (iparams
.icid
!= cq
->icid
) {
147 iparams
.icid
= cq
->icid
;
148 dev
->ops
->rdma_destroy_cq(dev
->rdma_ctx
, &iparams
, &oparams
);
149 dev
->ops
->common
->chain_free(dev
->cdev
, &cq
->pbl
);
153 static inline int qedr_check_gsi_qp_attrs(struct qedr_dev
*dev
,
154 struct ib_qp_init_attr
*attrs
)
156 if (attrs
->cap
.max_recv_sge
> QEDR_GSI_MAX_RECV_SGE
) {
158 " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
159 attrs
->cap
.max_recv_sge
, QEDR_GSI_MAX_RECV_SGE
);
163 if (attrs
->cap
.max_recv_wr
> QEDR_GSI_MAX_RECV_WR
) {
165 " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
166 attrs
->cap
.max_recv_wr
, QEDR_GSI_MAX_RECV_WR
);
170 if (attrs
->cap
.max_send_wr
> QEDR_GSI_MAX_SEND_WR
) {
172 " create gsi qp: failed. max_send_wr is too large %d>%d\n",
173 attrs
->cap
.max_send_wr
, QEDR_GSI_MAX_SEND_WR
);
180 static int qedr_ll2_post_tx(struct qedr_dev
*dev
,
181 struct qed_roce_ll2_packet
*pkt
)
183 enum qed_ll2_roce_flavor_type roce_flavor
;
184 struct qed_ll2_tx_pkt_info ll2_tx_pkt
;
188 memset(&ll2_tx_pkt
, 0, sizeof(ll2_tx_pkt
));
190 roce_flavor
= (pkt
->roce_mode
== ROCE_V1
) ?
191 QED_LL2_ROCE
: QED_LL2_RROCE
;
193 if (pkt
->roce_mode
== ROCE_V2_IPV4
)
194 ll2_tx_pkt
.enable_ip_cksum
= 1;
196 ll2_tx_pkt
.num_of_bds
= 1 /* hdr */ + pkt
->n_seg
;
198 ll2_tx_pkt
.tx_dest
= pkt
->tx_dest
;
199 ll2_tx_pkt
.qed_roce_flavor
= roce_flavor
;
200 ll2_tx_pkt
.first_frag
= pkt
->header
.baddr
;
201 ll2_tx_pkt
.first_frag_len
= pkt
->header
.len
;
202 ll2_tx_pkt
.cookie
= pkt
;
205 rc
= dev
->ops
->ll2_prepare_tx_packet(dev
->rdma_ctx
,
209 /* TX failed while posting header - release resources */
210 dma_free_coherent(&dev
->pdev
->dev
, pkt
->header
.len
,
211 pkt
->header
.vaddr
, pkt
->header
.baddr
);
214 DP_ERR(dev
, "roce ll2 tx: header failed (rc=%d)\n", rc
);
219 for (i
= 0; i
< pkt
->n_seg
; i
++) {
220 rc
= dev
->ops
->ll2_set_fragment_of_tx_packet(
223 pkt
->payload
[i
].baddr
,
224 pkt
->payload
[i
].len
);
227 /* if failed not much to do here, partial packet has
228 * been posted we can't free memory, will need to wait
231 DP_ERR(dev
, "ll2 tx: payload failed (rc=%d)\n", rc
);
239 static int qedr_ll2_stop(struct qedr_dev
*dev
)
243 if (dev
->gsi_ll2_handle
== QED_LL2_UNUSED_HANDLE
)
246 /* remove LL2 MAC address filter */
247 rc
= dev
->ops
->ll2_set_mac_filter(dev
->cdev
,
248 dev
->gsi_ll2_mac_address
, NULL
);
250 rc
= dev
->ops
->ll2_terminate_connection(dev
->rdma_ctx
,
251 dev
->gsi_ll2_handle
);
253 DP_ERR(dev
, "Failed to terminate LL2 connection (rc=%d)\n", rc
);
255 dev
->ops
->ll2_release_connection(dev
->rdma_ctx
, dev
->gsi_ll2_handle
);
257 dev
->gsi_ll2_handle
= QED_LL2_UNUSED_HANDLE
;
262 static int qedr_ll2_start(struct qedr_dev
*dev
,
263 struct ib_qp_init_attr
*attrs
, struct qedr_qp
*qp
)
265 struct qed_ll2_acquire_data data
;
266 struct qed_ll2_cbs cbs
;
269 /* configure and start LL2 */
270 cbs
.rx_comp_cb
= qedr_ll2_complete_rx_packet
;
271 cbs
.tx_comp_cb
= qedr_ll2_complete_tx_packet
;
272 cbs
.rx_release_cb
= qedr_ll2_release_rx_packet
;
273 cbs
.tx_release_cb
= qedr_ll2_complete_tx_packet
;
276 memset(&data
, 0, sizeof(data
));
277 data
.input
.conn_type
= QED_LL2_TYPE_ROCE
;
278 data
.input
.mtu
= dev
->ndev
->mtu
;
279 data
.input
.rx_num_desc
= attrs
->cap
.max_recv_wr
;
280 data
.input
.rx_drop_ttl0_flg
= true;
281 data
.input
.rx_vlan_removal_en
= false;
282 data
.input
.tx_num_desc
= attrs
->cap
.max_send_wr
;
283 data
.input
.tx_tc
= 0;
284 data
.input
.tx_dest
= QED_LL2_TX_DEST_NW
;
285 data
.input
.ai_err_packet_too_big
= QED_LL2_DROP_PACKET
;
286 data
.input
.ai_err_no_buf
= QED_LL2_DROP_PACKET
;
287 data
.input
.gsi_enable
= 1;
288 data
.p_connection_handle
= &dev
->gsi_ll2_handle
;
291 rc
= dev
->ops
->ll2_acquire_connection(dev
->rdma_ctx
, &data
);
294 "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
299 rc
= dev
->ops
->ll2_establish_connection(dev
->rdma_ctx
,
300 dev
->gsi_ll2_handle
);
303 "ll2 start: failed to establish LL2 connection (rc=%d)\n",
308 rc
= dev
->ops
->ll2_set_mac_filter(dev
->cdev
, NULL
, dev
->ndev
->dev_addr
);
315 dev
->ops
->ll2_terminate_connection(dev
->rdma_ctx
, dev
->gsi_ll2_handle
);
317 dev
->ops
->ll2_release_connection(dev
->rdma_ctx
, dev
->gsi_ll2_handle
);
322 struct ib_qp
*qedr_create_gsi_qp(struct qedr_dev
*dev
,
323 struct ib_qp_init_attr
*attrs
,
328 rc
= qedr_check_gsi_qp_attrs(dev
, attrs
);
332 rc
= qedr_ll2_start(dev
, attrs
, qp
);
334 DP_ERR(dev
, "create gsi qp: failed on ll2 start. rc=%d\n", rc
);
340 qp
->rq
.max_wr
= attrs
->cap
.max_recv_wr
;
341 qp
->sq
.max_wr
= attrs
->cap
.max_send_wr
;
343 qp
->rqe_wr_id
= kcalloc(qp
->rq
.max_wr
, sizeof(*qp
->rqe_wr_id
),
347 qp
->wqe_wr_id
= kcalloc(qp
->sq
.max_wr
, sizeof(*qp
->wqe_wr_id
),
352 qedr_store_gsi_qp_cq(dev
, qp
, attrs
);
353 ether_addr_copy(dev
->gsi_ll2_mac_address
, dev
->ndev
->dev_addr
);
355 /* the GSI CQ is handled by the driver so remove it from the FW */
356 qedr_destroy_gsi_cq(dev
, attrs
);
357 dev
->gsi_rqcq
->cq_type
= QEDR_CQ_TYPE_GSI
;
358 dev
->gsi_rqcq
->cq_type
= QEDR_CQ_TYPE_GSI
;
360 DP_DEBUG(dev
, QEDR_MSG_GSI
, "created GSI QP %p\n", qp
);
365 kfree(qp
->rqe_wr_id
);
367 rc
= qedr_ll2_stop(dev
);
369 DP_ERR(dev
, "create gsi qp: failed destroy on create\n");
371 return ERR_PTR(-ENOMEM
);
/* Destroy the GSI QP. The QP itself has no FW object; tearing down the
 * LL2 connection is all that is required.
 */
int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
	return qedr_ll2_stop(dev);
}
379 #define QEDR_MAX_UD_HEADER_SIZE (100)
380 #define QEDR_GSI_QPN (1)
381 static inline int qedr_gsi_build_header(struct qedr_dev
*dev
,
383 struct ib_send_wr
*swr
,
384 struct ib_ud_header
*udh
,
387 bool has_vlan
= false, has_grh_ipv6
= true;
388 struct rdma_ah_attr
*ah_attr
= &get_qedr_ah(ud_wr(swr
)->ah
)->attr
;
389 const struct ib_global_route
*grh
= rdma_ah_read_grh(ah_attr
);
394 struct ib_gid_attr sgid_attr
;
398 bool has_udp
= false;
402 for (i
= 0; i
< swr
->num_sge
; ++i
)
403 send_size
+= swr
->sg_list
[i
].length
;
405 rc
= ib_get_cached_gid(qp
->ibqp
.device
, rdma_ah_get_port_num(ah_attr
),
406 grh
->sgid_index
, &sgid
, &sgid_attr
);
409 "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
410 rdma_ah_get_port_num(ah_attr
),
415 if (sgid_attr
.ndev
) {
416 vlan_id
= rdma_vlan_dev_vlan_id(sgid_attr
.ndev
);
417 if (vlan_id
< VLAN_CFI_MASK
)
420 dev_put(sgid_attr
.ndev
);
423 if (!memcmp(&sgid
, &zgid
, sizeof(sgid
))) {
424 DP_ERR(dev
, "gsi post send: GID not found GID index %d\n",
429 has_udp
= (sgid_attr
.gid_type
== IB_GID_TYPE_ROCE_UDP_ENCAP
);
432 ether_type
= ETH_P_IBOE
;
433 *roce_mode
= ROCE_V1
;
434 } else if (ipv6_addr_v4mapped((struct in6_addr
*)&sgid
)) {
437 ether_type
= ETH_P_IP
;
438 has_grh_ipv6
= false;
439 *roce_mode
= ROCE_V2_IPV4
;
443 ether_type
= ETH_P_IPV6
;
444 *roce_mode
= ROCE_V2_IPV6
;
447 rc
= ib_ud_header_init(send_size
, false, true, has_vlan
,
448 has_grh_ipv6
, ip_ver
, has_udp
, 0, udh
);
450 DP_ERR(dev
, "gsi post send: failed to init header\n");
454 /* ENET + VLAN headers */
455 ether_addr_copy(udh
->eth
.dmac_h
, ah_attr
->roce
.dmac
);
456 ether_addr_copy(udh
->eth
.smac_h
, dev
->ndev
->dev_addr
);
458 udh
->eth
.type
= htons(ETH_P_8021Q
);
459 udh
->vlan
.tag
= htons(vlan_id
);
460 udh
->vlan
.type
= htons(ether_type
);
462 udh
->eth
.type
= htons(ether_type
);
466 udh
->bth
.solicited_event
= !!(swr
->send_flags
& IB_SEND_SOLICITED
);
467 udh
->bth
.pkey
= QEDR_ROCE_PKEY_DEFAULT
;
468 udh
->bth
.destination_qpn
= htonl(ud_wr(swr
)->remote_qpn
);
469 udh
->bth
.psn
= htonl((qp
->sq_psn
++) & ((1 << 24) - 1));
470 udh
->bth
.opcode
= IB_OPCODE_UD_SEND_ONLY
;
473 udh
->deth
.qkey
= htonl(0x80010000);
474 udh
->deth
.source_qpn
= htonl(QEDR_GSI_QPN
);
477 /* GRH / IPv6 header */
478 udh
->grh
.traffic_class
= grh
->traffic_class
;
479 udh
->grh
.flow_label
= grh
->flow_label
;
480 udh
->grh
.hop_limit
= grh
->hop_limit
;
481 udh
->grh
.destination_gid
= grh
->dgid
;
482 memcpy(&udh
->grh
.source_gid
.raw
, &sgid
.raw
,
483 sizeof(udh
->grh
.source_gid
.raw
));
488 udh
->ip4
.protocol
= IPPROTO_UDP
;
489 udh
->ip4
.tos
= htonl(grh
->flow_label
);
490 udh
->ip4
.frag_off
= htons(IP_DF
);
491 udh
->ip4
.ttl
= grh
->hop_limit
;
493 ipv4_addr
= qedr_get_ipv4_from_gid(sgid
.raw
);
494 udh
->ip4
.saddr
= ipv4_addr
;
495 ipv4_addr
= qedr_get_ipv4_from_gid(grh
->dgid
.raw
);
496 udh
->ip4
.daddr
= ipv4_addr
;
497 /* note: checksum is calculated by the device */
502 udh
->udp
.sport
= htons(QEDR_ROCE_V2_UDP_SPORT
);
503 udh
->udp
.dport
= htons(ROCE_V2_UDP_DPORT
);
505 /* UDP length is untouched hence is zero */
510 static inline int qedr_gsi_build_packet(struct qedr_dev
*dev
,
512 struct ib_send_wr
*swr
,
513 struct qed_roce_ll2_packet
**p_packet
)
515 u8 ud_header_buffer
[QEDR_MAX_UD_HEADER_SIZE
];
516 struct qed_roce_ll2_packet
*packet
;
517 struct pci_dev
*pdev
= dev
->pdev
;
518 int roce_mode
, header_size
;
519 struct ib_ud_header udh
;
524 rc
= qedr_gsi_build_header(dev
, qp
, swr
, &udh
, &roce_mode
);
528 header_size
= ib_ud_header_pack(&udh
, &ud_header_buffer
);
530 packet
= kzalloc(sizeof(*packet
), GFP_ATOMIC
);
534 packet
->header
.vaddr
= dma_alloc_coherent(&pdev
->dev
, header_size
,
535 &packet
->header
.baddr
,
537 if (!packet
->header
.vaddr
) {
542 if (ether_addr_equal(udh
.eth
.smac_h
, udh
.eth
.dmac_h
))
543 packet
->tx_dest
= QED_ROCE_LL2_TX_DEST_LB
;
545 packet
->tx_dest
= QED_ROCE_LL2_TX_DEST_NW
;
547 packet
->roce_mode
= roce_mode
;
548 memcpy(packet
->header
.vaddr
, ud_header_buffer
, header_size
);
549 packet
->header
.len
= header_size
;
550 packet
->n_seg
= swr
->num_sge
;
551 for (i
= 0; i
< packet
->n_seg
; i
++) {
552 packet
->payload
[i
].baddr
= swr
->sg_list
[i
].addr
;
553 packet
->payload
[i
].len
= swr
->sg_list
[i
].length
;
561 int qedr_gsi_post_send(struct ib_qp
*ibqp
, struct ib_send_wr
*wr
,
562 struct ib_send_wr
**bad_wr
)
564 struct qed_roce_ll2_packet
*pkt
= NULL
;
565 struct qedr_qp
*qp
= get_qedr_qp(ibqp
);
566 struct qedr_dev
*dev
= qp
->dev
;
570 if (qp
->state
!= QED_ROCE_QP_STATE_RTS
) {
573 "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
578 if (wr
->num_sge
> RDMA_MAX_SGE_PER_SQ_WQE
) {
579 DP_ERR(dev
, "gsi post send: num_sge is too large (%d>%d)\n",
580 wr
->num_sge
, RDMA_MAX_SGE_PER_SQ_WQE
);
585 if (wr
->opcode
!= IB_WR_SEND
) {
587 "gsi post send: failed due to unsupported opcode %d\n",
593 spin_lock_irqsave(&qp
->q_lock
, flags
);
595 rc
= qedr_gsi_build_packet(dev
, qp
, wr
, &pkt
);
597 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
601 rc
= qedr_ll2_post_tx(dev
, pkt
);
604 qp
->wqe_wr_id
[qp
->sq
.prod
].wr_id
= wr
->wr_id
;
605 qedr_inc_sw_prod(&qp
->sq
);
606 DP_DEBUG(qp
->dev
, QEDR_MSG_GSI
,
607 "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
608 wr
->opcode
, in_irq(), irqs_disabled(), wr
->wr_id
);
610 DP_ERR(dev
, "gsi post send: failed to transmit (rc=%d)\n", rc
);
615 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
619 "gsi post send: failed second WR. Only one WR may be passed at a time\n");
631 int qedr_gsi_post_recv(struct ib_qp
*ibqp
, struct ib_recv_wr
*wr
,
632 struct ib_recv_wr
**bad_wr
)
634 struct qedr_dev
*dev
= get_qedr_dev(ibqp
->device
);
635 struct qedr_qp
*qp
= get_qedr_qp(ibqp
);
639 if ((qp
->state
!= QED_ROCE_QP_STATE_RTR
) &&
640 (qp
->state
!= QED_ROCE_QP_STATE_RTS
)) {
643 "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
648 spin_lock_irqsave(&qp
->q_lock
, flags
);
651 if (wr
->num_sge
> QEDR_GSI_MAX_RECV_SGE
) {
653 "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
654 wr
->num_sge
, QEDR_GSI_MAX_RECV_SGE
);
658 rc
= dev
->ops
->ll2_post_rx_buffer(dev
->rdma_ctx
,
661 wr
->sg_list
[0].length
,
666 "gsi post recv: failed to post rx buffer (rc=%d)\n",
671 memset(&qp
->rqe_wr_id
[qp
->rq
.prod
], 0,
672 sizeof(qp
->rqe_wr_id
[qp
->rq
.prod
]));
673 qp
->rqe_wr_id
[qp
->rq
.prod
].sg_list
[0] = wr
->sg_list
[0];
674 qp
->rqe_wr_id
[qp
->rq
.prod
].wr_id
= wr
->wr_id
;
676 qedr_inc_sw_prod(&qp
->rq
);
681 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
685 spin_unlock_irqrestore(&qp
->q_lock
, flags
);
690 int qedr_gsi_poll_cq(struct ib_cq
*ibcq
, int num_entries
, struct ib_wc
*wc
)
692 struct qedr_dev
*dev
= get_qedr_dev(ibcq
->device
);
693 struct qedr_cq
*cq
= get_qedr_cq(ibcq
);
694 struct qedr_qp
*qp
= dev
->gsi_qp
;
699 spin_lock_irqsave(&cq
->cq_lock
, flags
);
701 while (i
< num_entries
&& qp
->rq
.cons
!= qp
->rq
.gsi_cons
) {
702 memset(&wc
[i
], 0, sizeof(*wc
));
704 wc
[i
].qp
= &qp
->ibqp
;
705 wc
[i
].wr_id
= qp
->rqe_wr_id
[qp
->rq
.cons
].wr_id
;
706 wc
[i
].opcode
= IB_WC_RECV
;
707 wc
[i
].pkey_index
= 0;
708 wc
[i
].status
= (qp
->rqe_wr_id
[qp
->rq
.cons
].rc
) ?
709 IB_WC_GENERAL_ERR
: IB_WC_SUCCESS
;
710 /* 0 - currently only one recv sg is supported */
711 wc
[i
].byte_len
= qp
->rqe_wr_id
[qp
->rq
.cons
].sg_list
[0].length
;
712 wc
[i
].wc_flags
|= IB_WC_GRH
| IB_WC_IP_CSUM_OK
;
713 ether_addr_copy(wc
[i
].smac
, qp
->rqe_wr_id
[qp
->rq
.cons
].smac
);
714 wc
[i
].wc_flags
|= IB_WC_WITH_SMAC
;
716 vlan_id
= qp
->rqe_wr_id
[qp
->rq
.cons
].vlan
&
719 wc
[i
].wc_flags
|= IB_WC_WITH_VLAN
;
720 wc
[i
].vlan_id
= vlan_id
;
721 wc
[i
].sl
= (qp
->rqe_wr_id
[qp
->rq
.cons
].vlan
&
722 VLAN_PRIO_MASK
) >> VLAN_PRIO_SHIFT
;
725 qedr_inc_sw_cons(&qp
->rq
);
729 while (i
< num_entries
&& qp
->sq
.cons
!= qp
->sq
.gsi_cons
) {
730 memset(&wc
[i
], 0, sizeof(*wc
));
732 wc
[i
].qp
= &qp
->ibqp
;
733 wc
[i
].wr_id
= qp
->wqe_wr_id
[qp
->sq
.cons
].wr_id
;
734 wc
[i
].opcode
= IB_WC_SEND
;
735 wc
[i
].status
= IB_WC_SUCCESS
;
737 qedr_inc_sw_cons(&qp
->sq
);
741 spin_unlock_irqrestore(&cq
->cq_lock
, flags
);
743 DP_DEBUG(dev
, QEDR_MSG_GSI
,
744 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
745 num_entries
, i
, qp
->rq
.cons
, qp
->rq
.gsi_cons
, qp
->sq
.cons
,
746 qp
->sq
.gsi_cons
, qp
->ibqp
.qp_num
);