// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed_dev_api.h"
#include "qed_reg_addr.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registered)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registered)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
struct qed_cb_ll2_info {
	/* Lock protecting LL2 buffer lists in sleepless context */
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;

struct qed_ll2_buffer {
	struct list_head list;
static void qed_ll2b_complete_tx_packet(void *cxt,
					dma_addr_t first_frag_addr,
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
	dev_kfree_skb_any(skb);

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	list_del(&buffer->list);

	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
static void qed_ll2b_complete_rx_packet(void *cxt,
					struct qed_ll2_comp_rx_data *data)
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;

		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)

	/* Allocate a replacement for buffer; Reuse upon failure */
	rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,

	/* If need to reuse or there's no replacement buffer, repost this */
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	skb = build_skb(buffer->data, 0);
		DP_INFO(cdev, "Failed to build SKB\n");

	data->u.placement_offset += NET_SKB_PAD;
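	/* The buffer data was DMA-mapped at (*data + NET_SKB_PAD) in
	 * qed_ll2_alloc_buffer(), so the placement offset reported by the
	 * firmware is advanced by NET_SKB_PAD here before the skb headroom
	 * is reserved below.
	 */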
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_1);
		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
				    QED_MSG_LL2 | QED_MSG_STORAGE),
			   "Dropping the packet\n");

	/* Update buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

	rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);
		qed_ll2_dealloc_buffer(cdev, buffer);
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)

	if (!p_hwfn->p_ll2_info)

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

		mutex_lock(&p_ll2_conn->mutex);
	if (p_ll2_conn->b_active)
		mutex_unlock(&p_ll2_conn->mutex);

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   u8 connection_handle)
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);

	p_tx = &p_ll2_conn->tx_queue;

	spin_lock_irqsave(&p_tx->lock, flags);
	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		spin_unlock_irqrestore(&p_tx->lock, flags);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
		spin_lock_irqsave(&p_tx->lock, flags);
	spin_unlock_irqrestore(&p_tx->lock, flags);
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
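	/* The consumer delta is computed with signed 16-bit arithmetic so the
	 * count stays correct when the firmware consumer index wraps around
	 * the 16-bit chain index space.
	 */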
	if (list_empty(&p_tx->active_descq))

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
				  "Rest of BDs does not cover whole packet\n");

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);

	p_tx->b_completing_packet = false;
	spin_unlock_irqrestore(&p_tx->lock, flags);
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
		le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
			struct qed_ll2_info *p_ll2_conn,
			union core_rx_cqe_union *p_cqe,
			unsigned long *p_lock_flags)
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_slow_path_cqe *sp_cqe;

	sp_cqe = &p_cqe->rx_cqe_sp;
	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
			  sp_cqe->ramrod_cmd_id);

	if (!p_ll2_conn->cbs.slowpath_cb) {
			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
				    le32_to_cpu(sp_cqe->opaque_data.data[1]));

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
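	/* The Rx queue lock is dropped around the client's rx_comp_cb and
	 * re-taken afterwards, so the callback may repost buffers through
	 * qed_ll2_post_rx_buffer(), which takes the same lock.
	 */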
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

			(union core_rx_cqe_union *)
			qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,

	spin_unlock_irqrestore(&p_rx->lock, flags);
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
		spin_unlock_irqrestore(&p_rx->lock, flags);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      rx_buf_addr, b_last);
		spin_lock_irqsave(&p_rx->lock, flags);
	spin_unlock_irqrestore(&p_rx->lock, flags);
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
				struct core_rx_slow_path_cqe *p_cqe)
	struct ooo_opaque *iscsi_ooo;

	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)

	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)

	/* Need to make a flush */
	cid = le32_to_cpu(iscsi_ooo->cid);
	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",

		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
				  "LL2 OOO RX chain has no submitted buffers\n"

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
					  "LL2 OOO RX packet is not valid\n");
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
				qed_ooo_add_new_buffer(p_hwfn,
						       iscsi_ooo->ooo_isle +
				qed_ooo_join_isles(p_hwfn,
						   cid, iscsi_ooo->ooo_isle);
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	dma_addr_t first_frag;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		switch (p_ll2_conn->tx_dest) {
		case CORE_TX_DEST_NW:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
		case CORE_TX_DEST_LB:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
		case CORE_TX_DEST_DROP:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
	struct qed_ooo_buffer *p_buffer;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_buffer->rx_buffer_phys_addr,
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

		if (list_empty(&p_tx->active_descq))

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);

		if (p_pkt->bd_used != 1) {
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",

		list_del(&p_pkt->list_entry);

		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
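		/* Once a repost fails, b_dont_submit_rx stays set and the
		 * remaining completed OOO buffers are returned to the free
		 * pool instead of being reposted to the Rx queue.
		 */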
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
		   "Stopping LL2 OOO queue [%02x]\n", *handle);

	qed_ll2_terminate_connection(p_hwfn, *handle);
	qed_ll2_release_connection(p_hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;
	memset(p_ramrod, 0, sizeof(*p_ramrod));
	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_stripping_en =
		p_ll2_conn->input.rx_vlan_removal_en;

	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
		p_ramrod->report_outer_vlan = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;

	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
	    conn_type != QED_LL2_TYPE_IWARP) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	p_ramrod->zero_prod_flg = 1;

	return qed_spq_post(p_hwfn, p_ent, NULL);
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		p_ll2_conn->tx_stats_en = 0;
		p_ll2_conn->tx_stats_en = 1;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
	case QED_LL2_TYPE_ISCSI:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
	case QED_LL2_TYPE_IWARP:
		p_ramrod->conn_type = PROTOCOLID_IWARP;
	case QED_LL2_TYPE_OOO:
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			p_ramrod->conn_type = PROTOCOLID_ISCSI;
			p_ramrod->conn_type = PROTOCOLID_IWARP;
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
				 &p_tx->db_msg, DB_REC_WIDTH_32B,
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);

	return qed_spq_post(p_hwfn, p_ent, NULL);
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
	struct qed_chain_init_params params = {
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= p_ll2_info->input.rx_num_desc,
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_ll2_rx_packet *p_descq;

	if (!p_ll2_info->input.rx_num_desc)

	params.mode = QED_CHAIN_MODE_NEXT_PTR;
	params.elem_size = sizeof(struct core_rx_bd);

	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
	p_ll2_info->rx_queue.descq_array = p_descq;

	params.mode = QED_CHAIN_MODE_PBL;
	params.elem_size = sizeof(struct core_rx_fast_path_cqe);

	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
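/* Note: the Rx BD ring (rxq_chain) is allocated as a next-pointer chain of
 * struct core_rx_bd elements, while the Rx completion ring (rcq_chain) is a
 * PBL-based chain of struct core_rx_fast_path_cqe elements; both are sized
 * from the caller-supplied rx_num_desc.
 */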
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= p_ll2_info->input.tx_num_desc,
		.elem_size	= sizeof(struct core_tx_bd),
	struct qed_ll2_tx_packet *p_descq;

	if (!p_ll2_info->input.tx_num_desc)

	rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	/* All bds_set elements are flexibly added. */
	desc_size = struct_size(p_descq, bds_set,
				p_ll2_info->input.tx_max_bds_per_packet);

	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);

	p_ll2_info->tx_queue.descq_mem = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

		  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
		  p_ll2_info->input.tx_num_desc);
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
	struct qed_ooo_buffer *p_buf = NULL;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
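		/* The buffer size starts from the MTU plus a fixed overhead and
		 * is then rounded up to a multiple of ETH_CACHE_LINE_SIZE with
		 * the usual (x + align - 1) & ~(align - 1) idiom.
		 */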
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;
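/* qed_ll2_set_cbs() treats rx_comp_cb, rx_release_cb, tx_comp_cb,
 * tx_release_cb and the cookie as mandatory, while slowpath_cb is copied
 * without being validated, i.e. it is optional.
 */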
static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
					struct qed_ll2_acquire_data *data,
					u8 *start_idx, u8 *last_idx)
	/* LL2 queue handles will be split as follows:
	 * first the legacy queues, and then the ctx based ones.
	 */
	if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
		*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
		*last_idx = *start_idx +
			QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
		/* QED_LL2_RX_TYPE_CTX */
		*start_idx = QED_LL2_CTX_CONN_BASE_PF;
		*last_idx = *start_idx +
			QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_DO_NOTHING;
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, first_idx, last_idx, *p_tx_max;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)

	_qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);

	/* Find a free connection to be used */
	for (i = first_idx; i < last_idx; i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	switch (data->input.tx_dest) {
	case QED_LL2_TX_DEST_NW:
		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
	case QED_LL2_TX_DEST_LB:
		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
	case QED_LL2_TX_DEST_DROP:
		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;

	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
	    data->input.secondary_queue)
		p_ll2_info->main_func_queue = false;
		p_ll2_info->main_func_queue = true;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registered = true;

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registered = true;

	*data->p_connection_handle = i;

	qed_ll2_release_connection(p_hwfn, i);
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);

	if (p_ll2_conn->rx_queue.ctx_based) {
		rc = qed_db_recovery_add(p_hwfn->cdev,
					 p_ll2_conn->rx_queue.set_prod_addr,
					 &p_ll2_conn->rx_queue.db_data,
					 DB_REC_WIDTH_64B, DB_REC_KERNEL);

qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
	if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
		return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;

	/* QED_LL2_RX_TYPE_CTX
	 * FW distinguishes between the legacy queues (ram based) and the
	 * ctx based queues by the queue_id.
	 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
	 * and the queue ids above that are ctx based.
	 */
	qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
	      MAX_NUM_LL2_RX_RAM_QUEUES;

	/* See comment on the acquire connection for how the ll2
	 * queue handles are divided.
	 */
	qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
	struct e4_core_conn_context *p_cxt;
	struct qed_ll2_tx_packet *p_pkt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_cxt_info cxt_info;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	/* All bds_set elements are flexibly added. */
	desc_size = struct_size(p_pkt, bds_set,
				p_ll2_conn->input.tx_max_bds_per_packet);

	for (i = 0; i < capacity; i++) {
		p_pkt = p_tx->descq_mem + desc_size * i;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);

	cxt_info.iid = p_ll2_conn->cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",

	p_cxt = cxt_info.p_cxt;

	memset(p_cxt, 0, sizeof(*p_cxt));

	qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
					 p_ll2_conn->input.rx_conn_type);
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
		   p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);

	if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
		p_rx->set_prod_addr = p_hwfn->regview +
		    GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
		/* QED_LL2_RX_TYPE_CTX - using doorbell */
		p_rx->ctx_based = 1;

		p_rx->set_prod_addr = p_hwfn->doorbells +
			p_hwfn->dpi_start_offset +
			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);

		/* prepare db data */
		p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
		SET_FIELD(p_rx->db_data.params,
			  CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
		SET_FIELD(p_rx->db_data.params,
			  CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);

	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
			      qed_db_addr(p_ll2_conn->cid,

	/* prepare db data */
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
						    QED_LLH_FILTER_ETHERTYPE,
		qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
					    QED_LLH_FILTER_ETHERTYPE,

	qed_ptt_release(p_hwfn, p_ptt);
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);

	/* This handles the supplied packet [if there is one] */
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	if (p_rx->ctx_based) {
		/* update producer by giving a doorbell */
		p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
		p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
		/* Make sure chain element is updated before ringing the
		 * doorbell.
		 */
		DIRECT_REG_WR64(p_rx->set_prod_addr,
				*((u64 *)&p_rx->db_data));
		rx_prod.bd_prod = cpu_to_le16(bd_prod);
		rx_prod.cqe_prod = cpu_to_le16(cq_prod);

		/* Make sure chain element is updated before ringing the
		 * doorbell.
		 */
		DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   u16 buf_len, void *cookie, u8 notify_fw)
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);

	if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
	    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
		p_data = qed_chain_produce(&p_rx->rxq_chain);
		p_curb = (struct core_rx_bd_with_buff_len *)p_data;
		qed_chain_produce(&p_rx->rcq_chain);

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);

	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);

	spin_unlock_irqrestore(&p_rx->lock, flags);
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
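	/* Only the first fragment is recorded here; additional fragments are
	 * appended later through qed_ll2_set_fragment_of_tx_packet(), which
	 * fills the next bds_set slot and advances cur_send_frag_num the same
	 * way.
	 */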
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE

	switch (pkt->tx_dest) {
	case QED_LL2_TX_DEST_NW:
		tx_dest = CORE_TX_DEST_NW;
	case QED_LL2_TX_DEST_LB:
		tx_dest = CORE_TX_DEST_LB;
	case QED_LL2_TX_DEST_DROP:
		tx_dest = CORE_TX_DEST_DROP;
		tx_dest = CORE_TX_DEST_LB;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
	    p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
		start_bd->nw_vlan_or_lb_echo =
		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
			pkt->remove_stag = true;

	bitfield1 = le16_to_cpu(start_bd->bitfield1);
	SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w);
	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	start_bd->bitfield1 = cpu_to_le16(bitfield1);

	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
		  !!(pkt->remove_stag));

	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->input.conn_type,
		   pkt->first_frag_len,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);

	p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));

		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);

	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

	spin_unlock_irqrestore(&p_tx->lock, flags);
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);

	if (!p_ll2_conn->tx_queue.cur_send_packet)

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registered = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);

		qed_ll2_txq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registered = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */

		if (p_ll2_conn->rx_queue.ctx_based)
			qed_db_recovery_del(p_hwfn->cdev,
					    p_ll2_conn->rx_queue.set_prod_addr,
					    &p_ll2_conn->rx_queue.db_data);

		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);

		qed_ll2_rxq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
						       QED_LLH_FILTER_ETHERTYPE,
		qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
					       QED_LLH_FILTER_ETHERTYPE,

	qed_ptt_release(p_hwfn, p_ptt);
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
void qed_ll2_release_connection(void *cxt, u8 connection_handle)
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);

	kfree(p_ll2_conn->tx_queue.descq_mem);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;

	return 0;
}
void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}
void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}
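
/* The _qed_ll2_get_*stats() helpers below copy the per-port / per-queue
 * statistics kept by the storms out of BAR0-mapped SDM RAM and accumulate
 * the 64-bit HI/LO register pairs into the caller's struct qed_ll2_stats.
 */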
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length +=
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ +=
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error +=
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard +=
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
}
static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
			       struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	memset(p_stats, 0, sizeof(*p_stats));

	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
}
static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}
static struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
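
/* Fill a qed_ll2_acquire_data request with the defaults used by the common
 * LL2 path: descriptor ring sizes, the caller's MTU/VLAN/TTL0 preferences
 * and the default callback table above. @lb selects whether Tx is aimed at
 * the loopback path (OOO traffic) or at the network.
 */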
static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = p_hwfn;

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}
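
/* Bring up the dedicated loopback LL2 queue used for iSCSI out-of-order
 * (OOO) handling; the acquired handle is stored in the iSCSI PF params.
 */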
static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
			     struct qed_ll2_params *params)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(p_hwfn, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(p_hwfn, *handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(p_hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}
static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);

	return rc;
}
static int qed_ll2_stop(struct qed_dev *cdev)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc = 0, rc2 = 0;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
		if (rc2)
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to stop LL2 on engine 0\n");
	}

	rc = __qed_ll2_stop(p_hwfn);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");

	qed_ll2_kill_buffers(cdev);

	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc | rc2;
}
static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	int rc, rx_cnt;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
		goto release_conn;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	rx_cnt = cdev->ll2->rx_cnt;
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(p_hwfn,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (rx_cnt == cdev->ll2->rx_cnt) {
		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
		goto terminate_conn;
	}
	cdev->ll2->rx_cnt = rx_cnt;

	return 0;

terminate_conn:
	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
release_conn:
	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
	return rc;
}
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_buffer *buffer;
	int rx_num_desc, i, rc;

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_NOTICE(cdev, "Invalid Ethernet address\n");
		return -EINVAL;
	}

	WARN_ON(!cdev->ll2->cbs);

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);

	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2.
	 * In CMT mode, in case of a storage PF which is affinitized to
	 * engine 1, LL2 is started also on engine 0 and thus we need
	 * twofold buffers.
	 */
	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
		rx_num_desc, cdev->ll2->rx_size);
	for (i = 0; i < rx_num_desc; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			rc = -ENOMEM;
			goto err0;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto err0;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	rc = __qed_ll2_start(p_hwfn, params);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start LL2\n");
		goto err0;
	}

	/* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
	 * since broadcast/multicast packets are routed to engine 0.
	 */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to start LL2 on engine 0\n");
			goto err1;
		}
	}

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(p_hwfn, params);
		if (rc) {
			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
			goto err2;
		}
	}

	rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
	if (rc) {
		DP_NOTICE(cdev, "Failed to add an LLH filter\n");
		goto err3;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

err3:
	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);
err2:
	if (b_is_storage_eng1)
		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
err1:
	__qed_ll2_stop(p_hwfn);
err0:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
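
/* Transmit an skb over the common LL2 connection: map the linear part and
 * all fragments for DMA, build the Tx packet descriptor (VLAN insertion,
 * IP checksum offload, optional S-tag removal for UFP FIP discovery) and
 * hand it to the firmware via qed_ll2_prepare_tx_packet().
 */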
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache number of fragments from SKB since SKB may be freed by
	 * the completion routine after calling qed_ll2_prepare_tx_packet()
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if
	 * there are no fragments in the skb and subsequently the completion
	 * routine may run and free the SKB, so no dereferencing the SKB
	 * beyond this point unless skb has any fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
				       &pkt, 1);
	if (unlikely(rc))
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -ENOMEM;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been
		 * posted, we can't free memory, will need to wait for
		 * completion
		 */
		if (unlikely(rc))
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
		return rc;
	}

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
					 cdev->ll2->handle, stats);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to get LL2 stats on engine 0\n");
			return rc;
		}
	}

	return 0;
}
const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);

	return cdev->ll2 ? 0 : -ENOMEM;
}
void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}