// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed_dev_api.h"
#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registered)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registered)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

#define QED_LL2_INVALID_STATS_ID	0xff
struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};
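/* Map an LL2 Rx queue type and queue id to the Tx stats counter index used
 * by the firmware, or QED_LL2_INVALID_STATS_ID when no counter is available.
 */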
static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
				     u8 ll2_queue_type, u8 qid)
{
	u8 stats_id;

	/* For legacy (RAM based) queues, the stats_id will be set as the
	 * queue_id. Otherwise (context based queue), it will be set to
	 * the "abs_pf_id" offset from the end of the RAM based queue IDs.
	 * If the final value exceeds the total counters amount, return
	 * INVALID value to indicate that the stats for this connection should
	 * not be accounted.
	 */
	if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
		stats_id = qid;
	else
		stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;

	if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
		return stats_id;
	else
		return QED_LL2_INVALID_STATS_ID;
}
static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}
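/* Allocate and DMA-map a single Rx buffer for the LL2 driver-level buffer
 * list; the data pointer and its bus address are returned to the caller.
 */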
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
		      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	*data = kmalloc(size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}
static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
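/* Default Rx completion handler: wrap the completed buffer in an skb, hand
 * it to the registered rx_cb, and repost a (possibly new) buffer to the FW.
 */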
static void qed_ll2b_complete_rx_packet(void *cxt,
					struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = slab_build_skb(buffer->data);
	if (unlikely(!skb)) {
		DP_INFO(cdev, "Failed to build SKB\n");
		kfree(buffer->data);
		goto out_post1;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	} else {
		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
				    QED_MSG_LL2 | QED_MSG_STORAGE),
			   "Dropping the packet\n");
		kfree(buffer->data);
	}

out_post1:
	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);
	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}
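/* Return every Tx packet still on the active descriptor list to the free
 * list, releasing OOO buffers or invoking the tx_release_cb as appropriate.
 */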
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	unsigned long flags = 0;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	spin_lock_irqsave(&p_tx->lock, flags);
	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		spin_unlock_irqrestore(&p_tx->lock, flags);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
		spin_lock_irqsave(&p_tx->lock, flags);
	}
	spin_unlock_irqrestore(&p_tx->lock, flags);
}
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	if (unlikely(!p_ll2_conn))
		return rc;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (unlikely(!p_pkt))
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (unlikely(num_bds < num_bds_in_packet)) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
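/* Parse a GSI (RoCE) Rx CQE into the generic qed_ll2_comp_rx_data layout. */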
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);

	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}
static int
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
			struct qed_ll2_info *p_ll2_conn,
			union core_rx_cqe_union *p_cqe,
			unsigned long *p_lock_flags)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_slow_path_cqe *sp_cqe;

	sp_cqe = &p_cqe->rx_cqe_sp;
	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
		DP_NOTICE(p_hwfn,
			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
			  sp_cqe->ramrod_cmd_id);
		return -EINVAL;
	}

	if (!p_ll2_conn->cbs.slowpath_cb) {
		DP_NOTICE(p_hwfn,
			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
		return -EINVAL;
	}

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
				    p_ll2_conn->my_id,
				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
				    le32_to_cpu(sp_cqe->opaque_data.data[1]));

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}
static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (unlikely(!p_pkt)) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}
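/* Interrupt-context Rx completion handler: walk the Rx CQ chain and dispatch
 * slowpath, GSI and regular CQEs to their respective handlers.
 */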
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	if (unlikely(!p_ll2_conn))
		return rc;

	spin_lock_irqsave(&p_rx->lock, flags);

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		spin_unlock_irqrestore(&p_rx->lock, flags);
		return 0;
	}

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
						     cqe, &flags);
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;
		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
		spin_unlock_irqrestore(&p_rx->lock, flags);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
		spin_lock_irqsave(&p_rx->lock, flags);
	}
	spin_unlock_irqrestore(&p_rx->lock, flags);
}
static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
				struct core_rx_slow_path_cqe *p_cqe)
{
	struct ooo_opaque *ooo_opq;
	u32 cid;

	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
		return false;

	ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
	if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
		return false;

	/* Need to make a flush */
	cid = le32_to_cpu(ooo_opq->cid);
	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);

	return true;
}
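/* Loopback (OOO) Rx handler: consume CQEs and update the out-of-order isle
 * bookkeeping (delete/add/join) before buffers are re-submitted.
 */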
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *ooo_opq;
	u8 placement_offset = 0;
	u8 cqe_type;
	u32 cid;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
							    &cqe->rx_cqe_sp))
				continue;

		if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
		cid = le32_to_cpu(ooo_opq->cid);

		/* Process delete isle first */
		if (ooo_opq->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     ooo_opq->drop_isle,
					     ooo_opq->drop_size);

		if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (unlikely(list_empty(&p_rx->active_descq))) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n"
				  );
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
			   ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
			   ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
			if (unlikely(!p_pkt)) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (ooo_opq->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     ooo_opq->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       ooo_opq->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       ooo_opq->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       ooo_opq->ooo_isle + 1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, ooo_opq->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  ooo_opq->ooo_opcode);
		}
	}

	return 0;
}
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		switch (p_ll2_conn->tx_dest) {
		case CORE_TX_DEST_NW:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
			break;
		case CORE_TX_DEST_LB:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
			break;
		case CORE_TX_DEST_DROP:
		default:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
			break;
		}
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}
static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	if (unlikely(!p_ll2_conn))
		return 0;

	if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
		return 0;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (unlikely(!num_bds))
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (unlikely(!p_pkt))
			return -EINVAL;

		if (unlikely(p_pkt->bd_used != 1)) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}
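/* Tear down the LL2 connection used for out-of-order (OOO) handling and mark
 * its handle as unused.
 */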
static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
		   "Stopping LL2 OOO queue [%02x]\n", *handle);

	qed_ll2_terminate_connection(p_hwfn, *handle);
	qed_ll2_release_connection(p_hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;
	memset(p_ramrod, 0, sizeof(*p_ramrod));
	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_stripping_en =
		p_ll2_conn->input.rx_vlan_removal_en;

	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
		p_ramrod->report_outer_vlan = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;

	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
	    conn_type != QED_LL2_TYPE_IWARP &&
	    (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	p_ramrod->zero_prod_flg = 1;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case PURE_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case PKT_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_TCP_ULP:
		p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	case QED_LL2_TYPE_IWARP:
		p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	case QED_LL2_TYPE_OOO:
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
		    p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
			p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
		else
			p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
				 &p_tx->db_msg, DB_REC_WIDTH_32B,
				 DB_REC_KERNEL);

	return rc;
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_chain_init_params params = {
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= p_ll2_info->input.rx_num_desc,
	};
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	params.mode = QED_CHAIN_MODE_NEXT_PTR;
	params.elem_size = sizeof(struct core_rx_bd);

	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	params.mode = QED_CHAIN_MODE_PBL;
	params.elem_size = sizeof(struct core_rx_fast_path_cqe);

	rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= p_ll2_info->input.tx_num_desc,
		.elem_size	= sizeof(struct core_tx_bd),
	};
	struct qed_ll2_tx_packet *p_descq;
	size_t desc_size;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
			     &params);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	/* All bds_set elements are flexibly added. */
	desc_size = struct_size(p_descq, bds_set,
				p_ll2_info->input.tx_max_bds_per_packet);

	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_mem = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}
static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}
static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
					struct qed_ll2_acquire_data *data,
					u8 *start_idx, u8 *last_idx)
{
	/* LL2 queues handles will be split as follows:
	 * First will be the legacy queues, and then the ctx based.
	 */
	if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
		*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
		*last_idx = *start_idx +
			QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
	} else {
		/* QED_LL2_RX_TYPE_CTX */
		*start_idx = QED_LL2_CTX_CONN_BASE_PF;
		*last_idx = *start_idx +
			QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
	}
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}
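/* Reserve a free LL2 connection handle, copy the caller's input, allocate
 * the Rx/Tx chains and register the completion callbacks.
 */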
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, first_idx, last_idx, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	_qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);

	/* Find a free connection to be used */
	for (i = first_idx; i < last_idx; i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	switch (data->input.tx_dest) {
	case QED_LL2_TX_DEST_NW:
		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		return -EINVAL;
	}

	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
	    data->input.secondary_queue)
		p_ll2_info->main_func_queue = false;
	else
		p_ll2_info->main_func_queue = true;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registered = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registered = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;
	int rc;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
	if (rc)
		return rc;

	if (p_ll2_conn->rx_queue.ctx_based) {
		rc = qed_db_recovery_add(p_hwfn->cdev,
					 p_ll2_conn->rx_queue.set_prod_addr,
					 &p_ll2_conn->rx_queue.db_data,
					 DB_REC_WIDTH_64B, DB_REC_KERNEL);
	}

	return rc;
}
static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
					    u8 handle,
					    u8 ll2_queue_type)
{
	u8 qid;

	if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
		return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;

	/* QED_LL2_RX_TYPE_CTX
	 * FW distinguishes between the legacy queues (ram based) and the
	 * ctx based queues by the queue_id.
	 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
	 * and the queue ids above that are ctx base.
	 */
	qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
	      MAX_NUM_LL2_RX_RAM_QUEUES;

	/* See comment on the acquire connection for how the ll2
	 * queues handles are divided.
	 */
	qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);

	return qid;
}
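/* Activate a previously acquired connection: reset the chains, acquire a
 * CID, program the producer/doorbell addresses and start the Rx/Tx queues
 * via ramrods.
 */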
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct core_conn_context *p_cxt;
	struct qed_ll2_tx_packet *p_pkt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_cxt_info cxt_info;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	size_t desc_size;
	u8 qid, stats_id;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	/* All bds_set elements are flexibly added. */
	desc_size = struct_size(p_pkt, bds_set,
				p_ll2_conn->input.tx_max_bds_per_packet);

	for (i = 0; i < capacity; i++) {
		p_pkt = p_tx->descq_mem + desc_size * i;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
	}
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	cxt_info.iid = p_ll2_conn->cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_ll2_conn->cid);
		goto out;
	}

	p_cxt = cxt_info.p_cxt;

	memset(p_cxt, 0, sizeof(*p_cxt));

	qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
					 p_ll2_conn->input.rx_conn_type);
	stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
					      p_ll2_conn->input.rx_conn_type,
					      qid);
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = stats_id;

	/* If there is no valid stats id for this connection, disable stats */
	if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
		p_ll2_conn->tx_stats_en = 0;
		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "Disabling stats for queue %d - not enough counters\n",
			   qid);
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_LL2,
		   "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
		   p_hwfn->rel_pf_id,
		   p_ll2_conn->input.rx_conn_type, qid, stats_id);

	if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
		p_rx->set_prod_addr =
		    (u8 __iomem *)p_hwfn->regview +
		    GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
				     TSTORM_LL2_RX_PRODS, qid);
	} else {
		/* QED_LL2_RX_TYPE_CTX - using doorbell */
		p_rx->ctx_based = 1;

		p_rx->set_prod_addr = p_hwfn->doorbells +
			p_hwfn->dpi_start_offset +
			DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);

		/* prepare db data */
		p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
		SET_FIELD(p_rx->db_data.params,
			  CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
		SET_FIELD(p_rx->db_data.params,
			  CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
	}

	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
			      qed_db_addr(p_ll2_conn->cid,
					  DQ_DEMS_LEGACY);
	/* prepare db data */
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
	    !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
						    QED_LLH_FILTER_ETHERTYPE,
						    ETH_P_FCOE, 0);
		qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
					    QED_LLH_FILTER_ETHERTYPE,
					    ETH_P_FIP, 0);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	if (p_rx->ctx_based) {
		/* update producer by giving a doorbell */
		p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
		p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
		/* Make sure chain element is updated before ringing the
		 * doorbell
		 */
		dma_wmb();
		DIRECT_REG_WR64(p_rx->set_prod_addr,
				*((u64 *)&p_rx->db_data));
	} else {
		rx_prod.bd_prod = cpu_to_le16(bd_prod);
		rx_prod.cqe_prod = cpu_to_le16(cq_prod);

		/* Make sure chain element is updated before ringing the
		 * doorbell
		 */
		dma_wmb();

		DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
	}
}
int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;
	if (!p_rx->set_prod_addr)
		return -EIO;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}
static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;
	u16 bitfield1;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	switch (pkt->tx_dest) {
	case QED_LL2_TX_DEST_NW:
		tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		tx_dest = CORE_TX_DEST_LB;
		break;
	}

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
		   p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
		start_bd->nw_vlan_or_lb_echo =
		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
	} else {
		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
			pkt->remove_stag = true;
	}

	bitfield1 = le16_to_cpu(start_bd->bitfield1);
	SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w);
	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	start_bd->bitfield1 = cpu_to_le16(bitfield1);

	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
		  !!(pkt->remove_stag));

	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}
/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
}
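/* Queue a Tx packet on the given connection; the first fragment is set here
 * and additional fragments may follow via qed_ll2_set_fragment_of_tx_packet().
 */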
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (unlikely(!p_ll2_conn))
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (unlikely(p_tx->cur_send_packet)) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (unlikely(!list_empty(&p_tx->free_descq)))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (unlikely(p_curp &&
		     qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
		p_curp = NULL;

	if (unlikely(!p_curp)) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (unlikely(!p_ll2_conn))
		return -EINVAL;

	if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registered = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_txq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registered = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */

		if (p_ll2_conn->rx_queue.ctx_based)
			qed_db_recovery_del(p_hwfn->cdev,
					    p_ll2_conn->rx_queue.set_prod_addr,
					    &p_ll2_conn->rx_queue.db_data);

		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_rxq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
						       QED_LLH_FILTER_ETHERTYPE,
						       ETH_P_FCOE, 0);
		qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
					       QED_LLH_FILTER_ETHERTYPE,
					       ETH_P_FIP, 0);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

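/* Release all resources of a previously acquired connection: descriptor
 * memory, the Tx/Rx/Rx-completion chains and the CID, then mark the
 * connection slot as inactive under its mutex.
 */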
void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (unlikely(!p_ll2_conn))
		return;

	kfree(p_ll2_conn->tx_queue.descq_mem);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

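/* Lifecycle of the per-hwfn LL2 connection array: qed_ll2_alloc() allocates
 * and numbers the slots, qed_ll2_setup() initializes their mutexes and
 * qed_ll2_free() releases the array on teardown.
 */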
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;

	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}

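/* Statistics helpers: each routine copies one storm's per-port or per-queue
 * statistics block out of device RAM over the BAR and accumulates the
 * 64-bit counters into the caller's qed_ll2_stats structure.
 */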
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length +=
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ +=
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error +=
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard +=
			HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

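/* Gather statistics for a single connection handle: acquire a PTT window,
 * pull the port-level GSI counters when GSI is enabled, always pull the
 * tstorm/ustorm Rx counters, and pull the pstorm Tx counters only when Tx
 * statistics were enabled for the connection.
 */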
static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
			       struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	memset(p_stats, 0, sizeof(*p_stats));
	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
}

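/* Default buffer-management callbacks used by the qed_ll2_ops_pass path:
 * released Rx buffers are simply returned to the driver's buffer pool,
 * while Tx completion and Tx release share the same handler.
 */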
static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

static struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};

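/* Build the acquire-request data for a new connection from the caller's
 * parameters; when 'lb' is set the connection is configured for loopback
 * (loopback traffic class, Tx destined back to the device), otherwise Tx
 * is destined to the network.
 */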
static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = p_hwfn;

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

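/* Bring up the out-of-order (OOO) LL2 queue used by the iSCSI/NVMeTCP
 * offload path: acquire a loopback connection of type OOO and establish it,
 * rolling back and invalidating the handle on failure.
 */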
static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
			     struct qed_ll2_params *params)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(p_hwfn, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(p_hwfn, *handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(p_hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}

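/* Stop helpers: __qed_ll2_stop() terminates and releases the connection on a
 * single hwfn, while qed_ll2_stop() removes the LLH MAC filter, stops the
 * OOO queue when present and shuts LL2 down on both engines in the CMT
 * storage case.
 */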
static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);

	return rc;
}

static int qed_ll2_stop(struct qed_dev *cdev)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc = 0, rc2 = 0;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;
	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);

	qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
		if (rc2)
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to stop LL2 on engine 0\n");
	}

	rc = __qed_ll2_stop(p_hwfn);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");

	qed_ll2_kill_buffers(cdev);

	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc | rc2;
}

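/* Start LL2 on one hwfn: pick the connection type from the PF personality,
 * acquire and establish the connection, then post every pre-allocated Rx
 * buffer to firmware; buffers that cannot be posted are unmapped and freed,
 * and the start fails if not even one buffer could be posted.
 */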
static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	int rc, rx_cnt;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		conn_type = QED_LL2_TYPE_TCP_ULP;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
		goto release_conn;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	rx_cnt = cdev->ll2->rx_cnt;
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(p_hwfn,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
			continue;
		}

		rx_cnt++;
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (rx_cnt == cdev->ll2->rx_cnt) {
		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
		goto terminate_conn;
	}
	cdev->ll2->rx_cnt = rx_cnt;

	return 0;

terminate_conn:
	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
release_conn:
	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
	return rc;
}

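/* Top-level start: validate the MAC, allocate the Rx buffer pool (doubled in
 * the CMT storage case so engine 0 can be served as well), start LL2 on the
 * affinitized hwfn and, when required, on engine 0, bring up the OOO queue
 * for iSCSI/NVMeTCP and install the LLH MAC filter.
 */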
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_buffer *buffer;
	int rx_num_desc, i, rc;

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_NOTICE(cdev, "Invalid Ethernet address\n");
		return -EINVAL;
	}

	WARN_ON(!cdev->ll2->cbs);

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);

	cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2.
	 * In CMT mode, in case of a storage PF which is affinitized to
	 * engine 1, LL2 is started also on engine 0 and thus we need
	 * twofold buffers.
	 */
	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
		rx_num_desc, cdev->ll2->rx_size);
	for (i = 0; i < rx_num_desc; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			rc = -ENOMEM;
			goto err0;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto err0;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	rc = __qed_ll2_start(p_hwfn, params);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start LL2\n");
		goto err0;
	}

	/* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
	 * since broadcast/multicast packets are routed to engine 0.
	 */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to start LL2 on engine 0\n");
			goto err1;
		}
	}

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(p_hwfn, params);
		if (rc) {
			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
			goto err2;
		}
	}

	if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
		rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
		if (rc) {
			DP_NOTICE(cdev, "Failed to add an LLH filter\n");
			goto err3;
		}
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

err3:
	if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);
err2:
	if (b_is_storage_eng1)
		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
err1:
	__qed_ll2_stop(p_hwfn);
err0:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

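/* Transmit an skb over the LL2 connection: map the linear part, build the
 * packet descriptor (requesting IP checksum and VLAN insertion as needed),
 * post the first BD and then attach one BD per page fragment. Note that the
 * completion routine may free the skb as soon as the last BD is posted.
 */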
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache number of fragments from SKB since SKB may be freed by
	 * the completion routine after calling qed_ll2_prepare_tx_packet()
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if
	 * there are no fragments in the skb and subsequently the completion
	 * routine may run and free the SKB, so no dereferencing the SKB
	 * beyond this point unless skb has any fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
				       &pkt, 1);
	if (unlikely(rc))
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -ENOMEM;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been
		 * posted; we can't free memory, will need to wait for
		 * completion
		 */
		if (unlikely(rc))
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	if (!cdev->ll2)
		return -EINVAL;

	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
		return rc;
	}

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
					 cdev->ll2->handle, stats);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to get LL2 stats on engine 0\n");
			return rc;
		}
	}

	return 0;
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

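/* Allocate/free the qed_dev-level LL2 bookkeeping structure used by the
 * qed_ll2_ops interface above.
 */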
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)