/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed_dev_api.h"
#include "qed_reg_addr.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registered)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registered)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};
static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}
static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
161 static void qed_ll2b_complete_rx_packet(void *cxt
,
162 struct qed_ll2_comp_rx_data
*data
)
164 struct qed_hwfn
*p_hwfn
= cxt
;
165 struct qed_ll2_buffer
*buffer
= data
->cookie
;
166 struct qed_dev
*cdev
= p_hwfn
->cdev
;
167 dma_addr_t new_phys_addr
;
174 (NETIF_MSG_RX_STATUS
| QED_MSG_STORAGE
| NETIF_MSG_PKTDATA
),
175 "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
176 (u64
)data
->rx_buf_addr
,
177 data
->u
.placement_offset
,
178 data
->length
.packet_length
,
180 data
->vlan
, data
->opaque_data_0
, data
->opaque_data_1
);
182 if ((cdev
->dp_module
& NETIF_MSG_PKTDATA
) && buffer
->data
) {
183 print_hex_dump(KERN_INFO
, "",
184 DUMP_PREFIX_OFFSET
, 16, 1,
185 buffer
->data
, data
->length
.packet_length
, false);
188 /* Determine if data is valid */
189 if (data
->length
.packet_length
< ETH_HLEN
)
192 /* Allocate a replacement for buffer; Reuse upon failure */
194 rc
= qed_ll2_alloc_buffer(p_hwfn
->cdev
, &new_data
,
197 /* If need to reuse or there's no replacement buffer, repost this */
200 dma_unmap_single(&cdev
->pdev
->dev
, buffer
->phys_addr
,
201 cdev
->ll2
->rx_size
, DMA_FROM_DEVICE
);
203 skb
= build_skb(buffer
->data
, 0);
205 DP_INFO(cdev
, "Failed to build SKB\n");
210 data
->u
.placement_offset
+= NET_SKB_PAD
;
211 skb_reserve(skb
, data
->u
.placement_offset
);
212 skb_put(skb
, data
->length
.packet_length
);
213 skb_checksum_none_assert(skb
);
	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
221 /* Pass SKB onward */
222 if (cdev
->ll2
->cbs
&& cdev
->ll2
->cbs
->rx_cb
) {
224 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
226 cdev
->ll2
->cbs
->rx_cb(cdev
->ll2
->cb_cookie
, skb
,
228 data
->opaque_data_1
);
230 DP_VERBOSE(p_hwfn
, (NETIF_MSG_RX_STATUS
| NETIF_MSG_PKTDATA
|
231 QED_MSG_LL2
| QED_MSG_STORAGE
),
232 "Dropping the packet\n");
237 /* Update Buffer information and update FW producer */
238 buffer
->data
= new_data
;
239 buffer
->phys_addr
= new_phys_addr
;
242 rc
= qed_ll2_post_rx_buffer(p_hwfn
, cdev
->ll2
->handle
,
243 buffer
->phys_addr
, 0, buffer
, 1);
245 qed_ll2_dealloc_buffer(cdev
, buffer
);
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}
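/* Descriptive note (added for clarity, not in the original sources): the
 * three wrappers above only differ in how __qed_ll2_handle_sanity() is
 * invoked - qed_ll2_handle_sanity() expects an active connection without
 * taking its mutex, qed_ll2_handle_sanity_lock() additionally holds the
 * per-connection mutex around the check, and
 * qed_ll2_handle_sanity_inactive() also accepts a connection that is not
 * (or no longer) active, e.g. while flushing its queues.
 */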
296 static void qed_ll2_txq_flush(struct qed_hwfn
*p_hwfn
, u8 connection_handle
)
298 bool b_last_packet
= false, b_last_frag
= false;
299 struct qed_ll2_tx_packet
*p_pkt
= NULL
;
300 struct qed_ll2_info
*p_ll2_conn
;
301 struct qed_ll2_tx_queue
*p_tx
;
302 unsigned long flags
= 0;
305 p_ll2_conn
= qed_ll2_handle_sanity_inactive(p_hwfn
, connection_handle
);
309 p_tx
= &p_ll2_conn
->tx_queue
;
311 spin_lock_irqsave(&p_tx
->lock
, flags
);
312 while (!list_empty(&p_tx
->active_descq
)) {
313 p_pkt
= list_first_entry(&p_tx
->active_descq
,
314 struct qed_ll2_tx_packet
, list_entry
);
318 list_del(&p_pkt
->list_entry
);
319 b_last_packet
= list_empty(&p_tx
->active_descq
);
320 list_add_tail(&p_pkt
->list_entry
, &p_tx
->free_descq
);
321 spin_unlock_irqrestore(&p_tx
->lock
, flags
);
322 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_OOO
) {
323 struct qed_ooo_buffer
*p_buffer
;
325 p_buffer
= (struct qed_ooo_buffer
*)p_pkt
->cookie
;
326 qed_ooo_put_free_buffer(p_hwfn
, p_hwfn
->p_ooo_info
,
329 p_tx
->cur_completing_packet
= *p_pkt
;
330 p_tx
->cur_completing_bd_idx
= 1;
332 p_tx
->cur_completing_bd_idx
== p_pkt
->bd_used
;
333 tx_frag
= p_pkt
->bds_set
[0].tx_frag
;
334 p_ll2_conn
->cbs
.tx_release_cb(p_ll2_conn
->cbs
.cookie
,
341 spin_lock_irqsave(&p_tx
->lock
, flags
);
343 spin_unlock_irqrestore(&p_tx
->lock
, flags
);
346 static int qed_ll2_txq_completion(struct qed_hwfn
*p_hwfn
, void *p_cookie
)
348 struct qed_ll2_info
*p_ll2_conn
= p_cookie
;
349 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
350 u16 new_idx
= 0, num_bds
= 0, num_bds_in_packet
= 0;
351 struct qed_ll2_tx_packet
*p_pkt
;
352 bool b_last_frag
= false;
356 spin_lock_irqsave(&p_tx
->lock
, flags
);
357 if (p_tx
->b_completing_packet
) {
362 new_idx
= le16_to_cpu(*p_tx
->p_fw_cons
);
363 num_bds
= ((s16
)new_idx
- (s16
)p_tx
->bds_idx
);
365 if (list_empty(&p_tx
->active_descq
))
368 p_pkt
= list_first_entry(&p_tx
->active_descq
,
369 struct qed_ll2_tx_packet
, list_entry
);
373 p_tx
->b_completing_packet
= true;
374 p_tx
->cur_completing_packet
= *p_pkt
;
375 num_bds_in_packet
= p_pkt
->bd_used
;
376 list_del(&p_pkt
->list_entry
);
378 if (num_bds
< num_bds_in_packet
) {
380 "Rest of BDs does not cover whole packet\n");
384 num_bds
-= num_bds_in_packet
;
385 p_tx
->bds_idx
+= num_bds_in_packet
;
386 while (num_bds_in_packet
--)
387 qed_chain_consume(&p_tx
->txq_chain
);
389 p_tx
->cur_completing_bd_idx
= 1;
390 b_last_frag
= p_tx
->cur_completing_bd_idx
== p_pkt
->bd_used
;
391 list_add_tail(&p_pkt
->list_entry
, &p_tx
->free_descq
);
393 spin_unlock_irqrestore(&p_tx
->lock
, flags
);
395 p_ll2_conn
->cbs
.tx_comp_cb(p_ll2_conn
->cbs
.cookie
,
398 p_pkt
->bds_set
[0].tx_frag
,
399 b_last_frag
, !num_bds
);
401 spin_lock_irqsave(&p_tx
->lock
, flags
);
404 p_tx
->b_completing_packet
= false;
407 spin_unlock_irqrestore(&p_tx
->lock
, flags
);
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);

	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}
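/* Descriptive note (added for clarity): the two parsers above normalize the
 * GSI and the regular fast-path Rx CQE layouts into the common
 * struct qed_ll2_comp_rx_data that is later handed to the registered
 * rx_comp_cb, so upper layers never have to look at the raw CQE union.
 */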
441 qed_ll2_handle_slowpath(struct qed_hwfn
*p_hwfn
,
442 struct qed_ll2_info
*p_ll2_conn
,
443 union core_rx_cqe_union
*p_cqe
,
444 unsigned long *p_lock_flags
)
446 struct qed_ll2_rx_queue
*p_rx
= &p_ll2_conn
->rx_queue
;
447 struct core_rx_slow_path_cqe
*sp_cqe
;
449 sp_cqe
= &p_cqe
->rx_cqe_sp
;
450 if (sp_cqe
->ramrod_cmd_id
!= CORE_RAMROD_RX_QUEUE_FLUSH
) {
452 "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
453 sp_cqe
->ramrod_cmd_id
);
457 if (!p_ll2_conn
->cbs
.slowpath_cb
) {
459 "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
463 spin_unlock_irqrestore(&p_rx
->lock
, *p_lock_flags
);
465 p_ll2_conn
->cbs
.slowpath_cb(p_ll2_conn
->cbs
.cookie
,
467 le32_to_cpu(sp_cqe
->opaque_data
.data
[0]),
468 le32_to_cpu(sp_cqe
->opaque_data
.data
[1]));
470 spin_lock_irqsave(&p_rx
->lock
, *p_lock_flags
);
476 qed_ll2_rxq_handle_completion(struct qed_hwfn
*p_hwfn
,
477 struct qed_ll2_info
*p_ll2_conn
,
478 union core_rx_cqe_union
*p_cqe
,
479 unsigned long *p_lock_flags
, bool b_last_cqe
)
481 struct qed_ll2_rx_queue
*p_rx
= &p_ll2_conn
->rx_queue
;
482 struct qed_ll2_rx_packet
*p_pkt
= NULL
;
483 struct qed_ll2_comp_rx_data data
;
485 if (!list_empty(&p_rx
->active_descq
))
486 p_pkt
= list_first_entry(&p_rx
->active_descq
,
487 struct qed_ll2_rx_packet
, list_entry
);
490 "[%d] LL2 Rx completion but active_descq is empty\n",
491 p_ll2_conn
->input
.conn_type
);
495 list_del(&p_pkt
->list_entry
);
497 if (p_cqe
->rx_cqe_sp
.type
== CORE_RX_CQE_TYPE_REGULAR
)
498 qed_ll2_rxq_parse_reg(p_hwfn
, p_cqe
, &data
);
500 qed_ll2_rxq_parse_gsi(p_hwfn
, p_cqe
, &data
);
501 if (qed_chain_consume(&p_rx
->rxq_chain
) != p_pkt
->rxq_bd
)
503 "Mismatch between active_descq and the LL2 Rx chain\n");
505 list_add_tail(&p_pkt
->list_entry
, &p_rx
->free_descq
);
507 data
.connection_handle
= p_ll2_conn
->my_id
;
508 data
.cookie
= p_pkt
->cookie
;
509 data
.rx_buf_addr
= p_pkt
->rx_buf_addr
;
510 data
.b_last_packet
= b_last_cqe
;
512 spin_unlock_irqrestore(&p_rx
->lock
, *p_lock_flags
);
513 p_ll2_conn
->cbs
.rx_comp_cb(p_ll2_conn
->cbs
.cookie
, &data
);
515 spin_lock_irqsave(&p_rx
->lock
, *p_lock_flags
);
520 static int qed_ll2_rxq_completion(struct qed_hwfn
*p_hwfn
, void *cookie
)
522 struct qed_ll2_info
*p_ll2_conn
= (struct qed_ll2_info
*)cookie
;
523 struct qed_ll2_rx_queue
*p_rx
= &p_ll2_conn
->rx_queue
;
524 union core_rx_cqe_union
*cqe
= NULL
;
525 u16 cq_new_idx
= 0, cq_old_idx
= 0;
526 unsigned long flags
= 0;
529 spin_lock_irqsave(&p_rx
->lock
, flags
);
530 cq_new_idx
= le16_to_cpu(*p_rx
->p_fw_cons
);
531 cq_old_idx
= qed_chain_get_cons_idx(&p_rx
->rcq_chain
);
533 while (cq_new_idx
!= cq_old_idx
) {
534 bool b_last_cqe
= (cq_new_idx
== cq_old_idx
);
537 (union core_rx_cqe_union
*)
538 qed_chain_consume(&p_rx
->rcq_chain
);
539 cq_old_idx
= qed_chain_get_cons_idx(&p_rx
->rcq_chain
);
543 "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
544 cq_old_idx
, cq_new_idx
, cqe
->rx_cqe_sp
.type
);
546 switch (cqe
->rx_cqe_sp
.type
) {
547 case CORE_RX_CQE_TYPE_SLOW_PATH
:
548 rc
= qed_ll2_handle_slowpath(p_hwfn
, p_ll2_conn
,
551 case CORE_RX_CQE_TYPE_GSI_OFFLOAD
:
552 case CORE_RX_CQE_TYPE_REGULAR
:
553 rc
= qed_ll2_rxq_handle_completion(p_hwfn
, p_ll2_conn
,
562 spin_unlock_irqrestore(&p_rx
->lock
, flags
);
566 static void qed_ll2_rxq_flush(struct qed_hwfn
*p_hwfn
, u8 connection_handle
)
568 struct qed_ll2_info
*p_ll2_conn
= NULL
;
569 struct qed_ll2_rx_packet
*p_pkt
= NULL
;
570 struct qed_ll2_rx_queue
*p_rx
;
571 unsigned long flags
= 0;
573 p_ll2_conn
= qed_ll2_handle_sanity_inactive(p_hwfn
, connection_handle
);
577 p_rx
= &p_ll2_conn
->rx_queue
;
579 spin_lock_irqsave(&p_rx
->lock
, flags
);
580 while (!list_empty(&p_rx
->active_descq
)) {
581 p_pkt
= list_first_entry(&p_rx
->active_descq
,
582 struct qed_ll2_rx_packet
, list_entry
);
585 list_move_tail(&p_pkt
->list_entry
, &p_rx
->free_descq
);
586 spin_unlock_irqrestore(&p_rx
->lock
, flags
);
588 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_OOO
) {
589 struct qed_ooo_buffer
*p_buffer
;
591 p_buffer
= (struct qed_ooo_buffer
*)p_pkt
->cookie
;
592 qed_ooo_put_free_buffer(p_hwfn
, p_hwfn
->p_ooo_info
,
595 dma_addr_t rx_buf_addr
= p_pkt
->rx_buf_addr
;
596 void *cookie
= p_pkt
->cookie
;
599 b_last
= list_empty(&p_rx
->active_descq
);
600 p_ll2_conn
->cbs
.rx_release_cb(p_ll2_conn
->cbs
.cookie
,
603 rx_buf_addr
, b_last
);
605 spin_lock_irqsave(&p_rx
->lock
, flags
);
607 spin_unlock_irqrestore(&p_rx
->lock
, flags
);
611 qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn
*p_hwfn
,
612 struct core_rx_slow_path_cqe
*p_cqe
)
614 struct ooo_opaque
*iscsi_ooo
;
617 if (p_cqe
->ramrod_cmd_id
!= CORE_RAMROD_RX_QUEUE_FLUSH
)
620 iscsi_ooo
= (struct ooo_opaque
*)&p_cqe
->opaque_data
;
621 if (iscsi_ooo
->ooo_opcode
!= TCP_EVENT_DELETE_ISLES
)
624 /* Need to make a flush */
625 cid
= le32_to_cpu(iscsi_ooo
->cid
);
626 qed_ooo_release_connection_isles(p_hwfn
, p_hwfn
->p_ooo_info
, cid
);
631 static int qed_ll2_lb_rxq_handler(struct qed_hwfn
*p_hwfn
,
632 struct qed_ll2_info
*p_ll2_conn
)
634 struct qed_ll2_rx_queue
*p_rx
= &p_ll2_conn
->rx_queue
;
635 u16 packet_length
= 0, parse_flags
= 0, vlan
= 0;
636 struct qed_ll2_rx_packet
*p_pkt
= NULL
;
637 u32 num_ooo_add_to_peninsula
= 0, cid
;
638 union core_rx_cqe_union
*cqe
= NULL
;
639 u16 cq_new_idx
= 0, cq_old_idx
= 0;
640 struct qed_ooo_buffer
*p_buffer
;
641 struct ooo_opaque
*iscsi_ooo
;
642 u8 placement_offset
= 0;
645 cq_new_idx
= le16_to_cpu(*p_rx
->p_fw_cons
);
646 cq_old_idx
= qed_chain_get_cons_idx(&p_rx
->rcq_chain
);
647 if (cq_new_idx
== cq_old_idx
)
650 while (cq_new_idx
!= cq_old_idx
) {
651 struct core_rx_fast_path_cqe
*p_cqe_fp
;
653 cqe
= qed_chain_consume(&p_rx
->rcq_chain
);
654 cq_old_idx
= qed_chain_get_cons_idx(&p_rx
->rcq_chain
);
655 cqe_type
= cqe
->rx_cqe_sp
.type
;
657 if (cqe_type
== CORE_RX_CQE_TYPE_SLOW_PATH
)
658 if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn
,
662 if (cqe_type
!= CORE_RX_CQE_TYPE_REGULAR
) {
664 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
668 p_cqe_fp
= &cqe
->rx_cqe_fp
;
670 placement_offset
= p_cqe_fp
->placement_offset
;
671 parse_flags
= le16_to_cpu(p_cqe_fp
->parse_flags
.flags
);
672 packet_length
= le16_to_cpu(p_cqe_fp
->packet_length
);
673 vlan
= le16_to_cpu(p_cqe_fp
->vlan
);
674 iscsi_ooo
= (struct ooo_opaque
*)&p_cqe_fp
->opaque_data
;
675 qed_ooo_save_history_entry(p_hwfn
, p_hwfn
->p_ooo_info
,
677 cid
= le32_to_cpu(iscsi_ooo
->cid
);
679 /* Process delete isle first */
680 if (iscsi_ooo
->drop_size
)
681 qed_ooo_delete_isles(p_hwfn
, p_hwfn
->p_ooo_info
, cid
,
682 iscsi_ooo
->drop_isle
,
683 iscsi_ooo
->drop_size
);
685 if (iscsi_ooo
->ooo_opcode
== TCP_EVENT_NOP
)
688 /* Now process create/add/join isles */
689 if (list_empty(&p_rx
->active_descq
)) {
691 "LL2 OOO RX chain has no submitted buffers\n"
696 p_pkt
= list_first_entry(&p_rx
->active_descq
,
697 struct qed_ll2_rx_packet
, list_entry
);
699 if ((iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_NEW_ISLE
) ||
700 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_ISLE_RIGHT
) ||
701 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_ISLE_LEFT
) ||
702 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_PEN
) ||
703 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_JOIN
)) {
706 "LL2 OOO RX packet is not valid\n");
709 list_del(&p_pkt
->list_entry
);
710 p_buffer
= (struct qed_ooo_buffer
*)p_pkt
->cookie
;
711 p_buffer
->packet_length
= packet_length
;
712 p_buffer
->parse_flags
= parse_flags
;
713 p_buffer
->vlan
= vlan
;
714 p_buffer
->placement_offset
= placement_offset
;
715 qed_chain_consume(&p_rx
->rxq_chain
);
716 list_add_tail(&p_pkt
->list_entry
, &p_rx
->free_descq
);
718 switch (iscsi_ooo
->ooo_opcode
) {
719 case TCP_EVENT_ADD_NEW_ISLE
:
720 qed_ooo_add_new_isle(p_hwfn
,
726 case TCP_EVENT_ADD_ISLE_RIGHT
:
727 qed_ooo_add_new_buffer(p_hwfn
,
734 case TCP_EVENT_ADD_ISLE_LEFT
:
735 qed_ooo_add_new_buffer(p_hwfn
,
743 qed_ooo_add_new_buffer(p_hwfn
,
746 iscsi_ooo
->ooo_isle
+
750 qed_ooo_join_isles(p_hwfn
,
752 cid
, iscsi_ooo
->ooo_isle
);
754 case TCP_EVENT_ADD_PEN
:
755 num_ooo_add_to_peninsula
++;
756 qed_ooo_put_ready_buffer(p_hwfn
,
763 "Unexpected event (%d) TX OOO completion\n",
764 iscsi_ooo
->ooo_opcode
);
772 qed_ooo_submit_tx_buffers(struct qed_hwfn
*p_hwfn
,
773 struct qed_ll2_info
*p_ll2_conn
)
775 struct qed_ll2_tx_pkt_info tx_pkt
;
776 struct qed_ooo_buffer
*p_buffer
;
778 dma_addr_t first_frag
;
782 /* Submit Tx buffers here */
783 while ((p_buffer
= qed_ooo_get_ready_buffer(p_hwfn
,
784 p_hwfn
->p_ooo_info
))) {
788 first_frag
= p_buffer
->rx_buffer_phys_addr
+
789 p_buffer
->placement_offset
;
790 SET_FIELD(bd_flags
, CORE_TX_BD_DATA_FORCE_VLAN_MODE
, 1);
791 SET_FIELD(bd_flags
, CORE_TX_BD_DATA_L4_PROTOCOL
, 1);
793 memset(&tx_pkt
, 0, sizeof(tx_pkt
));
794 tx_pkt
.num_of_bds
= 1;
795 tx_pkt
.vlan
= p_buffer
->vlan
;
796 tx_pkt
.bd_flags
= bd_flags
;
797 tx_pkt
.l4_hdr_offset_w
= l4_hdr_offset_w
;
798 switch (p_ll2_conn
->tx_dest
) {
799 case CORE_TX_DEST_NW
:
800 tx_pkt
.tx_dest
= QED_LL2_TX_DEST_NW
;
802 case CORE_TX_DEST_LB
:
803 tx_pkt
.tx_dest
= QED_LL2_TX_DEST_LB
;
805 case CORE_TX_DEST_DROP
:
807 tx_pkt
.tx_dest
= QED_LL2_TX_DEST_DROP
;
810 tx_pkt
.first_frag
= first_frag
;
811 tx_pkt
.first_frag_len
= p_buffer
->packet_length
;
812 tx_pkt
.cookie
= p_buffer
;
814 rc
= qed_ll2_prepare_tx_packet(p_hwfn
, p_ll2_conn
->my_id
,
817 qed_ooo_put_ready_buffer(p_hwfn
, p_hwfn
->p_ooo_info
,
825 qed_ooo_submit_rx_buffers(struct qed_hwfn
*p_hwfn
,
826 struct qed_ll2_info
*p_ll2_conn
)
828 struct qed_ooo_buffer
*p_buffer
;
831 while ((p_buffer
= qed_ooo_get_free_buffer(p_hwfn
,
832 p_hwfn
->p_ooo_info
))) {
833 rc
= qed_ll2_post_rx_buffer(p_hwfn
,
835 p_buffer
->rx_buffer_phys_addr
,
838 qed_ooo_put_free_buffer(p_hwfn
,
839 p_hwfn
->p_ooo_info
, p_buffer
);
845 static int qed_ll2_lb_rxq_completion(struct qed_hwfn
*p_hwfn
, void *p_cookie
)
847 struct qed_ll2_info
*p_ll2_conn
= (struct qed_ll2_info
*)p_cookie
;
850 if (!QED_LL2_RX_REGISTERED(p_ll2_conn
))
853 rc
= qed_ll2_lb_rxq_handler(p_hwfn
, p_ll2_conn
);
857 qed_ooo_submit_rx_buffers(p_hwfn
, p_ll2_conn
);
858 qed_ooo_submit_tx_buffers(p_hwfn
, p_ll2_conn
);
863 static int qed_ll2_lb_txq_completion(struct qed_hwfn
*p_hwfn
, void *p_cookie
)
865 struct qed_ll2_info
*p_ll2_conn
= (struct qed_ll2_info
*)p_cookie
;
866 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
867 struct qed_ll2_tx_packet
*p_pkt
= NULL
;
868 struct qed_ooo_buffer
*p_buffer
;
869 bool b_dont_submit_rx
= false;
870 u16 new_idx
= 0, num_bds
= 0;
873 if (!QED_LL2_TX_REGISTERED(p_ll2_conn
))
876 new_idx
= le16_to_cpu(*p_tx
->p_fw_cons
);
877 num_bds
= ((s16
)new_idx
- (s16
)p_tx
->bds_idx
);
883 if (list_empty(&p_tx
->active_descq
))
886 p_pkt
= list_first_entry(&p_tx
->active_descq
,
887 struct qed_ll2_tx_packet
, list_entry
);
891 if (p_pkt
->bd_used
!= 1) {
893 "Unexpectedly many BDs(%d) in TX OOO completion\n",
898 list_del(&p_pkt
->list_entry
);
902 qed_chain_consume(&p_tx
->txq_chain
);
904 p_buffer
= (struct qed_ooo_buffer
*)p_pkt
->cookie
;
905 list_add_tail(&p_pkt
->list_entry
, &p_tx
->free_descq
);
907 if (b_dont_submit_rx
) {
908 qed_ooo_put_free_buffer(p_hwfn
, p_hwfn
->p_ooo_info
,
913 rc
= qed_ll2_post_rx_buffer(p_hwfn
, p_ll2_conn
->my_id
,
914 p_buffer
->rx_buffer_phys_addr
, 0,
917 qed_ooo_put_free_buffer(p_hwfn
,
918 p_hwfn
->p_ooo_info
, p_buffer
);
919 b_dont_submit_rx
= true;
923 qed_ooo_submit_tx_buffers(p_hwfn
, p_ll2_conn
);
static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
		   "Stopping LL2 OOO queue [%02x]\n", *handle);

	qed_ll2_terminate_connection(p_hwfn, *handle);
	qed_ll2_release_connection(p_hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}
940 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn
*p_hwfn
,
941 struct qed_ll2_info
*p_ll2_conn
,
944 enum qed_ll2_conn_type conn_type
= p_ll2_conn
->input
.conn_type
;
945 struct qed_ll2_rx_queue
*p_rx
= &p_ll2_conn
->rx_queue
;
946 struct core_rx_start_ramrod_data
*p_ramrod
= NULL
;
947 struct qed_spq_entry
*p_ent
= NULL
;
948 struct qed_sp_init_data init_data
;
953 memset(&init_data
, 0, sizeof(init_data
));
954 init_data
.cid
= p_ll2_conn
->cid
;
955 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
956 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
958 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
959 CORE_RAMROD_RX_QUEUE_START
,
960 PROTOCOLID_CORE
, &init_data
);
964 p_ramrod
= &p_ent
->ramrod
.core_rx_queue_start
;
965 memset(p_ramrod
, 0, sizeof(*p_ramrod
));
966 p_ramrod
->sb_id
= cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn
));
967 p_ramrod
->sb_index
= p_rx
->rx_sb_index
;
968 p_ramrod
->complete_event_flg
= 1;
970 p_ramrod
->mtu
= cpu_to_le16(p_ll2_conn
->input
.mtu
);
971 DMA_REGPAIR_LE(p_ramrod
->bd_base
, p_rx
->rxq_chain
.p_phys_addr
);
972 cqe_pbl_size
= (u16
)qed_chain_get_page_cnt(&p_rx
->rcq_chain
);
973 p_ramrod
->num_of_pbl_pages
= cpu_to_le16(cqe_pbl_size
);
974 DMA_REGPAIR_LE(p_ramrod
->cqe_pbl_addr
,
975 qed_chain_get_pbl_phys(&p_rx
->rcq_chain
));
977 p_ramrod
->drop_ttl0_flg
= p_ll2_conn
->input
.rx_drop_ttl0_flg
;
978 p_ramrod
->inner_vlan_stripping_en
=
979 p_ll2_conn
->input
.rx_vlan_removal_en
;
981 if (test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
) &&
982 p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_FCOE
)
983 p_ramrod
->report_outer_vlan
= 1;
984 p_ramrod
->queue_id
= p_ll2_conn
->queue_id
;
985 p_ramrod
->main_func_queue
= p_ll2_conn
->main_func_queue
? 1 : 0;
987 if (test_bit(QED_MF_LL2_NON_UNICAST
, &p_hwfn
->cdev
->mf_bits
) &&
988 p_ramrod
->main_func_queue
&& conn_type
!= QED_LL2_TYPE_ROCE
&&
989 conn_type
!= QED_LL2_TYPE_IWARP
) {
990 p_ramrod
->mf_si_bcast_accept_all
= 1;
991 p_ramrod
->mf_si_mcast_accept_all
= 1;
993 p_ramrod
->mf_si_bcast_accept_all
= 0;
994 p_ramrod
->mf_si_mcast_accept_all
= 0;
997 p_ramrod
->action_on_error
.error_type
= action_on_error
;
998 p_ramrod
->gsi_offload_flag
= p_ll2_conn
->input
.gsi_enable
;
999 p_ramrod
->zero_prod_flg
= 1;
1001 return qed_spq_post(p_hwfn
, p_ent
, NULL
);
1004 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn
*p_hwfn
,
1005 struct qed_ll2_info
*p_ll2_conn
)
1007 enum qed_ll2_conn_type conn_type
= p_ll2_conn
->input
.conn_type
;
1008 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
1009 struct core_tx_start_ramrod_data
*p_ramrod
= NULL
;
1010 struct qed_spq_entry
*p_ent
= NULL
;
1011 struct qed_sp_init_data init_data
;
1012 u16 pq_id
= 0, pbl_size
;
1015 if (!QED_LL2_TX_REGISTERED(p_ll2_conn
))
1018 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_OOO
)
1019 p_ll2_conn
->tx_stats_en
= 0;
1021 p_ll2_conn
->tx_stats_en
= 1;
1024 memset(&init_data
, 0, sizeof(init_data
));
1025 init_data
.cid
= p_ll2_conn
->cid
;
1026 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1027 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
1029 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
1030 CORE_RAMROD_TX_QUEUE_START
,
1031 PROTOCOLID_CORE
, &init_data
);
1035 p_ramrod
= &p_ent
->ramrod
.core_tx_queue_start
;
1037 p_ramrod
->sb_id
= cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn
));
1038 p_ramrod
->sb_index
= p_tx
->tx_sb_index
;
1039 p_ramrod
->mtu
= cpu_to_le16(p_ll2_conn
->input
.mtu
);
1040 p_ramrod
->stats_en
= p_ll2_conn
->tx_stats_en
;
1041 p_ramrod
->stats_id
= p_ll2_conn
->tx_stats_id
;
1043 DMA_REGPAIR_LE(p_ramrod
->pbl_base_addr
,
1044 qed_chain_get_pbl_phys(&p_tx
->txq_chain
));
1045 pbl_size
= qed_chain_get_page_cnt(&p_tx
->txq_chain
);
1046 p_ramrod
->pbl_size
= cpu_to_le16(pbl_size
);
1048 switch (p_ll2_conn
->input
.tx_tc
) {
1050 pq_id
= qed_get_cm_pq_idx(p_hwfn
, PQ_FLAGS_LB
);
1053 pq_id
= qed_get_cm_pq_idx(p_hwfn
, PQ_FLAGS_OOO
);
1056 pq_id
= qed_get_cm_pq_idx(p_hwfn
, PQ_FLAGS_OFLD
);
1060 p_ramrod
->qm_pq_id
= cpu_to_le16(pq_id
);
1062 switch (conn_type
) {
1063 case QED_LL2_TYPE_FCOE
:
1064 p_ramrod
->conn_type
= PROTOCOLID_FCOE
;
1066 case QED_LL2_TYPE_ISCSI
:
1067 p_ramrod
->conn_type
= PROTOCOLID_ISCSI
;
1069 case QED_LL2_TYPE_ROCE
:
1070 p_ramrod
->conn_type
= PROTOCOLID_ROCE
;
1072 case QED_LL2_TYPE_IWARP
:
1073 p_ramrod
->conn_type
= PROTOCOLID_IWARP
;
1075 case QED_LL2_TYPE_OOO
:
1076 if (p_hwfn
->hw_info
.personality
== QED_PCI_ISCSI
)
1077 p_ramrod
->conn_type
= PROTOCOLID_ISCSI
;
1079 p_ramrod
->conn_type
= PROTOCOLID_IWARP
;
1082 p_ramrod
->conn_type
= PROTOCOLID_ETH
;
1083 DP_NOTICE(p_hwfn
, "Unknown connection type: %d\n", conn_type
);
1086 p_ramrod
->gsi_offload_flag
= p_ll2_conn
->input
.gsi_enable
;
1088 rc
= qed_spq_post(p_hwfn
, p_ent
, NULL
);
1092 rc
= qed_db_recovery_add(p_hwfn
->cdev
, p_tx
->doorbell_addr
,
1093 &p_tx
->db_msg
, DB_REC_WIDTH_32B
,
1098 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn
*p_hwfn
,
1099 struct qed_ll2_info
*p_ll2_conn
)
1101 struct core_rx_stop_ramrod_data
*p_ramrod
= NULL
;
1102 struct qed_spq_entry
*p_ent
= NULL
;
1103 struct qed_sp_init_data init_data
;
1107 memset(&init_data
, 0, sizeof(init_data
));
1108 init_data
.cid
= p_ll2_conn
->cid
;
1109 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1110 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
1112 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
1113 CORE_RAMROD_RX_QUEUE_STOP
,
1114 PROTOCOLID_CORE
, &init_data
);
1118 p_ramrod
= &p_ent
->ramrod
.core_rx_queue_stop
;
1120 p_ramrod
->complete_event_flg
= 1;
1121 p_ramrod
->queue_id
= p_ll2_conn
->queue_id
;
1123 return qed_spq_post(p_hwfn
, p_ent
, NULL
);
1126 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn
*p_hwfn
,
1127 struct qed_ll2_info
*p_ll2_conn
)
1129 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
1130 struct qed_spq_entry
*p_ent
= NULL
;
1131 struct qed_sp_init_data init_data
;
1133 qed_db_recovery_del(p_hwfn
->cdev
, p_tx
->doorbell_addr
, &p_tx
->db_msg
);
1136 memset(&init_data
, 0, sizeof(init_data
));
1137 init_data
.cid
= p_ll2_conn
->cid
;
1138 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1139 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
1141 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
1142 CORE_RAMROD_TX_QUEUE_STOP
,
1143 PROTOCOLID_CORE
, &init_data
);
1147 return qed_spq_post(p_hwfn
, p_ent
, NULL
);
1151 qed_ll2_acquire_connection_rx(struct qed_hwfn
*p_hwfn
,
1152 struct qed_ll2_info
*p_ll2_info
)
1154 struct qed_ll2_rx_packet
*p_descq
;
1158 if (!p_ll2_info
->input
.rx_num_desc
)
1161 rc
= qed_chain_alloc(p_hwfn
->cdev
,
1162 QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1163 QED_CHAIN_MODE_NEXT_PTR
,
1164 QED_CHAIN_CNT_TYPE_U16
,
1165 p_ll2_info
->input
.rx_num_desc
,
1166 sizeof(struct core_rx_bd
),
1167 &p_ll2_info
->rx_queue
.rxq_chain
, NULL
);
1169 DP_NOTICE(p_hwfn
, "Failed to allocate ll2 rxq chain\n");
1173 capacity
= qed_chain_get_capacity(&p_ll2_info
->rx_queue
.rxq_chain
);
1174 p_descq
= kcalloc(capacity
, sizeof(struct qed_ll2_rx_packet
),
1178 DP_NOTICE(p_hwfn
, "Failed to allocate ll2 Rx desc\n");
1181 p_ll2_info
->rx_queue
.descq_array
= p_descq
;
1183 rc
= qed_chain_alloc(p_hwfn
->cdev
,
1184 QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1186 QED_CHAIN_CNT_TYPE_U16
,
1187 p_ll2_info
->input
.rx_num_desc
,
1188 sizeof(struct core_rx_fast_path_cqe
),
1189 &p_ll2_info
->rx_queue
.rcq_chain
, NULL
);
1191 DP_NOTICE(p_hwfn
, "Failed to allocate ll2 rcq chain\n");
1195 DP_VERBOSE(p_hwfn
, QED_MSG_LL2
,
1196 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1197 p_ll2_info
->input
.conn_type
, p_ll2_info
->input
.rx_num_desc
);
1203 static int qed_ll2_acquire_connection_tx(struct qed_hwfn
*p_hwfn
,
1204 struct qed_ll2_info
*p_ll2_info
)
1206 struct qed_ll2_tx_packet
*p_descq
;
1211 if (!p_ll2_info
->input
.tx_num_desc
)
1214 rc
= qed_chain_alloc(p_hwfn
->cdev
,
1215 QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1217 QED_CHAIN_CNT_TYPE_U16
,
1218 p_ll2_info
->input
.tx_num_desc
,
1219 sizeof(struct core_tx_bd
),
1220 &p_ll2_info
->tx_queue
.txq_chain
, NULL
);
1224 capacity
= qed_chain_get_capacity(&p_ll2_info
->tx_queue
.txq_chain
);
1225 /* First element is part of the packet, rest are flexibly added */
1226 desc_size
= (sizeof(*p_descq
) +
1227 (p_ll2_info
->input
.tx_max_bds_per_packet
- 1) *
1228 sizeof(p_descq
->bds_set
));
1230 p_descq
= kcalloc(capacity
, desc_size
, GFP_KERNEL
);
1235 p_ll2_info
->tx_queue
.descq_mem
= p_descq
;
1237 DP_VERBOSE(p_hwfn
, QED_MSG_LL2
,
1238 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1239 p_ll2_info
->input
.conn_type
, p_ll2_info
->input
.tx_num_desc
);
1244 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
1245 p_ll2_info
->input
.tx_num_desc
);
1250 qed_ll2_acquire_connection_ooo(struct qed_hwfn
*p_hwfn
,
1251 struct qed_ll2_info
*p_ll2_info
, u16 mtu
)
1253 struct qed_ooo_buffer
*p_buf
= NULL
;
1258 if (p_ll2_info
->input
.conn_type
!= QED_LL2_TYPE_OOO
)
1261 /* Correct number of requested OOO buffers if needed */
1262 if (!p_ll2_info
->input
.rx_num_ooo_buffers
) {
1263 u16 num_desc
= p_ll2_info
->input
.rx_num_desc
;
1267 p_ll2_info
->input
.rx_num_ooo_buffers
= num_desc
* 2;
1270 for (buf_idx
= 0; buf_idx
< p_ll2_info
->input
.rx_num_ooo_buffers
;
1272 p_buf
= kzalloc(sizeof(*p_buf
), GFP_KERNEL
);
1278 p_buf
->rx_buffer_size
= mtu
+ 26 + ETH_CACHE_LINE_SIZE
;
1279 p_buf
->rx_buffer_size
= (p_buf
->rx_buffer_size
+
1280 ETH_CACHE_LINE_SIZE
- 1) &
1281 ~(ETH_CACHE_LINE_SIZE
- 1);
1282 p_virt
= dma_alloc_coherent(&p_hwfn
->cdev
->pdev
->dev
,
1283 p_buf
->rx_buffer_size
,
1284 &p_buf
->rx_buffer_phys_addr
,
1292 p_buf
->rx_buffer_virt_addr
= p_virt
;
1293 qed_ooo_put_free_buffer(p_hwfn
, p_hwfn
->p_ooo_info
, p_buf
);
1296 DP_VERBOSE(p_hwfn
, QED_MSG_LL2
,
1297 "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
1298 p_ll2_info
->input
.rx_num_ooo_buffers
, p_buf
->rx_buffer_size
);
static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}
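/* Illustrative sketch (assumption, not part of this file): a client of the
 * LL2 API is expected to fill a struct qed_ll2_cbs with its handlers and a
 * cookie before acquiring a connection, roughly:
 *
 *	struct qed_ll2_cbs cbs = {
 *		.rx_comp_cb    = my_rx_comp,	(hypothetical handlers)
 *		.rx_release_cb = my_rx_release,
 *		.tx_comp_cb    = my_tx_comp,
 *		.tx_release_cb = my_tx_release,
 *		.cookie        = my_ctx,
 *	};
 *	data.cbs = &cbs;
 *
 * qed_ll2_set_cbs() rejects the acquire request unless all four mandatory
 * callbacks and the cookie are provided; slowpath_cb is optional.
 */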
static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
					struct qed_ll2_acquire_data *data,
					u8 *start_idx, u8 *last_idx)
{
	/* LL2 queue handles will be split as follows:
	 * First will be the legacy queues, and then the ctx based.
	 */
	if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
		*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
		*last_idx = *start_idx +
			    QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
	} else {
		/* QED_LL2_RX_TYPE_CTX */
		*start_idx = QED_LL2_CTX_CONN_BASE_PF;
		*last_idx = *start_idx +
			    QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
	}
}
static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}
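/* Usage sketch (illustrative, inferred from how the exported API below fits
 * together; the exact flow is up to the caller):
 *
 *	qed_ll2_acquire_connection(hwfn, &data);	allocate a handle
 *	qed_ll2_establish_connection(hwfn, handle);	ramrods, doorbells
 *	qed_ll2_post_rx_buffer(hwfn, handle, ...);	seed the Rx chain
 *	qed_ll2_prepare_tx_packet(hwfn, handle, ...);	transmit
 *	...
 *	qed_ll2_terminate_connection(hwfn, handle);	stop queues, flush
 *	qed_ll2_release_connection(hwfn, handle);	free chains and CID
 */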
1356 int qed_ll2_acquire_connection(void *cxt
, struct qed_ll2_acquire_data
*data
)
1358 struct qed_hwfn
*p_hwfn
= cxt
;
1359 qed_int_comp_cb_t comp_rx_cb
, comp_tx_cb
;
1360 struct qed_ll2_info
*p_ll2_info
= NULL
;
1361 u8 i
, first_idx
, last_idx
, *p_tx_max
;
1364 if (!data
->p_connection_handle
|| !p_hwfn
->p_ll2_info
)
1367 _qed_ll2_calc_allowed_conns(p_hwfn
, data
, &first_idx
, &last_idx
);
1369 /* Find a free connection to be used */
1370 for (i
= first_idx
; i
< last_idx
; i
++) {
1371 mutex_lock(&p_hwfn
->p_ll2_info
[i
].mutex
);
1372 if (p_hwfn
->p_ll2_info
[i
].b_active
) {
1373 mutex_unlock(&p_hwfn
->p_ll2_info
[i
].mutex
);
1377 p_hwfn
->p_ll2_info
[i
].b_active
= true;
1378 p_ll2_info
= &p_hwfn
->p_ll2_info
[i
];
1379 mutex_unlock(&p_hwfn
->p_ll2_info
[i
].mutex
);
1385 memcpy(&p_ll2_info
->input
, &data
->input
, sizeof(p_ll2_info
->input
));
1387 switch (data
->input
.tx_dest
) {
1388 case QED_LL2_TX_DEST_NW
:
1389 p_ll2_info
->tx_dest
= CORE_TX_DEST_NW
;
1391 case QED_LL2_TX_DEST_LB
:
1392 p_ll2_info
->tx_dest
= CORE_TX_DEST_LB
;
1394 case QED_LL2_TX_DEST_DROP
:
1395 p_ll2_info
->tx_dest
= CORE_TX_DEST_DROP
;
1401 if (data
->input
.conn_type
== QED_LL2_TYPE_OOO
||
1402 data
->input
.secondary_queue
)
1403 p_ll2_info
->main_func_queue
= false;
1405 p_ll2_info
->main_func_queue
= true;
1407 /* Correct maximum number of Tx BDs */
1408 p_tx_max
= &p_ll2_info
->input
.tx_max_bds_per_packet
;
1410 *p_tx_max
= CORE_LL2_TX_MAX_BDS_PER_PACKET
;
1412 *p_tx_max
= min_t(u8
, *p_tx_max
,
1413 CORE_LL2_TX_MAX_BDS_PER_PACKET
);
1415 rc
= qed_ll2_set_cbs(p_ll2_info
, data
->cbs
);
1417 DP_NOTICE(p_hwfn
, "Invalid callback functions\n");
1418 goto q_allocate_fail
;
1421 rc
= qed_ll2_acquire_connection_rx(p_hwfn
, p_ll2_info
);
1423 goto q_allocate_fail
;
1425 rc
= qed_ll2_acquire_connection_tx(p_hwfn
, p_ll2_info
);
1427 goto q_allocate_fail
;
1429 rc
= qed_ll2_acquire_connection_ooo(p_hwfn
, p_ll2_info
,
1432 goto q_allocate_fail
;
1434 /* Register callbacks for the Rx/Tx queues */
1435 if (data
->input
.conn_type
== QED_LL2_TYPE_OOO
) {
1436 comp_rx_cb
= qed_ll2_lb_rxq_completion
;
1437 comp_tx_cb
= qed_ll2_lb_txq_completion
;
1439 comp_rx_cb
= qed_ll2_rxq_completion
;
1440 comp_tx_cb
= qed_ll2_txq_completion
;
1443 if (data
->input
.rx_num_desc
) {
1444 qed_int_register_cb(p_hwfn
, comp_rx_cb
,
1445 &p_hwfn
->p_ll2_info
[i
],
1446 &p_ll2_info
->rx_queue
.rx_sb_index
,
1447 &p_ll2_info
->rx_queue
.p_fw_cons
);
1448 p_ll2_info
->rx_queue
.b_cb_registered
= true;
1451 if (data
->input
.tx_num_desc
) {
1452 qed_int_register_cb(p_hwfn
,
1454 &p_hwfn
->p_ll2_info
[i
],
1455 &p_ll2_info
->tx_queue
.tx_sb_index
,
1456 &p_ll2_info
->tx_queue
.p_fw_cons
);
1457 p_ll2_info
->tx_queue
.b_cb_registered
= true;
1460 *data
->p_connection_handle
= i
;
1464 qed_ll2_release_connection(p_hwfn
, i
);
1468 static int qed_ll2_establish_connection_rx(struct qed_hwfn
*p_hwfn
,
1469 struct qed_ll2_info
*p_ll2_conn
)
1471 enum qed_ll2_error_handle error_input
;
1472 enum core_error_handle error_mode
;
1473 u8 action_on_error
= 0;
1476 if (!QED_LL2_RX_REGISTERED(p_ll2_conn
))
1479 DIRECT_REG_WR(p_ll2_conn
->rx_queue
.set_prod_addr
, 0x0);
1480 error_input
= p_ll2_conn
->input
.ai_err_packet_too_big
;
1481 error_mode
= qed_ll2_get_error_choice(error_input
);
1482 SET_FIELD(action_on_error
,
1483 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG
, error_mode
);
1484 error_input
= p_ll2_conn
->input
.ai_err_no_buf
;
1485 error_mode
= qed_ll2_get_error_choice(error_input
);
1486 SET_FIELD(action_on_error
, CORE_RX_ACTION_ON_ERROR_NO_BUFF
, error_mode
);
1488 rc
= qed_sp_ll2_rx_queue_start(p_hwfn
, p_ll2_conn
, action_on_error
);
1492 if (p_ll2_conn
->rx_queue
.ctx_based
) {
1493 rc
= qed_db_recovery_add(p_hwfn
->cdev
,
1494 p_ll2_conn
->rx_queue
.set_prod_addr
,
1495 &p_ll2_conn
->rx_queue
.db_data
,
1496 DB_REC_WIDTH_64B
, DB_REC_KERNEL
);
1503 qed_ll2_establish_connection_ooo(struct qed_hwfn
*p_hwfn
,
1504 struct qed_ll2_info
*p_ll2_conn
)
1506 if (p_ll2_conn
->input
.conn_type
!= QED_LL2_TYPE_OOO
)
1509 qed_ooo_release_all_isles(p_hwfn
, p_hwfn
->p_ooo_info
);
1510 qed_ooo_submit_rx_buffers(p_hwfn
, p_ll2_conn
);
1513 static inline u8
qed_ll2_handle_to_queue_id(struct qed_hwfn
*p_hwfn
,
1519 if (ll2_queue_type
== QED_LL2_RX_TYPE_LEGACY
)
1520 return p_hwfn
->hw_info
.resc_start
[QED_LL2_RAM_QUEUE
] + handle
;
	/* QED_LL2_RX_TYPE_CTX
	 * FW distinguishes between the legacy queues (ram based) and the
	 * ctx based queues by the queue_id.
	 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
	 * and the queue ids above that are ctx based.
	 */
	qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
	      MAX_NUM_LL2_RX_RAM_QUEUES;

	/* See comment on the acquire connection for how the ll2
	 * queue handles are divided.
	 */
	qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);

	return qid;
}
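/* Worked example (illustrative): for a ctx based connection whose handle is
 * H (H >= QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF), the absolute queue id becomes
 *
 *	resc_start[QED_LL2_CTX_QUEUE] + MAX_NUM_LL2_RX_RAM_QUEUES +
 *	(H - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF)
 *
 * while a legacy handle maps directly to
 * resc_start[QED_LL2_RAM_QUEUE] + handle.
 */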
1539 int qed_ll2_establish_connection(void *cxt
, u8 connection_handle
)
1541 struct e4_core_conn_context
*p_cxt
;
1542 struct qed_ll2_tx_packet
*p_pkt
;
1543 struct qed_ll2_info
*p_ll2_conn
;
1544 struct qed_hwfn
*p_hwfn
= cxt
;
1545 struct qed_ll2_rx_queue
*p_rx
;
1546 struct qed_ll2_tx_queue
*p_tx
;
1547 struct qed_cxt_info cxt_info
;
1548 struct qed_ptt
*p_ptt
;
1554 p_ptt
= qed_ptt_acquire(p_hwfn
);
1558 p_ll2_conn
= qed_ll2_handle_sanity_lock(p_hwfn
, connection_handle
);
1564 p_rx
= &p_ll2_conn
->rx_queue
;
1565 p_tx
= &p_ll2_conn
->tx_queue
;
1567 qed_chain_reset(&p_rx
->rxq_chain
);
1568 qed_chain_reset(&p_rx
->rcq_chain
);
1569 INIT_LIST_HEAD(&p_rx
->active_descq
);
1570 INIT_LIST_HEAD(&p_rx
->free_descq
);
1571 INIT_LIST_HEAD(&p_rx
->posting_descq
);
1572 spin_lock_init(&p_rx
->lock
);
1573 capacity
= qed_chain_get_capacity(&p_rx
->rxq_chain
);
1574 for (i
= 0; i
< capacity
; i
++)
1575 list_add_tail(&p_rx
->descq_array
[i
].list_entry
,
1577 *p_rx
->p_fw_cons
= 0;
1579 qed_chain_reset(&p_tx
->txq_chain
);
1580 INIT_LIST_HEAD(&p_tx
->active_descq
);
1581 INIT_LIST_HEAD(&p_tx
->free_descq
);
1582 INIT_LIST_HEAD(&p_tx
->sending_descq
);
1583 spin_lock_init(&p_tx
->lock
);
1584 capacity
= qed_chain_get_capacity(&p_tx
->txq_chain
);
1585 /* First element is part of the packet, rest are flexibly added */
1586 desc_size
= (sizeof(*p_pkt
) +
1587 (p_ll2_conn
->input
.tx_max_bds_per_packet
- 1) *
1588 sizeof(p_pkt
->bds_set
));
1590 for (i
= 0; i
< capacity
; i
++) {
1591 p_pkt
= p_tx
->descq_mem
+ desc_size
* i
;
1592 list_add_tail(&p_pkt
->list_entry
, &p_tx
->free_descq
);
1594 p_tx
->cur_completing_bd_idx
= 0;
1596 p_tx
->b_completing_packet
= false;
1597 p_tx
->cur_send_packet
= NULL
;
1598 p_tx
->cur_send_frag_num
= 0;
1599 p_tx
->cur_completing_frag_num
= 0;
1600 *p_tx
->p_fw_cons
= 0;
1602 rc
= qed_cxt_acquire_cid(p_hwfn
, PROTOCOLID_CORE
, &p_ll2_conn
->cid
);
1605 cxt_info
.iid
= p_ll2_conn
->cid
;
1606 rc
= qed_cxt_get_cid_info(p_hwfn
, &cxt_info
);
1608 DP_NOTICE(p_hwfn
, "Cannot find context info for cid=%d\n",
1613 p_cxt
= cxt_info
.p_cxt
;
1615 memset(p_cxt
, 0, sizeof(*p_cxt
));
1617 qid
= qed_ll2_handle_to_queue_id(p_hwfn
, connection_handle
,
1618 p_ll2_conn
->input
.rx_conn_type
);
1619 p_ll2_conn
->queue_id
= qid
;
1620 p_ll2_conn
->tx_stats_id
= qid
;
1622 DP_VERBOSE(p_hwfn
, QED_MSG_LL2
,
1623 "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
1624 p_hwfn
->rel_pf_id
, p_ll2_conn
->input
.rx_conn_type
, qid
);
1626 if (p_ll2_conn
->input
.rx_conn_type
== QED_LL2_RX_TYPE_LEGACY
) {
1627 p_rx
->set_prod_addr
= p_hwfn
->regview
+
1628 GTT_BAR0_MAP_REG_TSDM_RAM
+ TSTORM_LL2_RX_PRODS_OFFSET(qid
);
1630 /* QED_LL2_RX_TYPE_CTX - using doorbell */
1631 p_rx
->ctx_based
= 1;
1633 p_rx
->set_prod_addr
= p_hwfn
->doorbells
+
1634 p_hwfn
->dpi_start_offset
+
1635 DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE
);
1637 /* prepare db data */
1638 p_rx
->db_data
.icid
= cpu_to_le16((u16
)p_ll2_conn
->cid
);
1639 SET_FIELD(p_rx
->db_data
.params
,
1640 CORE_PWM_PROD_UPDATE_DATA_AGG_CMD
, DB_AGG_CMD_SET
);
1641 SET_FIELD(p_rx
->db_data
.params
,
1642 CORE_PWM_PROD_UPDATE_DATA_RESERVED1
, 0);
1645 p_tx
->doorbell_addr
= (u8 __iomem
*)p_hwfn
->doorbells
+
1646 qed_db_addr(p_ll2_conn
->cid
,
1648 /* prepare db data */
1649 SET_FIELD(p_tx
->db_msg
.params
, CORE_DB_DATA_DEST
, DB_DEST_XCM
);
1650 SET_FIELD(p_tx
->db_msg
.params
, CORE_DB_DATA_AGG_CMD
, DB_AGG_CMD_SET
);
1651 SET_FIELD(p_tx
->db_msg
.params
, CORE_DB_DATA_AGG_VAL_SEL
,
1652 DQ_XCM_CORE_TX_BD_PROD_CMD
);
1653 p_tx
->db_msg
.agg_flags
= DQ_XCM_CORE_DQ_CF_CMD
;
1655 rc
= qed_ll2_establish_connection_rx(p_hwfn
, p_ll2_conn
);
1659 rc
= qed_sp_ll2_tx_queue_start(p_hwfn
, p_ll2_conn
);
1663 if (!QED_IS_RDMA_PERSONALITY(p_hwfn
))
1664 qed_wr(p_hwfn
, p_ptt
, PRS_REG_USE_LIGHT_L2
, 1);
1666 qed_ll2_establish_connection_ooo(p_hwfn
, p_ll2_conn
);
1668 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_FCOE
) {
1669 if (!test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
))
1670 qed_llh_add_protocol_filter(p_hwfn
->cdev
, 0,
1671 QED_LLH_FILTER_ETHERTYPE
,
1673 qed_llh_add_protocol_filter(p_hwfn
->cdev
, 0,
1674 QED_LLH_FILTER_ETHERTYPE
,
1679 qed_ptt_release(p_hwfn
, p_ptt
);
1683 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn
*p_hwfn
,
1684 struct qed_ll2_rx_queue
*p_rx
,
1685 struct qed_ll2_rx_packet
*p_curp
)
1687 struct qed_ll2_rx_packet
*p_posting_packet
= NULL
;
1688 struct core_ll2_rx_prod rx_prod
= { 0, 0 };
1689 bool b_notify_fw
= false;
1690 u16 bd_prod
, cq_prod
;
1692 /* This handles the flushing of already posted buffers */
1693 while (!list_empty(&p_rx
->posting_descq
)) {
1694 p_posting_packet
= list_first_entry(&p_rx
->posting_descq
,
1695 struct qed_ll2_rx_packet
,
1697 list_move_tail(&p_posting_packet
->list_entry
,
1698 &p_rx
->active_descq
);
1702 /* This handles the supplied packet [if there is one] */
1704 list_add_tail(&p_curp
->list_entry
, &p_rx
->active_descq
);
1711 bd_prod
= qed_chain_get_prod_idx(&p_rx
->rxq_chain
);
1712 cq_prod
= qed_chain_get_prod_idx(&p_rx
->rcq_chain
);
1713 if (p_rx
->ctx_based
) {
1714 /* update producer by giving a doorbell */
1715 p_rx
->db_data
.prod
.bd_prod
= cpu_to_le16(bd_prod
);
1716 p_rx
->db_data
.prod
.cqe_prod
= cpu_to_le16(cq_prod
);
1717 /* Make sure chain element is updated before ringing the
1721 DIRECT_REG_WR64(p_rx
->set_prod_addr
,
1722 *((u64
*)&p_rx
->db_data
));
1724 rx_prod
.bd_prod
= cpu_to_le16(bd_prod
);
1725 rx_prod
.cqe_prod
= cpu_to_le16(cq_prod
);
1727 /* Make sure chain element is updated before ringing the
1732 DIRECT_REG_WR(p_rx
->set_prod_addr
, *((u32
*)&rx_prod
));
1736 int qed_ll2_post_rx_buffer(void *cxt
,
1737 u8 connection_handle
,
1739 u16 buf_len
, void *cookie
, u8 notify_fw
)
1741 struct qed_hwfn
*p_hwfn
= cxt
;
1742 struct core_rx_bd_with_buff_len
*p_curb
= NULL
;
1743 struct qed_ll2_rx_packet
*p_curp
= NULL
;
1744 struct qed_ll2_info
*p_ll2_conn
;
1745 struct qed_ll2_rx_queue
*p_rx
;
1746 unsigned long flags
;
1750 p_ll2_conn
= qed_ll2_handle_sanity(p_hwfn
, connection_handle
);
1753 p_rx
= &p_ll2_conn
->rx_queue
;
1755 spin_lock_irqsave(&p_rx
->lock
, flags
);
1756 if (!list_empty(&p_rx
->free_descq
))
1757 p_curp
= list_first_entry(&p_rx
->free_descq
,
1758 struct qed_ll2_rx_packet
, list_entry
);
1760 if (qed_chain_get_elem_left(&p_rx
->rxq_chain
) &&
1761 qed_chain_get_elem_left(&p_rx
->rcq_chain
)) {
1762 p_data
= qed_chain_produce(&p_rx
->rxq_chain
);
1763 p_curb
= (struct core_rx_bd_with_buff_len
*)p_data
;
1764 qed_chain_produce(&p_rx
->rcq_chain
);
	/* If we're lacking entries, let's try to flush buffers to FW */
1769 if (!p_curp
|| !p_curb
) {
1775 /* We have an Rx packet we can fill */
1776 DMA_REGPAIR_LE(p_curb
->addr
, addr
);
1777 p_curb
->buff_length
= cpu_to_le16(buf_len
);
1778 p_curp
->rx_buf_addr
= addr
;
1779 p_curp
->cookie
= cookie
;
1780 p_curp
->rxq_bd
= p_curb
;
1781 p_curp
->buf_length
= buf_len
;
1782 list_del(&p_curp
->list_entry
);
1784 /* Check if we only want to enqueue this packet without informing FW */
1786 list_add_tail(&p_curp
->list_entry
, &p_rx
->posting_descq
);
1791 qed_ll2_post_rx_buffer_notify_fw(p_hwfn
, p_rx
, p_curp
);
1793 spin_unlock_irqrestore(&p_rx
->lock
, flags
);
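/* Descriptive note (added for clarity): Rx buffers are owned by the caller.
 * Each buffer posted here is returned exactly once through rx_comp_cb (or
 * rx_release_cb on teardown), and it is the owner's responsibility to post a
 * replacement via qed_ll2_post_rx_buffer() to keep the chain full, as the
 * qed_ll2b_complete_rx_packet() helper above does.
 */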
1797 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn
*p_hwfn
,
1798 struct qed_ll2_tx_queue
*p_tx
,
1799 struct qed_ll2_tx_packet
*p_curp
,
1800 struct qed_ll2_tx_pkt_info
*pkt
,
1803 list_del(&p_curp
->list_entry
);
1804 p_curp
->cookie
= pkt
->cookie
;
1805 p_curp
->bd_used
= pkt
->num_of_bds
;
1806 p_curp
->notify_fw
= notify_fw
;
1807 p_tx
->cur_send_packet
= p_curp
;
1808 p_tx
->cur_send_frag_num
= 0;
1810 p_curp
->bds_set
[p_tx
->cur_send_frag_num
].tx_frag
= pkt
->first_frag
;
1811 p_curp
->bds_set
[p_tx
->cur_send_frag_num
].frag_len
= pkt
->first_frag_len
;
1812 p_tx
->cur_send_frag_num
++;
1816 qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn
*p_hwfn
,
1817 struct qed_ll2_info
*p_ll2
,
1818 struct qed_ll2_tx_packet
*p_curp
,
1819 struct qed_ll2_tx_pkt_info
*pkt
)
1821 struct qed_chain
*p_tx_chain
= &p_ll2
->tx_queue
.txq_chain
;
1822 u16 prod_idx
= qed_chain_get_prod_idx(p_tx_chain
);
1823 struct core_tx_bd
*start_bd
= NULL
;
1824 enum core_roce_flavor_type roce_flavor
;
1825 enum core_tx_dest tx_dest
;
1826 u16 bd_data
= 0, frag_idx
;
1828 roce_flavor
= (pkt
->qed_roce_flavor
== QED_LL2_ROCE
) ? CORE_ROCE
1831 switch (pkt
->tx_dest
) {
1832 case QED_LL2_TX_DEST_NW
:
1833 tx_dest
= CORE_TX_DEST_NW
;
1835 case QED_LL2_TX_DEST_LB
:
1836 tx_dest
= CORE_TX_DEST_LB
;
1838 case QED_LL2_TX_DEST_DROP
:
1839 tx_dest
= CORE_TX_DEST_DROP
;
1842 tx_dest
= CORE_TX_DEST_LB
;
1846 start_bd
= (struct core_tx_bd
*)qed_chain_produce(p_tx_chain
);
1847 if (QED_IS_IWARP_PERSONALITY(p_hwfn
) &&
1848 p_ll2
->input
.conn_type
== QED_LL2_TYPE_OOO
) {
1849 start_bd
->nw_vlan_or_lb_echo
=
1850 cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE
);
1852 start_bd
->nw_vlan_or_lb_echo
= cpu_to_le16(pkt
->vlan
);
1853 if (test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
) &&
1854 p_ll2
->input
.conn_type
== QED_LL2_TYPE_FCOE
)
1855 pkt
->remove_stag
= true;
1858 SET_FIELD(start_bd
->bitfield1
, CORE_TX_BD_L4_HDR_OFFSET_W
,
1859 cpu_to_le16(pkt
->l4_hdr_offset_w
));
1860 SET_FIELD(start_bd
->bitfield1
, CORE_TX_BD_TX_DST
, tx_dest
);
1861 bd_data
|= pkt
->bd_flags
;
1862 SET_FIELD(bd_data
, CORE_TX_BD_DATA_START_BD
, 0x1);
1863 SET_FIELD(bd_data
, CORE_TX_BD_DATA_NBDS
, pkt
->num_of_bds
);
1864 SET_FIELD(bd_data
, CORE_TX_BD_DATA_ROCE_FLAV
, roce_flavor
);
1865 SET_FIELD(bd_data
, CORE_TX_BD_DATA_IP_CSUM
, !!(pkt
->enable_ip_cksum
));
1866 SET_FIELD(bd_data
, CORE_TX_BD_DATA_L4_CSUM
, !!(pkt
->enable_l4_cksum
));
1867 SET_FIELD(bd_data
, CORE_TX_BD_DATA_IP_LEN
, !!(pkt
->calc_ip_len
));
1868 SET_FIELD(bd_data
, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION
,
1869 !!(pkt
->remove_stag
));
1871 start_bd
->bd_data
.as_bitfield
= cpu_to_le16(bd_data
);
1872 DMA_REGPAIR_LE(start_bd
->addr
, pkt
->first_frag
);
1873 start_bd
->nbytes
= cpu_to_le16(pkt
->first_frag_len
);
1876 (NETIF_MSG_TX_QUEUED
| QED_MSG_LL2
),
1877 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1880 p_ll2
->input
.conn_type
,
1882 pkt
->first_frag_len
,
1884 le32_to_cpu(start_bd
->addr
.hi
),
1885 le32_to_cpu(start_bd
->addr
.lo
));
1887 if (p_ll2
->tx_queue
.cur_send_frag_num
== pkt
->num_of_bds
)
1890 /* Need to provide the packet with additional BDs for frags */
1891 for (frag_idx
= p_ll2
->tx_queue
.cur_send_frag_num
;
1892 frag_idx
< pkt
->num_of_bds
; frag_idx
++) {
1893 struct core_tx_bd
**p_bd
= &p_curp
->bds_set
[frag_idx
].txq_bd
;
1895 *p_bd
= (struct core_tx_bd
*)qed_chain_produce(p_tx_chain
);
1896 (*p_bd
)->bd_data
.as_bitfield
= 0;
1897 (*p_bd
)->bitfield1
= 0;
1898 p_curp
->bds_set
[frag_idx
].tx_frag
= 0;
1899 p_curp
->bds_set
[frag_idx
].frag_len
= 0;
1903 /* This should be called while the Txq spinlock is being held */
1904 static void qed_ll2_tx_packet_notify(struct qed_hwfn
*p_hwfn
,
1905 struct qed_ll2_info
*p_ll2_conn
)
1907 bool b_notify
= p_ll2_conn
->tx_queue
.cur_send_packet
->notify_fw
;
1908 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
1909 struct qed_ll2_tx_packet
*p_pkt
= NULL
;
1912 /* If there are missing BDs, don't do anything now */
1913 if (p_ll2_conn
->tx_queue
.cur_send_frag_num
!=
1914 p_ll2_conn
->tx_queue
.cur_send_packet
->bd_used
)
1917 /* Push the current packet to the list and clean after it */
1918 list_add_tail(&p_ll2_conn
->tx_queue
.cur_send_packet
->list_entry
,
1919 &p_ll2_conn
->tx_queue
.sending_descq
);
1920 p_ll2_conn
->tx_queue
.cur_send_packet
= NULL
;
1921 p_ll2_conn
->tx_queue
.cur_send_frag_num
= 0;
1923 /* Notify FW of packet only if requested to */
1927 bd_prod
= qed_chain_get_prod_idx(&p_ll2_conn
->tx_queue
.txq_chain
);
1929 while (!list_empty(&p_tx
->sending_descq
)) {
1930 p_pkt
= list_first_entry(&p_tx
->sending_descq
,
1931 struct qed_ll2_tx_packet
, list_entry
);
1935 list_move_tail(&p_pkt
->list_entry
, &p_tx
->active_descq
);
1938 p_tx
->db_msg
.spq_prod
= cpu_to_le16(bd_prod
);
1940 /* Make sure the BDs data is updated before ringing the doorbell */
1943 DIRECT_REG_WR(p_tx
->doorbell_addr
, *((u32
*)&p_tx
->db_msg
));
1946 (NETIF_MSG_TX_QUEUED
| QED_MSG_LL2
),
1947 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1948 p_ll2_conn
->queue_id
,
1950 p_ll2_conn
->input
.conn_type
, p_tx
->db_msg
.spq_prod
);
1953 int qed_ll2_prepare_tx_packet(void *cxt
,
1954 u8 connection_handle
,
1955 struct qed_ll2_tx_pkt_info
*pkt
,
1958 struct qed_hwfn
*p_hwfn
= cxt
;
1959 struct qed_ll2_tx_packet
*p_curp
= NULL
;
1960 struct qed_ll2_info
*p_ll2_conn
= NULL
;
1961 struct qed_ll2_tx_queue
*p_tx
;
1962 struct qed_chain
*p_tx_chain
;
1963 unsigned long flags
;
1966 p_ll2_conn
= qed_ll2_handle_sanity(p_hwfn
, connection_handle
);
1969 p_tx
= &p_ll2_conn
->tx_queue
;
1970 p_tx_chain
= &p_tx
->txq_chain
;
1972 if (pkt
->num_of_bds
> p_ll2_conn
->input
.tx_max_bds_per_packet
)
1975 spin_lock_irqsave(&p_tx
->lock
, flags
);
1976 if (p_tx
->cur_send_packet
) {
1981 /* Get entry, but only if we have tx elements for it */
1982 if (!list_empty(&p_tx
->free_descq
))
1983 p_curp
= list_first_entry(&p_tx
->free_descq
,
1984 struct qed_ll2_tx_packet
, list_entry
);
1985 if (p_curp
&& qed_chain_get_elem_left(p_tx_chain
) < pkt
->num_of_bds
)
1993 /* Prepare packet and BD, and perhaps send a doorbell to FW */
1994 qed_ll2_prepare_tx_packet_set(p_hwfn
, p_tx
, p_curp
, pkt
, notify_fw
);
1996 qed_ll2_prepare_tx_packet_set_bd(p_hwfn
, p_ll2_conn
, p_curp
, pkt
);
1998 qed_ll2_tx_packet_notify(p_hwfn
, p_ll2_conn
);
2001 spin_unlock_irqrestore(&p_tx
->lock
, flags
);
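/* Usage sketch (illustrative): for a packet scattered over N fragments the
 * caller sets pkt->num_of_bds = N, points pkt->first_frag at the first
 * fragment and calls qed_ll2_prepare_tx_packet(); the remaining N - 1
 * fragments are then added one by one with
 * qed_ll2_set_fragment_of_tx_packet(). The doorbell is only rung once all
 * BDs of the packet have been provided.
 */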
2005 int qed_ll2_set_fragment_of_tx_packet(void *cxt
,
2006 u8 connection_handle
,
2007 dma_addr_t addr
, u16 nbytes
)
2009 struct qed_ll2_tx_packet
*p_cur_send_packet
= NULL
;
2010 struct qed_hwfn
*p_hwfn
= cxt
;
2011 struct qed_ll2_info
*p_ll2_conn
= NULL
;
2012 u16 cur_send_frag_num
= 0;
2013 struct core_tx_bd
*p_bd
;
2014 unsigned long flags
;
2016 p_ll2_conn
= qed_ll2_handle_sanity(p_hwfn
, connection_handle
);
2020 if (!p_ll2_conn
->tx_queue
.cur_send_packet
)
2023 p_cur_send_packet
= p_ll2_conn
->tx_queue
.cur_send_packet
;
2024 cur_send_frag_num
= p_ll2_conn
->tx_queue
.cur_send_frag_num
;
2026 if (cur_send_frag_num
>= p_cur_send_packet
->bd_used
)
2029 /* Fill the BD information, and possibly notify FW */
2030 p_bd
= p_cur_send_packet
->bds_set
[cur_send_frag_num
].txq_bd
;
2031 DMA_REGPAIR_LE(p_bd
->addr
, addr
);
2032 p_bd
->nbytes
= cpu_to_le16(nbytes
);
2033 p_cur_send_packet
->bds_set
[cur_send_frag_num
].tx_frag
= addr
;
2034 p_cur_send_packet
->bds_set
[cur_send_frag_num
].frag_len
= nbytes
;
2036 p_ll2_conn
->tx_queue
.cur_send_frag_num
++;
2038 spin_lock_irqsave(&p_ll2_conn
->tx_queue
.lock
, flags
);
2039 qed_ll2_tx_packet_notify(p_hwfn
, p_ll2_conn
);
2040 spin_unlock_irqrestore(&p_ll2_conn
->tx_queue
.lock
, flags
);
2045 int qed_ll2_terminate_connection(void *cxt
, u8 connection_handle
)
2047 struct qed_hwfn
*p_hwfn
= cxt
;
2048 struct qed_ll2_info
*p_ll2_conn
= NULL
;
2050 struct qed_ptt
*p_ptt
;
2052 p_ptt
= qed_ptt_acquire(p_hwfn
);
2056 p_ll2_conn
= qed_ll2_handle_sanity_lock(p_hwfn
, connection_handle
);
2062 /* Stop Tx & Rx of connection, if needed */
2063 if (QED_LL2_TX_REGISTERED(p_ll2_conn
)) {
2064 p_ll2_conn
->tx_queue
.b_cb_registered
= false;
2065 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
2066 rc
= qed_sp_ll2_tx_queue_stop(p_hwfn
, p_ll2_conn
);
2070 qed_ll2_txq_flush(p_hwfn
, connection_handle
);
2071 qed_int_unregister_cb(p_hwfn
, p_ll2_conn
->tx_queue
.tx_sb_index
);
2074 if (QED_LL2_RX_REGISTERED(p_ll2_conn
)) {
2075 p_ll2_conn
->rx_queue
.b_cb_registered
= false;
2076 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
2078 if (p_ll2_conn
->rx_queue
.ctx_based
)
2079 qed_db_recovery_del(p_hwfn
->cdev
,
2080 p_ll2_conn
->rx_queue
.set_prod_addr
,
2081 &p_ll2_conn
->rx_queue
.db_data
);
2083 rc
= qed_sp_ll2_rx_queue_stop(p_hwfn
, p_ll2_conn
);
2087 qed_ll2_rxq_flush(p_hwfn
, connection_handle
);
2088 qed_int_unregister_cb(p_hwfn
, p_ll2_conn
->rx_queue
.rx_sb_index
);
2091 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_OOO
)
2092 qed_ooo_release_all_isles(p_hwfn
, p_hwfn
->p_ooo_info
);
2094 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_FCOE
) {
2095 if (!test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
))
2096 qed_llh_remove_protocol_filter(p_hwfn
->cdev
, 0,
2097 QED_LLH_FILTER_ETHERTYPE
,
2099 qed_llh_remove_protocol_filter(p_hwfn
->cdev
, 0,
2100 QED_LLH_FILTER_ETHERTYPE
,
2105 qed_ptt_release(p_hwfn
, p_ptt
);
2109 static void qed_ll2_release_connection_ooo(struct qed_hwfn
*p_hwfn
,
2110 struct qed_ll2_info
*p_ll2_conn
)
2112 struct qed_ooo_buffer
*p_buffer
;
2114 if (p_ll2_conn
->input
.conn_type
!= QED_LL2_TYPE_OOO
)
2117 qed_ooo_release_all_isles(p_hwfn
, p_hwfn
->p_ooo_info
);
2118 while ((p_buffer
= qed_ooo_get_free_buffer(p_hwfn
,
2119 p_hwfn
->p_ooo_info
))) {
2120 dma_free_coherent(&p_hwfn
->cdev
->pdev
->dev
,
2121 p_buffer
->rx_buffer_size
,
2122 p_buffer
->rx_buffer_virt_addr
,
2123 p_buffer
->rx_buffer_phys_addr
);
void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	kfree(p_ll2_conn->tx_queue.descq_mem);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

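/* qed_ll2_alloc()/qed_ll2_setup()/qed_ll2_free() manage the per-hwfn array of
 * QED_MAX_NUM_OF_LL2_CONNECTIONS connection descriptors; each entry is keyed
 * by its my_id index and serialized by its own mutex.
 */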
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;

	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}

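/* Statistics helpers: each copies the relevant storm RAM block (TSTORM
 * per-port GSI stats, TSTORM/USTORM/PSTORM per-queue stats) through the PTT
 * window and accumulates the 64-bit HI/LO register pairs into the caller's
 * qed_ll2_stats, so the results can be summed across queues and engines.
 */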
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length +=
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ +=
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error +=
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard +=
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
			       struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	memset(p_stats, 0, sizeof(*p_stats));
	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
}

static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};

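/* Build a qed_ll2_acquire_data request with the defaults used on this path:
 * QED_LL2_RX_SIZE/QED_LL2_TX_SIZE descriptors, the caller's MTU, TTL0-drop and
 * VLAN-stripping options, and a Tx destination of either the network port or
 * the loopback TC when 'lb' is set (as done below for the OOO connection).
 */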
static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = p_hwfn;

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
			     struct qed_ll2_params *params)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(p_hwfn, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(p_hwfn, *handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(p_hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

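/* A storage (FCoE/iSCSI) function whose I/O-affinitized hwfn is not the
 * leading hwfn runs on engine 1 in CMT mode; such PFs also need an LL2
 * instance on engine 0, where broadcast/multicast traffic is routed.
 */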
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}

static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);

	return rc;
}

static int qed_ll2_stop(struct qed_dev *cdev)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc = 0, rc2 = 0;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
		if (rc2)
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to stop LL2 on engine 0\n");
	}

	rc = __qed_ll2_stop(p_hwfn);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");

	qed_ll2_kill_buffers(cdev);

	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc | rc2;
}

static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	int rc, rx_cnt;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
		goto release_conn;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	rx_cnt = cdev->ll2->rx_cnt;
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(p_hwfn,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (rx_cnt == cdev->ll2->rx_cnt) {
		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
		goto terminate_conn;
	}
	cdev->ll2->rx_cnt = rx_cnt;

	return 0;

terminate_conn:
	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
release_conn:
	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
	return rc;
}

static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_buffer *buffer;
	int rx_num_desc, i, rc;

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_NOTICE(cdev, "Invalid Ethernet address\n");
		return -EINVAL;
	}

	WARN_ON(!cdev->ll2->cbs);

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);

	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2.
	 * In CMT mode, in case of a storage PF which is affinitized to engine 1,
	 * LL2 is started also on engine 0 and thus we need twofold buffers.
	 */
	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
		rx_num_desc, cdev->ll2->rx_size);
	for (i = 0; i < rx_num_desc; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			rc = -ENOMEM;
			goto err0;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto err0;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	rc = __qed_ll2_start(p_hwfn, params);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start LL2\n");
		goto err0;
	}

	/* In CMT mode, always need to start LL2 on engine 0 for a storage PF,
	 * since broadcast/multicast packets are routed to engine 0.
	 */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to start LL2 on engine 0\n");
			goto err1;
		}
	}

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(p_hwfn, params);
		if (rc) {
			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
			goto err2;
		}
	}

	rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
	if (rc) {
		DP_NOTICE(cdev, "Failed to add an LLH filter\n");
		goto err3;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

err3:
	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);
err2:
	if (b_is_storage_eng1)
		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
err1:
	__qed_ll2_stop(p_hwfn);
err0:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

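/* Transmit an skb over the LL2 connection: map the linear part and post it as
 * the first BD via qed_ll2_prepare_tx_packet(), then map each page fragment
 * and chain it with qed_ll2_set_fragment_of_tx_packet(). Checksum offload is
 * not supported on this path, so checksummed skbs are rejected up front.
 */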
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache number of fragments from SKB since SKB may be freed by
	 * the completion routine after calling qed_ll2_prepare_tx_packet()
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if
	 * there are no fragments in the skb and subsequently the completion
	 * routine may run and free the SKB, so no dereferencing the SKB
	 * beyond this point unless skb has any fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
				       &pkt, 1);
	if (unlikely(rc))
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -ENOMEM;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been posted
		 * we can't free memory, will need to wait for completion
		 */
		if (unlikely(rc))
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	if (!cdev->ll2)
		return -EINVAL;

	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
		return rc;
	}

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
					 cdev->ll2->handle, stats);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to get LL2 stats on engine 0\n");
			return rc;
		}
	}

	return 0;
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)