/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
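
/* LL2 ("light L2") is a minimal Ethernet send/receive path built on
 * firmware-managed Rx/Tx chains.  It serves the non-Ethernet personalities
 * (FCoE, iSCSI, RoCE) and the qed_ll2_ops_pass interface at the bottom of
 * this file: Rx buffers are posted onto a BD chain, completions are drained
 * from the Rx/Tx completion queues, and Tx packets are assembled from one or
 * more BD fragments before the doorbell is rung.
 */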
struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}
static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					struct qed_ll2_rx_packet *p_pkt,
					struct core_rx_fast_path_cqe *p_cqe,
					bool b_last_packet)
{
	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
	struct qed_ll2_buffer *buffer = p_pkt->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 vlan = le16_to_cpu(p_cqe->vlan);
	u32 opaque_data_0, opaque_data_1;
	u8 pad = p_cqe->placement_offset;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
		   opaque_data_0, opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, packet_length, false);
	}

	/* Determine if data is valid */
	if (packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	pad += NET_SKB_PAD;
	skb_reserve(skb, pad);
	skb_put(skb, packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      opaque_data_0, opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			if (p_ll2_conn->conn.gsi_enable)
				qed_ll2b_release_tx_gsi_packet(p_hwfn,
							       p_ll2_conn->my_id,
							       p_pkt->cookie,
							       tx_frag,
							       b_last_frag,
							       b_last_packet);
			else
				qed_ll2b_complete_tx_packet(p_hwfn,
							    p_ll2_conn->my_id,
							    p_pkt->cookie,
							    tx_frag,
							    b_last_frag,
							    b_last_packet);
		}
	}
}
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->conn.gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag,
							!num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag,
						    !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long *p_lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, &flags,
							b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			rx_buf_addr = p_pkt->rx_buf_addr;
			cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
		}
	}
}
#if IS_ENABLED(CONFIG_QED_ISCSI)
static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);

	return bd_flags;
}
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n"
				  );
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
					       p_buffer->vlan, bd_flags,
					       l4_hdr_offset_w,
					       p_ll2_conn->conn.tx_dest, 0,
					       first_frag,
					       p_buffer->packet_length,
					       p_buffer, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}
static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}
static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return rc;

	if (!rx_num_ooo_buffers)
		return -EINVAL;

	for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}
static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}
static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_conn ll2_info = { 0 };
	int rc;

	ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = OOO_LB_TC;
	ll2_info.tx_dest = CORE_TX_DEST_LB;

	rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn,
				     void *p_cookie) { return -EINVAL; }
static inline int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info,
			       u16 rx_num_ooo_buffers, u16 mtu) { return 0; }
static inline void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn) { return; }
static inline void
qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_conn) { return; }
static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; }
static inline int qed_ll2_start_ooo(struct qed_dev *cdev,
				    struct qed_ll2_params *params)
{
	return -EINVAL;
}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
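
/* Slowpath (ramrod) helpers: the functions below post CORE_RAMROD_* requests
 * through the SPQ to start and stop the Rx/Tx queues of an LL2 connection in
 * firmware.  The OOO helpers above compile to no-ops when iSCSI support is
 * disabled, so the common code may call them unconditionally.
 */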
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
									  : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params pq_params;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = p_ll2_conn->conn.tx_tc;
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, rx_num_desc);

out:
	return rc;
}
static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn.conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_conn *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc = -EINVAL;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn = *p_params;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    rx_num_desc * 2, p_params->mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);

	return -ENOMEM;
}
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->conn.ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF,
		  p_ll2_conn->conn.ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

	return rc;
}
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
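
/* qed_ll2_post_rx_buffer() below binds a caller-supplied buffer to a free Rx
 * descriptor and a BD from the Rx chain.  Buffers queued with notify_fw == 0
 * are parked on posting_descq and flushed to the firmware producer in one
 * batch by qed_ll2_post_rx_buffer_notify_fw() above.
 */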
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  u8 num_of_bds,
					  dma_addr_t first_frag,
					  u16 first_frag_len, void *p_cookie,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = p_cookie;
	p_curp->bd_used = num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
	p_tx->cur_send_frag_num++;
}
static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_info *p_ll2,
					     struct qed_ll2_tx_packet *p_curp,
					     u8 num_of_bds,
					     enum core_tx_dest tx_dest,
					     u16 vlan,
					     u8 bd_flags,
					     u16 l4_hdr_offset_w,
					     enum core_roce_flavor_type type,
					     dma_addr_t first_frag,
					     u16 first_frag_len)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	u16 frag_idx;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	start_bd->bd_flags.as_bitfield = bd_flags;
	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
	DMA_REGPAIR_LE(start_bd->addr, first_frag);
	start_bd->nbytes = cpu_to_le16(first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->conn.conn_type,
		   prod_idx,
		   first_frag_len,
		   num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_flags.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		(*p_bd)->bitfield0 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}
/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
}
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
			      u8 connection_handle,
			      u8 num_of_bds,
			      u16 vlan,
			      u8 bd_flags,
			      u16 l4_hdr_offset_w,
			      enum qed_ll2_tx_dest e_tx_dest,
			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
			      dma_addr_t first_frag,
			      u16 first_frag_len, void *cookie, u8 notify_fw)
{
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	enum core_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	enum core_tx_dest tx_dest;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW :
						    CORE_TX_DEST_LB;
	if (qed_roce_flavor == QED_LL2_ROCE) {
		roce_flavor = CORE_ROCE;
	} else if (qed_roce_flavor == QED_LL2_RROCE) {
		roce_flavor = CORE_RROCE;
	} else {
		roce_flavor = CORE_ROCE;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
				      num_of_bds, first_frag,
				      first_frag_len, cookie, notify_fw);
	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
					 num_of_bds, tx_dest,
					 vlan, bd_flags, l4_hdr_offset_w,
					 roce_flavor,
					 first_frag, first_frag_len);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
					       0x8906, 0,
					       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
					       0x8914, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

	return rc;
}
void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}
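
/* Per-hwfn LL2 bookkeeping: qed_ll2_alloc()/qed_ll2_setup()/qed_ll2_free()
 * manage the array of qed_ll2_info connection slots, and the _qed_ll2_get_*
 * helpers below read the firmware per-queue statistics out of the storm RAM.
 */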
struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return NULL;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	return p_ll2_connections;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn,
		   struct qed_ll2_info *p_ll2_connections)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_ll2_connections[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn,
		  struct qed_ll2_info *p_ll2_connections)
{
	kfree(p_ll2_connections);
}
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
			HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
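
/* The functions below implement the qed_ll2_ops interface (qed_ll2_ops_pass)
 * exposed to the protocol drivers: allocating the per-device LL2 context,
 * starting/stopping the default connection, transmitting SKBs and reading
 * statistics through the leading hwfn.
 */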
static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_conn ll2_info;
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ptt *p_ptt;
	int rc, i;
	u8 gsi_enable = 1;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;
	cdev->ll2->frags_mapped = params->frags_mapped;

	/*Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		gsi_enable = 0;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		gsi_enable = 0;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	/* Prepare the temporary ll2 information */
	memset(&ll2_info, 0, sizeof(ll2_info));

	ll2_info.conn_type = conn_type;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;
	ll2_info.gsi_enable = gsi_enable;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					&cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
	}

	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
				       cdev->ll2->handle,
				       1 + skb_shinfo(skb)->nr_frags,
				       vlan, flags, 0, QED_LL2_TX_DEST_NW,
				       0 /* RoCE FLAVOR */,
				       mapping, skb->len, skb, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		if (!cdev->ll2->frags_mapped) {
			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
						       mapping))) {
				DP_NOTICE(cdev,
					  "Unable to map frag - dropping packet\n");
				rc = -ENOMEM;
				goto err;
			}
		} else {
			mapping = page_to_phys(skb_frag_page(frag)) |
			    frag->page_offset;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been posted
		 * we can't free memory, will need to wait for completion.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}
const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}