/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed_dev_api.h"
#include "qed_reg_addr.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
struct qed_cb_ll2_info {
	/* Lock protecting LL2 buffer lists in sleepless context */
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;

struct qed_ll2_buffer {
	struct list_head list;
static void qed_ll2b_complete_tx_packet(void *cxt,
					dma_addr_t first_frag_addr,
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,

	dev_kfree_skb_any(skb);
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	list_del(&buffer->list);

	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);
static void qed_ll2_kill_buffers(struct qed_dev *cdev)
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
static void qed_ll2b_complete_rx_packet(void *cxt,
					struct qed_ll2_comp_rx_data *data)
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;

		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)

	/* Allocate a replacement for buffer; Reuse upon failure */
	rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,

	/* If need to reuse or there's no replacement buffer, repost this */
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
		DP_INFO(cdev, "Failed to build SKB\n");

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_1);
		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
				    QED_MSG_LL2 | QED_MSG_STORAGE),
			   "Dropping the packet\n");

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);
		qed_ll2_dealloc_buffer(cdev, buffer);
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)

	if (!p_hwfn->p_ll2_info)

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

		mutex_lock(&p_ll2_conn->mutex);
	if (p_ll2_conn->b_active)
		mutex_unlock(&p_ll2_conn->mutex);

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   u8 connection_handle)
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
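
/* Drain every packet still sitting on the Tx active list of a connection that
 * is being torn down: each descriptor is moved back to the free list and the
 * owner is notified through tx_release_cb (for OOO connections the buffer is
 * returned to the OOO free pool instead).
 */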
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);

	p_tx = &p_ll2_conn->tx_queue;

	spin_lock_irqsave(&p_tx->lock, flags);
	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		spin_unlock_irqrestore(&p_tx->lock, flags);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,

		spin_lock_irqsave(&p_tx->lock, flags);
	spin_unlock_irqrestore(&p_tx->lock, flags);
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

		if (list_empty(&p_tx->active_descq))

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
				  "Rest of BDs does not cover whole packet\n");

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);

	p_tx->b_completing_packet = false;

	spin_unlock_irqrestore(&p_tx->lock, flags);
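
/* The two helpers below unpack a firmware Rx CQE (GSI or regular fast-path
 * format) into the driver's qed_ll2_comp_rx_data representation, converting
 * the little-endian firmware fields to CPU byte order.
 */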
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);

	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
			struct qed_ll2_info *p_ll2_conn,
			union core_rx_cqe_union *p_cqe,
			unsigned long *p_lock_flags)
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_slow_path_cqe *sp_cqe;

	sp_cqe = &p_cqe->rx_cqe_sp;
	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
			  sp_cqe->ramrod_cmd_id);

	if (!p_ll2_conn->cbs.slowpath_cb) {
			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
				    le32_to_cpu(sp_cqe->opaque_data.data[1]));

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
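
/* Rx completion handler registered with the status block: walks the RCQ from
 * the driver's consumer index up to the firmware consumer index and dispatches
 * each CQE to the slow-path or regular/GSI completion handlers above.
 */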
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		      (union core_rx_cqe_union *)
		      qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,

	spin_unlock_irqrestore(&p_rx->lock, flags);
static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
		spin_unlock_irqrestore(&p_rx->lock, flags);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      rx_buf_addr, b_last);

		spin_lock_irqsave(&p_rx->lock, flags);
	spin_unlock_irqrestore(&p_rx->lock, flags);
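
/* Slow-path CQE handling for the loopback (OOO) Rx queue: a queue-flush
 * ramrod carrying a TCP_EVENT_DELETE_ISLES opcode causes all isles of the
 * affected connection to be released.
 */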
612 qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn
*p_hwfn
,
613 struct core_rx_slow_path_cqe
*p_cqe
)
615 struct ooo_opaque
*iscsi_ooo
;
618 if (p_cqe
->ramrod_cmd_id
!= CORE_RAMROD_RX_QUEUE_FLUSH
)
621 iscsi_ooo
= (struct ooo_opaque
*)&p_cqe
->opaque_data
;
622 if (iscsi_ooo
->ooo_opcode
!= TCP_EVENT_DELETE_ISLES
)
625 /* Need to make a flush */
626 cid
= le32_to_cpu(iscsi_ooo
->cid
);
627 qed_ooo_release_connection_isles(p_hwfn
, p_hwfn
->p_ooo_info
, cid
);
632 static int qed_ll2_lb_rxq_handler(struct qed_hwfn
*p_hwfn
,
633 struct qed_ll2_info
*p_ll2_conn
)
635 struct qed_ll2_rx_queue
*p_rx
= &p_ll2_conn
->rx_queue
;
636 u16 packet_length
= 0, parse_flags
= 0, vlan
= 0;
637 struct qed_ll2_rx_packet
*p_pkt
= NULL
;
638 u32 num_ooo_add_to_peninsula
= 0, cid
;
639 union core_rx_cqe_union
*cqe
= NULL
;
640 u16 cq_new_idx
= 0, cq_old_idx
= 0;
641 struct qed_ooo_buffer
*p_buffer
;
642 struct ooo_opaque
*iscsi_ooo
;
643 u8 placement_offset
= 0;
646 cq_new_idx
= le16_to_cpu(*p_rx
->p_fw_cons
);
647 cq_old_idx
= qed_chain_get_cons_idx(&p_rx
->rcq_chain
);
648 if (cq_new_idx
== cq_old_idx
)
651 while (cq_new_idx
!= cq_old_idx
) {
652 struct core_rx_fast_path_cqe
*p_cqe_fp
;
654 cqe
= qed_chain_consume(&p_rx
->rcq_chain
);
655 cq_old_idx
= qed_chain_get_cons_idx(&p_rx
->rcq_chain
);
656 cqe_type
= cqe
->rx_cqe_sp
.type
;
658 if (cqe_type
== CORE_RX_CQE_TYPE_SLOW_PATH
)
659 if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn
,
663 if (cqe_type
!= CORE_RX_CQE_TYPE_REGULAR
) {
665 "Got a non-regular LB LL2 completion [type 0x%02x]\n",
669 p_cqe_fp
= &cqe
->rx_cqe_fp
;
671 placement_offset
= p_cqe_fp
->placement_offset
;
672 parse_flags
= le16_to_cpu(p_cqe_fp
->parse_flags
.flags
);
673 packet_length
= le16_to_cpu(p_cqe_fp
->packet_length
);
674 vlan
= le16_to_cpu(p_cqe_fp
->vlan
);
675 iscsi_ooo
= (struct ooo_opaque
*)&p_cqe_fp
->opaque_data
;
676 qed_ooo_save_history_entry(p_hwfn
, p_hwfn
->p_ooo_info
,
678 cid
= le32_to_cpu(iscsi_ooo
->cid
);
680 /* Process delete isle first */
681 if (iscsi_ooo
->drop_size
)
682 qed_ooo_delete_isles(p_hwfn
, p_hwfn
->p_ooo_info
, cid
,
683 iscsi_ooo
->drop_isle
,
684 iscsi_ooo
->drop_size
);
686 if (iscsi_ooo
->ooo_opcode
== TCP_EVENT_NOP
)
689 /* Now process create/add/join isles */
690 if (list_empty(&p_rx
->active_descq
)) {
692 "LL2 OOO RX chain has no submitted buffers\n"
697 p_pkt
= list_first_entry(&p_rx
->active_descq
,
698 struct qed_ll2_rx_packet
, list_entry
);
700 if ((iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_NEW_ISLE
) ||
701 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_ISLE_RIGHT
) ||
702 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_ISLE_LEFT
) ||
703 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_ADD_PEN
) ||
704 (iscsi_ooo
->ooo_opcode
== TCP_EVENT_JOIN
)) {
707 "LL2 OOO RX packet is not valid\n");
710 list_del(&p_pkt
->list_entry
);
711 p_buffer
= (struct qed_ooo_buffer
*)p_pkt
->cookie
;
712 p_buffer
->packet_length
= packet_length
;
713 p_buffer
->parse_flags
= parse_flags
;
714 p_buffer
->vlan
= vlan
;
715 p_buffer
->placement_offset
= placement_offset
;
716 qed_chain_consume(&p_rx
->rxq_chain
);
717 list_add_tail(&p_pkt
->list_entry
, &p_rx
->free_descq
);
719 switch (iscsi_ooo
->ooo_opcode
) {
720 case TCP_EVENT_ADD_NEW_ISLE
:
721 qed_ooo_add_new_isle(p_hwfn
,
727 case TCP_EVENT_ADD_ISLE_RIGHT
:
728 qed_ooo_add_new_buffer(p_hwfn
,
735 case TCP_EVENT_ADD_ISLE_LEFT
:
736 qed_ooo_add_new_buffer(p_hwfn
,
744 qed_ooo_add_new_buffer(p_hwfn
,
747 iscsi_ooo
->ooo_isle
+
751 qed_ooo_join_isles(p_hwfn
,
753 cid
, iscsi_ooo
->ooo_isle
);
755 case TCP_EVENT_ADD_PEN
:
756 num_ooo_add_to_peninsula
++;
757 qed_ooo_put_ready_buffer(p_hwfn
,
764 "Unexpected event (%d) TX OOO completion\n",
765 iscsi_ooo
->ooo_opcode
);
773 qed_ooo_submit_tx_buffers(struct qed_hwfn
*p_hwfn
,
774 struct qed_ll2_info
*p_ll2_conn
)
776 struct qed_ll2_tx_pkt_info tx_pkt
;
777 struct qed_ooo_buffer
*p_buffer
;
779 dma_addr_t first_frag
;
783 /* Submit Tx buffers here */
784 while ((p_buffer
= qed_ooo_get_ready_buffer(p_hwfn
,
785 p_hwfn
->p_ooo_info
))) {
789 first_frag
= p_buffer
->rx_buffer_phys_addr
+
790 p_buffer
->placement_offset
;
791 SET_FIELD(bd_flags
, CORE_TX_BD_DATA_FORCE_VLAN_MODE
, 1);
792 SET_FIELD(bd_flags
, CORE_TX_BD_DATA_L4_PROTOCOL
, 1);
794 memset(&tx_pkt
, 0, sizeof(tx_pkt
));
795 tx_pkt
.num_of_bds
= 1;
796 tx_pkt
.vlan
= p_buffer
->vlan
;
797 tx_pkt
.bd_flags
= bd_flags
;
798 tx_pkt
.l4_hdr_offset_w
= l4_hdr_offset_w
;
799 switch (p_ll2_conn
->tx_dest
) {
800 case CORE_TX_DEST_NW
:
801 tx_pkt
.tx_dest
= QED_LL2_TX_DEST_NW
;
803 case CORE_TX_DEST_LB
:
804 tx_pkt
.tx_dest
= QED_LL2_TX_DEST_LB
;
806 case CORE_TX_DEST_DROP
:
808 tx_pkt
.tx_dest
= QED_LL2_TX_DEST_DROP
;
811 tx_pkt
.first_frag
= first_frag
;
812 tx_pkt
.first_frag_len
= p_buffer
->packet_length
;
813 tx_pkt
.cookie
= p_buffer
;
815 rc
= qed_ll2_prepare_tx_packet(p_hwfn
, p_ll2_conn
->my_id
,
818 qed_ooo_put_ready_buffer(p_hwfn
, p_hwfn
->p_ooo_info
,
826 qed_ooo_submit_rx_buffers(struct qed_hwfn
*p_hwfn
,
827 struct qed_ll2_info
*p_ll2_conn
)
829 struct qed_ooo_buffer
*p_buffer
;
832 while ((p_buffer
= qed_ooo_get_free_buffer(p_hwfn
,
833 p_hwfn
->p_ooo_info
))) {
834 rc
= qed_ll2_post_rx_buffer(p_hwfn
,
836 p_buffer
->rx_buffer_phys_addr
,
839 qed_ooo_put_free_buffer(p_hwfn
,
840 p_hwfn
->p_ooo_info
, p_buffer
);
846 static int qed_ll2_lb_rxq_completion(struct qed_hwfn
*p_hwfn
, void *p_cookie
)
848 struct qed_ll2_info
*p_ll2_conn
= (struct qed_ll2_info
*)p_cookie
;
851 if (!QED_LL2_RX_REGISTERED(p_ll2_conn
))
854 rc
= qed_ll2_lb_rxq_handler(p_hwfn
, p_ll2_conn
);
858 qed_ooo_submit_rx_buffers(p_hwfn
, p_ll2_conn
);
859 qed_ooo_submit_tx_buffers(p_hwfn
, p_ll2_conn
);
864 static int qed_ll2_lb_txq_completion(struct qed_hwfn
*p_hwfn
, void *p_cookie
)
866 struct qed_ll2_info
*p_ll2_conn
= (struct qed_ll2_info
*)p_cookie
;
867 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
868 struct qed_ll2_tx_packet
*p_pkt
= NULL
;
869 struct qed_ooo_buffer
*p_buffer
;
870 bool b_dont_submit_rx
= false;
871 u16 new_idx
= 0, num_bds
= 0;
874 if (!QED_LL2_TX_REGISTERED(p_ll2_conn
))
877 new_idx
= le16_to_cpu(*p_tx
->p_fw_cons
);
878 num_bds
= ((s16
)new_idx
- (s16
)p_tx
->bds_idx
);
884 if (list_empty(&p_tx
->active_descq
))
887 p_pkt
= list_first_entry(&p_tx
->active_descq
,
888 struct qed_ll2_tx_packet
, list_entry
);
892 if (p_pkt
->bd_used
!= 1) {
894 "Unexpectedly many BDs(%d) in TX OOO completion\n",
899 list_del(&p_pkt
->list_entry
);
903 qed_chain_consume(&p_tx
->txq_chain
);
905 p_buffer
= (struct qed_ooo_buffer
*)p_pkt
->cookie
;
906 list_add_tail(&p_pkt
->list_entry
, &p_tx
->free_descq
);
908 if (b_dont_submit_rx
) {
909 qed_ooo_put_free_buffer(p_hwfn
, p_hwfn
->p_ooo_info
,
914 rc
= qed_ll2_post_rx_buffer(p_hwfn
, p_ll2_conn
->my_id
,
915 p_buffer
->rx_buffer_phys_addr
, 0,
918 qed_ooo_put_free_buffer(p_hwfn
,
919 p_hwfn
->p_ooo_info
, p_buffer
);
920 b_dont_submit_rx
= true;
924 qed_ooo_submit_tx_buffers(p_hwfn
, p_ll2_conn
);
929 static void qed_ll2_stop_ooo(struct qed_dev
*cdev
)
931 struct qed_hwfn
*hwfn
= QED_LEADING_HWFN(cdev
);
932 u8
*handle
= &hwfn
->pf_params
.iscsi_pf_params
.ll2_ooo_queue_id
;
934 DP_VERBOSE(cdev
, QED_MSG_STORAGE
, "Stopping LL2 OOO queue [%02x]\n",
937 qed_ll2_terminate_connection(hwfn
, *handle
);
938 qed_ll2_release_connection(hwfn
, *handle
);
939 *handle
= QED_LL2_UNUSED_HANDLE
;
942 static int qed_sp_ll2_rx_queue_start(struct qed_hwfn
*p_hwfn
,
943 struct qed_ll2_info
*p_ll2_conn
,
946 enum qed_ll2_conn_type conn_type
= p_ll2_conn
->input
.conn_type
;
947 struct qed_ll2_rx_queue
*p_rx
= &p_ll2_conn
->rx_queue
;
948 struct core_rx_start_ramrod_data
*p_ramrod
= NULL
;
949 struct qed_spq_entry
*p_ent
= NULL
;
950 struct qed_sp_init_data init_data
;
955 memset(&init_data
, 0, sizeof(init_data
));
956 init_data
.cid
= p_ll2_conn
->cid
;
957 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
958 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
960 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
961 CORE_RAMROD_RX_QUEUE_START
,
962 PROTOCOLID_CORE
, &init_data
);
966 p_ramrod
= &p_ent
->ramrod
.core_rx_queue_start
;
968 p_ramrod
->sb_id
= cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn
));
969 p_ramrod
->sb_index
= p_rx
->rx_sb_index
;
970 p_ramrod
->complete_event_flg
= 1;
972 p_ramrod
->mtu
= cpu_to_le16(p_ll2_conn
->input
.mtu
);
973 DMA_REGPAIR_LE(p_ramrod
->bd_base
, p_rx
->rxq_chain
.p_phys_addr
);
974 cqe_pbl_size
= (u16
)qed_chain_get_page_cnt(&p_rx
->rcq_chain
);
975 p_ramrod
->num_of_pbl_pages
= cpu_to_le16(cqe_pbl_size
);
976 DMA_REGPAIR_LE(p_ramrod
->cqe_pbl_addr
,
977 qed_chain_get_pbl_phys(&p_rx
->rcq_chain
));
979 p_ramrod
->drop_ttl0_flg
= p_ll2_conn
->input
.rx_drop_ttl0_flg
;
980 p_ramrod
->inner_vlan_stripping_en
=
981 p_ll2_conn
->input
.rx_vlan_removal_en
;
983 if (test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
) &&
984 p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_FCOE
)
985 p_ramrod
->report_outer_vlan
= 1;
986 p_ramrod
->queue_id
= p_ll2_conn
->queue_id
;
987 p_ramrod
->main_func_queue
= p_ll2_conn
->main_func_queue
? 1 : 0;
989 if (test_bit(QED_MF_LL2_NON_UNICAST
, &p_hwfn
->cdev
->mf_bits
) &&
990 p_ramrod
->main_func_queue
&& conn_type
!= QED_LL2_TYPE_ROCE
&&
991 conn_type
!= QED_LL2_TYPE_IWARP
) {
992 p_ramrod
->mf_si_bcast_accept_all
= 1;
993 p_ramrod
->mf_si_mcast_accept_all
= 1;
995 p_ramrod
->mf_si_bcast_accept_all
= 0;
996 p_ramrod
->mf_si_mcast_accept_all
= 0;
999 p_ramrod
->action_on_error
.error_type
= action_on_error
;
1000 p_ramrod
->gsi_offload_flag
= p_ll2_conn
->input
.gsi_enable
;
1001 return qed_spq_post(p_hwfn
, p_ent
, NULL
);
1004 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn
*p_hwfn
,
1005 struct qed_ll2_info
*p_ll2_conn
)
1007 enum qed_ll2_conn_type conn_type
= p_ll2_conn
->input
.conn_type
;
1008 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
1009 struct core_tx_start_ramrod_data
*p_ramrod
= NULL
;
1010 struct qed_spq_entry
*p_ent
= NULL
;
1011 struct qed_sp_init_data init_data
;
1012 u16 pq_id
= 0, pbl_size
;
1015 if (!QED_LL2_TX_REGISTERED(p_ll2_conn
))
1018 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_OOO
)
1019 p_ll2_conn
->tx_stats_en
= 0;
1021 p_ll2_conn
->tx_stats_en
= 1;
1024 memset(&init_data
, 0, sizeof(init_data
));
1025 init_data
.cid
= p_ll2_conn
->cid
;
1026 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1027 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
1029 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
1030 CORE_RAMROD_TX_QUEUE_START
,
1031 PROTOCOLID_CORE
, &init_data
);
1035 p_ramrod
= &p_ent
->ramrod
.core_tx_queue_start
;
1037 p_ramrod
->sb_id
= cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn
));
1038 p_ramrod
->sb_index
= p_tx
->tx_sb_index
;
1039 p_ramrod
->mtu
= cpu_to_le16(p_ll2_conn
->input
.mtu
);
1040 p_ramrod
->stats_en
= p_ll2_conn
->tx_stats_en
;
1041 p_ramrod
->stats_id
= p_ll2_conn
->tx_stats_id
;
1043 DMA_REGPAIR_LE(p_ramrod
->pbl_base_addr
,
1044 qed_chain_get_pbl_phys(&p_tx
->txq_chain
));
1045 pbl_size
= qed_chain_get_page_cnt(&p_tx
->txq_chain
);
1046 p_ramrod
->pbl_size
= cpu_to_le16(pbl_size
);
1048 switch (p_ll2_conn
->input
.tx_tc
) {
1050 pq_id
= qed_get_cm_pq_idx(p_hwfn
, PQ_FLAGS_LB
);
1053 pq_id
= qed_get_cm_pq_idx(p_hwfn
, PQ_FLAGS_OOO
);
1056 pq_id
= qed_get_cm_pq_idx(p_hwfn
, PQ_FLAGS_OFLD
);
1060 p_ramrod
->qm_pq_id
= cpu_to_le16(pq_id
);
1062 switch (conn_type
) {
1063 case QED_LL2_TYPE_FCOE
:
1064 p_ramrod
->conn_type
= PROTOCOLID_FCOE
;
1066 case QED_LL2_TYPE_ISCSI
:
1067 p_ramrod
->conn_type
= PROTOCOLID_ISCSI
;
1069 case QED_LL2_TYPE_ROCE
:
1070 p_ramrod
->conn_type
= PROTOCOLID_ROCE
;
1072 case QED_LL2_TYPE_IWARP
:
1073 p_ramrod
->conn_type
= PROTOCOLID_IWARP
;
1075 case QED_LL2_TYPE_OOO
:
1076 if (p_hwfn
->hw_info
.personality
== QED_PCI_ISCSI
)
1077 p_ramrod
->conn_type
= PROTOCOLID_ISCSI
;
1079 p_ramrod
->conn_type
= PROTOCOLID_IWARP
;
1082 p_ramrod
->conn_type
= PROTOCOLID_ETH
;
1083 DP_NOTICE(p_hwfn
, "Unknown connection type: %d\n", conn_type
);
1086 p_ramrod
->gsi_offload_flag
= p_ll2_conn
->input
.gsi_enable
;
1088 return qed_spq_post(p_hwfn
, p_ent
, NULL
);
1091 static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn
*p_hwfn
,
1092 struct qed_ll2_info
*p_ll2_conn
)
1094 struct core_rx_stop_ramrod_data
*p_ramrod
= NULL
;
1095 struct qed_spq_entry
*p_ent
= NULL
;
1096 struct qed_sp_init_data init_data
;
1100 memset(&init_data
, 0, sizeof(init_data
));
1101 init_data
.cid
= p_ll2_conn
->cid
;
1102 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1103 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
1105 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
1106 CORE_RAMROD_RX_QUEUE_STOP
,
1107 PROTOCOLID_CORE
, &init_data
);
1111 p_ramrod
= &p_ent
->ramrod
.core_rx_queue_stop
;
1113 p_ramrod
->complete_event_flg
= 1;
1114 p_ramrod
->queue_id
= p_ll2_conn
->queue_id
;
1116 return qed_spq_post(p_hwfn
, p_ent
, NULL
);
1119 static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn
*p_hwfn
,
1120 struct qed_ll2_info
*p_ll2_conn
)
1122 struct qed_spq_entry
*p_ent
= NULL
;
1123 struct qed_sp_init_data init_data
;
1127 memset(&init_data
, 0, sizeof(init_data
));
1128 init_data
.cid
= p_ll2_conn
->cid
;
1129 init_data
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
1130 init_data
.comp_mode
= QED_SPQ_MODE_EBLOCK
;
1132 rc
= qed_sp_init_request(p_hwfn
, &p_ent
,
1133 CORE_RAMROD_TX_QUEUE_STOP
,
1134 PROTOCOLID_CORE
, &init_data
);
1138 return qed_spq_post(p_hwfn
, p_ent
, NULL
);
1142 qed_ll2_acquire_connection_rx(struct qed_hwfn
*p_hwfn
,
1143 struct qed_ll2_info
*p_ll2_info
)
1145 struct qed_ll2_rx_packet
*p_descq
;
1149 if (!p_ll2_info
->input
.rx_num_desc
)
1152 rc
= qed_chain_alloc(p_hwfn
->cdev
,
1153 QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1154 QED_CHAIN_MODE_NEXT_PTR
,
1155 QED_CHAIN_CNT_TYPE_U16
,
1156 p_ll2_info
->input
.rx_num_desc
,
1157 sizeof(struct core_rx_bd
),
1158 &p_ll2_info
->rx_queue
.rxq_chain
, NULL
);
1160 DP_NOTICE(p_hwfn
, "Failed to allocate ll2 rxq chain\n");
1164 capacity
= qed_chain_get_capacity(&p_ll2_info
->rx_queue
.rxq_chain
);
1165 p_descq
= kcalloc(capacity
, sizeof(struct qed_ll2_rx_packet
),
1169 DP_NOTICE(p_hwfn
, "Failed to allocate ll2 Rx desc\n");
1172 p_ll2_info
->rx_queue
.descq_array
= p_descq
;
1174 rc
= qed_chain_alloc(p_hwfn
->cdev
,
1175 QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1177 QED_CHAIN_CNT_TYPE_U16
,
1178 p_ll2_info
->input
.rx_num_desc
,
1179 sizeof(struct core_rx_fast_path_cqe
),
1180 &p_ll2_info
->rx_queue
.rcq_chain
, NULL
);
1182 DP_NOTICE(p_hwfn
, "Failed to allocate ll2 rcq chain\n");
1186 DP_VERBOSE(p_hwfn
, QED_MSG_LL2
,
1187 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
1188 p_ll2_info
->input
.conn_type
, p_ll2_info
->input
.rx_num_desc
);
1194 static int qed_ll2_acquire_connection_tx(struct qed_hwfn
*p_hwfn
,
1195 struct qed_ll2_info
*p_ll2_info
)
1197 struct qed_ll2_tx_packet
*p_descq
;
1202 if (!p_ll2_info
->input
.tx_num_desc
)
1205 rc
= qed_chain_alloc(p_hwfn
->cdev
,
1206 QED_CHAIN_USE_TO_CONSUME_PRODUCE
,
1208 QED_CHAIN_CNT_TYPE_U16
,
1209 p_ll2_info
->input
.tx_num_desc
,
1210 sizeof(struct core_tx_bd
),
1211 &p_ll2_info
->tx_queue
.txq_chain
, NULL
);
1215 capacity
= qed_chain_get_capacity(&p_ll2_info
->tx_queue
.txq_chain
);
1216 /* First element is part of the packet, rest are flexibly added */
1217 desc_size
= (sizeof(*p_descq
) +
1218 (p_ll2_info
->input
.tx_max_bds_per_packet
- 1) *
1219 sizeof(p_descq
->bds_set
));
1221 p_descq
= kcalloc(capacity
, desc_size
, GFP_KERNEL
);
1226 p_ll2_info
->tx_queue
.descq_mem
= p_descq
;
1228 DP_VERBOSE(p_hwfn
, QED_MSG_LL2
,
1229 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
1230 p_ll2_info
->input
.conn_type
, p_ll2_info
->input
.tx_num_desc
);
1235 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
1236 p_ll2_info
->input
.tx_num_desc
);
1241 qed_ll2_acquire_connection_ooo(struct qed_hwfn
*p_hwfn
,
1242 struct qed_ll2_info
*p_ll2_info
, u16 mtu
)
1244 struct qed_ooo_buffer
*p_buf
= NULL
;
1249 if (p_ll2_info
->input
.conn_type
!= QED_LL2_TYPE_OOO
)
1252 /* Correct number of requested OOO buffers if needed */
1253 if (!p_ll2_info
->input
.rx_num_ooo_buffers
) {
1254 u16 num_desc
= p_ll2_info
->input
.rx_num_desc
;
1258 p_ll2_info
->input
.rx_num_ooo_buffers
= num_desc
* 2;
1261 for (buf_idx
= 0; buf_idx
< p_ll2_info
->input
.rx_num_ooo_buffers
;
1263 p_buf
= kzalloc(sizeof(*p_buf
), GFP_KERNEL
);
1269 p_buf
->rx_buffer_size
= mtu
+ 26 + ETH_CACHE_LINE_SIZE
;
1270 p_buf
->rx_buffer_size
= (p_buf
->rx_buffer_size
+
1271 ETH_CACHE_LINE_SIZE
- 1) &
1272 ~(ETH_CACHE_LINE_SIZE
- 1);
1273 p_virt
= dma_alloc_coherent(&p_hwfn
->cdev
->pdev
->dev
,
1274 p_buf
->rx_buffer_size
,
1275 &p_buf
->rx_buffer_phys_addr
,
1283 p_buf
->rx_buffer_virt_addr
= p_virt
;
1284 qed_ooo_put_free_buffer(p_hwfn
, p_hwfn
->p_ooo_info
, p_buf
);
1287 DP_VERBOSE(p_hwfn
, QED_MSG_LL2
,
1288 "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
1289 p_ll2_info
->input
.rx_num_ooo_buffers
, p_buf
->rx_buffer_size
);
1296 qed_ll2_set_cbs(struct qed_ll2_info
*p_ll2_info
, const struct qed_ll2_cbs
*cbs
)
1298 if (!cbs
|| (!cbs
->rx_comp_cb
||
1299 !cbs
->rx_release_cb
||
1300 !cbs
->tx_comp_cb
|| !cbs
->tx_release_cb
|| !cbs
->cookie
))
1303 p_ll2_info
->cbs
.rx_comp_cb
= cbs
->rx_comp_cb
;
1304 p_ll2_info
->cbs
.rx_release_cb
= cbs
->rx_release_cb
;
1305 p_ll2_info
->cbs
.tx_comp_cb
= cbs
->tx_comp_cb
;
1306 p_ll2_info
->cbs
.tx_release_cb
= cbs
->tx_release_cb
;
1307 p_ll2_info
->cbs
.slowpath_cb
= cbs
->slowpath_cb
;
1308 p_ll2_info
->cbs
.cookie
= cbs
->cookie
;
1313 static enum core_error_handle
1314 qed_ll2_get_error_choice(enum qed_ll2_error_handle err
)
1317 case QED_LL2_DROP_PACKET
:
1318 return LL2_DROP_PACKET
;
1319 case QED_LL2_DO_NOTHING
:
1320 return LL2_DO_NOTHING
;
1321 case QED_LL2_ASSERT
:
1324 return LL2_DO_NOTHING
;
1328 int qed_ll2_acquire_connection(void *cxt
, struct qed_ll2_acquire_data
*data
)
1330 struct qed_hwfn
*p_hwfn
= cxt
;
1331 qed_int_comp_cb_t comp_rx_cb
, comp_tx_cb
;
1332 struct qed_ll2_info
*p_ll2_info
= NULL
;
1336 if (!data
->p_connection_handle
|| !p_hwfn
->p_ll2_info
)
1339 /* Find a free connection to be used */
1340 for (i
= 0; (i
< QED_MAX_NUM_OF_LL2_CONNECTIONS
); i
++) {
1341 mutex_lock(&p_hwfn
->p_ll2_info
[i
].mutex
);
1342 if (p_hwfn
->p_ll2_info
[i
].b_active
) {
1343 mutex_unlock(&p_hwfn
->p_ll2_info
[i
].mutex
);
1347 p_hwfn
->p_ll2_info
[i
].b_active
= true;
1348 p_ll2_info
= &p_hwfn
->p_ll2_info
[i
];
1349 mutex_unlock(&p_hwfn
->p_ll2_info
[i
].mutex
);
1355 memcpy(&p_ll2_info
->input
, &data
->input
, sizeof(p_ll2_info
->input
));
1357 switch (data
->input
.tx_dest
) {
1358 case QED_LL2_TX_DEST_NW
:
1359 p_ll2_info
->tx_dest
= CORE_TX_DEST_NW
;
1361 case QED_LL2_TX_DEST_LB
:
1362 p_ll2_info
->tx_dest
= CORE_TX_DEST_LB
;
1364 case QED_LL2_TX_DEST_DROP
:
1365 p_ll2_info
->tx_dest
= CORE_TX_DEST_DROP
;
1371 if (data
->input
.conn_type
== QED_LL2_TYPE_OOO
||
1372 data
->input
.secondary_queue
)
1373 p_ll2_info
->main_func_queue
= false;
1375 p_ll2_info
->main_func_queue
= true;
1377 /* Correct maximum number of Tx BDs */
1378 p_tx_max
= &p_ll2_info
->input
.tx_max_bds_per_packet
;
1380 *p_tx_max
= CORE_LL2_TX_MAX_BDS_PER_PACKET
;
1382 *p_tx_max
= min_t(u8
, *p_tx_max
,
1383 CORE_LL2_TX_MAX_BDS_PER_PACKET
);
1385 rc
= qed_ll2_set_cbs(p_ll2_info
, data
->cbs
);
1387 DP_NOTICE(p_hwfn
, "Invalid callback functions\n");
1388 goto q_allocate_fail
;
1391 rc
= qed_ll2_acquire_connection_rx(p_hwfn
, p_ll2_info
);
1393 goto q_allocate_fail
;
1395 rc
= qed_ll2_acquire_connection_tx(p_hwfn
, p_ll2_info
);
1397 goto q_allocate_fail
;
1399 rc
= qed_ll2_acquire_connection_ooo(p_hwfn
, p_ll2_info
,
1402 goto q_allocate_fail
;
1404 /* Register callbacks for the Rx/Tx queues */
1405 if (data
->input
.conn_type
== QED_LL2_TYPE_OOO
) {
1406 comp_rx_cb
= qed_ll2_lb_rxq_completion
;
1407 comp_tx_cb
= qed_ll2_lb_txq_completion
;
1409 comp_rx_cb
= qed_ll2_rxq_completion
;
1410 comp_tx_cb
= qed_ll2_txq_completion
;
1413 if (data
->input
.rx_num_desc
) {
1414 qed_int_register_cb(p_hwfn
, comp_rx_cb
,
1415 &p_hwfn
->p_ll2_info
[i
],
1416 &p_ll2_info
->rx_queue
.rx_sb_index
,
1417 &p_ll2_info
->rx_queue
.p_fw_cons
);
1418 p_ll2_info
->rx_queue
.b_cb_registred
= true;
1421 if (data
->input
.tx_num_desc
) {
1422 qed_int_register_cb(p_hwfn
,
1424 &p_hwfn
->p_ll2_info
[i
],
1425 &p_ll2_info
->tx_queue
.tx_sb_index
,
1426 &p_ll2_info
->tx_queue
.p_fw_cons
);
1427 p_ll2_info
->tx_queue
.b_cb_registred
= true;
1430 *data
->p_connection_handle
= i
;
1434 qed_ll2_release_connection(p_hwfn
, i
);
1438 static int qed_ll2_establish_connection_rx(struct qed_hwfn
*p_hwfn
,
1439 struct qed_ll2_info
*p_ll2_conn
)
1441 enum qed_ll2_error_handle error_input
;
1442 enum core_error_handle error_mode
;
1443 u8 action_on_error
= 0;
1445 if (!QED_LL2_RX_REGISTERED(p_ll2_conn
))
1448 DIRECT_REG_WR(p_ll2_conn
->rx_queue
.set_prod_addr
, 0x0);
1449 error_input
= p_ll2_conn
->input
.ai_err_packet_too_big
;
1450 error_mode
= qed_ll2_get_error_choice(error_input
);
1451 SET_FIELD(action_on_error
,
1452 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG
, error_mode
);
1453 error_input
= p_ll2_conn
->input
.ai_err_no_buf
;
1454 error_mode
= qed_ll2_get_error_choice(error_input
);
1455 SET_FIELD(action_on_error
, CORE_RX_ACTION_ON_ERROR_NO_BUFF
, error_mode
);
1457 return qed_sp_ll2_rx_queue_start(p_hwfn
, p_ll2_conn
, action_on_error
);
1461 qed_ll2_establish_connection_ooo(struct qed_hwfn
*p_hwfn
,
1462 struct qed_ll2_info
*p_ll2_conn
)
1464 if (p_ll2_conn
->input
.conn_type
!= QED_LL2_TYPE_OOO
)
1467 qed_ooo_release_all_isles(p_hwfn
, p_hwfn
->p_ooo_info
);
1468 qed_ooo_submit_rx_buffers(p_hwfn
, p_ll2_conn
);
1471 int qed_ll2_establish_connection(void *cxt
, u8 connection_handle
)
1473 struct qed_hwfn
*p_hwfn
= cxt
;
1474 struct qed_ll2_info
*p_ll2_conn
;
1475 struct qed_ll2_tx_packet
*p_pkt
;
1476 struct qed_ll2_rx_queue
*p_rx
;
1477 struct qed_ll2_tx_queue
*p_tx
;
1478 struct qed_ptt
*p_ptt
;
1484 p_ptt
= qed_ptt_acquire(p_hwfn
);
1488 p_ll2_conn
= qed_ll2_handle_sanity_lock(p_hwfn
, connection_handle
);
1494 p_rx
= &p_ll2_conn
->rx_queue
;
1495 p_tx
= &p_ll2_conn
->tx_queue
;
1497 qed_chain_reset(&p_rx
->rxq_chain
);
1498 qed_chain_reset(&p_rx
->rcq_chain
);
1499 INIT_LIST_HEAD(&p_rx
->active_descq
);
1500 INIT_LIST_HEAD(&p_rx
->free_descq
);
1501 INIT_LIST_HEAD(&p_rx
->posting_descq
);
1502 spin_lock_init(&p_rx
->lock
);
1503 capacity
= qed_chain_get_capacity(&p_rx
->rxq_chain
);
1504 for (i
= 0; i
< capacity
; i
++)
1505 list_add_tail(&p_rx
->descq_array
[i
].list_entry
,
1507 *p_rx
->p_fw_cons
= 0;
1509 qed_chain_reset(&p_tx
->txq_chain
);
1510 INIT_LIST_HEAD(&p_tx
->active_descq
);
1511 INIT_LIST_HEAD(&p_tx
->free_descq
);
1512 INIT_LIST_HEAD(&p_tx
->sending_descq
);
1513 spin_lock_init(&p_tx
->lock
);
1514 capacity
= qed_chain_get_capacity(&p_tx
->txq_chain
);
1515 /* First element is part of the packet, rest are flexibly added */
1516 desc_size
= (sizeof(*p_pkt
) +
1517 (p_ll2_conn
->input
.tx_max_bds_per_packet
- 1) *
1518 sizeof(p_pkt
->bds_set
));
1520 for (i
= 0; i
< capacity
; i
++) {
1521 p_pkt
= p_tx
->descq_mem
+ desc_size
* i
;
1522 list_add_tail(&p_pkt
->list_entry
, &p_tx
->free_descq
);
1524 p_tx
->cur_completing_bd_idx
= 0;
1526 p_tx
->b_completing_packet
= false;
1527 p_tx
->cur_send_packet
= NULL
;
1528 p_tx
->cur_send_frag_num
= 0;
1529 p_tx
->cur_completing_frag_num
= 0;
1530 *p_tx
->p_fw_cons
= 0;
1532 rc
= qed_cxt_acquire_cid(p_hwfn
, PROTOCOLID_CORE
, &p_ll2_conn
->cid
);
1536 qid
= p_hwfn
->hw_info
.resc_start
[QED_LL2_QUEUE
] + connection_handle
;
1537 p_ll2_conn
->queue_id
= qid
;
1538 p_ll2_conn
->tx_stats_id
= qid
;
1539 p_rx
->set_prod_addr
= (u8 __iomem
*)p_hwfn
->regview
+
1540 GTT_BAR0_MAP_REG_TSDM_RAM
+
1541 TSTORM_LL2_RX_PRODS_OFFSET(qid
);
1542 p_tx
->doorbell_addr
= (u8 __iomem
*)p_hwfn
->doorbells
+
1543 qed_db_addr(p_ll2_conn
->cid
,
1546 rc
= qed_ll2_establish_connection_rx(p_hwfn
, p_ll2_conn
);
1550 rc
= qed_sp_ll2_tx_queue_start(p_hwfn
, p_ll2_conn
);
1554 if (!QED_IS_RDMA_PERSONALITY(p_hwfn
))
1555 qed_wr(p_hwfn
, p_ptt
, PRS_REG_USE_LIGHT_L2
, 1);
1557 qed_ll2_establish_connection_ooo(p_hwfn
, p_ll2_conn
);
1559 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_FCOE
) {
1560 if (!test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
))
1561 qed_llh_add_protocol_filter(p_hwfn
, p_ptt
,
1563 QED_LLH_FILTER_ETHERTYPE
);
1564 qed_llh_add_protocol_filter(p_hwfn
, p_ptt
,
1566 QED_LLH_FILTER_ETHERTYPE
);
1570 qed_ptt_release(p_hwfn
, p_ptt
);
1574 static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn
*p_hwfn
,
1575 struct qed_ll2_rx_queue
*p_rx
,
1576 struct qed_ll2_rx_packet
*p_curp
)
1578 struct qed_ll2_rx_packet
*p_posting_packet
= NULL
;
1579 struct core_ll2_rx_prod rx_prod
= { 0, 0, 0 };
1580 bool b_notify_fw
= false;
1581 u16 bd_prod
, cq_prod
;
1583 /* This handles the flushing of already posted buffers */
1584 while (!list_empty(&p_rx
->posting_descq
)) {
1585 p_posting_packet
= list_first_entry(&p_rx
->posting_descq
,
1586 struct qed_ll2_rx_packet
,
1588 list_move_tail(&p_posting_packet
->list_entry
,
1589 &p_rx
->active_descq
);
1593 /* This handles the supplied packet [if there is one] */
1595 list_add_tail(&p_curp
->list_entry
, &p_rx
->active_descq
);
1602 bd_prod
= qed_chain_get_prod_idx(&p_rx
->rxq_chain
);
1603 cq_prod
= qed_chain_get_prod_idx(&p_rx
->rcq_chain
);
1604 rx_prod
.bd_prod
= cpu_to_le16(bd_prod
);
1605 rx_prod
.cqe_prod
= cpu_to_le16(cq_prod
);
1607 /* Make sure chain element is updated before ringing the doorbell */
1610 DIRECT_REG_WR(p_rx
->set_prod_addr
, *((u32
*)&rx_prod
));
1613 int qed_ll2_post_rx_buffer(void *cxt
,
1614 u8 connection_handle
,
1616 u16 buf_len
, void *cookie
, u8 notify_fw
)
1618 struct qed_hwfn
*p_hwfn
= cxt
;
1619 struct core_rx_bd_with_buff_len
*p_curb
= NULL
;
1620 struct qed_ll2_rx_packet
*p_curp
= NULL
;
1621 struct qed_ll2_info
*p_ll2_conn
;
1622 struct qed_ll2_rx_queue
*p_rx
;
1623 unsigned long flags
;
1627 p_ll2_conn
= qed_ll2_handle_sanity(p_hwfn
, connection_handle
);
1630 p_rx
= &p_ll2_conn
->rx_queue
;
1632 spin_lock_irqsave(&p_rx
->lock
, flags
);
1633 if (!list_empty(&p_rx
->free_descq
))
1634 p_curp
= list_first_entry(&p_rx
->free_descq
,
1635 struct qed_ll2_rx_packet
, list_entry
);
1637 if (qed_chain_get_elem_left(&p_rx
->rxq_chain
) &&
1638 qed_chain_get_elem_left(&p_rx
->rcq_chain
)) {
1639 p_data
= qed_chain_produce(&p_rx
->rxq_chain
);
1640 p_curb
= (struct core_rx_bd_with_buff_len
*)p_data
;
1641 qed_chain_produce(&p_rx
->rcq_chain
);
	/* If we're lacking entries, let's try to flush buffers to FW */
1646 if (!p_curp
|| !p_curb
) {
1652 /* We have an Rx packet we can fill */
1653 DMA_REGPAIR_LE(p_curb
->addr
, addr
);
1654 p_curb
->buff_length
= cpu_to_le16(buf_len
);
1655 p_curp
->rx_buf_addr
= addr
;
1656 p_curp
->cookie
= cookie
;
1657 p_curp
->rxq_bd
= p_curb
;
1658 p_curp
->buf_length
= buf_len
;
1659 list_del(&p_curp
->list_entry
);
1661 /* Check if we only want to enqueue this packet without informing FW */
1663 list_add_tail(&p_curp
->list_entry
, &p_rx
->posting_descq
);
1668 qed_ll2_post_rx_buffer_notify_fw(p_hwfn
, p_rx
, p_curp
);
1670 spin_unlock_irqrestore(&p_rx
->lock
, flags
);
1674 static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn
*p_hwfn
,
1675 struct qed_ll2_tx_queue
*p_tx
,
1676 struct qed_ll2_tx_packet
*p_curp
,
1677 struct qed_ll2_tx_pkt_info
*pkt
,
1680 list_del(&p_curp
->list_entry
);
1681 p_curp
->cookie
= pkt
->cookie
;
1682 p_curp
->bd_used
= pkt
->num_of_bds
;
1683 p_curp
->notify_fw
= notify_fw
;
1684 p_tx
->cur_send_packet
= p_curp
;
1685 p_tx
->cur_send_frag_num
= 0;
1687 p_curp
->bds_set
[p_tx
->cur_send_frag_num
].tx_frag
= pkt
->first_frag
;
1688 p_curp
->bds_set
[p_tx
->cur_send_frag_num
].frag_len
= pkt
->first_frag_len
;
1689 p_tx
->cur_send_frag_num
++;
1693 qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn
*p_hwfn
,
1694 struct qed_ll2_info
*p_ll2
,
1695 struct qed_ll2_tx_packet
*p_curp
,
1696 struct qed_ll2_tx_pkt_info
*pkt
)
1698 struct qed_chain
*p_tx_chain
= &p_ll2
->tx_queue
.txq_chain
;
1699 u16 prod_idx
= qed_chain_get_prod_idx(p_tx_chain
);
1700 struct core_tx_bd
*start_bd
= NULL
;
1701 enum core_roce_flavor_type roce_flavor
;
1702 enum core_tx_dest tx_dest
;
1703 u16 bd_data
= 0, frag_idx
;
1705 roce_flavor
= (pkt
->qed_roce_flavor
== QED_LL2_ROCE
) ? CORE_ROCE
1708 switch (pkt
->tx_dest
) {
1709 case QED_LL2_TX_DEST_NW
:
1710 tx_dest
= CORE_TX_DEST_NW
;
1712 case QED_LL2_TX_DEST_LB
:
1713 tx_dest
= CORE_TX_DEST_LB
;
1715 case QED_LL2_TX_DEST_DROP
:
1716 tx_dest
= CORE_TX_DEST_DROP
;
1719 tx_dest
= CORE_TX_DEST_LB
;
1723 start_bd
= (struct core_tx_bd
*)qed_chain_produce(p_tx_chain
);
1724 if (QED_IS_IWARP_PERSONALITY(p_hwfn
) &&
1725 p_ll2
->input
.conn_type
== QED_LL2_TYPE_OOO
) {
1726 start_bd
->nw_vlan_or_lb_echo
=
1727 cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE
);
1729 start_bd
->nw_vlan_or_lb_echo
= cpu_to_le16(pkt
->vlan
);
1730 if (test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
) &&
1731 p_ll2
->input
.conn_type
== QED_LL2_TYPE_FCOE
)
1732 pkt
->remove_stag
= true;
1735 SET_FIELD(start_bd
->bitfield1
, CORE_TX_BD_L4_HDR_OFFSET_W
,
1736 cpu_to_le16(pkt
->l4_hdr_offset_w
));
1737 SET_FIELD(start_bd
->bitfield1
, CORE_TX_BD_TX_DST
, tx_dest
);
1738 bd_data
|= pkt
->bd_flags
;
1739 SET_FIELD(bd_data
, CORE_TX_BD_DATA_START_BD
, 0x1);
1740 SET_FIELD(bd_data
, CORE_TX_BD_DATA_NBDS
, pkt
->num_of_bds
);
1741 SET_FIELD(bd_data
, CORE_TX_BD_DATA_ROCE_FLAV
, roce_flavor
);
1742 SET_FIELD(bd_data
, CORE_TX_BD_DATA_IP_CSUM
, !!(pkt
->enable_ip_cksum
));
1743 SET_FIELD(bd_data
, CORE_TX_BD_DATA_L4_CSUM
, !!(pkt
->enable_l4_cksum
));
1744 SET_FIELD(bd_data
, CORE_TX_BD_DATA_IP_LEN
, !!(pkt
->calc_ip_len
));
1745 SET_FIELD(bd_data
, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION
,
1746 !!(pkt
->remove_stag
));
1748 start_bd
->bd_data
.as_bitfield
= cpu_to_le16(bd_data
);
1749 DMA_REGPAIR_LE(start_bd
->addr
, pkt
->first_frag
);
1750 start_bd
->nbytes
= cpu_to_le16(pkt
->first_frag_len
);
1753 (NETIF_MSG_TX_QUEUED
| QED_MSG_LL2
),
1754 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1757 p_ll2
->input
.conn_type
,
1759 pkt
->first_frag_len
,
1761 le32_to_cpu(start_bd
->addr
.hi
),
1762 le32_to_cpu(start_bd
->addr
.lo
));
1764 if (p_ll2
->tx_queue
.cur_send_frag_num
== pkt
->num_of_bds
)
1767 /* Need to provide the packet with additional BDs for frags */
1768 for (frag_idx
= p_ll2
->tx_queue
.cur_send_frag_num
;
1769 frag_idx
< pkt
->num_of_bds
; frag_idx
++) {
1770 struct core_tx_bd
**p_bd
= &p_curp
->bds_set
[frag_idx
].txq_bd
;
1772 *p_bd
= (struct core_tx_bd
*)qed_chain_produce(p_tx_chain
);
1773 (*p_bd
)->bd_data
.as_bitfield
= 0;
1774 (*p_bd
)->bitfield1
= 0;
1775 p_curp
->bds_set
[frag_idx
].tx_frag
= 0;
1776 p_curp
->bds_set
[frag_idx
].frag_len
= 0;
1780 /* This should be called while the Txq spinlock is being held */
1781 static void qed_ll2_tx_packet_notify(struct qed_hwfn
*p_hwfn
,
1782 struct qed_ll2_info
*p_ll2_conn
)
1784 bool b_notify
= p_ll2_conn
->tx_queue
.cur_send_packet
->notify_fw
;
1785 struct qed_ll2_tx_queue
*p_tx
= &p_ll2_conn
->tx_queue
;
1786 struct qed_ll2_tx_packet
*p_pkt
= NULL
;
1787 struct core_db_data db_msg
= { 0, 0, 0 };
1790 /* If there are missing BDs, don't do anything now */
1791 if (p_ll2_conn
->tx_queue
.cur_send_frag_num
!=
1792 p_ll2_conn
->tx_queue
.cur_send_packet
->bd_used
)
1795 /* Push the current packet to the list and clean after it */
1796 list_add_tail(&p_ll2_conn
->tx_queue
.cur_send_packet
->list_entry
,
1797 &p_ll2_conn
->tx_queue
.sending_descq
);
1798 p_ll2_conn
->tx_queue
.cur_send_packet
= NULL
;
1799 p_ll2_conn
->tx_queue
.cur_send_frag_num
= 0;
1801 /* Notify FW of packet only if requested to */
1805 bd_prod
= qed_chain_get_prod_idx(&p_ll2_conn
->tx_queue
.txq_chain
);
1807 while (!list_empty(&p_tx
->sending_descq
)) {
1808 p_pkt
= list_first_entry(&p_tx
->sending_descq
,
1809 struct qed_ll2_tx_packet
, list_entry
);
1813 list_move_tail(&p_pkt
->list_entry
, &p_tx
->active_descq
);
1816 SET_FIELD(db_msg
.params
, CORE_DB_DATA_DEST
, DB_DEST_XCM
);
1817 SET_FIELD(db_msg
.params
, CORE_DB_DATA_AGG_CMD
, DB_AGG_CMD_SET
);
1818 SET_FIELD(db_msg
.params
, CORE_DB_DATA_AGG_VAL_SEL
,
1819 DQ_XCM_CORE_TX_BD_PROD_CMD
);
1820 db_msg
.agg_flags
= DQ_XCM_CORE_DQ_CF_CMD
;
1821 db_msg
.spq_prod
= cpu_to_le16(bd_prod
);
1823 /* Make sure the BDs data is updated before ringing the doorbell */
1826 DIRECT_REG_WR(p_tx
->doorbell_addr
, *((u32
*)&db_msg
));
1829 (NETIF_MSG_TX_QUEUED
| QED_MSG_LL2
),
1830 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1831 p_ll2_conn
->queue_id
,
1833 p_ll2_conn
->input
.conn_type
, db_msg
.spq_prod
);
1836 int qed_ll2_prepare_tx_packet(void *cxt
,
1837 u8 connection_handle
,
1838 struct qed_ll2_tx_pkt_info
*pkt
,
1841 struct qed_hwfn
*p_hwfn
= cxt
;
1842 struct qed_ll2_tx_packet
*p_curp
= NULL
;
1843 struct qed_ll2_info
*p_ll2_conn
= NULL
;
1844 struct qed_ll2_tx_queue
*p_tx
;
1845 struct qed_chain
*p_tx_chain
;
1846 unsigned long flags
;
1849 p_ll2_conn
= qed_ll2_handle_sanity(p_hwfn
, connection_handle
);
1852 p_tx
= &p_ll2_conn
->tx_queue
;
1853 p_tx_chain
= &p_tx
->txq_chain
;
1855 if (pkt
->num_of_bds
> p_ll2_conn
->input
.tx_max_bds_per_packet
)
1858 spin_lock_irqsave(&p_tx
->lock
, flags
);
1859 if (p_tx
->cur_send_packet
) {
1864 /* Get entry, but only if we have tx elements for it */
1865 if (!list_empty(&p_tx
->free_descq
))
1866 p_curp
= list_first_entry(&p_tx
->free_descq
,
1867 struct qed_ll2_tx_packet
, list_entry
);
1868 if (p_curp
&& qed_chain_get_elem_left(p_tx_chain
) < pkt
->num_of_bds
)
1876 /* Prepare packet and BD, and perhaps send a doorbell to FW */
1877 qed_ll2_prepare_tx_packet_set(p_hwfn
, p_tx
, p_curp
, pkt
, notify_fw
);
1879 qed_ll2_prepare_tx_packet_set_bd(p_hwfn
, p_ll2_conn
, p_curp
, pkt
);
1881 qed_ll2_tx_packet_notify(p_hwfn
, p_ll2_conn
);
1884 spin_unlock_irqrestore(&p_tx
->lock
, flags
);
1888 int qed_ll2_set_fragment_of_tx_packet(void *cxt
,
1889 u8 connection_handle
,
1890 dma_addr_t addr
, u16 nbytes
)
1892 struct qed_ll2_tx_packet
*p_cur_send_packet
= NULL
;
1893 struct qed_hwfn
*p_hwfn
= cxt
;
1894 struct qed_ll2_info
*p_ll2_conn
= NULL
;
1895 u16 cur_send_frag_num
= 0;
1896 struct core_tx_bd
*p_bd
;
1897 unsigned long flags
;
1899 p_ll2_conn
= qed_ll2_handle_sanity(p_hwfn
, connection_handle
);
1903 if (!p_ll2_conn
->tx_queue
.cur_send_packet
)
1906 p_cur_send_packet
= p_ll2_conn
->tx_queue
.cur_send_packet
;
1907 cur_send_frag_num
= p_ll2_conn
->tx_queue
.cur_send_frag_num
;
1909 if (cur_send_frag_num
>= p_cur_send_packet
->bd_used
)
1912 /* Fill the BD information, and possibly notify FW */
1913 p_bd
= p_cur_send_packet
->bds_set
[cur_send_frag_num
].txq_bd
;
1914 DMA_REGPAIR_LE(p_bd
->addr
, addr
);
1915 p_bd
->nbytes
= cpu_to_le16(nbytes
);
1916 p_cur_send_packet
->bds_set
[cur_send_frag_num
].tx_frag
= addr
;
1917 p_cur_send_packet
->bds_set
[cur_send_frag_num
].frag_len
= nbytes
;
1919 p_ll2_conn
->tx_queue
.cur_send_frag_num
++;
1921 spin_lock_irqsave(&p_ll2_conn
->tx_queue
.lock
, flags
);
1922 qed_ll2_tx_packet_notify(p_hwfn
, p_ll2_conn
);
1923 spin_unlock_irqrestore(&p_ll2_conn
->tx_queue
.lock
, flags
);
1928 int qed_ll2_terminate_connection(void *cxt
, u8 connection_handle
)
1930 struct qed_hwfn
*p_hwfn
= cxt
;
1931 struct qed_ll2_info
*p_ll2_conn
= NULL
;
1933 struct qed_ptt
*p_ptt
;
1935 p_ptt
= qed_ptt_acquire(p_hwfn
);
1939 p_ll2_conn
= qed_ll2_handle_sanity_lock(p_hwfn
, connection_handle
);
1945 /* Stop Tx & Rx of connection, if needed */
1946 if (QED_LL2_TX_REGISTERED(p_ll2_conn
)) {
1947 p_ll2_conn
->tx_queue
.b_cb_registred
= false;
1948 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1949 rc
= qed_sp_ll2_tx_queue_stop(p_hwfn
, p_ll2_conn
);
1953 qed_ll2_txq_flush(p_hwfn
, connection_handle
);
1954 qed_int_unregister_cb(p_hwfn
, p_ll2_conn
->tx_queue
.tx_sb_index
);
1957 if (QED_LL2_RX_REGISTERED(p_ll2_conn
)) {
1958 p_ll2_conn
->rx_queue
.b_cb_registred
= false;
1959 smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
1960 rc
= qed_sp_ll2_rx_queue_stop(p_hwfn
, p_ll2_conn
);
1964 qed_ll2_rxq_flush(p_hwfn
, connection_handle
);
1965 qed_int_unregister_cb(p_hwfn
, p_ll2_conn
->rx_queue
.rx_sb_index
);
1968 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_OOO
)
1969 qed_ooo_release_all_isles(p_hwfn
, p_hwfn
->p_ooo_info
);
1971 if (p_ll2_conn
->input
.conn_type
== QED_LL2_TYPE_FCOE
) {
1972 if (!test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
))
1973 qed_llh_remove_protocol_filter(p_hwfn
, p_ptt
,
1975 QED_LLH_FILTER_ETHERTYPE
);
1976 qed_llh_remove_protocol_filter(p_hwfn
, p_ptt
,
1978 QED_LLH_FILTER_ETHERTYPE
);
1982 qed_ptt_release(p_hwfn
, p_ptt
);
1986 static void qed_ll2_release_connection_ooo(struct qed_hwfn
*p_hwfn
,
1987 struct qed_ll2_info
*p_ll2_conn
)
1989 struct qed_ooo_buffer
*p_buffer
;
1991 if (p_ll2_conn
->input
.conn_type
!= QED_LL2_TYPE_OOO
)
1994 qed_ooo_release_all_isles(p_hwfn
, p_hwfn
->p_ooo_info
);
1995 while ((p_buffer
= qed_ooo_get_free_buffer(p_hwfn
,
1996 p_hwfn
->p_ooo_info
))) {
1997 dma_free_coherent(&p_hwfn
->cdev
->pdev
->dev
,
1998 p_buffer
->rx_buffer_size
,
1999 p_buffer
->rx_buffer_virt_addr
,
2000 p_buffer
->rx_buffer_phys_addr
);
2005 void qed_ll2_release_connection(void *cxt
, u8 connection_handle
)
2007 struct qed_hwfn
*p_hwfn
= cxt
;
2008 struct qed_ll2_info
*p_ll2_conn
= NULL
;
2010 p_ll2_conn
= qed_ll2_handle_sanity(p_hwfn
, connection_handle
);
2014 kfree(p_ll2_conn
->tx_queue
.descq_mem
);
2015 qed_chain_free(p_hwfn
->cdev
, &p_ll2_conn
->tx_queue
.txq_chain
);
2017 kfree(p_ll2_conn
->rx_queue
.descq_array
);
2018 qed_chain_free(p_hwfn
->cdev
, &p_ll2_conn
->rx_queue
.rxq_chain
);
2019 qed_chain_free(p_hwfn
->cdev
, &p_ll2_conn
->rx_queue
.rcq_chain
);
2021 qed_cxt_release_cid(p_hwfn
, p_ll2_conn
->cid
);
2023 qed_ll2_release_connection_ooo(p_hwfn
, p_ll2_conn
);
2025 mutex_lock(&p_ll2_conn
->mutex
);
2026 p_ll2_conn
->b_active
= false;
2027 mutex_unlock(&p_ll2_conn
->mutex
);
2030 int qed_ll2_alloc(struct qed_hwfn
*p_hwfn
)
2032 struct qed_ll2_info
*p_ll2_connections
;
2035 /* Allocate LL2's set struct */
2036 p_ll2_connections
= kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS
,
2037 sizeof(struct qed_ll2_info
), GFP_KERNEL
);
2038 if (!p_ll2_connections
) {
2039 DP_NOTICE(p_hwfn
, "Failed to allocate `struct qed_ll2'\n");
2043 for (i
= 0; i
< QED_MAX_NUM_OF_LL2_CONNECTIONS
; i
++)
2044 p_ll2_connections
[i
].my_id
= i
;
2046 p_hwfn
->p_ll2_info
= p_ll2_connections
;
2050 void qed_ll2_setup(struct qed_hwfn
*p_hwfn
)
2054 for (i
= 0; i
< QED_MAX_NUM_OF_LL2_CONNECTIONS
; i
++)
2055 mutex_init(&p_hwfn
->p_ll2_info
[i
].mutex
);
2058 void qed_ll2_free(struct qed_hwfn
*p_hwfn
)
2060 if (!p_hwfn
->p_ll2_info
)
2063 kfree(p_hwfn
->p_ll2_info
);
2064 p_hwfn
->p_ll2_info
= NULL
;
2067 static void _qed_ll2_get_port_stats(struct qed_hwfn
*p_hwfn
,
2068 struct qed_ptt
*p_ptt
,
2069 struct qed_ll2_stats
*p_stats
)
2071 struct core_ll2_port_stats port_stats
;
2073 memset(&port_stats
, 0, sizeof(port_stats
));
2074 qed_memcpy_from(p_hwfn
, p_ptt
, &port_stats
,
2075 BAR0_MAP_REG_TSDM_RAM
+
2076 TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn
)),
2077 sizeof(port_stats
));
2079 p_stats
->gsi_invalid_hdr
= HILO_64_REGPAIR(port_stats
.gsi_invalid_hdr
);
2080 p_stats
->gsi_invalid_pkt_length
=
2081 HILO_64_REGPAIR(port_stats
.gsi_invalid_pkt_length
);
2082 p_stats
->gsi_unsupported_pkt_typ
=
2083 HILO_64_REGPAIR(port_stats
.gsi_unsupported_pkt_typ
);
2084 p_stats
->gsi_crcchksm_error
=
2085 HILO_64_REGPAIR(port_stats
.gsi_crcchksm_error
);
2088 static void _qed_ll2_get_tstats(struct qed_hwfn
*p_hwfn
,
2089 struct qed_ptt
*p_ptt
,
2090 struct qed_ll2_info
*p_ll2_conn
,
2091 struct qed_ll2_stats
*p_stats
)
2093 struct core_ll2_tstorm_per_queue_stat tstats
;
2094 u8 qid
= p_ll2_conn
->queue_id
;
2097 memset(&tstats
, 0, sizeof(tstats
));
2098 tstats_addr
= BAR0_MAP_REG_TSDM_RAM
+
2099 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid
);
2100 qed_memcpy_from(p_hwfn
, p_ptt
, &tstats
, tstats_addr
, sizeof(tstats
));
2102 p_stats
->packet_too_big_discard
=
2103 HILO_64_REGPAIR(tstats
.packet_too_big_discard
);
2104 p_stats
->no_buff_discard
= HILO_64_REGPAIR(tstats
.no_buff_discard
);
static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

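/* Gather all statistics for one LL2 connection; port-level (GSI) and Pstorm
 * Tx counters are only read when the connection has them enabled.
 */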
int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

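/* Rx release callback for the cdev-owned LL2 queue: return the buffer that
 * was attached as the cookie when it was posted back to the allocator.
 */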
static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

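/* Default callback set for the cdev-owned LL2 connection; the protocol
 * driver's registered qed_ll2_cb_ops (if any) are invoked from these
 * completion handlers.
 */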
struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};

static void qed_ll2_set_conn_data(struct qed_dev *cdev,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = QED_LEADING_HWFN(cdev);

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

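/* Bring up the dedicated out-of-order (OOO) LL2 queue used for iSCSI; the
 * connection is acquired with lb == true, so its Tx destination is the
 * loopback path rather than the network port.
 */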
static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(cdev, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(hwfn, &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

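/* Start the cdev-owned LL2 connection: allocate and post Rx buffers, choose
 * the connection type from the PF personality, establish the connection,
 * start the iSCSI OOO queue when applicable and install the LL2 MAC filter.
 */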
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(cdev, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

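/* Tear down in roughly the reverse order of qed_ll2_start(): remove the MAC
 * filter, stop the iSCSI OOO queue, terminate the connection, reclaim any
 * outstanding Rx buffers and release the connection handle.
 */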
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}

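/* Transmit an skb over the cdev-owned LL2 connection: the linear part of the
 * skb becomes the first BD and every page fragment is DMA-mapped and added
 * as an extra BD, up to CORE_LL2_TX_MAX_BDS_PER_PACKET in total.
 */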
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache number of fragments from SKB since SKB may be freed by
	 * the completion routine after calling qed_ll2_prepare_tx_packet()
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if
	 * there are no fragments in the skb and subsequently the completion
	 * routine may run and free the SKB, so no dereferencing the SKB
	 * beyond this point unless skb has any fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been
		 * posted, we can't free memory, will need to wait for
		 * completion
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

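/* qed_ll2_ops_pass is exported to upper-layer drivers through their protocol
 * ops structure. A minimal usage sketch (callback names and the surrounding
 * driver context are illustrative only, not part of this file):
 *
 *	static const struct qed_ll2_cb_ops my_ll2_cbs = {
 *		.rx_cb = my_ll2_rx,
 *		.tx_cb = my_ll2_tx_done,
 *	};
 *
 *	ops->ll2->register_cb_ops(cdev, &my_ll2_cbs, my_ctx);
 *	if (ops->ll2->start(cdev, &ll2_params))
 *		goto err;
 *	ops->ll2->start_xmit(cdev, skb, 0);
 *	...
 *	ops->ll2->stop(cdev);
 */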
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)