/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"
static const char mlxsw_pci_driver_name[] = "mlxsw_pci";

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	0, /* unused for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};
struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};
struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};
struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
};
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}
static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}
static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}
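/* Element ownership: the device toggles the CQE/EQE owner bit on every pass
 * around the ring, while the driver derives the value it expects from
 * (consumer_counter & count). mlxsw_pci_queue_sw_elem_get() below only hands
 * out the next element once its owner bit shows software ownership.
 */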
static char *
mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
			    u32 (*get_elem_owner_func)(const char *))
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = get_elem_owner_func(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}
static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}
static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}
static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}
static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}
static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}
static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}
static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}
static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}
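/* Doorbell helpers: the producer doorbell hands newly filled descriptors to
 * the device, the consumer doorbell releases processed completion/event
 * entries, and the "arm" doorbell re-enables event generation for a CQ/EQ.
 * Each helper issues a wmb() so descriptor memory writes are visible to the
 * device before the doorbell register write reaches it.
 */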
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}
static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}
static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}
static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}
static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}
static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}
static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}
static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}
static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}
static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
		rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
}
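/* The CQ tasklet below processes at most half of the ring (its "credits")
 * per run so a busy queue cannot monopolize the CPU; whatever remains is
 * handled once the consumer doorbell is rung and the CQ is re-armed.
 */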
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, cqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, cqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}
}
static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}
static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}
static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
}
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);

		switch (event_type) {
		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}
struct mlxsw_pci_queue_ops {
	enum mlxsw_pci_queue_type type;
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	u16 elem_count;
	u8 elem_size;
};
static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};
static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};
static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count	= MLXSW_PCI_CQE_COUNT,
	.elem_size	= MLXSW_PCI_CQE_SIZE,
};
static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE,
};
static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	spin_lock_init(&q->lock);
	q->num = q_num;
	q->count = q_ops->elem_count;
	q->elem_size = q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;
	memset(mem_item->buf, 0, mem_item->size);

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}
static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}
static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}
static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 MLXSW_PCI_EQS_COUNT);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}
static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}
static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
				     struct mlxsw_res *res,
				     u8 query_enabled)
{
	int index, i;
	u64 data;
	u16 id;
	int err;

	/* Not all the versions support resources query */
	if (!query_enabled)
		return 0;

	mlxsw_cmd_mbox_zero(mbox);

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
	     index++) {
		err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
		if (err)
			return err;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return 0;

			mlxsw_res_parse(res, id, data);
		}
	}

	/* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
	 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
	 */
	return -EIO;
}
static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
								mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}
static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}
static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}
*mlxsw_pci
,
1293 struct mlxsw_pci_mem_item
*mbox
)
1295 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
1298 mbox
->size
= MLXSW_CMD_MBOX_SIZE
;
1299 mbox
->buf
= pci_alloc_consistent(pdev
, MLXSW_CMD_MBOX_SIZE
,
1302 dev_err(&pdev
->dev
, "Failed allocating memory for mailbox\n");
1309 static void mlxsw_pci_mbox_free(struct mlxsw_pci
*mlxsw_pci
,
1310 struct mlxsw_pci_mem_item
*mbox
)
1312 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
1314 pci_free_consistent(pdev
, MLXSW_CMD_MBOX_SIZE
, mbox
->buf
,
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto mbox_put;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res,
					profile->resource_query_enable);
	if (err)
		goto err_query_resources;

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}
static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}
static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}
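/* Command interface execution: before the event queues are brought up
 * (cmd.nopoll is false) completion is detected by polling the CIR control
 * register; once nopoll is set, the go-bit write carries an EVREQ request and
 * completion arrives as a command EQE that wakes the waiter (see
 * mlxsw_pci_eq_cmd_event() above).
 */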
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox)
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	unsigned long end;

	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	/* Reset needs to be written before we read control register, and
	 * we must wait for the HW to become responsive once again
	 */
	wmb();
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			break;
		cond_resched();
	} while (time_before(jiffies, end));

	return 0;
}
static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}
static void mlxsw_pci_reset(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	mlxsw_pci_free_irq_vectors(mlxsw_pci);
	mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
}
static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.features		= MLXSW_BUS_F_TXRX,
	.reset			= mlxsw_pci_reset,
};
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_sw_reset(mlxsw_pci, id);
	if (err) {
		dev_err(&pdev->dev, "Software reset failed\n");
		goto err_sw_reset;
	}

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_msix_init:
err_sw_reset:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}
static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}
int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);
void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);
static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");