/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_pcpu_stats {
	u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync syncp;
	u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32 trap_rx_invalid;
	u32 port_rx_invalid;
};

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port ports[MLXSW_PORT_MAX_PORTS];
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

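/* A rough sketch of the EMAD frame assembled by mlxsw_emad_construct()
 * below, inferred from the order of its skb_push() calls (the bus
 * driver's TX header is prepended later by txhdr_construct()), not an
 * authoritative format spec:
 *
 *   | Ethernet header | op TLV (4 u32) | reg TLV (1 u32 + payload) | end TLV |
 */
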
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS 200
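
/* A back-of-the-envelope bound, not a spec: if every attempt times
 * out, a transaction occupies its caller for roughly
 * (MLXSW_EMAD_MAX_RETRY + 1) * MLXSW_EMAD_TIMEOUT_MS milliseconds
 * before mlxsw_emad_trans_finish() reports the failure.
 */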

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
}

static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	/* Reserve the full frame length so the EMAD can later be built
	 * back-to-front with skb_push().
	 */
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES  RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV %10u\n", rx_invalid);

	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV %10u\n", rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
};

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	mlxsw_core_debugfs_fini(mlxsw_core);
	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	free_percpu(mlxsw_core->pcpu_stats);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

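/* A note on transaction IDs, inferred from mlxsw_emad_init() above:
 * the upper 32 bits are seeded with a per-device random value and the
 * counter below increments the whole 64-bit field, so TIDs are unique
 * per device and monotonically increasing within it.
 */
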
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

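/* A minimal usage sketch for the bulk transaction API exported below
 * (hypothetical caller code; the register and payload names are
 * illustrative only):
 *
 *	LIST_HEAD(bulk_list);
 *	int err;
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, &some_reg, some_pl,
 *				    &bulk_list, NULL, 0);
 *	if (err)
 *		return err;
 *	// ...queue further queries/writes on the same bulk_list...
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */
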
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

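/* RX dispatch below in brief: resolve a LAG RX to its local port if
 * needed, validate the trap ID and port bounds, then walk the RCU
 * listener list and deliver the skb to the first entry whose trap_id
 * matches and whose local_port is either an exact match or the
 * MLXSW_PORT_DONT_CARE wildcard; everything else is counted and
 * dropped.
 */
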
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

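/* The LAG mapping is a flat array indexed by (lag_id, port_index) via
 * the helper below. For example, with a hypothetical MAX_LAG_MEMBERS
 * of 16, lag_id 2 / port_index 3 resolves to slot 16 * 2 + 3 = 35.
 */
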
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     bool split, u32 split_group)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	/* Trim trailing zero words before dumping. */
	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_owq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_odw);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);

static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root) {
		err = -ENOMEM;
		goto err_debugfs_create_dir;
	}
	return 0;

err_debugfs_create_dir:
	destroy_workqueue(mlxsw_owq);
err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");