/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	bool reload_fail;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of an EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}
static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
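/* mlxsw_emad_alloc() reserves room for the whole frame up front, so the
 * frame is assembled back-to-front with skb_push(): end TLV first, then
 * the register TLV and operation TLV, and finally the Ethernet header.
 */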
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

/* timeout in milliseconds */
#define MLXSW_EMAD_TIMEOUT_MS	200
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
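/* trans->active is set to 1 on transmission and decremented by both the
 * timeout work and the response path; atomic_dec_and_test() ensures that
 * only the first of the two goes on to retry or finish the transaction.
 */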
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
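/* EMAD setup: register the trap that steers EMAD responses to
 * mlxsw_emad_rx_listener_func() and only then switch register access over
 * to the EMAD interface (emad.use_emad).
 */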
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
err_trap_register:
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);

	return mlxsw_driver;
}
static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
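/* The devlink callbacks below are thin wrappers: they validate the port
 * index or port state and dispatch to the corresponding optional driver
 * operation, returning -EOPNOTSUPP when the driver does not implement it.
 */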
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}
static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}
static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}
static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus;
	int err;

	if (!mlxsw_bus->reset)
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	mlxsw_bus->reset(mlxsw_core->bus_priv);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	if (err)
		mlxsw_core->reload_fail = true;
	return err;
}
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload				= mlxsw_devlink_core_bus_device_reload,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
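/* Main device registration path: allocate the devlink instance (unless
 * called from devlink reload), initialize the bus, the per-port array,
 * the LAG mapping and EMAD, and finally call into the driver's init.
 */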
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail)
		goto reload_fail;

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	if (reload)
		return;
reload_fail:
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						     &listener->u.event_listener,
						     priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->u.rx_listener,
						  priv);
}
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
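/* Register access through the command interface: the same op/reg TLVs used
 * by EMAD are packed into the access_reg command mailbox instead of an
 * Ethernet frame, and busy statuses are retried synchronously up to
 * MLXSW_EMAD_MAX_RETRY times.
 */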
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
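/* RX dispatch: resolve the ingress local port (possibly through the LAG
 * mapping), find a listener matching the trap ID and port, and hand the
 * skb to it; unclaimed or out-of-range packets are dropped.
 */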
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
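/* The LAG mapping is a flat array of local ports indexed by
 * lag_id * MAX_LAG_MEMBERS + port_index; see
 * mlxsw_core_lag_mapping_index().
 */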
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}
void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     bool split, u32 split_group)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");