/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

struct mlxsw_core_pcpu_stats {
	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync	syncp;
	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32			trap_rx_invalid;
	u32			port_rx_invalid;
};

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		struct sk_buff *resp_skb;
		u64 tid;
		wait_queue_head_t wait;
		bool trans_active;
		struct mutex lock; /* One EMAD transaction at a time. */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};
/******************
 * EMAD processing
 ******************/
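
/* An EMAD frame, as built by mlxsw_emad_construct() below, has the
 * following on-wire layout (TLV lengths are counted in u32 units):
 *
 *	Ethernet header | OP TLV | REG TLV (header + register payload) | END TLV
 *
 * The fixed DMAC/SMAC/ethertype values that address the frame to the
 * device itself are documented with the items that follow.
 */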
/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   struct mlxsw_core *mlxsw_core)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_core *mlxsw_core)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

#define MLXSW_EMAD_TIMEOUT_MS 200
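
/* The EMAD transaction is fully synchronous: mark it active, hand the
 * frame to the bus and sleep until mlxsw_emad_rx_listener_func() wakes
 * us with the response, or until MLXSW_EMAD_TIMEOUT_MS elapses.
 */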
static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	int err;
	int ret;

	mlxsw_core->emad.trans_active = true;

	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
	if (err) {
		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
			mlxsw_core->emad.tid);
		dev_kfree_skb(skb);
		goto trans_inactive_out;
	}

	ret = wait_event_timeout(mlxsw_core->emad.wait,
				 !(mlxsw_core->emad.trans_active),
				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
	if (!ret) {
		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
			 mlxsw_core->emad.tid);
		err = -EIO;
		goto trans_inactive_out;
	}

	return 0;

trans_inactive_out:
	mlxsw_core->emad.trans_active = false;
	return err;
}

static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
				     char *op_tlv)
{
	enum mlxsw_emad_op_tlv_status status;
	u64 tid;

	status = mlxsw_emad_op_tlv_status_get(op_tlv);
	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);

	switch (status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
			 tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
			tid, status, mlxsw_emad_op_tlv_status_str(status));
		return -EIO;
	}
}

static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
					 struct sk_buff *skb)
{
	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct sk_buff *skb,
			       const struct mlxsw_tx_info *tx_info)
{
	struct sk_buff *trans_skb;
	int n_retry;
	int err;

	n_retry = 0;
retry:
	/* We copy the EMAD to a new skb, since we might need
	 * to retransmit it in case of failure.
	 */
	trans_skb = skb_copy(skb, GFP_KERNEL);
	if (!trans_skb) {
		err = -ENOMEM;
		goto out;
	}

	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
	if (!err) {
		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;

		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
		if (err)
			dev_kfree_skb(resp_skb);
		if (!err || err != -EAGAIN)
			goto out;
	}
	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
		goto retry;

out:
	dev_kfree_skb(skb);
	mlxsw_core->emad.tid++;
	return err;
}
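
/* RX listener for EMAD responses. A frame is taken as our response only
 * if it has the response bit set, a transaction is currently active and
 * its TID matches the one we sent; anything else is dropped.
 */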
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	if (mlxsw_emad_is_resp(skb) &&
	    mlxsw_core->emad.trans_active &&
	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
		mlxsw_core->emad.resp_skb = skb;
		mlxsw_core->emad.trans_active = false;
		wake_up(&mlxsw_core->emad.wait);
	} else {
		dev_kfree_skb(skb);
	}
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&mlxsw_core->emad.tid, 4);
	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;

	init_waitqueue_head(&mlxsw_core->emad.wait);
	mlxsw_core->emad.trans_active = false;
	mutex_init(&mlxsw_core->emad.lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}
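
/* The skb is sized for the worst-case frame and the whole length is
 * reserved up front, so mlxsw_emad_construct() can build the frame
 * back-to-front with skb_push(): END TLV, then REG TLV, then OP TLV,
 * and finally the Ethernet header.
 */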
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
/*****************
 * Core functions
 *****************/

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES   RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);

	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_core) {
		err = -ENOMEM;
		goto err_core_alloc;
	}

	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
	if (err)
		goto err_bus_init;

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
				 mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
err_driver_init:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_core);
err_core_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	free_percpu(mlxsw_core->pcpu_stats);
	kfree(mlxsw_core);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
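
/* The driver's private area is the flexible array at the end of struct
 * mlxsw_core, so the core handle can be recovered from a driver_priv
 * pointer with container_of().
 */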
static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
{
	return container_of(driver_priv, struct mlxsw_core, driver_priv);
}

bool mlxsw_core_skb_transmit_busy(void *driver_priv,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);

	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
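
/* Event listeners are layered on top of RX listeners: each event
 * listener registers an RX listener on the event's trap ID, and the
 * function below unpacks the EMAD's OP and REG TLVs before invoking
 * the event callback with the register payload.
 */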
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
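
/* EMAD-based register access: build the frame, transmit it and, on
 * success, copy the register payload out of the response skb. Note that
 * mlxsw_emad_transmit() has already bumped emad.tid by this point,
 * hence the "tid - 1" in the receive debug print below.
 */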
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type)
{
	int err;
	char *op_tlv;
	struct sk_buff *skb;
	struct mlxsw_tx_info tx_info = {
		.local_port = MLXSW_PORT_CPU_PORT,
		.is_emad = true,
	};

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
	mlxsw_core->driver->txhdr_construct(skb, &tx_info);

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
		mlxsw_core->emad.tid);
	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);

	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
	if (!err) {
		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
		       reg->len);

		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
			mlxsw_core->emad.tid - 1);
		mlxsw_core_buf_dump_dbg(mlxsw_core,
					mlxsw_core->emad.resp_skb->data,
					mlxsw_core->emad.resp_skb->len);

		dev_kfree_skb(mlxsw_core->emad.resp_skb);
	}

	return err;
}

static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
			goto retry;
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_core->emad.tid++;
	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	return err;
}
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	u64 cur_tid;
	int err;

	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
		return -EINTR;
	}

	cur_tid = mlxsw_core->emad.tid;
	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						payload, type);
	else
		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
						 payload, type);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));

	mutex_unlock(&mlxsw_core->emad.lock);
	return err;
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
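
/* Typical usage (see mlxsw_emad_traps_set() above): pack a register
 * payload on the stack and issue it through the core, e.g.
 *
 *	char htgt_pl[MLXSW_REG_HTGT_LEN];
 *
 *	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
 */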
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port = rx_info->sys_port;
	bool found = false;

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
			    __func__, rx_info->sys_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
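
/* Raw command-interface entry point. The mailboxes are passed straight
 * through to the bus implementation; their sizes must be a whole number
 * of u32s.
 */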
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

static int __init mlxsw_core_module_init(void)
{
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root)
		return -ENOMEM;
	return 0;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");