// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 *
 * The Sparx5 Chip Register Model can be browsed at this location:
 * https://github.com/microchip-ung/sparx-5_reginfo
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"
#include "sparx5_port.h"

#define FDMA_XTR_CHANNEL		6
#define FDMA_INJ_CHANNEL		0

#define FDMA_XTR_BUFFER_SIZE		2048
#define FDMA_WEIGHT			4
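
/* The TX descriptors (DCBs) and their data buffers share one contiguous DMA
 * allocation: the DCB array comes first, followed by n_dcbs * n_dbs data
 * buffers of db_size bytes each. The callback below hands the hardware the
 * DMA address of one such buffer within that layout.
 */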
static int sparx5_fdma_tx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				     u64 *dataptr)
{
	*dataptr = fdma->dma + (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
		   ((dcb * fdma->n_dbs + db) * fdma->db_size);

	return 0;
}
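
/* RX data buffers are backed by skbs: allocate one per DB, remember it for
 * later processing in sparx5_fdma_rx_get_frame(), and give the hardware the
 * physical address of its data area.
 */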
static int sparx5_fdma_rx_dataptr_cb(struct fdma *fdma, int dcb, int db,
				     u64 *dataptr)
{
	struct sparx5 *sparx5 = fdma->priv;
	struct sparx5_rx *rx = &sparx5->rx;
	struct sk_buff *skb;

	skb = __netdev_alloc_skb(rx->ndev, fdma->db_size, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

	*dataptr = virt_to_phys(skb->data);

	rx->skb[dcb][db] = skb;

	return 0;
}
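
/* Point the RX channel at the DCB chain, configure it for extraction with a
 * per-DB end-of-frame interrupt, un-stop extraction and activate the channel.
 */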
static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(fdma->channel_id));
	spx5_wr(((u64)fdma->dma) >> 32, sparx5,
		FDMA_DCB_LLP1(fdma->channel_id));

	/* Set the number of RX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE),
		sparx5, FDMA_CH_CFG(fdma->channel_id));

	/* Set the RX Watermark to max */
	spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), FDMA_XTR_CFG_XTR_FIFO_WM,
		 sparx5,
		 FDMA_XTR_CFG);

	/* Start RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Enable RX channel DB interrupt */
	spx5_rmw(BIT(fdma->channel_id),
		 BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Activate the RX channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;

	/* Deactivate the RX channel */
	spx5_rmw(0, BIT(fdma->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);

	/* Disable RX channel DB interrupt */
	spx5_rmw(0, BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
		 sparx5, FDMA_INTR_DB_ENA);

	/* Stop RX fdma */
	spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP,
		 sparx5, FDMA_PORT_CTRL(0));
}

static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	struct fdma *fdma = &tx->fdma;

	/* Write the buffer address in the LLP and LLP1 regs */
	spx5_wr(((u64)fdma->dma) & GENMASK(31, 0), sparx5,
		FDMA_DCB_LLP(fdma->channel_id));
	spx5_wr(((u64)fdma->dma) >> 32, sparx5,
		FDMA_DCB_LLP1(fdma->channel_id));

	/* Set the number of TX DBs to be used, and DB end-of-frame interrupt */
	spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(fdma->n_dbs) |
		FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
		FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE),
		sparx5, FDMA_CH_CFG(fdma->channel_id));

	/* Start TX fdma */
	spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP,
		 sparx5, FDMA_PORT_CTRL(0));

	/* Activate the channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx)
{
	/* Disable the channel */
	spx5_rmw(0, BIT(tx->fdma.channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
		 sparx5, FDMA_CH_ACTIVATE);
}

static void sparx5_fdma_reload(struct sparx5 *sparx5, struct fdma *fdma)
{
	/* Reload the RX channel */
	spx5_wr(BIT(fdma->channel_id), sparx5, FDMA_CH_RELOAD);
}
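
/* Pull one completed frame out of the current DB: parse the IFH to find the
 * source port, strip the IFH (and the FCS unless the netdev keeps it),
 * timestamp the skb, update statistics and hand it to the network stack.
 * Returns false when the DB is not done yet or the frame cannot be mapped
 * to an active port.
 */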
static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
	struct fdma *fdma = &rx->fdma;
	struct sparx5_port *port;
	struct fdma_db *db_hw;
	struct frame_info fi;
	struct sk_buff *skb;

	/* Check if the DCB is done */
	db_hw = fdma_db_next_get(fdma);
	if (unlikely(!fdma_db_is_done(db_hw)))
		return false;
	skb = rx->skb[fdma->dcb_index][fdma->db_index];
	skb_put(skb, fdma_db_len_get(db_hw));
	/* Now do the normal processing of the skb */
	sparx5_ifh_parse(sparx5, (u32 *)skb->data, &fi);
	/* Map to port netdev */
	port = fi.src_port < sparx5->data->consts->n_ports ?
	       sparx5->ports[fi.src_port] :
	       NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, XTR_QUEUE);
		return false;
	}
	skb->dev = port->ndev;
	skb_pull(skb, IFH_LEN * sizeof(u32));
	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
		skb_trim(skb, skb->len - ETH_FCS_LEN);

	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, skb->dev);
	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;
	skb->dev->stats.rx_bytes += skb->len;
	skb->dev->stats.rx_packets++;
	netif_receive_skb(skb);
	return true;
}
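
/* NAPI poll: consume up to 'weight' frames, re-add DCBs whose DBs have all
 * been used, and complete NAPI and re-enable the channel DB interrupt when
 * the budget is not exhausted. The channel is then reloaded so the hardware
 * picks up the re-added DCBs.
 */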
static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight)
{
	struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi);
	struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx);
	struct fdma *fdma = &rx->fdma;
	int counter = 0;

	while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) {
		fdma_db_advance(fdma);
		counter++;
		/* Check if the DCB can be reused */
		if (fdma_dcb_is_reusable(fdma))
			continue;
		fdma_dcb_add(fdma, fdma->dcb_index,
			     FDMA_DCB_INFO_DATAL(fdma->db_size),
			     FDMA_DCB_STATUS_INTR);
		fdma_dcb_advance(fdma);
	}

	if (counter < weight) {
		napi_complete_done(&rx->napi, counter);
		spx5_rmw(BIT(fdma->channel_id),
			 BIT(fdma->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA,
			 sparx5, FDMA_INTR_DB_ENA);
	}

	sparx5_fdma_reload(sparx5, fdma);

	return counter;
}
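
/* Inject one frame: copy the IFH followed by the frame data into the next
 * free TX data buffer and chain a single-DB DCB describing it. The first
 * transmission activates the TX channel; later ones only reload it.
 */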
int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	static bool first_time = true;
	void *virt_addr;

	fdma_dcb_advance(fdma);
	if (!fdma_db_is_done(fdma_db_get(fdma, fdma->dcb_index, 0)))
		return -EINVAL;

	/* Get the virtual address of the dataptr for the next DB */
	virt_addr = ((u8 *)fdma->dcbs +
		     (sizeof(struct fdma_dcb) * fdma->n_dcbs) +
		     ((fdma->dcb_index * fdma->n_dbs) * fdma->db_size));

	memcpy(virt_addr, ifh, IFH_LEN * 4);
	memcpy(virt_addr + IFH_LEN * 4, skb->data, skb->len);

	fdma_dcb_add(fdma, fdma->dcb_index, 0,
		     FDMA_DCB_STATUS_SOF |
		     FDMA_DCB_STATUS_EOF |
		     FDMA_DCB_STATUS_BLOCKO(0) |
		     FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4));

	if (first_time) {
		sparx5_fdma_tx_activate(sparx5, tx);
		first_time = false;
	} else {
		sparx5_fdma_reload(sparx5, fdma);
	}

	return NETDEV_TX_OK;
}
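
/* Allocate the RX DCB/DB resources, initialize the DCBs for extraction with
 * interrupt on completion, register and enable NAPI, and start the channel.
 */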
static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_rx *rx = &sparx5->rx;
	struct fdma *fdma = &rx->fdma;
	int err;

	err = fdma_alloc_phys(fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_INTR);

	netif_napi_add_weight(rx->ndev, &rx->napi, sparx5_fdma_napi_callback,
			      FDMA_WEIGHT);
	napi_enable(&rx->napi);
	sparx5_fdma_rx_activate(sparx5, rx);

	return 0;
}

static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5)
{
	struct sparx5_tx *tx = &sparx5->tx;
	struct fdma *fdma = &tx->fdma;
	int err;

	err = fdma_alloc_phys(fdma);
	if (err)
		return err;

	fdma_dcbs_init(fdma, FDMA_DCB_INFO_DATAL(fdma->db_size),
		       FDMA_DCB_STATUS_DONE);

	return 0;
}
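
/* Describe the RX channel to the common fdma library: channel number, ring
 * geometry, buffer size and the callbacks that provide the DCB and data
 * pointers. Any active port netdev is borrowed for skb allocation and NAPI.
 */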
static void sparx5_fdma_rx_init(struct sparx5 *sparx5,
				struct sparx5_rx *rx, int channel)
{
	struct fdma *fdma = &rx->fdma;
	int idx;

	fdma->channel_id = channel;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = FDMA_RX_DCB_MAX_DBS;
	fdma->priv = sparx5;
	fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
	fdma->size = fdma_get_size(&sparx5->rx.fdma);
	fdma->ops.dataptr_cb = &sparx5_fdma_rx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
	/* Fetch a netdev for SKB and NAPI use, any will do */
	for (idx = 0; idx < sparx5->data->consts->n_ports; ++idx) {
		struct sparx5_port *port = sparx5->ports[idx];

		if (port && port->ndev) {
			rx->ndev = port->ndev;
			break;
		}
	}
}

static void sparx5_fdma_tx_init(struct sparx5 *sparx5,
				struct sparx5_tx *tx, int channel)
{
	struct fdma *fdma = &tx->fdma;

	fdma->channel_id = channel;
	fdma->n_dcbs = FDMA_DCB_MAX;
	fdma->n_dbs = FDMA_TX_DCB_MAX_DBS;
	fdma->priv = sparx5;
	fdma->db_size = ALIGN(FDMA_XTR_BUFFER_SIZE, PAGE_SIZE);
	fdma->size = fdma_get_size_contiguous(&sparx5->tx.fdma);
	fdma->ops.dataptr_cb = &sparx5_fdma_tx_dataptr_cb;
	fdma->ops.nextptr_cb = &fdma_nextptr_cb;
}
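
/* FDMA interrupt handler: on a DB interrupt, mask further DB interrupts, ack
 * the status and defer frame processing to NAPI; on an error interrupt, log
 * and clear the error state.
 */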
irqreturn_t sparx5_fdma_handler(int irq, void *args)
{
	struct sparx5 *sparx5 = args;
	u32 db = 0, err = 0;

	db = spx5_rd(sparx5, FDMA_INTR_DB);
	err = spx5_rd(sparx5, FDMA_INTR_ERR);
	/* Clear interrupt */
	if (db) {
		spx5_wr(0, sparx5, FDMA_INTR_DB_ENA);
		spx5_wr(db, sparx5, FDMA_INTR_DB);
		napi_schedule(&sparx5->rx.napi);
	}
	if (err) {
		u32 err_type = spx5_rd(sparx5, FDMA_ERRORS);

		dev_err_ratelimited(sparx5->dev,
				    "ERR: int: %#x, type: %#x\n",
				    err, err_type);
		spx5_wr(err, sparx5, FDMA_INTR_ERR);
		spx5_wr(err_type, sparx5, FDMA_ERRORS);
	}
	return IRQ_HANDLED;
}
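
/* Switch the extraction and injection queue groups to FDMA mode and prepare
 * the internal CPU ports: IFH injection format, no preamble, padding,
 * disassembler watermarks, forwarding urgency and no frame aging.
 */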
static void sparx5_fdma_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;
	int urgency;

	/* Change mode to fdma extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
	     portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
	     portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable port in queue system */
		urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500);
		spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency),
			 QFWD_SWITCH_PORT_MODE_PORT_ENA |
			 QFWD_SWITCH_PORT_MODE_FWD_URGENCY,
			 sparx5,
			 QFWD_SWITCH_PORT_MODE(portno));

		/* Disable Disassembler buffer underrun watchdog
		 * to avoid truncated packets in XTR
		 */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));

		/* Disabling frame aging */
		spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1),
			 HSCH_PORT_MODE_AGE_DIS,
			 sparx5,
			 HSCH_PORT_MODE(portno));
	}
}
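
/* Bring up FDMA based extraction and injection: reset the FDMA block, set up
 * ACP caching, switch the queue groups to FDMA mode, describe the RX/TX
 * channels and allocate their buffers before activating extraction.
 */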
int sparx5_fdma_start(struct sparx5 *sparx5)
{
	int err;

	/* Reset FDMA state */
	spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL);
	spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL);

	/* Force ACP caching but disable read/write allocation */
	spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) |
		 CPU_PROC_CTRL_ACP_AWCACHE_SET(0) |
		 CPU_PROC_CTRL_ACP_ARCACHE_SET(0),
		 CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA |
		 CPU_PROC_CTRL_ACP_AWCACHE |
		 CPU_PROC_CTRL_ACP_ARCACHE,
		 sparx5, CPU_PROC_CTRL);

	sparx5_fdma_injection_mode(sparx5);
	sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL);
	sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL);
	err = sparx5_fdma_rx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err);
		return err;
	}
	err = sparx5_fdma_tx_alloc(sparx5);
	if (err) {
		dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err);
		return err;
	}
	return err;
}

static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5)
{
	return spx5_rd(sparx5, FDMA_PORT_CTRL(0));
}
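
/* Tear down FDMA: stop NAPI, deactivate both channels, poll the extraction
 * buffer state via sparx5_fdma_port_ctrl() and release the DCB/DB memory.
 */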
int sparx5_fdma_stop(struct sparx5 *sparx5)
{
	u32 val;

	napi_disable(&sparx5->rx.napi);
	/* Stop the fdma and channel interrupts */
	sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx);
	sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx);
	/* Wait for the RX channel to stop */
	read_poll_timeout(sparx5_fdma_port_ctrl, val,
			  FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0,
			  500, 10000, 0, sparx5);
	fdma_free_phys(&sparx5->rx.fdma);
	fdma_free_phys(&sparx5->tx.fdma);

	return 0;
}