// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)

#define XTR_VALID_BYTES(x) (4 - ((x) & 3))

#define INJ_TIMEOUT_NS 50000
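
/* The extraction FIFO interleaves 32-bit frame-data words with the
 * XTR_* control words above (0x8000000x on the wire). ntohl() keeps the
 * constants in the same byte order as the (optionally byte-swapped)
 * data stream, so each word read from the queue can be compared against
 * them directly. For the EOF words, XTR_VALID_BYTES() recovers from the
 * two low bits how many bytes of the final data word are valid.
 * INJ_TIMEOUT_NS (50 us) is the hrtimer period used to retry a stopped
 * injection queue.
 */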

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
{
	/* Start flush */
	spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	spx5_wr(0, sparx5, QS_XTR_FLUSH);
}

void sparx5_ifh_parse(struct sparx5 *sparx5, u32 *ifh, struct frame_info *info)
{
	u8 *xtr_hdr = (u8 *)ifh;

	/* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */
	u32 fwd =
		((u32)xtr_hdr[27] << 24) |
		((u32)xtr_hdr[28] << 16) |
		((u32)xtr_hdr[29] <<  8) |
		((u32)xtr_hdr[30] <<  0);
	fwd = (fwd >> 5);
	info->src_port = spx5_field_get(GENMASK(is_sparx5(sparx5) ? 7 : 6, 1),
					fwd);

	/*
	 * Bit 270-271 are occasionally unexpectedly set by the hardware,
	 * clear bits before extracting timestamp
	 */
	info->timestamp =
		((u64)(xtr_hdr[2] & GENMASK(5, 0)) << 24) |
		((u64)xtr_hdr[3] << 16) |
		((u64)xtr_hdr[4] <<  8) |
		((u64)xtr_hdr[5] <<  0);
}
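
/* Pull one complete frame from extraction queue 'grp' via register
 * reads: first IFH_LEN words of internal frame header, then frame data
 * words until an EOF or ABORT control word is seen.
 */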
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
{
	bool eof_flag = false, pruned_flag = false, abort_flag = false;
	struct net_device *netdev;
	struct sparx5_port *port;
	struct frame_info fi;
	int i, byte_cnt = 0;
	struct sk_buff *skb;
	u32 ifh[IFH_LEN];
	u32 *rxbuf;

	/* Get IFH */
	for (i = 0; i < IFH_LEN; i++)
		ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));

	/* Decode IFH (what's needed) */
	sparx5_ifh_parse(sparx5, ifh, &fi);

	/* Map to port netdev */
	port = fi.src_port < sparx5->data->consts->n_ports ?
		sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, grp);
		return;
	}

	/* Have netdev, get skb */
	netdev = port->ndev;
	skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
	if (!skb) {
		sparx5_xtr_flush(sparx5, grp);
		dev_err(sparx5->dev, "No skb allocated\n");
		netdev->stats.rx_dropped++;
		return;
	}
	rxbuf = (u32 *)skb->data;

	/* Now, pull frame data */
	while (!eof_flag) {
		u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
		u32 cmp = val;

		if (byte_swap)
			cmp = ntohl((__force __be32)val);

		switch (cmp) {
		case XTR_NOT_READY:
			break;
		case XTR_ABORT:
			/* No accompanying data */
			abort_flag = true;
			eof_flag = true;
			break;
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			/* This assumes STATUS_WORD_POS == 1, Status
			 * just after last data
			 */
			if (!byte_swap)
				val = ntohl((__force __be32)val);
			byte_cnt -= (4 - XTR_VALID_BYTES(val));
			eof_flag = true;
			break;
		case XTR_PRUNED:
			/* But get the last 4 bytes as well */
			eof_flag = true;
			pruned_flag = true;
			fallthrough;
		case XTR_ESCAPE:
			*rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
			byte_cnt += 4;
			rxbuf++;
			break;
		default:
			*rxbuf = val;
			byte_cnt += 4;
			rxbuf++;
		}
	}

	if (abort_flag || pruned_flag || !eof_flag) {
		netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
			   abort_flag, pruned_flag, eof_flag);
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return;
	}

	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	/* Finish up skb */
	skb_put(skb, byte_cnt - ETH_FCS_LEN);
	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, netdev);
	netdev->stats.rx_bytes += skb->len;
	netdev->stats.rx_packets++;
	netif_rx(skb);
}
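
/* Manual frame injection: push the IFH and the frame payload into the
 * QS injection FIFO one 32-bit word at a time, padding short frames to
 * the 60-byte minimum and terminating with an EOF word plus a dummy
 * FCS word.
 */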
static int sparx5_inject(struct sparx5 *sparx5,
			 u32 *ifh,
			 struct sk_buff *skb,
			 struct net_device *ndev)
{
	int grp = INJ_QUEUE;
	u32 val, w, count;
	u8 *buf;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
		pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
				   QS_INJ_STATUS_FIFO_RDY_GET(val));
		return -EBUSY;
	}

	/* Indicate SOF */
	spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
		QS_INJ_CTRL_GAP_SIZE_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Write the IFH to the chip. */
	for (w = 0; w < IFH_LEN; w++)
		spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));

	/* Write words, round up */
	count = DIV_ROUND_UP(skb->len, 4);
	buf = skb->data;
	for (w = 0; w < count; w++, buf += 4) {
		val = get_unaligned((const u32 *)buf);
		spx5_wr(val, sparx5, QS_INJ_WR(grp));
	}

	/* Add padding */
	while (w < (60 / 4)) {
		spx5_wr(0, sparx5, QS_INJ_WR(grp));
		w++;
	}

	/* Indicate EOF and valid bytes in last word */
	spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
		QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
		QS_INJ_CTRL_EOF_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Add dummy CRC */
	spx5_wr(0, sparx5, QS_INJ_WR(grp));
	w++;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		struct sparx5_port *port = netdev_priv(ndev);

		pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
				   QS_INJ_STATUS_WMARK_REACHED_GET(val));
		netif_stop_queue(ndev);
		hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
			      HRTIMER_MODE_REL);
	}

	return NETDEV_TX_OK;
}
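
/* Transmit entry point: frames go out via FDMA when its interrupt is
 * wired up, otherwise via register-based injection above. Two-step PTP
 * frames are kept alive until their TX timestamp has been read out.
 */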
netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u32 ifh[IFH_LEN];
	int ret;

	memset(ifh, 0, IFH_LEN * 4);
	sparx5_set_port_ifh(sparx5, ifh, port->portno);

	if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		if (sparx5_ptp_txtstamp_request(port, skb) < 0)
			return NETDEV_TX_BUSY;

		sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
		sparx5_set_port_ifh_pdu_type(sparx5, ifh,
					     SPARX5_SKB_CB(skb)->pdu_type);
		sparx5_set_port_ifh_pdu_w16_offset(sparx5, ifh,
						   SPARX5_SKB_CB(skb)->pdu_w16_offset);
		sparx5_set_port_ifh_timestamp(sparx5, ifh,
					      SPARX5_SKB_CB(skb)->ts_id);
	}

	skb_tx_timestamp(skb);
	spin_lock(&sparx5->tx_lock);
	if (sparx5->fdma_irq > 0)
		ret = sparx5_fdma_xmit(sparx5, ifh, skb);
	else
		ret = sparx5_inject(sparx5, ifh, skb, dev);
	spin_unlock(&sparx5->tx_lock);

	if (ret == -EBUSY)
		goto busy;
	if (ret < 0)
		goto drop;

	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	sparx5->tx.packets++;

	/* Two-step PTP skbs are freed once the TX timestamp is read out */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		return NETDEV_TX_OK;

	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
drop:
	stats->tx_dropped++;
	sparx5->tx.dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
busy:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		sparx5_ptp_txtstamp_release(port, skb);
	return NETDEV_TX_BUSY;
}
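
/* hrtimer callback armed by sparx5_inject() when the DSM TX watermark
 * is hit: clear the watermark count if it is still set, then wake the
 * queue so transmission can resume.
 */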
static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
{
	struct sparx5_port *port = container_of(tmr, struct sparx5_port,
						inj_timer);
	int grp = INJ_QUEUE;
	u32 val;

	val = spx5_rd(port->sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		pr_err_ratelimited("Injection: Reset watermark count\n");
		/* Reset Watermark count to restart */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 port->sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));
	}
	netif_wake_queue(port->ndev);
	return HRTIMER_NORESTART;
}
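
/* Switch the CPU port queues from DMA to register-based (manual)
 * extraction and injection, and configure the CPU ports in the ASM/DSM
 * for IFH-prefixed frames without preamble.
 */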
int sparx5_manual_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;

	/* Change mode to manual extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_0);
	     portno <= sparx5_get_internal_port(sparx5, SPX5_PORT_CPU_1);
	     portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable Disassembler buffer underrun watchdog */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));
	}
	return 0;
}
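
/* Extraction IRQ: drain complete frames from the extraction queue,
 * bounded by a poll budget so the handler cannot run indefinitely.
 */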
irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
{
	struct sparx5 *s5 = _sparx5;
	int poll = 64;

	/* Check data in queue */
	while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
		sparx5_xtr_grp(s5, XTR_QUEUE, false);

	return IRQ_HANDLED;
}

void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
	hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->inj_timer.function = sparx5_injection_timeout;
}