// SPDX-License-Identifier: GPL-2.0-only
/*
 * This contains the functions to handle the descriptors for DesignWare databook
 * 4.xx.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/stmmac.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac4_descs.h"

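/* Tx write-back status parsing: on completion the DMA reports the error and
 * status bits in TDES3 of the last segment. The helper below folds those bits
 * into the extra stats / netdev counters and returns one of the tx_* status
 * codes (tx_dma_own, tx_not_ls, tx_err, tx_done) consumed by the stmmac core.
 */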
static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes3;
	int ret = tx_done;

	tdes3 = le32_to_cpu(p->des3);

	/* Get tx owner first */
	if (unlikely(tdes3 & TDES3_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
		return tx_not_ls;

	if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
		if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
			x->tx_jabber++;
		if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
			x->tx_frame_flushed++;
		if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
			stats->collisions +=
				(tdes3 & TDES3_COLLISION_COUNT_MASK)
				>> TDES3_COLLISION_COUNT_SHIFT;

		if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
			x->tx_underflow++;

		if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
			x->tx_payload_error++;

		ret = tx_err;
	}

	if (unlikely(tdes3 & TDES3_DEFERRED))
		x->tx_deferred++;

	return ret;
}

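/* Rx write-back status parsing: RDES3 carries ownership, context and error
 * summary bits, RDES1 the checksum and PTP message-type information, RDES2 the
 * SA/DA and L3/L4 filter results. Errors and filter failures downgrade the
 * return value to discard_frame, otherwise good_frame is reported.
 */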
static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
				       struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes2 = le32_to_cpu(p->des2);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	unsigned int message_type;
	int ret = good_frame;

	if (unlikely(rdes3 & RDES3_OWN))
		return dma_own;

	if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
		return discard_frame;
	if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
		return rx_not_ls;

	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
			stats->rx_length_errors++;
		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
			x->rx_mii++;

		if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}

		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
			x->dribbling_bit++;

		ret = discard_frame;
	}

	message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;

	if (rdes1 & RDES1_IP_HDR_ERROR)
		x->ip_hdr_err++;
	if (rdes1 & RDES1_IP_CSUM_BYPASSED)
		x->ip_csum_bypassed++;
	if (rdes1 & RDES1_IPV4_HEADER)
		x->ipv4_pkt_rcvd++;
	if (rdes1 & RDES1_IPV6_HEADER)
		x->ipv6_pkt_rcvd++;

	if (message_type == RDES_EXT_NO_PTP)
		x->no_ptp_rx_msg_type_ext++;
	else if (message_type == RDES_EXT_SYNC)
		x->ptp_rx_msg_type_sync++;
	else if (message_type == RDES_EXT_FOLLOW_UP)
		x->ptp_rx_msg_type_follow_up++;
	else if (message_type == RDES_EXT_DELAY_REQ)
		x->ptp_rx_msg_type_delay_req++;
	else if (message_type == RDES_EXT_DELAY_RESP)
		x->ptp_rx_msg_type_delay_resp++;
	else if (message_type == RDES_EXT_PDELAY_REQ)
		x->ptp_rx_msg_type_pdelay_req++;
	else if (message_type == RDES_EXT_PDELAY_RESP)
		x->ptp_rx_msg_type_pdelay_resp++;
	else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
		x->ptp_rx_msg_type_pdelay_follow_up++;
	else if (message_type == RDES_PTP_ANNOUNCE)
		x->ptp_rx_msg_type_announce++;
	else if (message_type == RDES_PTP_MANAGEMENT)
		x->ptp_rx_msg_type_management++;
	else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
		x->ptp_rx_msg_pkt_reserved_type++;

	if (rdes1 & RDES1_PTP_PACKET_TYPE)
		x->ptp_frame_type++;
	if (rdes1 & RDES1_PTP_VER)
		x->ptp_ver++;
	if (rdes1 & RDES1_TIMESTAMP_DROPPED)
		x->timestamp_dropped++;

	if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}

	if (rdes2 & RDES2_L3_FILTER_MATCH)
		x->l3_filter_match++;
	if (rdes2 & RDES2_L4_FILTER_MATCH)
		x->l4_filter_match++;
	if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
	    >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
		x->l3_l4_filter_no_match++;

	return ret;
}

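/* The helpers below are single-field accessors. Descriptor words are stored
 * little-endian, so every read/write goes through le32_to_cpu()/cpu_to_le32().
 */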
static int dwmac4_rd_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
}

static int dwmac4_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
}

static void dwmac4_set_tx_owner(struct dma_desc *p)
{
	p->des3 |= cpu_to_le32(TDES3_OWN);
}

static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);

	if (!disable_rx_ic)
		p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
}

static int dwmac4_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
		>> TDES3_LAST_DESCRIPTOR_SHIFT;
}

static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
{
	return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
}

static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
}

static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
	/* Context type from W/B descriptor must be zero */
	if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
		return 0;

	/* Tx Timestamp Status is 1, so des0 and des1 will hold valid values */
	if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
		return 1;

	return 0;
}

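/* The write-back timestamp is split across two words: des0 holds the
 * nanoseconds part and des1 the seconds part; both are folded into a single
 * 64-bit nanosecond value.
 */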
static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	u64 ns;

	ns = le32_to_cpu(p->des0);
	/* convert high/sec time stamp value to nanosecond */
	ns += le32_to_cpu(p->des1) * 1000000000ULL;

	*ts = ns;
}

static int dwmac4_rx_check_timestamp(void *desc)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	unsigned int rdes1 = le32_to_cpu(p->des1);
	unsigned int rdes3 = le32_to_cpu(p->des3);
	u32 own, ctxt;
	int ret = 1;

	own = rdes3 & RDES3_OWN;
	ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);

	if (likely(!own && ctxt)) {
		if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
			/* Corrupted value */
			ret = -EINVAL;
		else
			/* A valid Timestamp is ready to be read */
			ret = 0;
	}

	/* Timestamp not ready */
	return ret;
}

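/* RDES1_TIMESTAMP_AVAILABLE in the normal write-back descriptor only signals
 * that a timestamp exists; the value itself is delivered through the following
 * context descriptor, so the helper below polls dwmac4_rx_check_timestamp() on
 * next_desc a bounded number of times before giving up.
 */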
static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
						 u32 ats)
{
	struct dma_desc *p = (struct dma_desc *)desc;
	int ret = -EINVAL;

	/* Get the status from normal w/b descriptor */
	if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
			int i = 0;

			/* Check if timestamp is OK from context descriptor */
			do {
				ret = dwmac4_rx_check_timestamp(next_desc);
				if (ret < 0)
					goto exit;
				i++;

			} while ((ret == 1) && (i < 10));

			if (i == 10)
				ret = -EBUSY;
		}
	}
exit:
	if (likely(ret == 0))
		return 1;

	return 0;
}

static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				   int mode, int end, int bfsize)
{
	dwmac4_set_rx_owner(p, disable_rx_ic);
}

static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

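/* Descriptor setup for a normal Tx frame: all TDES3 bits are staged in a local
 * copy and written back with a single store. OWN is set last, and a dma_wmb()
 * on the first segment makes the other descriptor words visible to the DMA
 * before ownership is handed over.
 */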
static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				      bool csum_flag, int mode, bool tx_own,
				      bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);

	tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
	if (is_fs)
		tdes3 |= TDES3_FIRST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;

	if (likely(csum_flag))
		tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
	else
		tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit has to be set for the first frame, all
		 * descriptors for the same frame have to be set first, to
		 * avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

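/* TSO variant of the Tx descriptor setup: the first segment additionally
 * carries the TCP segmentation enable bit plus the TCP header and payload
 * lengths in TDES3, while the buffer 1/2 sizes go into TDES2.
 */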
static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
					  int len1, int len2, bool tx_own,
					  bool ls, unsigned int tcphdrlen,
					  unsigned int tcppayloadlen)
{
	unsigned int tdes3 = le32_to_cpu(p->des3);

	if (len1)
		p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));

	if (len2)
		p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
			    & TDES2_BUFFER2_SIZE_MASK);

	if (is_fs) {
		tdes3 |= TDES3_FIRST_DESCRIPTOR |
			 TDES3_TCP_SEGMENTATION_ENABLE |
			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
			  TDES3_SLOT_NUMBER_MASK) |
			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
	} else {
		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
	}

	if (ls)
		tdes3 |= TDES3_LAST_DESCRIPTOR;
	else
		tdes3 &= ~TDES3_LAST_DESCRIPTOR;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes3 |= TDES3_OWN;

	if (is_fs && tx_own)
		/* When the own bit has to be set for the first frame, all
		 * descriptors for the same frame have to be set first, to
		 * avoid a race condition.
		 */
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}

static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
{
	p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
}

static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_desc *p = (struct dma_desc *)head;
	int i;

	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(p),
			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
		p++;
	}
}

static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = cpu_to_le32(mss);
	p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
}

static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr)
{
	*addr = le32_to_cpu(p->des0);
}

static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des0 = cpu_to_le32(lower_32_bits(addr));
	p->des1 = cpu_to_le32(upper_32_bits(addr));
}

static void dwmac4_clear(struct dma_desc *p)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;
}

static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
{
	sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT;

	p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
}

static int set_16kib_bfsize(int mtu)
{
	int ret = 0;

	if (unlikely(mtu >= BUF_SIZE_8KiB))
		ret = BUF_SIZE_16KiB;
	return ret;
}

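/* VLAN insertion uses a separate Tx context descriptor: the outer tag goes
 * into TDES3 (with VLTV set), an optional inner tag/type pair into
 * TDES2/TDES3 (with IVLTV), and TDES3_CONTEXT_TYPE marks this as a context
 * rather than a data descriptor.
 */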
static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
				u32 inner_type)
{
	p->des0 = 0;
	p->des1 = 0;
	p->des2 = 0;
	p->des3 = 0;

	/* Inner VLAN */
	if (inner_type) {
		u32 des = inner_tag << TDES2_IVT_SHIFT;

		des &= TDES2_IVT_MASK;
		p->des2 = cpu_to_le32(des);

		des = inner_type << TDES3_IVTIR_SHIFT;
		des &= TDES3_IVTIR_MASK;
		p->des3 = cpu_to_le32(des | TDES3_IVLTV);
	}

	/* Outer VLAN */
	p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG);
	p->des3 |= cpu_to_le32(TDES3_VLTV);

	p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE);
}

static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
{
	type <<= TDES2_VLAN_TAG_SHIFT;
	p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}

static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
	*len = le32_to_cpu(p->des2) & RDES2_HL;
	return 0;
}

static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des2 = cpu_to_le32(lower_32_bits(addr));
	p->des3 = cpu_to_le32(upper_32_bits(addr) | RDES3_BUFFER2_VALID_ADDR);
}

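/* Time Based Scheduling: the launch time is programmed into the enhanced
 * descriptor, seconds (plus the launch-time-valid bit) in des4 and nanoseconds
 * in des5.
 */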
static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
{
	p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
	p->des5 = cpu_to_le32(nsec & TDES5_LT);
	p->des6 = 0;
	p->des7 = 0;
}

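/* Callback tables exported to the stmmac core: dwmac4_desc_ops covers the
 * descriptor handling above, while dwmac4_ring_mode_ops only provides the
 * 16KiB buffer-size selection used in ring mode.
 */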
const struct stmmac_desc_ops dwmac4_desc_ops = {
	.tx_status = dwmac4_wrback_get_tx_status,
	.rx_status = dwmac4_wrback_get_rx_status,
	.get_tx_len = dwmac4_rd_get_tx_len,
	.get_tx_owner = dwmac4_get_tx_owner,
	.set_tx_owner = dwmac4_set_tx_owner,
	.set_rx_owner = dwmac4_set_rx_owner,
	.get_tx_ls = dwmac4_get_tx_ls,
	.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
	.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
	.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
	.get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
	.get_timestamp = dwmac4_get_timestamp,
	.set_tx_ic = dwmac4_rd_set_tx_ic,
	.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
	.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
	.release_tx_desc = dwmac4_release_tx_desc,
	.init_rx_desc = dwmac4_rd_init_rx_desc,
	.init_tx_desc = dwmac4_rd_init_tx_desc,
	.display_ring = dwmac4_display_ring,
	.set_mss = dwmac4_set_mss_ctxt,
	.get_addr = dwmac4_get_addr,
	.set_addr = dwmac4_set_addr,
	.clear = dwmac4_clear,
	.set_sarc = dwmac4_set_sarc,
	.set_vlan_tag = dwmac4_set_vlan_tag,
	.set_vlan = dwmac4_set_vlan,
	.get_rx_header_len = dwmac4_get_rx_header_len,
	.set_sec_addr = dwmac4_set_sec_addr,
	.set_tbs = dwmac4_set_tbs,
};

const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
	.set_16kib_bfsize = set_16kib_bfsize,
};