// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014  STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"
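
/* Parse the TX status reported in TDES0 of an enhanced descriptor:
 * check DMA ownership first, then decode the error summary bits into
 * the extra stats and the netdev counters.
 */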
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & ETDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
			x->tx_jabber++;

		if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
			     (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
			stats->collisions +=
				(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

		if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = tx_err;
	}

	if (unlikely(tdes0 & ETDES0_DEFERRED))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (tdes0 & ETDES0_VLAN_FRAME)
		x->tx_vlan++;
#endif

	return ret;
}
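
/* Return the size of buffer 1 programmed in TDES1. */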
static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}
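
/* Translate the checksum offload (COE) bits of RDES0 into the frame
 * status codes consumed by the rx path (see the table below).
 */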
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM error.
	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
	 *      1 1 0 | IPv4/6 CSUM IP HR error
	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 *      0 1 1 | COE bypassed.. no IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0)
		ret = llc_snap;
	else if (status == 0x4)
		ret = good_frame;
	else if (status == 0x5)
		ret = csum_none;
	else if (status == 0x6)
		ret = csum_none;
	else if (status == 0x7)
		ret = csum_none;
	else if (status == 0x1)
		ret = discard_frame;
	else if (status == 0x3)
		ret = discard_frame;

	return ret;
}
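
/* Decode the extended RX status (RDES4): IP checksum results, PTP
 * message type and the AV/filter match indications, accumulated into
 * the extra stats.
 */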
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
	unsigned int rdes4 = le32_to_cpu(p->des4);

	if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
		int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

		if (rdes4 & ERDES4_IP_HDR_ERR)
			x->ip_hdr_err++;
		if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
			x->ip_payload_err++;
		if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
			x->ip_csum_bypassed++;
		if (rdes4 & ERDES4_IPV4_PKT_RCVD)
			x->ipv4_pkt_rcvd++;
		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
			x->ipv6_pkt_rcvd++;

		if (message_type == RDES_EXT_NO_PTP)
			x->no_ptp_rx_msg_type_ext++;
		else if (message_type == RDES_EXT_SYNC)
			x->ptp_rx_msg_type_sync++;
		else if (message_type == RDES_EXT_FOLLOW_UP)
			x->ptp_rx_msg_type_follow_up++;
		else if (message_type == RDES_EXT_DELAY_REQ)
			x->ptp_rx_msg_type_delay_req++;
		else if (message_type == RDES_EXT_DELAY_RESP)
			x->ptp_rx_msg_type_delay_resp++;
		else if (message_type == RDES_EXT_PDELAY_REQ)
			x->ptp_rx_msg_type_pdelay_req++;
		else if (message_type == RDES_EXT_PDELAY_RESP)
			x->ptp_rx_msg_type_pdelay_resp++;
		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->ptp_rx_msg_type_pdelay_follow_up++;
		else if (message_type == RDES_PTP_ANNOUNCE)
			x->ptp_rx_msg_type_announce++;
		else if (message_type == RDES_PTP_MANAGEMENT)
			x->ptp_rx_msg_type_management++;
		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
			x->ptp_rx_msg_pkt_reserved_type++;

		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
			x->ptp_frame_type++;
		if (rdes4 & ERDES4_PTP_VER)
			x->ptp_ver++;
		if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
			x->timestamp_dropped++;
		if (rdes4 & ERDES4_AV_PKT_RCVD)
			x->av_pkt_rcvd++;
		if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
			x->av_tagged_pkt_rcvd++;
		if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
			x->vlan_tag_priority_val++;
		if (rdes4 & ERDES4_L3_FILTER_MATCH)
			x->l3_filter_match++;
		if (rdes4 & ERDES4_L4_FILTER_MATCH)
			x->l4_filter_match++;
		if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
			x->l3_l4_filter_no_match++;
	}
}
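
/* Parse the RX status in RDES0: ownership, error summary and filter
 * failures, then let enh_desc_coe_rdes0() refine the checksum result.
 */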
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	int ret = good_frame;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(rdes0 & RDES0_COLLISION))
			stats->collisions++;
		if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes0 & RDES0_MII_ERROR))	/* GMII */
			x->rx_mii++;

		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * It doesn't match with the information reported into the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers.
	 */
	if (likely(ret == good_frame))
		ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
					 !!(rdes0 & RDES0_FRAME_TYPE),
					 !!(rdes0 & ERDES0_RX_MAC_ADDR));

	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->rx_vlan++;
#endif

	return ret;
}
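
/* Initialize an RX descriptor: hand it to the DMA (OWN bit), program
 * the buffer 1 size and set up ring or chain addressing.
 */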
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end, int bfsize)
{
	int bfsize1;

	p->des0 |= cpu_to_le32(RDES0_OWN);

	bfsize1 = min(bfsize, BUF_SIZE_8KiB);
	p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p);
	else
		ehn_desc_rx_set_on_ring(p, end, bfsize);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}

static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 &= cpu_to_le32(~ETDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_OWN);
}

static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}
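
/* Clear a TX descriptor after completion, keeping only the end-of-ring
 * information so the ring/chain layout is preserved.
 */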
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}
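
/* Fill a TX descriptor for transmission: buffer length, first/last
 * segment flags, checksum insertion and, last of all, the OWN bit.
 */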
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (is_fs)
		tdes0 |= ETDES0_FIRST_SEGMENT;
	else
		tdes0 &= ~ETDES0_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
	else
		tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes0 |= ETDES0_LAST_SEGMENT;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes0 |= ETDES0_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame have to be set before, to
		 * avoid race conditions.
		 */
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}

static void enh_desc_set_tx_ic(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}

static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;

	/* The type-1 checksum offload engines append the checksum at
	 * the end of frame and the two bytes of checksum are added in
	 * the length.
	 * Adjust for that in the framelen for type-1 checksum offload
	 * engines.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
		 >> RDES0_FRAME_LEN_SHIFT) - csum);
}

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}
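
/* Read the captured timestamp: from DES6/DES7 of the extended
 * descriptor when available (ats), otherwise from DES2/DES3.
 */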
static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;

		ns = le32_to_cpu(p->des6);
		/* convert high/sec time stamp value to nanosecond */
		ns += le32_to_cpu(p->des7) * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;

		ns = le32_to_cpu(p->des2);
		ns += le32_to_cpu(p->des3) * 1000000000ULL;
	}

	*ts = ns;
}

static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
					    u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;

		return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;

		if ((le32_to_cpu(p->des2) == 0xffffffff) &&
		    (le32_to_cpu(p->des3) == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}
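
/* Dump the extended descriptor ring to the kernel log for debugging. */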
static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	int i;

	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		x = *(u64 *)ep;
		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(ep),
			(unsigned int)x, (unsigned int)(x >> 32),
			ep->basic.des2, ep->basic.des3);
		ep++;
	}
	pr_info("\n");
}

static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr)
{
	*addr = le32_to_cpu(p->des2);
}

static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
	p->des2 = cpu_to_le32(addr);
}

static void enh_desc_clear(struct dma_desc *p)
{
	p->des2 = 0;
}
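
/* Descriptor callbacks exported to the stmmac core for the enhanced
 * descriptor format.
 */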
const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.set_tx_ic = enh_desc_set_tx_ic,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
	.display_ring = enh_desc_display_ring,
	.get_addr = enh_desc_get_addr,
	.set_addr = enh_desc_set_addr,
	.clear = enh_desc_clear,
};