// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This contains the functions to handle the normal descriptors.

  Copyright (C) 2007-2009 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "descs_com.h"
15 static int ndesc_get_tx_status(void *data
, struct stmmac_extra_stats
*x
,
16 struct dma_desc
*p
, void __iomem
*ioaddr
)
18 struct net_device_stats
*stats
= (struct net_device_stats
*)data
;
19 unsigned int tdes0
= le32_to_cpu(p
->des0
);
20 unsigned int tdes1
= le32_to_cpu(p
->des1
);
23 /* Get tx owner first */
24 if (unlikely(tdes0
& TDES0_OWN
))
27 /* Verify tx error by looking at the last segment. */
28 if (likely(!(tdes1
& TDES1_LAST_SEGMENT
)))
31 if (unlikely(tdes0
& TDES0_ERROR_SUMMARY
)) {
32 if (unlikely(tdes0
& TDES0_UNDERFLOW_ERROR
)) {
34 stats
->tx_fifo_errors
++;
36 if (unlikely(tdes0
& TDES0_NO_CARRIER
)) {
38 stats
->tx_carrier_errors
++;
40 if (unlikely(tdes0
& TDES0_LOSS_CARRIER
)) {
42 stats
->tx_carrier_errors
++;
44 if (unlikely((tdes0
& TDES0_EXCESSIVE_DEFERRAL
) ||
45 (tdes0
& TDES0_EXCESSIVE_COLLISIONS
) ||
46 (tdes0
& TDES0_LATE_COLLISION
))) {
47 unsigned int collisions
;
49 collisions
= (tdes0
& TDES0_COLLISION_COUNT_MASK
) >> 3;
50 stats
->collisions
+= collisions
;
55 if (tdes0
& TDES0_VLAN_FRAME
)
58 if (unlikely(tdes0
& TDES0_DEFERRED
))
64 static int ndesc_get_tx_len(struct dma_desc
*p
)
66 return (le32_to_cpu(p
->des1
) & RDES1_BUFFER1_SIZE_MASK
);
69 /* This function verifies if each incoming frame has some errors
70 * and, if required, updates the multicast statistics.
71 * In case of success, it returns good_frame because the GMAC device
72 * is supposed to be able to compute the csum in HW. */
73 static int ndesc_get_rx_status(void *data
, struct stmmac_extra_stats
*x
,
77 unsigned int rdes0
= le32_to_cpu(p
->des0
);
78 struct net_device_stats
*stats
= (struct net_device_stats
*)data
;
80 if (unlikely(rdes0
& RDES0_OWN
))
83 if (unlikely(!(rdes0
& RDES0_LAST_DESCRIPTOR
))) {
84 stats
->rx_length_errors
++;
88 if (unlikely(rdes0
& RDES0_ERROR_SUMMARY
)) {
89 if (unlikely(rdes0
& RDES0_DESCRIPTOR_ERROR
))
91 if (unlikely(rdes0
& RDES0_SA_FILTER_FAIL
))
93 if (unlikely(rdes0
& RDES0_OVERFLOW_ERROR
))
95 if (unlikely(rdes0
& RDES0_IPC_CSUM_ERROR
))
97 if (unlikely(rdes0
& RDES0_COLLISION
)) {
101 if (unlikely(rdes0
& RDES0_CRC_ERROR
)) {
103 stats
->rx_crc_errors
++;
107 if (unlikely(rdes0
& RDES0_DRIBBLING
))
110 if (unlikely(rdes0
& RDES0_LENGTH_ERROR
)) {
114 if (unlikely(rdes0
& RDES0_MII_ERROR
)) {
118 #ifdef STMMAC_VLAN_TAG_USED
119 if (rdes0
& RDES0_VLAN_TAG
)
125 static void ndesc_init_rx_desc(struct dma_desc
*p
, int disable_rx_ic
, int mode
,
130 p
->des0
|= cpu_to_le32(RDES0_OWN
);
132 bfsize1
= min(bfsize
, BUF_SIZE_2KiB
- 1);
133 p
->des1
|= cpu_to_le32(bfsize1
& RDES1_BUFFER1_SIZE_MASK
);
135 if (mode
== STMMAC_CHAIN_MODE
)
136 ndesc_rx_set_on_chain(p
, end
);
138 ndesc_rx_set_on_ring(p
, end
, bfsize
);
141 p
->des1
|= cpu_to_le32(RDES1_DISABLE_IC
);
144 static void ndesc_init_tx_desc(struct dma_desc
*p
, int mode
, int end
)
146 p
->des0
&= cpu_to_le32(~TDES0_OWN
);
147 if (mode
== STMMAC_CHAIN_MODE
)
148 ndesc_tx_set_on_chain(p
);
150 ndesc_end_tx_desc_on_ring(p
, end
);
153 static int ndesc_get_tx_owner(struct dma_desc
*p
)
155 return (le32_to_cpu(p
->des0
) & TDES0_OWN
) >> 31;
158 static void ndesc_set_tx_owner(struct dma_desc
*p
)
160 p
->des0
|= cpu_to_le32(TDES0_OWN
);
163 static void ndesc_set_rx_owner(struct dma_desc
*p
, int disable_rx_ic
)
165 p
->des0
|= cpu_to_le32(RDES0_OWN
);
168 static int ndesc_get_tx_ls(struct dma_desc
*p
)
170 return (le32_to_cpu(p
->des1
) & TDES1_LAST_SEGMENT
) >> 30;
173 static void ndesc_release_tx_desc(struct dma_desc
*p
, int mode
)
175 int ter
= (le32_to_cpu(p
->des1
) & TDES1_END_RING
) >> 25;
177 memset(p
, 0, offsetof(struct dma_desc
, des2
));
178 if (mode
== STMMAC_CHAIN_MODE
)
179 ndesc_tx_set_on_chain(p
);
181 ndesc_end_tx_desc_on_ring(p
, ter
);
184 static void ndesc_prepare_tx_desc(struct dma_desc
*p
, int is_fs
, int len
,
185 bool csum_flag
, int mode
, bool tx_own
,
186 bool ls
, unsigned int tot_pkt_len
)
188 unsigned int tdes1
= le32_to_cpu(p
->des1
);
191 tdes1
|= TDES1_FIRST_SEGMENT
;
193 tdes1
&= ~TDES1_FIRST_SEGMENT
;
195 if (likely(csum_flag
))
196 tdes1
|= (TX_CIC_FULL
) << TDES1_CHECKSUM_INSERTION_SHIFT
;
198 tdes1
&= ~(TX_CIC_FULL
<< TDES1_CHECKSUM_INSERTION_SHIFT
);
201 tdes1
|= TDES1_LAST_SEGMENT
;
203 p
->des1
= cpu_to_le32(tdes1
);
205 if (mode
== STMMAC_CHAIN_MODE
)
206 norm_set_tx_desc_len_on_chain(p
, len
);
208 norm_set_tx_desc_len_on_ring(p
, len
);
211 p
->des0
|= cpu_to_le32(TDES0_OWN
);
214 static void ndesc_set_tx_ic(struct dma_desc
*p
)
216 p
->des1
|= cpu_to_le32(TDES1_INTERRUPT
);
219 static int ndesc_get_rx_frame_len(struct dma_desc
*p
, int rx_coe_type
)
221 unsigned int csum
= 0;
223 /* The type-1 checksum offload engines append the checksum at
224 * the end of frame and the two bytes of checksum are added in
226 * Adjust for that in the framelen for type-1 checksum offload
229 if (rx_coe_type
== STMMAC_RX_COE_TYPE1
)
232 return (((le32_to_cpu(p
->des0
) & RDES0_FRAME_LEN_MASK
)
233 >> RDES0_FRAME_LEN_SHIFT
) -
238 static void ndesc_enable_tx_timestamp(struct dma_desc
*p
)
240 p
->des1
|= cpu_to_le32(TDES1_TIME_STAMP_ENABLE
);
243 static int ndesc_get_tx_timestamp_status(struct dma_desc
*p
)
245 return (le32_to_cpu(p
->des0
) & TDES0_TIME_STAMP_STATUS
) >> 17;
248 static void ndesc_get_timestamp(void *desc
, u32 ats
, u64
*ts
)
250 struct dma_desc
*p
= (struct dma_desc
*)desc
;
253 ns
= le32_to_cpu(p
->des2
);
254 /* convert high/sec time stamp value to nanosecond */
255 ns
+= le32_to_cpu(p
->des3
) * 1000000000ULL;
260 static int ndesc_get_rx_timestamp_status(void *desc
, void *next_desc
, u32 ats
)
262 struct dma_desc
*p
= (struct dma_desc
*)desc
;
264 if ((le32_to_cpu(p
->des2
) == 0xffffffff) &&
265 (le32_to_cpu(p
->des3
) == 0xffffffff))
266 /* timestamp is corrupted, hence don't store it */
272 static void ndesc_display_ring(void *head
, unsigned int size
, bool rx
)
274 struct dma_desc
*p
= (struct dma_desc
*)head
;
277 pr_info("%s descriptor ring:\n", rx
? "RX" : "TX");
279 for (i
= 0; i
< size
; i
++) {
283 pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
284 i
, (unsigned int)virt_to_phys(p
),
285 (unsigned int)x
, (unsigned int)(x
>> 32),
292 static void ndesc_get_addr(struct dma_desc
*p
, unsigned int *addr
)
294 *addr
= le32_to_cpu(p
->des2
);
297 static void ndesc_set_addr(struct dma_desc
*p
, dma_addr_t addr
)
299 p
->des2
= cpu_to_le32(addr
);
302 static void ndesc_clear(struct dma_desc
*p
)
307 const struct stmmac_desc_ops ndesc_ops
= {
308 .tx_status
= ndesc_get_tx_status
,
309 .rx_status
= ndesc_get_rx_status
,
310 .get_tx_len
= ndesc_get_tx_len
,
311 .init_rx_desc
= ndesc_init_rx_desc
,
312 .init_tx_desc
= ndesc_init_tx_desc
,
313 .get_tx_owner
= ndesc_get_tx_owner
,
314 .release_tx_desc
= ndesc_release_tx_desc
,
315 .prepare_tx_desc
= ndesc_prepare_tx_desc
,
316 .set_tx_ic
= ndesc_set_tx_ic
,
317 .get_tx_ls
= ndesc_get_tx_ls
,
318 .set_tx_owner
= ndesc_set_tx_owner
,
319 .set_rx_owner
= ndesc_set_rx_owner
,
320 .get_rx_frame_len
= ndesc_get_rx_frame_len
,
321 .enable_tx_timestamp
= ndesc_enable_tx_timestamp
,
322 .get_tx_timestamp_status
= ndesc_get_tx_timestamp_status
,
323 .get_timestamp
= ndesc_get_timestamp
,
324 .get_rx_timestamp_status
= ndesc_get_rx_timestamp_status
,
325 .display_ring
= ndesc_display_ring
,
326 .get_addr
= ndesc_get_addr
,
327 .set_addr
= ndesc_set_addr
,
328 .clear
= ndesc_clear
,