/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2014  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"
static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	int ret = tx_done;

	/* Get tx owner first */
	if (unlikely(tdes0 & ETDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment. */
	if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
			x->tx_jabber++;

		if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
			     (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
			stats->collisions +=
				(tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;

		if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
			x->tx_deferred++;

		if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
			x->tx_ip_header_error++;

		if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = tx_err;
	}

	if (unlikely(tdes0 & ETDES0_DEFERRED))
		x->tx_deferred++;

#ifdef STMMAC_VLAN_TAG_USED
	if (tdes0 & ETDES0_VLAN_FRAME)
		x->tx_vlan++;
#endif

	return ret;
}
static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
}
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM error.
	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
	 *      1 1 0 | IPv4/6 CSUM IP HDR error
	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 *      0 1 1 | COE bypassed.. no IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0)
		ret = llc_snap;
	else if (status == 0x4)
		ret = good_frame;
	else if (status == 0x5)
		ret = csum_none;
	else if (status == 0x6)
		ret = csum_none;
	else if (status == 0x7)
		ret = csum_none;
	else if (status == 0x1)
		ret = discard_frame;
	else if (status == 0x3)
		ret = discard_frame;

	return ret;
}
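/* Worked example for enh_desc_coe_rdes0() above (illustrative only): an
 * IPv4 frame with the frame type bit set, no IP header error and a payload
 * checksum error gives status = (1 << 2 | 0 << 1 | 1) = 0x5, the "CSUM
 * PAYLOAD error" row, so csum_none is returned and checksum validation is
 * left to the network stack.
 */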
static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	unsigned int rdes0 = le32_to_cpu(p->basic.des0);
	unsigned int rdes4 = le32_to_cpu(p->des4);

	if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
		int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;

		if (rdes4 & ERDES4_IP_HDR_ERR)
			x->ip_hdr_err++;
		if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
			x->ip_payload_err++;
		if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
			x->ip_csum_bypassed++;
		if (rdes4 & ERDES4_IPV4_PKT_RCVD)
			x->ipv4_pkt_rcvd++;
		if (rdes4 & ERDES4_IPV6_PKT_RCVD)
			x->ipv6_pkt_rcvd++;

		if (message_type == RDES_EXT_NO_PTP)
			x->no_ptp_rx_msg_type_ext++;
		else if (message_type == RDES_EXT_SYNC)
			x->ptp_rx_msg_type_sync++;
		else if (message_type == RDES_EXT_FOLLOW_UP)
			x->ptp_rx_msg_type_follow_up++;
		else if (message_type == RDES_EXT_DELAY_REQ)
			x->ptp_rx_msg_type_delay_req++;
		else if (message_type == RDES_EXT_DELAY_RESP)
			x->ptp_rx_msg_type_delay_resp++;
		else if (message_type == RDES_EXT_PDELAY_REQ)
			x->ptp_rx_msg_type_pdelay_req++;
		else if (message_type == RDES_EXT_PDELAY_RESP)
			x->ptp_rx_msg_type_pdelay_resp++;
		else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->ptp_rx_msg_type_pdelay_follow_up++;
		else if (message_type == RDES_PTP_ANNOUNCE)
			x->ptp_rx_msg_type_announce++;
		else if (message_type == RDES_PTP_MANAGEMENT)
			x->ptp_rx_msg_type_management++;
		else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
			x->ptp_rx_msg_pkt_reserved_type++;

		if (rdes4 & ERDES4_PTP_FRAME_TYPE)
			x->ptp_frame_type++;
		if (rdes4 & ERDES4_PTP_VER)
			x->ptp_ver++;
		if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
			x->timestamp_dropped++;
		if (rdes4 & ERDES4_AV_PKT_RCVD)
			x->av_pkt_rcvd++;
		if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
			x->av_tagged_pkt_rcvd++;
		if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
			x->vlan_tag_priority_val++;
		if (rdes4 & ERDES4_L3_FILTER_MATCH)
			x->l3_filter_match++;
		if (rdes4 & ERDES4_L4_FILTER_MATCH)
			x->l4_filter_match++;
		if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
			x->l3_l4_filter_no_match++;
	}
}
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	int ret = good_frame;

	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->rx_gmac_overflow++;

		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			pr_err("\tIPC Csum Error/Giant frame\n");

		if (unlikely(rdes0 & RDES0_COLLISION))
			x->rx_collision++;
		if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
			x->rx_watchdog++;

		if (unlikely(rdes0 & RDES0_MII_ERROR))	/* GMII */
			x->rx_mii++;

		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * It doesn't match with the information reported into the databook.
	 * At any rate, we need to understand if the CSUM hw computation is ok
	 * and report this info to the upper layers. */
	ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
				 !!(rdes0 & RDES0_FRAME_TYPE),
				 !!(rdes0 & ERDES0_RX_MAC_ADDR));

	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->rx_vlan++;
#endif

	return ret;
}
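/* Note on the enh_desc_coe_rdes0() call in enh_desc_get_rx_status() above:
 * the ipc_err, type and payload_err arguments are taken from the RDES0 IPC
 * checksum error, frame type and extended status/Rx MAC address bits
 * respectively; these are the "bits 5 7 0" referred to in the table inside
 * enh_desc_coe_rdes0().
 */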
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
	p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p);
	else
		ehn_desc_rx_set_on_ring(p, end);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
}
static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des0 &= cpu_to_le32(~ETDES0_OWN);
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, end);
}
static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
}
static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_OWN);
}
static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(RDES0_OWN);
}
static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
}
static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     bool csum_flag, int mode, bool tx_own,
				     bool ls)
{
	unsigned int tdes0 = le32_to_cpu(p->des0);

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (is_fs)
		tdes0 |= ETDES0_FIRST_SEGMENT;
	else
		tdes0 &= ~ETDES0_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
	else
		tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);

	if (ls)
		tdes0 |= ETDES0_LAST_SEGMENT;

	/* Finally set the OWN bit. Later the DMA will start! */
	if (tx_own)
		tdes0 |= ETDES0_OWN;

	if (is_fs && tx_own)
		/* When the own bit, for the first frame, has to be set, all
		 * descriptors for the same frame have to be set before, to
		 * avoid race condition.
		 */
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}
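/* The dma_wmb() in enh_desc_prepare_tx_desc() above orders the descriptor
 * writes: for a multi-descriptor frame, all the other descriptors of the
 * frame must be visible to the DMA engine before des0 of the first
 * descriptor is written with the OWN bit set, otherwise the hardware could
 * start fetching a partially initialised chain.
 */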
static void enh_desc_set_tx_ic(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
}
static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	unsigned int csum = 0;

	/* The type-1 checksum offload engines append the checksum at
	 * the end of frame and the two bytes of checksum are added in
	 * the length.
	 * Adjust for that in the framelen for type-1 checksum offload
	 * engines.
	 */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		csum = 2;

	return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
		 >> RDES0_FRAME_LEN_SHIFT) - csum);
}
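/* Illustrative example for enh_desc_get_rx_frame_len() above: with a type-1
 * checksum engine the two checksum bytes appended by the hardware are
 * counted in the RDES0 frame length, so a 64-byte frame reported as 66 in
 * the descriptor is returned to the caller as 66 - 2 = 64.
 */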
static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
}
static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
}
static u64 enh_desc_get_timestamp(void *desc, u32 ats)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = le32_to_cpu(p->des6);
		/* convert high/sec time stamp value to nanosecond */
		ns += le32_to_cpu(p->des7) * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = le32_to_cpu(p->des2);
		ns += le32_to_cpu(p->des3) * 1000000000ULL;
	}

	return ns;
}
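/* In enh_desc_get_timestamp() above the low word (des6, or des2 without
 * alternate timestamping) holds the sub-second part in nanoseconds and the
 * high word (des7 or des3) holds seconds, hence ns = low + high * 10^9;
 * e.g. high = 2 and low = 500 yields 2000000500 ns.
 */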
static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		if ((le32_to_cpu(p->des2) == 0xffffffff) &&
		    (le32_to_cpu(p->des3) == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}
static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
{
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	int i;

	pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");

	for (i = 0; i < size; i++) {
		u64 x;

		x = *(u64 *)ep;
		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
			i, (unsigned int)virt_to_phys(ep),
			(unsigned int)x, (unsigned int)(x >> 32),
			ep->basic.des2, ep->basic.des3);
		ep++;
	}
	pr_info("\n");
}
const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.set_tx_ic = enh_desc_set_tx_ic,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
	.display_ring = enh_desc_display_ring,
};
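/* Usage note (assumption based on the core stmmac driver, not on this file):
 * the core code selects this table when the platform advertises enhanced
 * descriptors (plat->enh_desc), so each callback above is reached through
 * the stmmac_desc_ops interface rather than being called directly.
 */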