1 /*******************************************************************************
2 This contains the functions to handle the enhanced descriptors.
4 Copyright (C) 2007-2014 STMicroelectronics Ltd
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23 *******************************************************************************/
#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"
29 static int enh_desc_get_tx_status(void *data
, struct stmmac_extra_stats
*x
,
30 struct dma_desc
*p
, void __iomem
*ioaddr
)
32 struct net_device_stats
*stats
= (struct net_device_stats
*)data
;
33 unsigned int tdes0
= p
->des0
;
36 /* Get tx owner first */
37 if (unlikely(tdes0
& ETDES0_OWN
))
40 /* Verify tx error by looking at the last segment. */
41 if (likely(!(tdes0
& ETDES0_LAST_SEGMENT
)))
44 if (unlikely(tdes0
& ETDES0_ERROR_SUMMARY
)) {
45 if (unlikely(tdes0
& ETDES0_JABBER_TIMEOUT
))
48 if (unlikely(tdes0
& ETDES0_FRAME_FLUSHED
)) {
49 x
->tx_frame_flushed
++;
50 dwmac_dma_flush_tx_fifo(ioaddr
);
53 if (unlikely(tdes0
& ETDES0_LOSS_CARRIER
)) {
55 stats
->tx_carrier_errors
++;
57 if (unlikely(tdes0
& ETDES0_NO_CARRIER
)) {
59 stats
->tx_carrier_errors
++;
61 if (unlikely((tdes0
& ETDES0_LATE_COLLISION
) ||
62 (tdes0
& ETDES0_EXCESSIVE_COLLISIONS
)))
64 (tdes0
& ETDES0_COLLISION_COUNT_MASK
) >> 3;
66 if (unlikely(tdes0
& ETDES0_EXCESSIVE_DEFERRAL
))
69 if (unlikely(tdes0
& ETDES0_UNDERFLOW_ERROR
)) {
70 dwmac_dma_flush_tx_fifo(ioaddr
);
74 if (unlikely(tdes0
& ETDES0_IP_HEADER_ERROR
))
75 x
->tx_ip_header_error
++;
77 if (unlikely(tdes0
& ETDES0_PAYLOAD_ERROR
)) {
78 x
->tx_payload_error
++;
79 dwmac_dma_flush_tx_fifo(ioaddr
);
85 if (unlikely(tdes0
& ETDES0_DEFERRED
))
88 #ifdef STMMAC_VLAN_TAG_USED
89 if (tdes0
& ETDES0_VLAN_FRAME
)
96 static int enh_desc_get_tx_len(struct dma_desc
*p
)
98 return (p
->des1
& ETDES1_BUFFER1_SIZE_MASK
);
101 static int enh_desc_coe_rdes0(int ipc_err
, int type
, int payload_err
)
103 int ret
= good_frame
;
104 u32 status
= (type
<< 2 | ipc_err
<< 1 | payload_err
) & 0x7;
106 /* bits 5 7 0 | Frame status
107 * ----------------------------------------------------------
108 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
109 * 1 0 0 | IPv4/6 No CSUM errorS.
110 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
111 * 1 1 0 | IPv4/6 CSUM IP HR error
112 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
113 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
114 * 0 1 1 | COE bypassed.. no IPv4/6 frame
119 else if (status
== 0x4)
121 else if (status
== 0x5)
123 else if (status
== 0x6)
125 else if (status
== 0x7)
127 else if (status
== 0x1)
129 else if (status
== 0x3)
134 static void enh_desc_get_ext_status(void *data
, struct stmmac_extra_stats
*x
,
135 struct dma_extended_desc
*p
)
137 unsigned int rdes0
= p
->basic
.des0
;
138 unsigned int rdes4
= p
->des4
;
140 if (unlikely(rdes0
& ERDES0_RX_MAC_ADDR
)) {
141 int message_type
= (rdes4
& ERDES4_MSG_TYPE_MASK
) >> 8;
143 if (rdes4
& ERDES4_IP_HDR_ERR
)
145 if (rdes4
& ERDES4_IP_PAYLOAD_ERR
)
147 if (rdes4
& ERDES4_IP_CSUM_BYPASSED
)
148 x
->ip_csum_bypassed
++;
149 if (rdes4
& ERDES4_IPV4_PKT_RCVD
)
151 if (rdes4
& ERDES4_IPV6_PKT_RCVD
)
153 if (message_type
== RDES_EXT_SYNC
)
154 x
->rx_msg_type_sync
++;
155 else if (message_type
== RDES_EXT_FOLLOW_UP
)
156 x
->rx_msg_type_follow_up
++;
157 else if (message_type
== RDES_EXT_DELAY_REQ
)
158 x
->rx_msg_type_delay_req
++;
159 else if (message_type
== RDES_EXT_DELAY_RESP
)
160 x
->rx_msg_type_delay_resp
++;
161 else if (message_type
== RDES_EXT_PDELAY_REQ
)
162 x
->rx_msg_type_pdelay_req
++;
163 else if (message_type
== RDES_EXT_PDELAY_RESP
)
164 x
->rx_msg_type_pdelay_resp
++;
165 else if (message_type
== RDES_EXT_PDELAY_FOLLOW_UP
)
166 x
->rx_msg_type_pdelay_follow_up
++;
168 x
->rx_msg_type_ext_no_ptp
++;
169 if (rdes4
& ERDES4_PTP_FRAME_TYPE
)
171 if (rdes4
& ERDES4_PTP_VER
)
173 if (rdes4
& ERDES4_TIMESTAMP_DROPPED
)
174 x
->timestamp_dropped
++;
175 if (rdes4
& ERDES4_AV_PKT_RCVD
)
177 if (rdes4
& ERDES4_AV_TAGGED_PKT_RCVD
)
178 x
->av_tagged_pkt_rcvd
++;
179 if ((rdes4
& ERDES4_VLAN_TAG_PRI_VAL_MASK
) >> 18)
180 x
->vlan_tag_priority_val
++;
181 if (rdes4
& ERDES4_L3_FILTER_MATCH
)
182 x
->l3_filter_match
++;
183 if (rdes4
& ERDES4_L4_FILTER_MATCH
)
184 x
->l4_filter_match
++;
185 if ((rdes4
& ERDES4_L3_L4_FILT_NO_MATCH_MASK
) >> 26)
186 x
->l3_l4_filter_no_match
++;
190 static int enh_desc_get_rx_status(void *data
, struct stmmac_extra_stats
*x
,
193 struct net_device_stats
*stats
= (struct net_device_stats
*)data
;
194 unsigned int rdes0
= p
->des0
;
195 int ret
= good_frame
;
197 if (unlikely(rdes0
& RDES0_OWN
))
200 if (unlikely(rdes0
& RDES0_ERROR_SUMMARY
)) {
201 if (unlikely(rdes0
& RDES0_DESCRIPTOR_ERROR
)) {
203 stats
->rx_length_errors
++;
205 if (unlikely(rdes0
& RDES0_OVERFLOW_ERROR
))
206 x
->rx_gmac_overflow
++;
208 if (unlikely(rdes0
& RDES0_IPC_CSUM_ERROR
))
209 pr_err("\tIPC Csum Error/Giant frame\n");
211 if (unlikely(rdes0
& RDES0_COLLISION
))
213 if (unlikely(rdes0
& RDES0_RECEIVE_WATCHDOG
))
216 if (unlikely(rdes0
& RDES0_MII_ERROR
)) /* GMII */
219 if (unlikely(rdes0
& RDES0_CRC_ERROR
)) {
221 stats
->rx_crc_errors
++;
226 /* After a payload csum error, the ES bit is set.
227 * It doesn't match with the information reported into the databook.
228 * At any rate, we need to understand if the CSUM hw computation is ok
229 * and report this info to the upper layers. */
230 ret
= enh_desc_coe_rdes0(!!(rdes0
& RDES0_IPC_CSUM_ERROR
),
231 !!(rdes0
& RDES0_FRAME_TYPE
),
232 !!(rdes0
& ERDES0_RX_MAC_ADDR
));
234 if (unlikely(rdes0
& RDES0_DRIBBLING
))
237 if (unlikely(rdes0
& RDES0_SA_FILTER_FAIL
)) {
238 x
->sa_rx_filter_fail
++;
241 if (unlikely(rdes0
& RDES0_DA_FILTER_FAIL
)) {
242 x
->da_rx_filter_fail
++;
245 if (unlikely(rdes0
& RDES0_LENGTH_ERROR
)) {
249 #ifdef STMMAC_VLAN_TAG_USED
250 if (rdes0
& RDES0_VLAN_TAG
)
257 static void enh_desc_init_rx_desc(struct dma_desc
*p
, int disable_rx_ic
,
260 p
->des0
|= RDES0_OWN
;
261 p
->des1
|= ((BUF_SIZE_8KiB
- 1) & ERDES1_BUFFER1_SIZE_MASK
);
263 if (mode
== STMMAC_CHAIN_MODE
)
264 ehn_desc_rx_set_on_chain(p
);
266 ehn_desc_rx_set_on_ring(p
, end
);
269 p
->des1
|= ERDES1_DISABLE_IC
;
272 static void enh_desc_init_tx_desc(struct dma_desc
*p
, int mode
, int end
)
274 p
->des0
&= ~ETDES0_OWN
;
275 if (mode
== STMMAC_CHAIN_MODE
)
276 enh_desc_end_tx_desc_on_chain(p
);
278 enh_desc_end_tx_desc_on_ring(p
, end
);
281 static int enh_desc_get_tx_owner(struct dma_desc
*p
)
283 return (p
->des0
& ETDES0_OWN
) >> 31;
286 static void enh_desc_set_tx_owner(struct dma_desc
*p
)
288 p
->des0
|= ETDES0_OWN
;
291 static void enh_desc_set_rx_owner(struct dma_desc
*p
)
293 p
->des0
|= RDES0_OWN
;
296 static int enh_desc_get_tx_ls(struct dma_desc
*p
)
298 return (p
->des0
& ETDES0_LAST_SEGMENT
) >> 29;
301 static void enh_desc_release_tx_desc(struct dma_desc
*p
, int mode
)
303 int ter
= (p
->des0
& ETDES0_END_RING
) >> 21;
305 memset(p
, 0, offsetof(struct dma_desc
, des2
));
306 if (mode
== STMMAC_CHAIN_MODE
)
307 enh_desc_end_tx_desc_on_chain(p
);
309 enh_desc_end_tx_desc_on_ring(p
, ter
);
312 static void enh_desc_prepare_tx_desc(struct dma_desc
*p
, int is_fs
, int len
,
313 bool csum_flag
, int mode
, bool tx_own
,
316 unsigned int tdes0
= p
->des0
;
318 if (mode
== STMMAC_CHAIN_MODE
)
319 enh_set_tx_desc_len_on_chain(p
, len
);
321 enh_set_tx_desc_len_on_ring(p
, len
);
324 tdes0
|= ETDES0_FIRST_SEGMENT
;
326 tdes0
&= ~ETDES0_FIRST_SEGMENT
;
328 if (likely(csum_flag
))
329 tdes0
|= (TX_CIC_FULL
<< ETDES0_CHECKSUM_INSERTION_SHIFT
);
331 tdes0
&= ~(TX_CIC_FULL
<< ETDES0_CHECKSUM_INSERTION_SHIFT
);
334 tdes0
|= ETDES0_LAST_SEGMENT
;
336 /* Finally set the OWN bit. Later the DMA will start! */
341 /* When the own bit, for the first frame, has to be set, all
342 * descriptors for the same frame has to be set before, to
343 * avoid race condition.
350 static void enh_desc_set_tx_ic(struct dma_desc
*p
)
352 p
->des0
|= ETDES0_INTERRUPT
;
355 static int enh_desc_get_rx_frame_len(struct dma_desc
*p
, int rx_coe_type
)
357 unsigned int csum
= 0;
358 /* The type-1 checksum offload engines append the checksum at
359 * the end of frame and the two bytes of checksum are added in
361 * Adjust for that in the framelen for type-1 checksum offload
364 if (rx_coe_type
== STMMAC_RX_COE_TYPE1
)
367 return (((p
->des0
& RDES0_FRAME_LEN_MASK
) >> RDES0_FRAME_LEN_SHIFT
) -
371 static void enh_desc_enable_tx_timestamp(struct dma_desc
*p
)
373 p
->des0
|= ETDES0_TIME_STAMP_ENABLE
;
376 static int enh_desc_get_tx_timestamp_status(struct dma_desc
*p
)
378 return (p
->des0
& ETDES0_TIME_STAMP_STATUS
) >> 17;
381 static u64
enh_desc_get_timestamp(void *desc
, u32 ats
)
386 struct dma_extended_desc
*p
= (struct dma_extended_desc
*)desc
;
388 /* convert high/sec time stamp value to nanosecond */
389 ns
+= p
->des7
* 1000000000ULL;
391 struct dma_desc
*p
= (struct dma_desc
*)desc
;
393 ns
+= p
->des3
* 1000000000ULL;
399 static int enh_desc_get_rx_timestamp_status(void *desc
, u32 ats
)
402 struct dma_extended_desc
*p
= (struct dma_extended_desc
*)desc
;
403 return (p
->basic
.des0
& RDES0_IPC_CSUM_ERROR
) >> 7;
405 struct dma_desc
*p
= (struct dma_desc
*)desc
;
406 if ((p
->des2
== 0xffffffff) && (p
->des3
== 0xffffffff))
407 /* timestamp is corrupted, hence don't store it */
414 static void enh_desc_display_ring(void *head
, unsigned int size
, bool rx
)
416 struct dma_extended_desc
*ep
= (struct dma_extended_desc
*)head
;
419 pr_info("Extended %s descriptor ring:\n", rx
? "RX" : "TX");
421 for (i
= 0; i
< size
; i
++) {
425 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
426 i
, (unsigned int)virt_to_phys(ep
),
427 (unsigned int)x
, (unsigned int)(x
>> 32),
428 ep
->basic
.des2
, ep
->basic
.des3
);
434 const struct stmmac_desc_ops enh_desc_ops
= {
435 .tx_status
= enh_desc_get_tx_status
,
436 .rx_status
= enh_desc_get_rx_status
,
437 .get_tx_len
= enh_desc_get_tx_len
,
438 .init_rx_desc
= enh_desc_init_rx_desc
,
439 .init_tx_desc
= enh_desc_init_tx_desc
,
440 .get_tx_owner
= enh_desc_get_tx_owner
,
441 .release_tx_desc
= enh_desc_release_tx_desc
,
442 .prepare_tx_desc
= enh_desc_prepare_tx_desc
,
443 .set_tx_ic
= enh_desc_set_tx_ic
,
444 .get_tx_ls
= enh_desc_get_tx_ls
,
445 .set_tx_owner
= enh_desc_set_tx_owner
,
446 .set_rx_owner
= enh_desc_set_rx_owner
,
447 .get_rx_frame_len
= enh_desc_get_rx_frame_len
,
448 .rx_extended_status
= enh_desc_get_ext_status
,
449 .enable_tx_timestamp
= enh_desc_enable_tx_timestamp
,
450 .get_tx_timestamp_status
= enh_desc_get_tx_timestamp_status
,
451 .get_timestamp
= enh_desc_get_timestamp
,
452 .get_rx_timestamp_status
= enh_desc_get_rx_timestamp_status
,
453 .display_ring
= enh_desc_display_ring
,