// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  Specialised functions for managing Ring mode

  Copyright(C) 2011 STMicroelectronics Ltd

  It defines all the functions used to handle the normal/enhanced
  descriptors in case of the DMA is configured to work in chained or
  in ring mode.

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "stmmac.h"
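
/*
 * jumbo_frm - map the linear part of a jumbo skb onto one or two ring
 * descriptors. In ring mode each descriptor carries two buffer pointers
 * (des2 and des3, with des3 = des2 + 4KiB), so a payload above 8KiB is
 * split across two consecutive descriptors. Returns the last ring index
 * used, or -1 on a DMA mapping failure.
 */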
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, len, des2;
	struct dma_desc *desc;

	if (priv->extend_desc)
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	len = nopaged_len - bmax;
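
	/*
	 * A linear payload above 8KiB cannot be covered by one descriptor,
	 * so it is split below: the first descriptor carries bmax bytes,
	 * the second the remaining len bytes.
	 */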
	if (nopaged_len > BUF_SIZE_8KiB) {
		des2 = dma_map_single(priv->device, skb->data, bmax,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;

		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = bmax;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
				       STMMAC_RING_MODE, 0, false, skb->len);
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
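
		/* Advance to the next descriptor for the remaining bytes. */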
		if (priv->extend_desc)
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		des2 = dma_map_single(priv->device, skb->data + bmax, len,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
				       STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
				       skb->len);
	} else {
		des2 = dma_map_single(priv->device, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
				       STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
				       skb->len);
	}

	tx_q->cur_tx = entry;

	return entry;
}
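
/*
 * is_jumbo_frm - report whether a frame of the given length must be
 * handled by jumbo_frm(). Any frame of BUF_SIZE_4KiB or more qualifies.
 */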
static unsigned int is_jumbo_frm(int len, int enh_desc)
{
	unsigned int ret = 0;

	if (len >= BUF_SIZE_4KiB)
		ret = 1;

	return ret;
}
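
/*
 * refill_desc3 - on the RX path, repopulate des3 after a descriptor has
 * been reused. With 16KiB buffers, des3 points at the second half of the
 * buffer, 8KiB past des2.
 */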
static void refill_desc3(void *priv_ptr, struct dma_desc *p)
{
	struct stmmac_rx_queue *rx_q = priv_ptr;
	struct stmmac_priv *priv = rx_q->priv_data;

	/* Fill DES3 in case of RING mode */
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}

/* In ring mode desc3 must be filled since it is used as a second buffer */
static void init_desc3(struct dma_desc *p)
{
	p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
}
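
/*
 * clean_desc3 - on TX completion, clear des3 if it was used as a second
 * buffer pointer (jumbo frame) or for time stamping on normal descriptors,
 * so a stale address is not reused.
 */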
static void clean_desc3(void *priv_ptr, struct dma_desc *p)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->dirty_tx;

	/* des3 is only used for jumbo frames tx or time stamping */
	if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
		     (tx_q->tx_skbuff_dma[entry].last_segment &&
		      !priv->extend_desc && priv->hwts_tx_en)))
		p->des3 = 0;
}
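
/*
 * set_16kib_bfsize - pick the DMA buffer size for a given MTU. An MTU
 * above 8KiB needs 16KiB buffers (two 8KiB halves addressed via des2 and
 * des3); otherwise 0 is returned and the default buffer size is kept.
 */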
static int set_16kib_bfsize(int mtu)
{
	int ret = 0;

	if (unlikely(mtu > BUF_SIZE_8KiB))
		ret = BUF_SIZE_16KiB;

	return ret;
}
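
/* Callback table for ring mode, see struct stmmac_mode_ops. */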
const struct stmmac_mode_ops ring_mode_ops = {
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.init_desc3 = init_desc3,
	.clean_desc3 = clean_desc3,
	.set_16kib_bfsize = set_16kib_bfsize,
};