// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the dma.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include "dwmac4_dma.h"
17 static void dwmac4_dma_axi(void __iomem
*ioaddr
, struct stmmac_axi
*axi
)
19 u32 value
= readl(ioaddr
+ DMA_SYS_BUS_MODE
);
22 pr_info("dwmac4: Master AXI performs %s burst length\n",
23 (value
& DMA_SYS_BUS_FB
) ? "fixed" : "any");
26 value
|= DMA_AXI_EN_LPI
;
28 value
|= DMA_AXI_LPI_XIT_FRM
;
30 value
&= ~DMA_AXI_WR_OSR_LMT
;
31 value
|= (axi
->axi_wr_osr_lmt
& DMA_AXI_OSR_MAX
) <<
32 DMA_AXI_WR_OSR_LMT_SHIFT
;
34 value
&= ~DMA_AXI_RD_OSR_LMT
;
35 value
|= (axi
->axi_rd_osr_lmt
& DMA_AXI_OSR_MAX
) <<
36 DMA_AXI_RD_OSR_LMT_SHIFT
;
38 /* Depending on the UNDEF bit the Master AXI will perform any burst
39 * length according to the BLEN programmed (by default all BLEN are
42 for (i
= 0; i
< AXI_BLEN
; i
++) {
43 switch (axi
->axi_blen
[i
]) {
45 value
|= DMA_AXI_BLEN256
;
48 value
|= DMA_AXI_BLEN128
;
51 value
|= DMA_AXI_BLEN64
;
54 value
|= DMA_AXI_BLEN32
;
57 value
|= DMA_AXI_BLEN16
;
60 value
|= DMA_AXI_BLEN8
;
63 value
|= DMA_AXI_BLEN4
;
68 writel(value
, ioaddr
+ DMA_SYS_BUS_MODE
);
71 static void dwmac4_dma_init_rx_chan(void __iomem
*ioaddr
,
72 struct stmmac_dma_cfg
*dma_cfg
,
73 dma_addr_t dma_rx_phy
, u32 chan
)
76 u32 rxpbl
= dma_cfg
->rxpbl
?: dma_cfg
->pbl
;
78 value
= readl(ioaddr
+ DMA_CHAN_RX_CONTROL(chan
));
79 value
= value
| (rxpbl
<< DMA_BUS_MODE_RPBL_SHIFT
);
80 writel(value
, ioaddr
+ DMA_CHAN_RX_CONTROL(chan
));
82 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT
) && likely(dma_cfg
->eame
))
83 writel(upper_32_bits(dma_rx_phy
),
84 ioaddr
+ DMA_CHAN_RX_BASE_ADDR_HI(chan
));
86 writel(lower_32_bits(dma_rx_phy
), ioaddr
+ DMA_CHAN_RX_BASE_ADDR(chan
));
89 static void dwmac4_dma_init_tx_chan(void __iomem
*ioaddr
,
90 struct stmmac_dma_cfg
*dma_cfg
,
91 dma_addr_t dma_tx_phy
, u32 chan
)
94 u32 txpbl
= dma_cfg
->txpbl
?: dma_cfg
->pbl
;
96 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
97 value
= value
| (txpbl
<< DMA_BUS_MODE_PBL_SHIFT
);
99 /* Enable OSP to get best performance */
100 value
|= DMA_CONTROL_OSP
;
102 writel(value
, ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
104 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT
) && likely(dma_cfg
->eame
))
105 writel(upper_32_bits(dma_tx_phy
),
106 ioaddr
+ DMA_CHAN_TX_BASE_ADDR_HI(chan
));
108 writel(lower_32_bits(dma_tx_phy
), ioaddr
+ DMA_CHAN_TX_BASE_ADDR(chan
));
111 static void dwmac4_dma_init_channel(void __iomem
*ioaddr
,
112 struct stmmac_dma_cfg
*dma_cfg
, u32 chan
)
116 /* common channel control register config */
117 value
= readl(ioaddr
+ DMA_CHAN_CONTROL(chan
));
119 value
= value
| DMA_BUS_MODE_PBL
;
120 writel(value
, ioaddr
+ DMA_CHAN_CONTROL(chan
));
122 /* Mask interrupts by writing to CSR7 */
123 writel(DMA_CHAN_INTR_DEFAULT_MASK
,
124 ioaddr
+ DMA_CHAN_INTR_ENA(chan
));
127 static void dwmac4_dma_init(void __iomem
*ioaddr
,
128 struct stmmac_dma_cfg
*dma_cfg
, int atds
)
130 u32 value
= readl(ioaddr
+ DMA_SYS_BUS_MODE
);
132 /* Set the Fixed burst mode */
133 if (dma_cfg
->fixed_burst
)
134 value
|= DMA_SYS_BUS_FB
;
136 /* Mixed Burst has no effect when fb is set */
137 if (dma_cfg
->mixed_burst
)
138 value
|= DMA_SYS_BUS_MB
;
141 value
|= DMA_SYS_BUS_AAL
;
144 value
|= DMA_SYS_BUS_EAME
;
146 writel(value
, ioaddr
+ DMA_SYS_BUS_MODE
);
149 static void _dwmac4_dump_dma_regs(void __iomem
*ioaddr
, u32 channel
,
152 reg_space
[DMA_CHAN_CONTROL(channel
) / 4] =
153 readl(ioaddr
+ DMA_CHAN_CONTROL(channel
));
154 reg_space
[DMA_CHAN_TX_CONTROL(channel
) / 4] =
155 readl(ioaddr
+ DMA_CHAN_TX_CONTROL(channel
));
156 reg_space
[DMA_CHAN_RX_CONTROL(channel
) / 4] =
157 readl(ioaddr
+ DMA_CHAN_RX_CONTROL(channel
));
158 reg_space
[DMA_CHAN_TX_BASE_ADDR(channel
) / 4] =
159 readl(ioaddr
+ DMA_CHAN_TX_BASE_ADDR(channel
));
160 reg_space
[DMA_CHAN_RX_BASE_ADDR(channel
) / 4] =
161 readl(ioaddr
+ DMA_CHAN_RX_BASE_ADDR(channel
));
162 reg_space
[DMA_CHAN_TX_END_ADDR(channel
) / 4] =
163 readl(ioaddr
+ DMA_CHAN_TX_END_ADDR(channel
));
164 reg_space
[DMA_CHAN_RX_END_ADDR(channel
) / 4] =
165 readl(ioaddr
+ DMA_CHAN_RX_END_ADDR(channel
));
166 reg_space
[DMA_CHAN_TX_RING_LEN(channel
) / 4] =
167 readl(ioaddr
+ DMA_CHAN_TX_RING_LEN(channel
));
168 reg_space
[DMA_CHAN_RX_RING_LEN(channel
) / 4] =
169 readl(ioaddr
+ DMA_CHAN_RX_RING_LEN(channel
));
170 reg_space
[DMA_CHAN_INTR_ENA(channel
) / 4] =
171 readl(ioaddr
+ DMA_CHAN_INTR_ENA(channel
));
172 reg_space
[DMA_CHAN_RX_WATCHDOG(channel
) / 4] =
173 readl(ioaddr
+ DMA_CHAN_RX_WATCHDOG(channel
));
174 reg_space
[DMA_CHAN_SLOT_CTRL_STATUS(channel
) / 4] =
175 readl(ioaddr
+ DMA_CHAN_SLOT_CTRL_STATUS(channel
));
176 reg_space
[DMA_CHAN_CUR_TX_DESC(channel
) / 4] =
177 readl(ioaddr
+ DMA_CHAN_CUR_TX_DESC(channel
));
178 reg_space
[DMA_CHAN_CUR_RX_DESC(channel
) / 4] =
179 readl(ioaddr
+ DMA_CHAN_CUR_RX_DESC(channel
));
180 reg_space
[DMA_CHAN_CUR_TX_BUF_ADDR(channel
) / 4] =
181 readl(ioaddr
+ DMA_CHAN_CUR_TX_BUF_ADDR(channel
));
182 reg_space
[DMA_CHAN_CUR_RX_BUF_ADDR(channel
) / 4] =
183 readl(ioaddr
+ DMA_CHAN_CUR_RX_BUF_ADDR(channel
));
184 reg_space
[DMA_CHAN_STATUS(channel
) / 4] =
185 readl(ioaddr
+ DMA_CHAN_STATUS(channel
));
188 static void dwmac4_dump_dma_regs(void __iomem
*ioaddr
, u32
*reg_space
)
192 for (i
= 0; i
< DMA_CHANNEL_NB_MAX
; i
++)
193 _dwmac4_dump_dma_regs(ioaddr
, i
, reg_space
);
196 static void dwmac4_rx_watchdog(void __iomem
*ioaddr
, u32 riwt
, u32 number_chan
)
200 for (chan
= 0; chan
< number_chan
; chan
++)
201 writel(riwt
, ioaddr
+ DMA_CHAN_RX_WATCHDOG(chan
));
204 static void dwmac4_dma_rx_chan_op_mode(void __iomem
*ioaddr
, int mode
,
205 u32 channel
, int fifosz
, u8 qmode
)
207 unsigned int rqs
= fifosz
/ 256 - 1;
208 u32 mtl_rx_op
, mtl_rx_int
;
210 mtl_rx_op
= readl(ioaddr
+ MTL_CHAN_RX_OP_MODE(channel
));
212 if (mode
== SF_DMA_MODE
) {
213 pr_debug("GMAC: enable RX store and forward mode\n");
214 mtl_rx_op
|= MTL_OP_MODE_RSF
;
216 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode
);
217 mtl_rx_op
&= ~MTL_OP_MODE_RSF
;
218 mtl_rx_op
&= MTL_OP_MODE_RTC_MASK
;
220 mtl_rx_op
|= MTL_OP_MODE_RTC_32
;
222 mtl_rx_op
|= MTL_OP_MODE_RTC_64
;
224 mtl_rx_op
|= MTL_OP_MODE_RTC_96
;
226 mtl_rx_op
|= MTL_OP_MODE_RTC_128
;
229 mtl_rx_op
&= ~MTL_OP_MODE_RQS_MASK
;
230 mtl_rx_op
|= rqs
<< MTL_OP_MODE_RQS_SHIFT
;
232 /* Enable flow control only if each channel gets 4 KiB or more FIFO and
233 * only if channel is not an AVB channel.
235 if ((fifosz
>= 4096) && (qmode
!= MTL_QUEUE_AVB
)) {
236 unsigned int rfd
, rfa
;
238 mtl_rx_op
|= MTL_OP_MODE_EHFC
;
240 /* Set Threshold for Activating Flow Control to min 2 frames,
241 * i.e. 1500 * 2 = 3000 bytes.
243 * Set Threshold for Deactivating Flow Control to min 1 frame,
248 /* This violates the above formula because of FIFO size
249 * limit therefore overflow may occur in spite of this.
251 rfd
= 0x03; /* Full-2.5K */
252 rfa
= 0x01; /* Full-1.5K */
256 rfd
= 0x07; /* Full-4.5K */
257 rfa
= 0x04; /* Full-3K */
261 mtl_rx_op
&= ~MTL_OP_MODE_RFD_MASK
;
262 mtl_rx_op
|= rfd
<< MTL_OP_MODE_RFD_SHIFT
;
264 mtl_rx_op
&= ~MTL_OP_MODE_RFA_MASK
;
265 mtl_rx_op
|= rfa
<< MTL_OP_MODE_RFA_SHIFT
;
268 writel(mtl_rx_op
, ioaddr
+ MTL_CHAN_RX_OP_MODE(channel
));
270 /* Enable MTL RX overflow */
271 mtl_rx_int
= readl(ioaddr
+ MTL_CHAN_INT_CTRL(channel
));
272 writel(mtl_rx_int
| MTL_RX_OVERFLOW_INT_EN
,
273 ioaddr
+ MTL_CHAN_INT_CTRL(channel
));
276 static void dwmac4_dma_tx_chan_op_mode(void __iomem
*ioaddr
, int mode
,
277 u32 channel
, int fifosz
, u8 qmode
)
279 u32 mtl_tx_op
= readl(ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
280 unsigned int tqs
= fifosz
/ 256 - 1;
282 if (mode
== SF_DMA_MODE
) {
283 pr_debug("GMAC: enable TX store and forward mode\n");
284 /* Transmit COE type 2 cannot be done in cut-through mode. */
285 mtl_tx_op
|= MTL_OP_MODE_TSF
;
287 pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode
);
288 mtl_tx_op
&= ~MTL_OP_MODE_TSF
;
289 mtl_tx_op
&= MTL_OP_MODE_TTC_MASK
;
290 /* Set the transmit threshold */
292 mtl_tx_op
|= MTL_OP_MODE_TTC_32
;
294 mtl_tx_op
|= MTL_OP_MODE_TTC_64
;
296 mtl_tx_op
|= MTL_OP_MODE_TTC_96
;
297 else if (mode
<= 128)
298 mtl_tx_op
|= MTL_OP_MODE_TTC_128
;
299 else if (mode
<= 192)
300 mtl_tx_op
|= MTL_OP_MODE_TTC_192
;
301 else if (mode
<= 256)
302 mtl_tx_op
|= MTL_OP_MODE_TTC_256
;
303 else if (mode
<= 384)
304 mtl_tx_op
|= MTL_OP_MODE_TTC_384
;
306 mtl_tx_op
|= MTL_OP_MODE_TTC_512
;
308 /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
309 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
310 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
311 * with reset values: TXQEN off, TQS 256 bytes.
313 * TXQEN must be written for multi-channel operation and TQS must
314 * reflect the available fifo size per queue (total fifo size / number
315 * of enabled queues).
317 mtl_tx_op
&= ~MTL_OP_MODE_TXQEN_MASK
;
318 if (qmode
!= MTL_QUEUE_AVB
)
319 mtl_tx_op
|= MTL_OP_MODE_TXQEN
;
321 mtl_tx_op
|= MTL_OP_MODE_TXQEN_AV
;
322 mtl_tx_op
&= ~MTL_OP_MODE_TQS_MASK
;
323 mtl_tx_op
|= tqs
<< MTL_OP_MODE_TQS_SHIFT
;
325 writel(mtl_tx_op
, ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
328 static void dwmac4_get_hw_feature(void __iomem
*ioaddr
,
329 struct dma_features
*dma_cap
)
331 u32 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE0
);
333 /* MAC HW feature0 */
334 dma_cap
->mbps_10_100
= (hw_cap
& GMAC_HW_FEAT_MIISEL
);
335 dma_cap
->mbps_1000
= (hw_cap
& GMAC_HW_FEAT_GMIISEL
) >> 1;
336 dma_cap
->half_duplex
= (hw_cap
& GMAC_HW_FEAT_HDSEL
) >> 2;
337 dma_cap
->vlhash
= (hw_cap
& GMAC_HW_FEAT_VLHASH
) >> 4;
338 dma_cap
->multi_addr
= (hw_cap
& GMAC_HW_FEAT_ADDMAC
) >> 18;
339 dma_cap
->pcs
= (hw_cap
& GMAC_HW_FEAT_PCSSEL
) >> 3;
340 dma_cap
->sma_mdio
= (hw_cap
& GMAC_HW_FEAT_SMASEL
) >> 5;
341 dma_cap
->pmt_remote_wake_up
= (hw_cap
& GMAC_HW_FEAT_RWKSEL
) >> 6;
342 dma_cap
->pmt_magic_frame
= (hw_cap
& GMAC_HW_FEAT_MGKSEL
) >> 7;
344 dma_cap
->rmon
= (hw_cap
& GMAC_HW_FEAT_MMCSEL
) >> 8;
346 dma_cap
->atime_stamp
= (hw_cap
& GMAC_HW_FEAT_TSSEL
) >> 12;
347 /* 802.3az - Energy-Efficient Ethernet (EEE) */
348 dma_cap
->eee
= (hw_cap
& GMAC_HW_FEAT_EEESEL
) >> 13;
350 dma_cap
->tx_coe
= (hw_cap
& GMAC_HW_FEAT_TXCOSEL
) >> 14;
351 dma_cap
->rx_coe
= (hw_cap
& GMAC_HW_FEAT_RXCOESEL
) >> 16;
352 dma_cap
->vlins
= (hw_cap
& GMAC_HW_FEAT_SAVLANINS
) >> 27;
353 dma_cap
->arpoffsel
= (hw_cap
& GMAC_HW_FEAT_ARPOFFSEL
) >> 9;
355 /* MAC HW feature1 */
356 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE1
);
357 dma_cap
->l3l4fnum
= (hw_cap
& GMAC_HW_FEAT_L3L4FNUM
) >> 27;
358 dma_cap
->hash_tb_sz
= (hw_cap
& GMAC_HW_HASH_TB_SZ
) >> 24;
359 dma_cap
->av
= (hw_cap
& GMAC_HW_FEAT_AVSEL
) >> 20;
360 dma_cap
->tsoen
= (hw_cap
& GMAC_HW_TSOEN
) >> 18;
361 dma_cap
->sphen
= (hw_cap
& GMAC_HW_FEAT_SPHEN
) >> 17;
363 dma_cap
->addr64
= (hw_cap
& GMAC_HW_ADDR64
) >> 14;
364 switch (dma_cap
->addr64
) {
366 dma_cap
->addr64
= 32;
369 dma_cap
->addr64
= 40;
372 dma_cap
->addr64
= 48;
375 dma_cap
->addr64
= 32;
379 /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
380 * shifting and store the sizes in bytes.
382 dma_cap
->tx_fifo_size
= 128 << ((hw_cap
& GMAC_HW_TXFIFOSIZE
) >> 6);
383 dma_cap
->rx_fifo_size
= 128 << ((hw_cap
& GMAC_HW_RXFIFOSIZE
) >> 0);
384 /* MAC HW feature2 */
385 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE2
);
386 /* TX and RX number of channels */
387 dma_cap
->number_rx_channel
=
388 ((hw_cap
& GMAC_HW_FEAT_RXCHCNT
) >> 12) + 1;
389 dma_cap
->number_tx_channel
=
390 ((hw_cap
& GMAC_HW_FEAT_TXCHCNT
) >> 18) + 1;
391 /* TX and RX number of queues */
392 dma_cap
->number_rx_queues
=
393 ((hw_cap
& GMAC_HW_FEAT_RXQCNT
) >> 0) + 1;
394 dma_cap
->number_tx_queues
=
395 ((hw_cap
& GMAC_HW_FEAT_TXQCNT
) >> 6) + 1;
397 dma_cap
->pps_out_num
= (hw_cap
& GMAC_HW_FEAT_PPSOUTNUM
) >> 24;
400 dma_cap
->time_stamp
= 0;
402 /* MAC HW feature3 */
403 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE3
);
406 dma_cap
->asp
= (hw_cap
& GMAC_HW_FEAT_ASP
) >> 28;
407 dma_cap
->tbssel
= (hw_cap
& GMAC_HW_FEAT_TBSSEL
) >> 27;
408 dma_cap
->fpesel
= (hw_cap
& GMAC_HW_FEAT_FPESEL
) >> 26;
409 dma_cap
->estwid
= (hw_cap
& GMAC_HW_FEAT_ESTWID
) >> 20;
410 dma_cap
->estdep
= (hw_cap
& GMAC_HW_FEAT_ESTDEP
) >> 17;
411 dma_cap
->estsel
= (hw_cap
& GMAC_HW_FEAT_ESTSEL
) >> 16;
412 dma_cap
->frpes
= (hw_cap
& GMAC_HW_FEAT_FRPES
) >> 13;
413 dma_cap
->frpbs
= (hw_cap
& GMAC_HW_FEAT_FRPBS
) >> 11;
414 dma_cap
->frpsel
= (hw_cap
& GMAC_HW_FEAT_FRPSEL
) >> 10;
415 dma_cap
->dvlan
= (hw_cap
& GMAC_HW_FEAT_DVLAN
) >> 5;
418 /* Enable/disable TSO feature and set MSS */
419 static void dwmac4_enable_tso(void __iomem
*ioaddr
, bool en
, u32 chan
)
425 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
426 writel(value
| DMA_CONTROL_TSE
,
427 ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
430 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
431 writel(value
& ~DMA_CONTROL_TSE
,
432 ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
436 static void dwmac4_qmode(void __iomem
*ioaddr
, u32 channel
, u8 qmode
)
438 u32 mtl_tx_op
= readl(ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
440 mtl_tx_op
&= ~MTL_OP_MODE_TXQEN_MASK
;
441 if (qmode
!= MTL_QUEUE_AVB
)
442 mtl_tx_op
|= MTL_OP_MODE_TXQEN
;
444 mtl_tx_op
|= MTL_OP_MODE_TXQEN_AV
;
446 writel(mtl_tx_op
, ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
449 static void dwmac4_set_bfsize(void __iomem
*ioaddr
, int bfsize
, u32 chan
)
451 u32 value
= readl(ioaddr
+ DMA_CHAN_RX_CONTROL(chan
));
453 value
&= ~DMA_RBSZ_MASK
;
454 value
|= (bfsize
<< DMA_RBSZ_SHIFT
) & DMA_RBSZ_MASK
;
456 writel(value
, ioaddr
+ DMA_CHAN_RX_CONTROL(chan
));
459 static void dwmac4_enable_sph(void __iomem
*ioaddr
, bool en
, u32 chan
)
461 u32 value
= readl(ioaddr
+ GMAC_EXT_CONFIG
);
463 value
&= ~GMAC_CONFIG_HDSMS
;
464 value
|= GMAC_CONFIG_HDSMS_256
; /* Segment max 256 bytes */
465 writel(value
, ioaddr
+ GMAC_EXT_CONFIG
);
467 value
= readl(ioaddr
+ DMA_CHAN_CONTROL(chan
));
469 value
|= DMA_CONTROL_SPH
;
471 value
&= ~DMA_CONTROL_SPH
;
472 writel(value
, ioaddr
+ DMA_CHAN_CONTROL(chan
));
475 static int dwmac4_enable_tbs(void __iomem
*ioaddr
, bool en
, u32 chan
)
477 u32 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
480 value
|= DMA_CONTROL_EDSE
;
482 value
&= ~DMA_CONTROL_EDSE
;
484 writel(value
, ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
486 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
)) & DMA_CONTROL_EDSE
;
490 writel(DMA_TBS_DEF_FTOS
, ioaddr
+ DMA_TBS_CTRL
);
494 const struct stmmac_dma_ops dwmac4_dma_ops
= {
495 .reset
= dwmac4_dma_reset
,
496 .init
= dwmac4_dma_init
,
497 .init_chan
= dwmac4_dma_init_channel
,
498 .init_rx_chan
= dwmac4_dma_init_rx_chan
,
499 .init_tx_chan
= dwmac4_dma_init_tx_chan
,
500 .axi
= dwmac4_dma_axi
,
501 .dump_regs
= dwmac4_dump_dma_regs
,
502 .dma_rx_mode
= dwmac4_dma_rx_chan_op_mode
,
503 .dma_tx_mode
= dwmac4_dma_tx_chan_op_mode
,
504 .enable_dma_irq
= dwmac4_enable_dma_irq
,
505 .disable_dma_irq
= dwmac4_disable_dma_irq
,
506 .start_tx
= dwmac4_dma_start_tx
,
507 .stop_tx
= dwmac4_dma_stop_tx
,
508 .start_rx
= dwmac4_dma_start_rx
,
509 .stop_rx
= dwmac4_dma_stop_rx
,
510 .dma_interrupt
= dwmac4_dma_interrupt
,
511 .get_hw_feature
= dwmac4_get_hw_feature
,
512 .rx_watchdog
= dwmac4_rx_watchdog
,
513 .set_rx_ring_len
= dwmac4_set_rx_ring_len
,
514 .set_tx_ring_len
= dwmac4_set_tx_ring_len
,
515 .set_rx_tail_ptr
= dwmac4_set_rx_tail_ptr
,
516 .set_tx_tail_ptr
= dwmac4_set_tx_tail_ptr
,
517 .enable_tso
= dwmac4_enable_tso
,
518 .qmode
= dwmac4_qmode
,
519 .set_bfsize
= dwmac4_set_bfsize
,
520 .enable_sph
= dwmac4_enable_sph
,
523 const struct stmmac_dma_ops dwmac410_dma_ops
= {
524 .reset
= dwmac4_dma_reset
,
525 .init
= dwmac4_dma_init
,
526 .init_chan
= dwmac4_dma_init_channel
,
527 .init_rx_chan
= dwmac4_dma_init_rx_chan
,
528 .init_tx_chan
= dwmac4_dma_init_tx_chan
,
529 .axi
= dwmac4_dma_axi
,
530 .dump_regs
= dwmac4_dump_dma_regs
,
531 .dma_rx_mode
= dwmac4_dma_rx_chan_op_mode
,
532 .dma_tx_mode
= dwmac4_dma_tx_chan_op_mode
,
533 .enable_dma_irq
= dwmac410_enable_dma_irq
,
534 .disable_dma_irq
= dwmac4_disable_dma_irq
,
535 .start_tx
= dwmac4_dma_start_tx
,
536 .stop_tx
= dwmac4_dma_stop_tx
,
537 .start_rx
= dwmac4_dma_start_rx
,
538 .stop_rx
= dwmac4_dma_stop_rx
,
539 .dma_interrupt
= dwmac4_dma_interrupt
,
540 .get_hw_feature
= dwmac4_get_hw_feature
,
541 .rx_watchdog
= dwmac4_rx_watchdog
,
542 .set_rx_ring_len
= dwmac4_set_rx_ring_len
,
543 .set_tx_ring_len
= dwmac4_set_tx_ring_len
,
544 .set_rx_tail_ptr
= dwmac4_set_rx_tail_ptr
,
545 .set_tx_tail_ptr
= dwmac4_set_tx_tail_ptr
,
546 .enable_tso
= dwmac4_enable_tso
,
547 .qmode
= dwmac4_qmode
,
548 .set_bfsize
= dwmac4_set_bfsize
,
549 .enable_sph
= dwmac4_enable_sph
,
550 .enable_tbs
= dwmac4_enable_tbs
,