/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"

/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	u32 reg_val;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
	 * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
	 * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * Set burst_map irrespective of fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}

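/* Illustrative sketch, not part of the original driver: per the comment
 * above, burst_map is a bitmap over BLEN[4, 8, 16, 32, 64, 128, 256].
 * Assuming bit 0 selects BLEN4 and each successive bit the next burst
 * length, a platform wanting fixed 4/8/16-beat bursts might call:
 *
 *	int burst_map = BIT(0) | BIT(1) | BIT(2);  // BLEN4 | BLEN8 | BLEN16
 *	sxgbe_dma_init(ioaddr, 1, burst_map);      // fix_burst = 1: UNDEF stays 0
 */
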
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* set the pbl */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}

	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program tail pointers */
	/* assumption: upper 32 bits are constant and
	 * same as TX/RX desc list
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));
	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}

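/* Illustrative sketch, not part of the original driver: under the
 * stated assumption that the upper 32 bits never change, the tail
 * pointer is just the low half of the last descriptor's bus address.
 * For a hypothetical 512-entry ring based at 0x80000000, and assuming
 * SXGBE_DESC_SIZE_BYTES is 16 (one four-word descriptor):
 *
 *	dma_addr_t tail = 0x80000000ULL + (512 - 1) * 16;
 *	// lower_32_bits(tail) == 0x80001ff0 is what reaches the register
 */
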
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
	tx_config |= SXGBE_TX_START_DMA;
	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}

static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg |= SXGBE_TX_ENABLE;
		writel(tx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}

static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg |= SXGBE_TX_ENABLE;
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
		writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}

static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg |= SXGBE_RX_ENABLE;
		writel(rx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}

static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
		writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}

static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared.
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

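/* Illustrative sketch, not part of the original driver: an interrupt
 * handler would typically dispatch on the returned bitmask. handle_tx,
 * tx_bump_tc and tx_hard_error are the enum flags used above; the napi
 * context and recovery helper named here are hypothetical.
 *
 *	int status = sxgbe_tx_dma_int_status(ioaddr, chan, &priv->xstats);
 *	if (status & handle_tx)
 *		napi_schedule(&priv->napi);		// reap completed TX descriptors
 *	if (status & tx_hard_error)
 *		restart_tx_queue(priv, chan);		// hypothetical recovery path
 */
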
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared.
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

/* Program the HW RX Watchdog */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}

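/* Illustrative note, not part of the original driver: riwt is the RX
 * interrupt watchdog count, typically derived from an ethtool rx-usecs
 * setting. A sketch of the conversion, assuming the watchdog ticks once
 * per 256 CSR clock cycles (the granularity and the clock handle below
 * are assumptions, not confirmed against this hardware):
 *
 *	u32 clk_mhz = clk_get_rate(priv->sxgbe_clk) / 1000000;
 *	u32 riwt = (rx_usecs * clk_mhz) / 256;	// watchdog units of 256 cycles
 */
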
static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
	ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
	writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
}

static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init = sxgbe_dma_init,
	.cha_init = sxgbe_dma_channel_init,
	.enable_dma_transmission = sxgbe_enable_dma_transmission,
	.enable_dma_irq = sxgbe_enable_dma_irq,
	.disable_dma_irq = sxgbe_disable_dma_irq,
	.start_tx = sxgbe_dma_start_tx,
	.start_tx_queue = sxgbe_dma_start_tx_queue,
	.stop_tx = sxgbe_dma_stop_tx,
	.stop_tx_queue = sxgbe_dma_stop_tx_queue,
	.start_rx = sxgbe_dma_start_rx,
	.stop_rx = sxgbe_dma_stop_rx,
	.tx_dma_int_status = sxgbe_tx_dma_int_status,
	.rx_dma_int_status = sxgbe_rx_dma_int_status,
	.rx_watchdog = sxgbe_dma_rx_watchdog,
	.enable_tso = sxgbe_enable_tso,
};

const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}
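
/* Illustrative sketch, not part of the original driver: the core driver
 * keeps this table behind its hw abstraction and calls through it during
 * bring-up, roughly as below. The priv fields and ring variables are
 * assumptions for illustration; SXGBE_TX_QUEUES/SXGBE_RX_QUEUES come
 * from the driver's headers.
 *
 *	const struct sxgbe_dma_ops *dma = sxgbe_get_dma_ops();
 *
 *	dma->init(priv->ioaddr, fix_burst, burst_map);
 *	for (chan = 0; chan < SXGBE_TX_QUEUES; chan++)
 *		dma->cha_init(priv->ioaddr, chan, fix_burst, pbl,
 *			      tx_phy[chan], rx_phy[chan], tx_rsize, rx_rsize);
 *	dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
 *	dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
 */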