/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
16 #include <linux/kernel.h>
19 #include <linux/dmaengine.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/spinlock.h>
22 #include <linux/completion.h>
23 #include <linux/sched.h>
24 #include <linux/mutex.h>
#if defined(CONFIG_TEGRA20_APB_DMA)
/* Serializes DMA channel setup and every bounce-buffer transfer. */
static DEFINE_MUTEX(tegra_apb_dma_lock);
/* Single-word bounce buffer (CPU view) and its DMA/bus address. */
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
/* Signalled by apb_dma_complete() when a DMA transfer finishes. */
static DECLARE_COMPLETION(tegra_apb_wait);

/* Plain-MMIO fallbacks, defined later in this file. */
static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);

/* Lazily-acquired slave DMA channel (NULL until tegra_apb_dma_init()). */
static struct dma_chan *tegra_apb_dma_chan;
/* Shared slave config; src/dst address is patched per transfer. */
static struct dma_slave_config dma_sconfig;
41 static bool tegra_apb_dma_init(void)
45 mutex_lock(&tegra_apb_dma_lock
);
47 /* Check to see if we raced to setup */
48 if (tegra_apb_dma_chan
)
52 dma_cap_set(DMA_SLAVE
, mask
);
53 tegra_apb_dma_chan
= dma_request_channel(mask
, NULL
, NULL
);
54 if (!tegra_apb_dma_chan
) {
56 * This is common until the device is probed, so don't
59 pr_debug("%s: can not allocate dma channel\n", __func__
);
63 tegra_apb_bb
= dma_alloc_coherent(NULL
, sizeof(u32
),
64 &tegra_apb_bb_phys
, GFP_KERNEL
);
66 pr_err("%s: can not allocate bounce buffer\n", __func__
);
70 dma_sconfig
.src_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
71 dma_sconfig
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_4_BYTES
;
72 dma_sconfig
.src_maxburst
= 1;
73 dma_sconfig
.dst_maxburst
= 1;
76 mutex_unlock(&tegra_apb_dma_lock
);
80 dma_release_channel(tegra_apb_dma_chan
);
81 tegra_apb_dma_chan
= NULL
;
84 mutex_unlock(&tegra_apb_dma_lock
);
88 static void apb_dma_complete(void *args
)
90 complete(&tegra_apb_wait
);
93 static int do_dma_transfer(unsigned long apb_add
,
94 enum dma_transfer_direction dir
)
96 struct dma_async_tx_descriptor
*dma_desc
;
99 if (dir
== DMA_DEV_TO_MEM
)
100 dma_sconfig
.src_addr
= apb_add
;
102 dma_sconfig
.dst_addr
= apb_add
;
104 ret
= dmaengine_slave_config(tegra_apb_dma_chan
, &dma_sconfig
);
108 dma_desc
= dmaengine_prep_slave_single(tegra_apb_dma_chan
,
109 tegra_apb_bb_phys
, sizeof(u32
), dir
,
110 DMA_PREP_INTERRUPT
| DMA_CTRL_ACK
);
114 dma_desc
->callback
= apb_dma_complete
;
115 dma_desc
->callback_param
= NULL
;
117 INIT_COMPLETION(tegra_apb_wait
);
119 dmaengine_submit(dma_desc
);
120 dma_async_issue_pending(tegra_apb_dma_chan
);
121 ret
= wait_for_completion_timeout(&tegra_apb_wait
,
122 msecs_to_jiffies(50));
124 if (WARN(ret
== 0, "apb read dma timed out")) {
125 dmaengine_terminate_all(tegra_apb_dma_chan
);
131 static u32
tegra_apb_readl_using_dma(unsigned long offset
)
135 if (!tegra_apb_dma_chan
&& !tegra_apb_dma_init())
136 return tegra_apb_readl_direct(offset
);
138 mutex_lock(&tegra_apb_dma_lock
);
139 ret
= do_dma_transfer(offset
, DMA_DEV_TO_MEM
);
141 pr_err("error in reading offset 0x%08lx using dma\n", offset
);
142 *(u32
*)tegra_apb_bb
= 0;
144 mutex_unlock(&tegra_apb_dma_lock
);
145 return *((u32
*)tegra_apb_bb
);
148 static void tegra_apb_writel_using_dma(u32 value
, unsigned long offset
)
152 if (!tegra_apb_dma_chan
&& !tegra_apb_dma_init()) {
153 tegra_apb_writel_direct(value
, offset
);
157 mutex_lock(&tegra_apb_dma_lock
);
158 *((u32
*)tegra_apb_bb
) = value
;
159 ret
= do_dma_transfer(offset
, DMA_MEM_TO_DEV
);
161 pr_err("error in writing offset 0x%08lx using dma\n", offset
);
162 mutex_unlock(&tegra_apb_dma_lock
);
/*
 * Without CONFIG_TEGRA20_APB_DMA the "using_dma" accessors are simply
 * aliases for the direct MMIO ones.  NOTE(review): the surrounding
 * #else/#endif lines of the conditional are not visible in this chunk.
 */
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
/* Accessor function-pointer types: one read and one write primitive. */
typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

/* Active accessors, selected once by tegra_apb_io_init(). */
static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;
175 static u32
tegra_apb_readl_direct(unsigned long offset
)
177 return readl(IO_ADDRESS(offset
));
180 static void tegra_apb_writel_direct(u32 value
, unsigned long offset
)
182 writel(value
, IO_ADDRESS(offset
));
185 void tegra_apb_io_init(void)
187 /* Need to use dma only when it is Tegra20 based platform */
188 if (of_machine_is_compatible("nvidia,tegra20") ||
189 !of_have_populated_dt()) {
190 apbio_read
= tegra_apb_readl_using_dma
;
191 apbio_write
= tegra_apb_writel_using_dma
;
193 apbio_read
= tegra_apb_readl_direct
;
194 apbio_write
= tegra_apb_writel_direct
;
198 u32
tegra_apb_readl(unsigned long offset
)
200 return apbio_read(offset
);
203 void tegra_apb_writel(u32 value
, unsigned long offset
)
205 apbio_write(value
, offset
);