/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
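
/*
 * Accessors for Tegra APB registers (tegra_apb_readl/tegra_apb_writel).
 * On Tegra20-based platforms the accesses are bounced through an APB DMA
 * channel rather than performed directly by the CPU; other chips use plain
 * readl/writel. The backend is selected at runtime in tegra_apb_io_init().
 */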

#include <linux/kernel.h>
#include <linux/io.h>
#include <mach/iomap.h>
#include <linux/of.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>

#include "apbio.h"

#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

static u32 tegra_apb_readl_direct(unsigned long offset);
static void tegra_apb_writel_direct(u32 value, unsigned long offset);
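
/*
 * Two DMA-backed implementations follow: one using the legacy Tegra-specific
 * DMA API (CONFIG_TEGRA_SYSTEM_DMA) and one using the generic dmaengine API
 * (CONFIG_TEGRA20_APB_DMA). Both bounce a single 32-bit word through a
 * coherent buffer and wait for completion with a 50 ms timeout.
 */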
#if defined(CONFIG_TEGRA_SYSTEM_DMA)
static struct tegra_dma_channel *tegra_apb_dma;

bool tegra_apb_init(void)
{
	struct tegra_dma_channel *ch;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma)
		goto out;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
					TEGRA_DMA_SHARED, "apbio");
	if (!ch)
		goto out_fail;

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		tegra_dma_free_channel(ch);
		goto out_fail;
	}

	tegra_apb_dma = ch;
out:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

out_fail:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

static void apb_dma_complete(struct tegra_dma_req *req)
{
	complete(&tegra_apb_wait);
}
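
/*
 * Read one APB word by queueing a one-shot DMA request that copies the
 * register into the bounce buffer; falls back to a direct read if the
 * DMA channel could not be set up.
 */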
static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	req.complete = apb_dma_complete;
	req.to_memory = 1;
	req.dest_addr = tegra_apb_bb_phys;
	req.dest_bus_width = 32;
	req.dest_wrap = 1;
	req.source_addr = offset;
	req.source_bus_width = 32;
	req.source_wrap = 4;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out")) {
		tegra_dma_dequeue_req(tegra_apb_dma, &req);
		*(u32 *)tegra_apb_bb = 0;
	}

	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	req.complete = apb_dma_complete;
	req.to_memory = 0;
	req.dest_addr = offset;
	req.dest_wrap = 4;
	req.dest_bus_width = 32;
	req.source_addr = tegra_apb_bb_phys;
	req.source_bus_width = 32;
	req.source_wrap = 1;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb write dma timed out"))
		tegra_dma_dequeue_req(tegra_apb_dma, &req);

	mutex_unlock(&tegra_apb_dma_lock);
}

#else
static struct dma_chan *tegra_apb_dma_chan;
static struct dma_slave_config dma_sconfig;

bool tegra_apb_dma_init(void)
{
	dma_cap_mask_t mask;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma_chan)
		goto skip_init;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!tegra_apb_dma_chan) {
		/*
		 * This is common until the device is probed, so don't
		 * shout about it.
		 */
		pr_debug("%s: can not allocate dma channel\n", __func__);
		goto err_dma_alloc;
	}

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		goto err_buff_alloc;
	}

	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
	dma_sconfig.src_maxburst = 1;
	dma_sconfig.dst_maxburst = 1;

skip_init:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

err_buff_alloc:
	dma_release_channel(tegra_apb_dma_chan);
	tegra_apb_dma_chan = NULL;

err_dma_alloc:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

static void apb_dma_complete(void *args)
{
	complete(&tegra_apb_wait);
}
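
/*
 * Bounce a single 32-bit word between the APB register at @apb_add and the
 * coherent bounce buffer, in the direction given by @dir. Called with
 * tegra_apb_dma_lock held; returns 0 on success or a negative error code on
 * failure or timeout.
 */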
static int do_dma_transfer(unsigned long apb_add,
			   enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *dma_desc;
	int ret;

	if (dir == DMA_DEV_TO_MEM)
		dma_sconfig.src_addr = apb_add;
	else
		dma_sconfig.dst_addr = apb_add;

	ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
	if (ret)
		return ret;

	dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
			tegra_apb_bb_phys, sizeof(u32), dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return -EINVAL;

	dma_desc->callback = apb_dma_complete;
	dma_desc->callback_param = NULL;

	INIT_COMPLETION(tegra_apb_wait);

	dmaengine_submit(dma_desc);
	dma_async_issue_pending(tegra_apb_dma_chan);
	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out")) {
		dmaengine_terminate_all(tegra_apb_dma_chan);
		return -EFAULT;
	}

	return 0;
}

static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
		return tegra_apb_readl_direct(offset);

	mutex_lock(&tegra_apb_dma_lock);
	ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
	if (ret < 0) {
		pr_err("error in reading offset 0x%08lx using dma\n", offset);
		*(u32 *)tegra_apb_bb = 0;
	}
	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
	int ret;

	if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
		tegra_apb_writel_direct(value, offset);
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
	if (ret < 0)
		pr_err("error in writing offset 0x%08lx using dma\n", offset);
	mutex_unlock(&tegra_apb_dma_lock);
}
#endif
#else
#define tegra_apb_readl_using_dma tegra_apb_readl_direct
#define tegra_apb_writel_using_dma tegra_apb_writel_direct
#endif

typedef u32 (*apbio_read_fptr)(unsigned long offset);
typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);

static apbio_read_fptr apbio_read;
static apbio_write_fptr apbio_write;

static u32 tegra_apb_readl_direct(unsigned long offset)
{
	return readl(IO_TO_VIRT(offset));
}

static void tegra_apb_writel_direct(u32 value, unsigned long offset)
{
	writel(value, IO_TO_VIRT(offset));
}

void tegra_apb_io_init(void)
{
	/* DMA is needed only on Tegra20-based platforms */
	if (of_machine_is_compatible("nvidia,tegra20") ||
			!of_have_populated_dt()) {
		apbio_read = tegra_apb_readl_using_dma;
		apbio_write = tegra_apb_writel_using_dma;
	} else {
		apbio_read = tegra_apb_readl_direct;
		apbio_write = tegra_apb_writel_direct;
	}
}
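
/* Public accessors: dispatch through the pointers set by tegra_apb_io_init(). */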
u32 tegra_apb_readl(unsigned long offset)
{
	return apbio_read(offset);
}

void tegra_apb_writel(u32 value, unsigned long offset)
{
	apbio_write(value, offset);
}