/*
 * Copyright (C) 2010 NVIDIA Corporation.
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/mutex.h>

#include <mach/dma.h>
#include <mach/iomap.h>

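/*
 * Helpers that perform APB register accesses as one-word, one-shot APB
 * DMA transfers through a coherent bounce buffer instead of direct CPU
 * loads and stores.  All transfers are serialized by tegra_apb_dma_lock,
 * and the helpers fall back to plain readl()/writel() when the DMA
 * channel cannot be set up.
 */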
static DEFINE_MUTEX(tegra_apb_dma_lock);

static struct tegra_dma_channel *tegra_apb_dma;
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);

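/*
 * Lazily allocate the shared DMA channel and the one-word bounce buffer.
 * Takes tegra_apb_dma_lock itself, so a caller that raced with another
 * initializer simply finds the channel already set up.  Returns false if
 * either allocation fails; callers then fall back to direct MMIO.
 */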
bool tegra_apb_init(void)
{
	struct tegra_dma_channel *ch;

	mutex_lock(&tegra_apb_dma_lock);

	/* Check to see if we raced to setup */
	if (tegra_apb_dma)
		goto out;

	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT |
					TEGRA_DMA_SHARED);
	if (!ch)
		goto out_fail;

	tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
					  &tegra_apb_bb_phys, GFP_KERNEL);
	if (!tegra_apb_bb) {
		pr_err("%s: can not allocate bounce buffer\n", __func__);
		tegra_dma_free_channel(ch);
		goto out_fail;
	}

	tegra_apb_dma = ch;
out:
	mutex_unlock(&tegra_apb_dma_lock);
	return true;

out_fail:
	mutex_unlock(&tegra_apb_dma_lock);
	return false;
}

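/* DMA completion callback: wake the waiter in the read/write path. */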
static void apb_dma_complete(struct tegra_dma_req *req)
{
	complete(&tegra_apb_wait);
}

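/*
 * Read one 32-bit APB register by DMAing it into the bounce buffer.
 * Falls back to a direct readl() if the DMA channel is unavailable;
 * if the transfer times out (50 ms), the request is dequeued and the
 * function returns 0.
 */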
u32 tegra_apb_readl(unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init())
		return readl(IO_TO_VIRT(offset));

	mutex_lock(&tegra_apb_dma_lock);
	req.complete = apb_dma_complete;
	req.to_memory = 1;
	req.dest_addr = tegra_apb_bb_phys;
	req.dest_bus_width = 32;
	req.dest_wrap = 1;
	req.source_addr = offset;
	req.source_bus_width = 32;
	req.source_wrap = 4;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb read dma timed out")) {
		tegra_dma_dequeue_req(tegra_apb_dma, &req);
		*(u32 *)tegra_apb_bb = 0;
	}

	mutex_unlock(&tegra_apb_dma_lock);
	return *((u32 *)tegra_apb_bb);
}

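/*
 * Write one 32-bit value to an APB register: stage the value in the
 * bounce buffer, then DMA it out to the register.  Falls back to a
 * direct writel() if the DMA channel is unavailable; a transfer that
 * times out (50 ms) is dequeued and dropped with a warning.
 */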
void tegra_apb_writel(u32 value, unsigned long offset)
{
	struct tegra_dma_req req;
	int ret;

	if (!tegra_apb_dma && !tegra_apb_init()) {
		writel(value, IO_TO_VIRT(offset));
		return;
	}

	mutex_lock(&tegra_apb_dma_lock);
	*((u32 *)tegra_apb_bb) = value;
	req.complete = apb_dma_complete;
	req.to_memory = 0;
	req.dest_addr = offset;
	req.dest_wrap = 4;
	req.dest_bus_width = 32;
	req.source_addr = tegra_apb_bb_phys;
	req.source_bus_width = 32;
	req.source_wrap = 1;
	req.req_sel = TEGRA_DMA_REQ_SEL_CNTR;
	req.size = 4;

	INIT_COMPLETION(tegra_apb_wait);

	tegra_dma_enqueue_req(tegra_apb_dma, &req);

	ret = wait_for_completion_timeout(&tegra_apb_wait,
					  msecs_to_jiffies(50));

	if (WARN(ret == 0, "apb write dma timed out"))
		tegra_dma_dequeue_req(tegra_apb_dma, &req);

	mutex_unlock(&tegra_apb_dma_lock);
}