/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif
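/*
 * Bring the GPU partition up: enable the (optional) rail regulator and the
 * gpu/ref/pwr clocks, request 204 MHz on the pwr clock and, when no PM
 * domain handles it for us, cycle the reset line and remove the 3D
 * powergate clamps by hand.
 */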
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
        int ret;

        if (tdev->vdd) {
                ret = regulator_enable(tdev->vdd);
                if (ret)
                        goto err_power;
        }

        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
        if (tdev->clk_ref) {
                ret = clk_prepare_enable(tdev->clk_ref);
                if (ret)
                        goto err_clk_ref;
        }
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
        clk_set_rate(tdev->clk_pwr, 204000000);
        udelay(10);

        if (!tdev->pdev->dev.pm_domain) {
                reset_control_assert(tdev->rst);
                udelay(10);

                ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
                if (ret)
                        goto err_clamp;
                udelay(10);

                reset_control_deassert(tdev->rst);
                udelay(10);
        }

        return 0;

err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
        clk_disable_unprepare(tdev->clk);
err_clk:
        if (tdev->vdd)
                regulator_disable(tdev->vdd);
err_power:
        return ret;
}
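/* Power down in the reverse order: gate the clocks, then drop the rail. */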
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
        int ret;

        clk_disable_unprepare(tdev->clk_pwr);
        if (tdev->clk_ref)
                clk_disable_unprepare(tdev->clk_ref);
        clk_disable_unprepare(tdev->clk);
        udelay(10);

        if (tdev->vdd) {
                ret = regulator_disable(tdev->vdd);
                if (ret)
                        return ret;
        }

        return 0;
}
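/*
 * If the platform bus has an IOMMU and the SoC variant declares an
 * iommu_bit, allocate a domain, pick a page shift no larger than the CPU's
 * PAGE_SHIFT, attach the GPU and set up an address-space allocator covering
 * the GPU-addressable range.
 */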
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
        if (dev->archdata.mapping) {
                struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

                arm_iommu_detach_device(dev);
                arm_iommu_release_mapping(mapping);
        }
#endif

        if (!tdev->func->iommu_bit)
                return;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * or equal to the system's PAGE_SIZE, with a preference if
                 * both are equal.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
                                   (1ULL << tdev->func->iommu_bit) >>
                                   tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
        iommu_domain_free(tdev->iommu.domain);

error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        if (tdev->iommu.domain) {
                nvkm_mm_fini(&tdev->iommu.mm);
                iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
                iommu_domain_free(tdev->iommu.domain);
        }
#endif
}
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
        return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? resource_size(res) : 0;
}
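/*
 * Shared interrupt handler: mask (unarm) the MC interrupt, let NVKM
 * dispatch it to the subdevices, then re-arm it.
 */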
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
        struct nvkm_device_tegra *tdev = arg;
        struct nvkm_device *device = &tdev->device;
        bool handled = false;
        nvkm_mc_intr_unarm(device);
        nvkm_mc_intr(device, &handled);
        nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
}
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        if (tdev->irq) {
                free_irq(tdev->irq, tdev);
                tdev->irq = 0;
        }
}
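/* Request the GPU's "stall" interrupt; it is released again in fini(). */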
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        int irq, ret;

        irq = platform_get_irq_byname(tdev->pdev, "stall");
        if (irq < 0)
                return irq;

        ret = request_irq(irq, nvkm_device_tegra_intr,
                          IRQF_SHARED, "nvkm", tdev);
        if (ret)
                return ret;

        tdev->irq = irq;
        return 0;
}
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        nvkm_device_tegra_power_down(tdev);
        nvkm_device_tegra_remove_iommu(tdev);
        return tdev;
}
static const struct nvkm_device_func
nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
        .init = nvkm_device_tegra_init,
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
};
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        struct nvkm_device_tegra *tdev;
        unsigned long rate;
        int ret;

        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;

        tdev->func = func;
        tdev->pdev = pdev;

        if (func->require_vdd) {
                tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
                if (IS_ERR(tdev->vdd)) {
                        ret = PTR_ERR(tdev->vdd);
                        goto free;
                }
        }

        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->rst)) {
                ret = PTR_ERR(tdev->rst);
                goto free;
        }

        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->clk)) {
                ret = PTR_ERR(tdev->clk);
                goto free;
        }

        rate = clk_get_rate(tdev->clk);
        if (rate == 0) {
                ret = clk_set_rate(tdev->clk, ULONG_MAX);
                if (ret < 0)
                        goto free;

                rate = clk_get_rate(tdev->clk);

                dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
        }

        if (func->require_ref_clk)
                tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
        if (IS_ERR(tdev->clk_ref)) {
                ret = PTR_ERR(tdev->clk_ref);
                goto free;
        }

        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr)) {
                ret = PTR_ERR(tdev->clk_pwr);
                goto free;
        }

        /*
         * The IOMMU bit defines the upper limit of the GPU-addressable space.
         */
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
        if (ret)
                goto free;

        nvkm_device_tegra_probe_iommu(tdev);

        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
                goto remove;

        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
                goto powerdown;

        *pdevice = &tdev->device;

        return 0;

powerdown:
        nvkm_device_tegra_power_down(tdev);
remove:
        nvkm_device_tegra_remove_iommu(tdev);
free:
        kfree(tdev);
        return ret;
}
#else
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        return -ENOSYS;
}
#endif
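/*
 * Illustrative sketch only (not part of this file): a caller is expected to
 * resolve the SoC-specific nvkm_device_tegra_func (in-tree this happens in
 * nouveau_platform.c via device-tree match data) and hand it to
 * nvkm_device_tegra_new() together with the config/debug strings.  The probe
 * function and argument values below are assumptions for illustration, not
 * the actual nouveau code:
 *
 *      static int example_probe(struct platform_device *pdev)
 *      {
 *              const struct nvkm_device_tegra_func *func =
 *                      of_device_get_match_data(&pdev->dev);
 *              struct nvkm_device *device;
 *              int ret;
 *
 *              ret = nvkm_device_tegra_new(func, pdev, NULL, NULL,
 *                                          true, true, ~0ULL, &device);
 *              if (ret)
 *                      return ret;
 *
 *              // ... create the DRM device on top of "device" ...
 *              return 0;
 *      }
 */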