/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
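
/*
 * Power the GPU up: enable the VDD rail (when present) and the core,
 * reference and power clocks, then assert reset, remove the 3D powergate
 * clamps (unless a PM domain already handles powergating) and finally
 * release reset.
 */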
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
	int ret;

	if (tdev->vdd) {
		ret = regulator_enable(tdev->vdd);
		if (ret)
			goto err_power;
	}

	ret = clk_prepare_enable(tdev->clk);
	if (ret)
		goto err_clk;
	if (tdev->clk_ref) {
		ret = clk_prepare_enable(tdev->clk_ref);
		if (ret)
			goto err_clk_ref;
	}
	ret = clk_prepare_enable(tdev->clk_pwr);
	if (ret)
		goto err_clk_pwr;
	clk_set_rate(tdev->clk_pwr, 204000000);
	udelay(10);

	reset_control_assert(tdev->rst);
	udelay(10);

	if (!tdev->pdev->dev.pm_domain) {
		ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
		if (ret)
			goto err_clamp;
		udelay(10);
	}

	reset_control_deassert(tdev->rst);
	udelay(10);

	return 0;

err_clamp:
	clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
	clk_disable_unprepare(tdev->clk);
err_clk:
	if (tdev->vdd)
		regulator_disable(tdev->vdd);
err_power:
	return ret;
}
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	int ret;

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	udelay(10);

	if (tdev->vdd) {
		ret = regulator_disable(tdev->vdd);
		if (ret)
			return ret;
	}

	return 0;
}
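
/*
 * If an IOMMU sits behind the platform bus, allocate and attach a domain
 * for the GPU and initialize an nvkm_mm allocator spanning the
 * (1 << func->iommu_bit) byte address space. On any failure the domain is
 * cleared and the device continues without IOMMU translation.
 */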
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
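
/* Undo nvkm_device_tegra_probe_iommu(). */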
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	if (tdev->iommu.domain) {
		nvkm_mm_fini(&tdev->iommu.mm);
		iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
		iommu_domain_free(tdev->iommu.domain);
	}
#endif
}
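
/*
 * Helpers: recover the Tegra wrapper from a generic nvkm_device and map
 * BAR indices onto the platform device's MMIO resources.
 */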
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
	return container_of(device, struct nvkm_device_tegra, device);
}

static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}

static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? res->start : 0;
}

static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
	struct resource *res = nvkm_device_tegra_resource(device, bar);
	return res ? resource_size(res) : 0;
}
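
/*
 * Interrupt handler: disarm the master interrupt controller, let
 * nvkm_mc_intr() dispatch to the subdevices, then rearm.
 */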
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
	struct nvkm_device_tegra *tdev = arg;
	struct nvkm_device *device = &tdev->device;
	bool handled = false;
	nvkm_mc_intr_unarm(device);
	nvkm_mc_intr(device, &handled);
	nvkm_mc_intr_rearm(device);
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
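
/*
 * fini: release the "stall" interrupt if it was requested. The suspend
 * argument is unused here.
 */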
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	if (tdev->irq) {
		free_irq(tdev->irq, tdev);
		tdev->irq = 0;
	}
}
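
/*
 * init: look up the "stall" interrupt of the platform device and install
 * nvkm_device_tegra_intr() as a shared handler.
 */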
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	int irq, ret;

	irq = platform_get_irq_byname(tdev->pdev, "stall");
	if (irq < 0)
		return irq;

	ret = request_irq(irq, nvkm_device_tegra_intr,
			  IRQF_SHARED, "nvkm", tdev);
	if (ret)
		return ret;

	tdev->irq = irq;
	return 0;
}
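
/*
 * dtor: power down and detach the IOMMU. Returning tdev hands the
 * enclosing allocation back to the nvkm core, which presumably frees it.
 */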
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}
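
/* Hooks the Tegra platform device into the common nvkm device core. */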
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};
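
/*
 * Probe path: allocate the wrapper, look up the "vdd" regulator (only when
 * func->require_vdd is set), the "gpu" reset control and the "gpu"/"ref"/
 * "pwr" clocks, set the DMA mask from func->iommu_bit, probe the IOMMU,
 * power the GPU up and construct the common nvkm_device.
 */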
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	int ret;

	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);

remove:
	nvkm_device_tegra_remove_iommu(tdev);

free:
	kfree(tdev);
	return ret;
}
#else
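
/* Without CONFIG_NOUVEAU_PLATFORM_DRIVER, only a stub is provided. */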
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
#endif