/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <core/tegra.h>
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
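
/* Enable the GPU power rail and clocks, then bring the GPU out of reset. */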
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
        int ret;

        ret = regulator_enable(tdev->vdd);
        if (ret)
                goto err_power;

        ret = clk_prepare_enable(tdev->clk);
        if (ret)
                goto err_clk;
        ret = clk_prepare_enable(tdev->clk_ref);
        if (ret)
                goto err_clk_ref;
        ret = clk_prepare_enable(tdev->clk_pwr);
        if (ret)
                goto err_clk_pwr;
        clk_set_rate(tdev->clk_pwr, 204000000);
        udelay(10);

        reset_control_assert(tdev->rst);
        udelay(10);

        ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
        if (ret)
                goto err_clamp;
        udelay(10);

        reset_control_deassert(tdev->rst);
        udelay(10);

        return 0;

err_clamp:
        clk_disable_unprepare(tdev->clk_pwr);
err_clk_pwr:
        clk_disable_unprepare(tdev->clk_ref);
err_clk_ref:
        clk_disable_unprepare(tdev->clk);
err_clk:
        regulator_disable(tdev->vdd);
err_power:
        return ret;
}
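
/*
 * Reverse of nvkm_device_tegra_power_up(): assert reset, gate the clocks,
 * and drop the power rail.
 */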
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
        reset_control_assert(tdev->rst);
        udelay(10);

        clk_disable_unprepare(tdev->clk_pwr);
        clk_disable_unprepare(tdev->clk_ref);
        clk_disable_unprepare(tdev->clk);
        udelay(10);

        return regulator_disable(tdev->vdd);
}
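
/*
 * Optionally attach the GPU to the platform IOMMU and set up the allocator
 * covering the IOMMU-backed GPU address space. Failure is not fatal: the
 * IOMMU state is simply cleared and the driver falls back to non-IOMMU
 * operation.
 */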
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        struct device *dev = &tdev->pdev->dev;
        unsigned long pgsize_bitmap;
        int ret;

        if (!tdev->func->iommu_bit)
                return;

        mutex_init(&tdev->iommu.mutex);

        if (iommu_present(&platform_bus_type)) {
                tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
                if (!tdev->iommu.domain)
                        goto error;

                /*
                 * An IOMMU is only usable if it supports page sizes smaller
                 * than or equal to the system's PAGE_SIZE, with a preference
                 * if both are equal.
                 */
                pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
                if (pgsize_bitmap & PAGE_SIZE) {
                        tdev->iommu.pgshift = PAGE_SHIFT;
                } else {
                        tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
                        if (tdev->iommu.pgshift == 0) {
                                dev_warn(dev, "unsupported IOMMU page size\n");
                                goto free_domain;
                        }
                        tdev->iommu.pgshift -= 1;
                }

                ret = iommu_attach_device(tdev->iommu.domain, dev);
                if (ret)
                        goto free_domain;

                ret = nvkm_mm_init(&tdev->iommu.mm, 0,
                                   (1ULL << tdev->func->iommu_bit) >>
                                   tdev->iommu.pgshift, 1);
                if (ret)
                        goto detach_device;
        }

        return;

detach_device:
        iommu_detach_device(tdev->iommu.domain, dev);
free_domain:
        iommu_domain_free(tdev->iommu.domain);
error:
        tdev->iommu.domain = NULL;
        tdev->iommu.pgshift = 0;
        dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
static void
nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
        if (tdev->iommu.domain) {
                nvkm_mm_fini(&tdev->iommu.mm);
                iommu_detach_device(tdev->iommu.domain, tdev->device.dev);
                iommu_domain_free(tdev->iommu.domain);
        }
#endif
}
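
/* Resolve the platform-device wrapper and its MMIO resources. */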
static struct nvkm_device_tegra *
nvkm_device_tegra(struct nvkm_device *device)
{
        return container_of(device, struct nvkm_device_tegra, device);
}
static struct resource *
nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
}
static resource_size_t
nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? res->start : 0;
}
static resource_size_t
nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
{
        struct resource *res = nvkm_device_tegra_resource(device, bar);
        return res ? resource_size(res) : 0;
}
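
/* Top-level interrupt handler: disarm, dispatch through MC, then re-arm. */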
static irqreturn_t
nvkm_device_tegra_intr(int irq, void *arg)
{
        struct nvkm_device_tegra *tdev = arg;
        struct nvkm_device *device = &tdev->device;
        bool handled = false;
        nvkm_mc_intr_unarm(device);
        nvkm_mc_intr(device, &handled);
        nvkm_mc_intr_rearm(device);
        return handled ? IRQ_HANDLED : IRQ_NONE;
}
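
/* init/fini install and remove the handler for the "stall" interrupt. */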
static void
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        if (tdev->irq) {
                free_irq(tdev->irq, tdev);
                tdev->irq = 0;
        }
}
static int
nvkm_device_tegra_init(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        int irq, ret;

        irq = platform_get_irq_byname(tdev->pdev, "stall");
        if (irq < 0)
                return irq;

        ret = request_irq(irq, nvkm_device_tegra_intr,
                          IRQF_SHARED, "nvkm", tdev);
        if (ret)
                return ret;

        tdev->irq = irq;
        return 0;
}
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
        nvkm_device_tegra_power_down(tdev);
        nvkm_device_tegra_remove_iommu(tdev);
        return tdev;
}
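
/* Hooks plugged into the common nvkm device layer. */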
static const struct nvkm_device_func
nvkm_device_tegra_func = {
        .tegra = nvkm_device_tegra,
        .dtor = nvkm_device_tegra_dtor,
        .init = nvkm_device_tegra_init,
        .fini = nvkm_device_tegra_fini,
        .resource_addr = nvkm_device_tegra_resource_addr,
        .resource_size = nvkm_device_tegra_resource_size,
        .cpu_coherent = false,
};
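
/*
 * Entry point used by the nouveau platform driver when probing a Tegra GPU.
 * A typical call site looks roughly like the following (illustrative only;
 * the actual arguments come from the platform driver's probe routine):
 *
 *      ret = nvkm_device_tegra_new(func, pdev, config, debug,
 *                                  true, true, ~0ULL, &device);
 */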
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        struct nvkm_device_tegra *tdev;
        int ret;

        if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
                return -ENOMEM;

        tdev->func = func;
        tdev->pdev = pdev;

        tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
        if (IS_ERR(tdev->vdd)) {
                ret = PTR_ERR(tdev->vdd);
                goto free;
        }

        tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->rst)) {
                ret = PTR_ERR(tdev->rst);
                goto free;
        }

        tdev->clk = devm_clk_get(&pdev->dev, "gpu");
        if (IS_ERR(tdev->clk)) {
                ret = PTR_ERR(tdev->clk);
                goto free;
        }

        if (func->require_ref_clk)
                tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
        if (IS_ERR(tdev->clk_ref)) {
                ret = PTR_ERR(tdev->clk_ref);
                goto free;
        }

        tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
        if (IS_ERR(tdev->clk_pwr)) {
                ret = PTR_ERR(tdev->clk_pwr);
                goto free;
        }

        /*
         * The IOMMU bit defines the upper limit of the GPU-addressable space.
         * This will be refined in nouveau_ttm_init but we need to do it early
         * for instmem to behave properly.
         */
        ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
        if (ret)
                goto free;

        nvkm_device_tegra_probe_iommu(tdev);

        ret = nvkm_device_tegra_power_up(tdev);
        if (ret)
                goto remove;

        tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
        tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;

        ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
                               NVKM_DEVICE_TEGRA, pdev->id, NULL,
                               cfg, dbg, detect, mmio, subdev_mask,
                               &tdev->device);
        if (ret)
                goto powerdown;

        *pdevice = &tdev->device;

        return 0;

powerdown:
        nvkm_device_tegra_power_down(tdev);
remove:
        nvkm_device_tegra_remove_iommu(tdev);
free:
        kfree(tdev);
        return ret;
}
#else
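/* Stub used when the Nouveau platform driver is not built. */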
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
                      struct platform_device *pdev,
                      const char *cfg, const char *dbg,
                      bool detect, bool mmio, u64 subdev_mask,
                      struct nvkm_device **pdevice)
{
        return -ENOSYS;
}
#endif