/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
22 #include <core/tegra.h>
23 #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
27 nvkm_device_tegra_power_up(struct nvkm_device_tegra
*tdev
)
31 ret
= regulator_enable(tdev
->vdd
);
35 ret
= clk_prepare_enable(tdev
->clk
);
38 ret
= clk_prepare_enable(tdev
->clk_pwr
);
41 clk_set_rate(tdev
->clk_pwr
, 204000000);
44 reset_control_assert(tdev
->rst
);
47 ret
= tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D
);
52 reset_control_deassert(tdev
->rst
);
58 clk_disable_unprepare(tdev
->clk_pwr
);
60 clk_disable_unprepare(tdev
->clk
);
62 regulator_disable(tdev
->vdd
);
68 nvkm_device_tegra_power_down(struct nvkm_device_tegra
*tdev
)
70 reset_control_assert(tdev
->rst
);
73 clk_disable_unprepare(tdev
->clk_pwr
);
74 clk_disable_unprepare(tdev
->clk
);
77 return regulator_disable(tdev
->vdd
);
81 nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra
*tdev
)
83 #if IS_ENABLED(CONFIG_IOMMU_API)
84 struct device
*dev
= &tdev
->pdev
->dev
;
85 unsigned long pgsize_bitmap
;
88 if (!tdev
->func
->iommu_bit
)
91 mutex_init(&tdev
->iommu
.mutex
);
93 if (iommu_present(&platform_bus_type
)) {
94 tdev
->iommu
.domain
= iommu_domain_alloc(&platform_bus_type
);
95 if (IS_ERR(tdev
->iommu
.domain
))
99 * A IOMMU is only usable if it supports page sizes smaller
100 * or equal to the system's PAGE_SIZE, with a preference if
103 pgsize_bitmap
= tdev
->iommu
.domain
->ops
->pgsize_bitmap
;
104 if (pgsize_bitmap
& PAGE_SIZE
) {
105 tdev
->iommu
.pgshift
= PAGE_SHIFT
;
107 tdev
->iommu
.pgshift
= fls(pgsize_bitmap
& ~PAGE_MASK
);
108 if (tdev
->iommu
.pgshift
== 0) {
109 dev_warn(dev
, "unsupported IOMMU page size\n");
112 tdev
->iommu
.pgshift
-= 1;
115 ret
= iommu_attach_device(tdev
->iommu
.domain
, dev
);
119 ret
= nvkm_mm_init(&tdev
->iommu
.mm
, 0,
120 (1ULL << tdev
->func
->iommu_bit
) >>
121 tdev
->iommu
.pgshift
, 1);
129 iommu_detach_device(tdev
->iommu
.domain
, dev
);
132 iommu_domain_free(tdev
->iommu
.domain
);
135 tdev
->iommu
.domain
= NULL
;
136 tdev
->iommu
.pgshift
= 0;
137 dev_err(dev
, "cannot initialize IOMMU MM\n");
142 nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra
*tdev
)
144 #if IS_ENABLED(CONFIG_IOMMU_API)
145 if (tdev
->iommu
.domain
) {
146 nvkm_mm_fini(&tdev
->iommu
.mm
);
147 iommu_detach_device(tdev
->iommu
.domain
, tdev
->device
.dev
);
148 iommu_domain_free(tdev
->iommu
.domain
);
153 static struct nvkm_device_tegra
*
154 nvkm_device_tegra(struct nvkm_device
*device
)
156 return container_of(device
, struct nvkm_device_tegra
, device
);
159 static struct resource
*
160 nvkm_device_tegra_resource(struct nvkm_device
*device
, unsigned bar
)
162 struct nvkm_device_tegra
*tdev
= nvkm_device_tegra(device
);
163 return platform_get_resource(tdev
->pdev
, IORESOURCE_MEM
, bar
);
166 static resource_size_t
167 nvkm_device_tegra_resource_addr(struct nvkm_device
*device
, unsigned bar
)
169 struct resource
*res
= nvkm_device_tegra_resource(device
, bar
);
170 return res
? res
->start
: 0;
173 static resource_size_t
174 nvkm_device_tegra_resource_size(struct nvkm_device
*device
, unsigned bar
)
176 struct resource
*res
= nvkm_device_tegra_resource(device
, bar
);
177 return res
? resource_size(res
) : 0;
181 nvkm_device_tegra_intr(int irq
, void *arg
)
183 struct nvkm_device_tegra
*tdev
= arg
;
184 struct nvkm_mc
*mc
= tdev
->device
.mc
;
185 bool handled
= false;
187 nvkm_mc_intr_unarm(mc
);
188 nvkm_mc_intr(mc
, &handled
);
189 nvkm_mc_intr_rearm(mc
);
191 return handled
? IRQ_HANDLED
: IRQ_NONE
;
195 nvkm_device_tegra_fini(struct nvkm_device
*device
, bool suspend
)
197 struct nvkm_device_tegra
*tdev
= nvkm_device_tegra(device
);
199 free_irq(tdev
->irq
, tdev
);
205 nvkm_device_tegra_init(struct nvkm_device
*device
)
207 struct nvkm_device_tegra
*tdev
= nvkm_device_tegra(device
);
210 irq
= platform_get_irq_byname(tdev
->pdev
, "stall");
214 ret
= request_irq(irq
, nvkm_device_tegra_intr
,
215 IRQF_SHARED
, "nvkm", tdev
);
/*
 * Destructor: power down the GPU and detach from the IOMMU, then hand the
 * containing tdev back to the core so it can be freed (nvkm dtor contract
 * returns the allocation that owns the device).
 */
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);

	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);
	return tdev;
}
232 static const struct nvkm_device_func
233 nvkm_device_tegra_func
= {
234 .tegra
= nvkm_device_tegra
,
235 .dtor
= nvkm_device_tegra_dtor
,
236 .init
= nvkm_device_tegra_init
,
237 .fini
= nvkm_device_tegra_fini
,
238 .resource_addr
= nvkm_device_tegra_resource_addr
,
239 .resource_size
= nvkm_device_tegra_resource_size
,
240 .cpu_coherent
= false,
244 nvkm_device_tegra_new(const struct nvkm_device_tegra_func
*func
,
245 struct platform_device
*pdev
,
246 const char *cfg
, const char *dbg
,
247 bool detect
, bool mmio
, u64 subdev_mask
,
248 struct nvkm_device
**pdevice
)
250 struct nvkm_device_tegra
*tdev
;
253 if (!(tdev
= kzalloc(sizeof(*tdev
), GFP_KERNEL
)))
255 *pdevice
= &tdev
->device
;
260 tdev
->vdd
= devm_regulator_get(&pdev
->dev
, "vdd");
261 if (IS_ERR(tdev
->vdd
))
262 return PTR_ERR(tdev
->vdd
);
264 tdev
->rst
= devm_reset_control_get(&pdev
->dev
, "gpu");
265 if (IS_ERR(tdev
->rst
))
266 return PTR_ERR(tdev
->rst
);
268 tdev
->clk
= devm_clk_get(&pdev
->dev
, "gpu");
269 if (IS_ERR(tdev
->clk
))
270 return PTR_ERR(tdev
->clk
);
272 tdev
->clk_pwr
= devm_clk_get(&pdev
->dev
, "pwr");
273 if (IS_ERR(tdev
->clk_pwr
))
274 return PTR_ERR(tdev
->clk_pwr
);
276 nvkm_device_tegra_probe_iommu(tdev
);
278 ret
= nvkm_device_tegra_power_up(tdev
);
282 tdev
->gpu_speedo
= tegra_sku_info
.gpu_speedo_value
;
283 ret
= nvkm_device_ctor(&nvkm_device_tegra_func
, NULL
, &pdev
->dev
,
284 NVKM_DEVICE_TEGRA
, pdev
->id
, NULL
,
285 cfg
, dbg
, detect
, mmio
, subdev_mask
,
294 nvkm_device_tegra_new(const struct nvkm_device_tegra_func
*func
,
295 struct platform_device
*pdev
,
296 const char *cfg
, const char *dbg
,
297 bool detect
, bool mmio
, u64 subdev_mask
,
298 struct nvkm_device
**pdevice
)