drivers/iommu/tegra-gart.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for Graphics Address Relocation Table on Tegra20
 *
 * Copyright (c) 2010-2012, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Hiroshi DOYU <hdoyu@nvidia.com>
 */
#define dev_fmt(fmt)	"gart: " fmt

#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include <soc/tegra/mc.h>
#define GART_REG_BASE		0x24
#define GART_CONFIG		(0x24 - GART_REG_BASE)
#define GART_ENTRY_ADDR		(0x28 - GART_REG_BASE)
#define GART_ENTRY_DATA		(0x2c - GART_REG_BASE)
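/*
 * Each GART PTE is a 32-bit word: bit 31 flags the entry as valid and
 * bits 30:12 hold the page-aligned physical address of the mapping.
 */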
#define GART_ENTRY_PHYS_ADDR_VALID	BIT(31)

#define GART_PAGE_SHIFT		12
#define GART_PAGE_SIZE		(1 << GART_PAGE_SHIFT)
#define GART_PAGE_MASK		GENMASK(30, GART_PAGE_SHIFT)

/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES	(GART_PAGE_SIZE)
struct gart_device {
	void __iomem *regs;
	u32 *savedata;
	unsigned long iovmm_base;	/* offset to vmm_area start */
	unsigned long iovmm_end;	/* offset to vmm_area end */
	spinlock_t pte_lock;		/* for pagetable */
	spinlock_t dom_lock;		/* for active domain */
	unsigned int active_devices;	/* number of active devices */
	struct iommu_domain *active_domain;	/* current active domain */
	struct iommu_device iommu;	/* IOMMU Core handle */
	struct device *dev;
};
static struct gart_device *gart_handle; /* unique for a system */

static bool gart_debug;
/*
 * Any interaction between any block on PPSB and a block on APB or AHB
 * must have these read-back to ensure the APB/AHB bus transaction is
 * complete before initiating activity on the PPSB block.
 */
#define FLUSH_GART_REGS(gart)	readl_relaxed((gart)->regs + GART_CONFIG)
#define for_each_gart_pte(gart, iova)					\
	for (iova = gart->iovmm_base;					\
	     iova < gart->iovmm_end;					\
	     iova += GART_PAGE_SIZE)
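/*
 * PTEs are programmed indirectly: the target IOVA is written to the
 * ENTRY_ADDR register, then the PTE value is written to (or read back
 * from) the ENTRY_DATA register.
 */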
static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}
static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}
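/*
 * do_gart_setup() programs every PTE in the aperture (from @data if a
 * saved snapshot is given, zeroed otherwise) and then enables address
 * translation by writing 1 to GART_CONFIG.
 */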
static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}
static inline bool gart_iova_range_invalid(struct gart_device *gart,
					   unsigned long iova, size_t bytes)
{
	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
			iova + bytes > gart->iovmm_end);
}

static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
{
	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
}
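/*
 * The GART provides a single translation aperture, so only one domain
 * can be active at a time: attaching while a different domain is active
 * fails with -EBUSY, and additional devices attached to the active
 * domain are tracked with a simple reference count.
 */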
static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		ret = -EBUSY;
	} else if (dev_iommu_priv_get(dev) != domain) {
		dev_iommu_priv_set(dev, domain);
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}
static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	if (dev_iommu_priv_get(dev) == domain) {
		dev_iommu_priv_set(dev, NULL);

		if (--gart->active_devices == 0)
			gart->active_domain = NULL;
	}

	spin_unlock(&gart->dom_lock);
}
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (domain) {
		domain->geometry.aperture_start = gart_handle->iovmm_base;
		domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
		domain->geometry.force_aperture = true;
	}

	return domain;
}
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
	WARN_ON(gart_handle->active_domain == domain);
	kfree(domain);
}
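/*
 * With the gart_debug module parameter enabled, map/unmap verify the
 * current state of the target PTE, so that remapping a live entry (or
 * unmapping a free one) is reported instead of performed silently.
 */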
static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}
static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	gart_set_pte(gart, iova, 0);

	return 0;
}
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t bytes, struct iommu_iotlb_gather *gather)
{
	struct gart_device *gart = gart_handle;
	int err;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return 0;

	spin_lock(&gart->pte_lock);
	err = __gart_iommu_unmap(gart, iova);
	spin_unlock(&gart->pte_lock);

	return err ? 0 : bytes;
}
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	return pte & GART_PAGE_MASK;
}
static bool gart_iommu_capable(enum iommu_cap cap)
{
	return false;
}
static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
	if (!dev_iommu_fwspec_get(dev))
		return ERR_PTR(-ENODEV);

	return &gart_handle->iommu;
}

static void gart_iommu_release_device(struct device *dev)
{
}
static int gart_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
{
	return 0;
}
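/*
 * Writes to the GART registers may still be in flight on the bus, so
 * map/unmap updates are made visible by reading back GART_CONFIG
 * (FLUSH_GART_REGS) from the iotlb_sync hooks.
 */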
static void gart_iommu_sync_map(struct iommu_domain *domain)
{
	FLUSH_GART_REGS(gart_handle);
}

static void gart_iommu_sync(struct iommu_domain *domain,
			    struct iommu_iotlb_gather *gather)
{
	gart_iommu_sync_map(domain);
}
static const struct iommu_ops gart_iommu_ops = {
	.capable	= gart_iommu_capable,
	.domain_alloc	= gart_iommu_domain_alloc,
	.domain_free	= gart_iommu_domain_free,
	.attach_dev	= gart_iommu_attach_dev,
	.detach_dev	= gart_iommu_detach_dev,
	.probe_device	= gart_iommu_probe_device,
	.release_device	= gart_iommu_release_device,
	.device_group	= generic_device_group,
	.map		= gart_iommu_map,
	.unmap		= gart_iommu_unmap,
	.iova_to_phys	= gart_iommu_iova_to_phys,
	.pgsize_bitmap	= GART_IOMMU_PGSIZES,
	.of_xlate	= gart_iommu_of_xlate,
	.iotlb_sync_map	= gart_iommu_sync_map,
	.iotlb_sync	= gart_iommu_sync,
};
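/*
 * PTEs are not retained across suspend, hence tegra_gart_suspend()
 * snapshots the whole aperture into gart->savedata and
 * tegra_gart_resume() replays the snapshot via do_gart_setup().
 */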
int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/*
	 * All GART users shall be suspended at this point. Disable
	 * address translation to trap all GART accesses as invalid
	 * memory accesses.
	 */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}
int tegra_gart_resume(struct gart_device *gart)
{
	do_gart_setup(gart, gart->savedata);

	return 0;
}
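/*
 * The GART register window lives inside the memory controller's
 * register space (at GART_REG_BASE), while the IOVA aperture is taken
 * from the device's second memory resource.
 */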
struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

	BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);

	/* the GART memory aperture is required */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(dev, "Memory aperture resource unavailable\n");
		return ERR_PTR(-ENXIO);
	}

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	iommu_device_set_ops(&gart->iommu, &gart_iommu_ops);
	iommu_device_set_fwnode(&gart->iommu, dev->fwnode);

	err = iommu_device_register(&gart->iommu);
	if (err)
		goto remove_sysfs;

	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}
module_param(gart_debug, bool, 0644);
MODULE_PARM_DESC(gart_debug, "Enable GART debugging");