PM / sleep: Asynchronous threads for suspend_noirq
[linux/fpc-iii.git] / drivers / gpu / drm / msm / msm_iommu.c
blob92b7459862314a6e41756fae21d5496329b0be5c
1 /*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
18 #include "msm_drv.h"
19 #include "msm_mmu.h"
21 struct msm_iommu {
22 struct msm_mmu base;
23 struct iommu_domain *domain;
25 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
/*
 * IOMMU fault callback: just log the faulting iova/flags for debugging.
 * Returning 0 tells the IOMMU core the fault was "handled", so the
 * default fault reporting is suppressed.
 */
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}
34 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
36 struct drm_device *dev = mmu->dev;
37 struct msm_iommu *iommu = to_msm_iommu(mmu);
38 int i, ret;
40 for (i = 0; i < cnt; i++) {
41 struct device *msm_iommu_get_ctx(const char *ctx_name);
42 struct device *ctx = msm_iommu_get_ctx(names[i]);
43 if (IS_ERR_OR_NULL(ctx))
44 continue;
45 ret = iommu_attach_device(iommu->domain, ctx);
46 if (ret) {
47 dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
48 return ret;
52 return 0;
55 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
56 struct sg_table *sgt, unsigned len, int prot)
58 struct msm_iommu *iommu = to_msm_iommu(mmu);
59 struct iommu_domain *domain = iommu->domain;
60 struct scatterlist *sg;
61 unsigned int da = iova;
62 unsigned int i, j;
63 int ret;
65 if (!domain || !sgt)
66 return -EINVAL;
68 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
69 u32 pa = sg_phys(sg) - sg->offset;
70 size_t bytes = sg->length + sg->offset;
72 VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
74 ret = iommu_map(domain, da, pa, bytes, prot);
75 if (ret)
76 goto fail;
78 da += bytes;
81 return 0;
83 fail:
84 da = iova;
86 for_each_sg(sgt->sgl, sg, i, j) {
87 size_t bytes = sg->length + sg->offset;
88 iommu_unmap(domain, da, bytes);
89 da += bytes;
91 return ret;
94 static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
95 struct sg_table *sgt, unsigned len)
97 struct msm_iommu *iommu = to_msm_iommu(mmu);
98 struct iommu_domain *domain = iommu->domain;
99 struct scatterlist *sg;
100 unsigned int da = iova;
101 int i;
103 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
104 size_t bytes = sg->length + sg->offset;
105 size_t unmapped;
107 unmapped = iommu_unmap(domain, da, bytes);
108 if (unmapped < bytes)
109 return unmapped;
111 VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
113 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
115 da += bytes;
118 return 0;
121 static void msm_iommu_destroy(struct msm_mmu *mmu)
123 struct msm_iommu *iommu = to_msm_iommu(mmu);
124 iommu_domain_free(iommu->domain);
125 kfree(iommu);
128 static const struct msm_mmu_funcs funcs = {
129 .attach = msm_iommu_attach,
130 .map = msm_iommu_map,
131 .unmap = msm_iommu_unmap,
132 .destroy = msm_iommu_destroy,
135 struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
137 struct msm_iommu *iommu;
139 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
140 if (!iommu)
141 return ERR_PTR(-ENOMEM);
143 iommu->domain = domain;
144 msm_mmu_init(&iommu->base, dev, &funcs);
145 iommu_set_fault_handler(domain, msm_fault_handler, dev);
147 return &iommu->base;