/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_mmu.h"

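/*
 * A GEM address space pairs an MMU implementation (IOMMU or the a2xx
 * gpummu) with a drm_mm range allocator for GPU virtual addresses.  The
 * helpers below implement the vma lifecycle: msm_gem_init_vma() reserves
 * an iova and takes a reference on the address space, msm_gem_map_vma()
 * and msm_gem_unmap_vma() map the backing pages and track usage,
 * msm_gem_purge_vma() tears down the MMU mapping, and msm_gem_close_vma()
 * releases the iova and drops the reference.
 */

/* Tear down the address space once the last reference has been dropped */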
static void
msm_gem_address_space_destroy(struct kref *kref)
{
        struct msm_gem_address_space *aspace = container_of(kref,
                        struct msm_gem_address_space, kref);

        drm_mm_takedown(&aspace->mm);
        if (aspace->mmu)
                aspace->mmu->funcs->destroy(aspace->mmu);
        kfree(aspace);
}

void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
{
        if (aspace)
                kref_put(&aspace->kref, msm_gem_address_space_destroy);
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
{
        unsigned size = vma->node.size << PAGE_SHIFT;

        /* Print a message if we try to purge a vma in use */
        if (WARN_ON(vma->inuse > 0))
                return;

        /* Don't do anything if the memory isn't mapped */
        if (!vma->mapped)
                return;

        if (aspace->mmu)
                aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

        vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
{
        if (!WARN_ON(!vma->iova))
                vma->inuse--;
}

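/*
 * Map the vma's backing pages into the address space and take a use
 * count; the MMU mapping is only established on the first map.
 */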
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, int prot,
                struct sg_table *sgt, int npages)
{
        unsigned size = npages << PAGE_SHIFT;
        int ret = 0;

        if (WARN_ON(!vma->iova))
                return -EINVAL;

        /* Increase the usage counter */
        vma->inuse++;

        if (vma->mapped)
                return 0;

        vma->mapped = true;

        if (aspace->mmu)
                ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
                                size, prot);

        if (ret)
                vma->mapped = false;

        return ret;
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
{
        if (WARN_ON(vma->inuse > 0 || vma->mapped))
                return;

        spin_lock(&aspace->lock);
        if (vma->iova)
                drm_mm_remove_node(&vma->node);
        spin_unlock(&aspace->lock);

        vma->iova = 0;

        msm_gem_address_space_put(aspace);
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma, int npages)
{
        int ret;

        if (WARN_ON(vma->iova))
                return -EBUSY;

        spin_lock(&aspace->lock);
        ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
        spin_unlock(&aspace->lock);

        if (ret)
                return ret;

        vma->iova = vma->node.start << PAGE_SHIFT;
        vma->mapped = false;

        kref_get(&aspace->kref);

        return 0;
}

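/*
 * Create an address space backed by an IOMMU domain; the managed iova
 * range is taken from the domain's aperture.
 */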
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
                const char *name)
{
        struct msm_gem_address_space *aspace;
        u64 size = domain->geometry.aperture_end -
                domain->geometry.aperture_start;

        aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
        if (!aspace)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&aspace->lock);
        aspace->name = name;
        aspace->mmu = msm_iommu_new(dev, domain);

        drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
                size >> PAGE_SHIFT);

        kref_init(&aspace->kref);

        return aspace;
}

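/*
 * Create an address space for a2xx targets, which use the GPU's own MMU
 * (gpummu) rather than an IOMMU, covering [va_start, va_end).
 */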
struct msm_gem_address_space *
msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
                const char *name, uint64_t va_start, uint64_t va_end)
{
        struct msm_gem_address_space *aspace;
        u64 size = va_end - va_start;

        aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
        if (!aspace)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&aspace->lock);
        aspace->name = name;
        aspace->mmu = msm_gpummu_new(dev, gpu);

        drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
                size >> PAGE_SHIFT);

        kref_init(&aspace->kref);

        return aspace;
}