Set memory attributes on page
[pscnv.git] / pscnv / pscnv_vm.c (blob 93fef5b3cf768da695fa8a2fb29278b08c64b222)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 PathScale Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include "drm.h"
#include "nouveau_drv.h"
#include "pscnv_mem.h"
#include "pscnv_vm.h"
#include "pscnv_chan.h"
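/*
 * Vspace ID binding (note added for clarity, inferred from the code
 * below): a "fake" vspace, used for driver-internal mappings, is stored
 * in fake_vspaces[fake] and gets the negative ID -fake; a regular
 * vspace takes the first free slot in vspaces[1..127].  Both tables are
 * guarded by vs_lock.
 */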
static int pscnv_vspace_bind (struct pscnv_vspace *vs, int fake) {
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	unsigned long flags;
	int i;
	BUG_ON(vs->vid);
	spin_lock_irqsave(&dev_priv->vm->vs_lock, flags);
	if (fake) {
		vs->vid = -fake;
		BUG_ON(dev_priv->vm->fake_vspaces[fake]);
		dev_priv->vm->fake_vspaces[fake] = vs;
		spin_unlock_irqrestore(&dev_priv->vm->vs_lock, flags);
		return 0;
	} else {
		for (i = 1; i < 128; i++)
			if (!dev_priv->vm->vspaces[i]) {
				vs->vid = i;
				dev_priv->vm->vspaces[i] = vs;
				spin_unlock_irqrestore(&dev_priv->vm->vs_lock, flags);
				return 0;
			}
		spin_unlock_irqrestore(&dev_priv->vm->vs_lock, flags);
		NV_ERROR(vs->dev, "VM: Out of vspaces\n");
		return -ENOSPC;
	}
}
static void pscnv_vspace_unbind (struct pscnv_vspace *vs) {
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	unsigned long flags;
	spin_lock_irqsave(&dev_priv->vm->vs_lock, flags);
	if (vs->vid < 0) {
		BUG_ON(dev_priv->vm->fake_vspaces[-vs->vid] != vs);
		dev_priv->vm->fake_vspaces[-vs->vid] = 0;
	} else {
		BUG_ON(dev_priv->vm->vspaces[vs->vid] != vs);
		dev_priv->vm->vspaces[vs->vid] = 0;
	}
	vs->vid = 0;
	spin_unlock_irqrestore(&dev_priv->vm->vs_lock, flags);
}
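/*
 * Create a new vspace of the given size: bind an ID, then hand off to
 * the per-chipset do_vspace_new() hook.  Returns NULL on any failure.
 */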
struct pscnv_vspace *
pscnv_vspace_new (struct drm_device *dev, uint64_t size, uint32_t flags, int fake) {
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_vspace *res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res) {
		NV_ERROR(dev, "VM: Couldn't alloc vspace\n");
		return 0;
	}
	res->dev = dev;
	res->size = size;
	res->flags = flags;
	kref_init(&res->ref);
	mutex_init(&res->lock);
	if (pscnv_vspace_bind(res, fake)) {
		kfree(res);
		return 0;
	}
	NV_INFO(dev, "VM: Allocating vspace %d\n", res->vid);
	if (dev_priv->vm->do_vspace_new(res)) {
		pscnv_vspace_unbind(res);
		kfree(res);
		return 0;
	}
	return res;
}
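/*
 * Lifetime sketch (illustrative, not part of this file): the caller
 * owns the reference taken by kref_init() above and drops it with
 * pscnv_vspace_ref_free; the size and flags values here are made up
 * for the example:
 *
 *	struct pscnv_vspace *vs = pscnv_vspace_new(dev, 1ull << 40, 0, 0);
 *	if (!vs)
 *		return -ENOMEM;
 *	...
 *	kref_put(&vs->ref, pscnv_vspace_ref_free);
 */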
static void
pscnv_vspace_free_unmap(struct pscnv_mm_node *node) {
	struct pscnv_bo *bo = node->tag;
	drm_gem_object_unreference_unlocked(bo->gem);
	pscnv_mm_free(node);
}

void pscnv_vspace_ref_free(struct kref *ref) {
	struct pscnv_vspace *vs = container_of(ref, struct pscnv_vspace, ref);
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	NV_INFO(vs->dev, "VM: Freeing vspace %d\n", vs->vid);
	if (vs->vid < 0)
		pscnv_mm_takedown(vs->mm, pscnv_mm_free);
	else
		pscnv_mm_takedown(vs->mm, pscnv_vspace_free_unmap);
	dev_priv->vm->do_vspace_free(vs);
	pscnv_vspace_unbind(vs);
	kfree(vs);
}
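/*
 * Mapping nodes carry their back-pointers in the generic mm node tags:
 * node->tag is the mapped BO and node->tag2 the owning vspace (see
 * pscnv_vspace_map below).  The unmap helpers rely on this convention.
 */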
static int
pscnv_vspace_unmap_node_unlocked(struct pscnv_mm_node *node) {
	struct pscnv_vspace *vs = node->tag2;
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	struct pscnv_bo *bo = node->tag;
	if (pscnv_vm_debug >= 1) {
		NV_INFO(vs->dev, "VM: vspace %d: Unmapping range %llx-%llx.\n", vs->vid, node->start, node->start + node->size);
	}
	dev_priv->vm->do_unmap(vs, node->start, node->size);

	if (vs->vid >= 0) {
		drm_gem_object_unreference(bo->gem);
	}
	pscnv_mm_free(node);
	return 0;
}
int
pscnv_vspace_map(struct pscnv_vspace *vs, struct pscnv_bo *bo,
		uint64_t start, uint64_t end, int back,
		struct pscnv_mm_node **res)
{
	struct pscnv_mm_node *node;
	int ret;
	struct drm_nouveau_private *dev_priv = vs->dev->dev_private;
	mutex_lock(&vs->lock);
	ret = dev_priv->vm->place_map(vs, bo, start, end, back, &node);
	if (ret) {
		mutex_unlock(&vs->lock);
		return ret;
	}
	node->tag = bo;
	node->tag2 = vs;
	if (pscnv_vm_debug >= 1)
		NV_INFO(vs->dev, "VM: vspace %d: Mapping BO %x/%d at %llx-%llx.\n", vs->vid, bo->cookie, bo->serial, node->start,
				node->start + node->size);
	ret = dev_priv->vm->do_map(vs, bo, node->start);
	if (ret) {
		pscnv_vspace_unmap_node_unlocked(node);
	}
	*res = node;
	mutex_unlock(&vs->lock);
	return ret;
}
int
pscnv_vspace_unmap_node(struct pscnv_mm_node *node) {
	struct pscnv_vspace *vs = node->tag2;
	int ret;
	mutex_lock(&vs->lock);
	ret = pscnv_vspace_unmap_node_unlocked(node);
	mutex_unlock(&vs->lock);
	return ret;
}
int
pscnv_vspace_unmap(struct pscnv_vspace *vs, uint64_t start) {
	int ret;
	mutex_lock(&vs->lock);
	ret = pscnv_vspace_unmap_node_unlocked(pscnv_mm_find_node(vs->mm, start));
	mutex_unlock(&vs->lock);
	return ret;
}
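/*
 * mmap offset layout, as implied by the checks in pscnv_mmap below:
 * byte offsets under 1<<31 go to the legacy drm_mmap() path, offsets in
 * [1<<31, 1<<32) map channel control areas via pscnv_chan_mmap(), and
 * above that the high 32 bits of the offset encode the GEM handle of
 * the BO to map.
 */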
#ifdef __linux__

static struct vm_operations_struct pscnv_vram_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static struct vm_operations_struct pscnv_sysram_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
	.fault = pscnv_sysram_vm_fault,
};
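/*
 * VRAM BOs are mapped up front, write-combined, by remapping the BAR
 * aperture pages (dev_priv->fb_phys + bo->map1->start), so pscnv_vram_ops
 * needs no fault handler; SYSRAM BOs are instead populated lazily
 * through pscnv_sysram_vm_fault().
 */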
int pscnv_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct pscnv_bo *bo;
	int ret;

	if (vma->vm_pgoff * PAGE_SIZE < (1ull << 31))
		return drm_mmap(filp, vma);

	if (vma->vm_pgoff * PAGE_SIZE < (1ull << 32))
		return pscnv_chan_mmap(filp, vma);

	obj = drm_gem_object_lookup(dev, priv, (vma->vm_pgoff * PAGE_SIZE) >> 32);
	if (!obj)
		return -ENOENT;
	bo = obj->driver_private;

	if (vma->vm_end - vma->vm_start > bo->size) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}
	switch (bo->flags & PSCNV_GEM_MEMTYPE_MASK) {
	case PSCNV_GEM_VRAM_SMALL:
	case PSCNV_GEM_VRAM_LARGE:
		if ((ret = dev_priv->vm->map_user(bo))) {
			drm_gem_object_unreference_unlocked(obj);
			return ret;
		}
		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
		vma->vm_ops = &pscnv_vram_ops;
		vma->vm_private_data = obj;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

		vma->vm_file = filp;

		return remap_pfn_range(vma, vma->vm_start,
				(dev_priv->fb_phys + bo->map1->start) >> PAGE_SHIFT,
				vma->vm_end - vma->vm_start, PAGE_SHARED);
	case PSCNV_GEM_SYSRAM_SNOOP:
	case PSCNV_GEM_SYSRAM_NOSNOOP:
		/* XXX */
		vma->vm_flags |= VM_RESERVED;
		vma->vm_ops = &pscnv_sysram_ops;
		vma->vm_private_data = obj;

		vma->vm_file = filp;

		return 0;
	default:
		drm_gem_object_unreference_unlocked(obj);
		return -ENOSYS;
	}
}

#endif