/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 PathScale Inc. All rights reserved.
 * Use is subject to license terms.
 */
28 #include "nouveau_drv.h"
29 #include "pscnv_mem.h"
31 #include "pscnv_chan.h"
34 static int pscnv_vspace_bind (struct pscnv_vspace
*vs
, int fake
) {
35 struct drm_nouveau_private
*dev_priv
= vs
->dev
->dev_private
;
39 spin_lock_irqsave(&dev_priv
->vm
->vs_lock
, flags
);
42 BUG_ON(dev_priv
->vm
->fake_vspaces
[fake
]);
43 dev_priv
->vm
->fake_vspaces
[fake
] = vs
;
44 spin_unlock_irqrestore(&dev_priv
->vm
->vs_lock
, flags
);
47 for (i
= 1; i
< 128; i
++)
48 if (!dev_priv
->vm
->vspaces
[i
]) {
50 dev_priv
->vm
->vspaces
[i
] = vs
;
51 spin_unlock_irqrestore(&dev_priv
->vm
->vs_lock
, flags
);
54 spin_unlock_irqrestore(&dev_priv
->vm
->vs_lock
, flags
);
55 NV_ERROR(vs
->dev
, "VM: Out of vspaces\n");
60 static void pscnv_vspace_unbind (struct pscnv_vspace
*vs
) {
61 struct drm_nouveau_private
*dev_priv
= vs
->dev
->dev_private
;
63 spin_lock_irqsave(&dev_priv
->vm
->vs_lock
, flags
);
65 BUG_ON(dev_priv
->vm
->fake_vspaces
[-vs
->vid
] != vs
);
66 dev_priv
->vm
->fake_vspaces
[-vs
->vid
] = 0;
68 BUG_ON(dev_priv
->vm
->vspaces
[vs
->vid
] != vs
);
69 dev_priv
->vm
->vspaces
[vs
->vid
] = 0;
72 spin_unlock_irqrestore(&dev_priv
->vm
->vs_lock
, flags
);
76 pscnv_vspace_new (struct drm_device
*dev
, uint64_t size
, uint32_t flags
, int fake
) {
77 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
78 struct pscnv_vspace
*res
= kzalloc(sizeof *res
, GFP_KERNEL
);
80 NV_ERROR(dev
, "VM: Couldn't alloc vspace\n");
87 mutex_init(&res
->lock
);
88 if (pscnv_vspace_bind(res
, fake
)) {
92 NV_INFO(dev
, "VM: Allocating vspace %d\n", res
->vid
);
93 if (dev_priv
->vm
->do_vspace_new(res
)) {
94 pscnv_vspace_unbind(res
);
102 pscnv_vspace_free_unmap(struct pscnv_mm_node
*node
) {
103 struct pscnv_bo
*bo
= node
->tag
;
104 drm_gem_object_unreference_unlocked(bo
->gem
);
108 void pscnv_vspace_ref_free(struct kref
*ref
) {
109 struct pscnv_vspace
*vs
= container_of(ref
, struct pscnv_vspace
, ref
);
110 struct drm_nouveau_private
*dev_priv
= vs
->dev
->dev_private
;
111 NV_INFO(vs
->dev
, "VM: Freeing vspace %d\n", vs
->vid
);
113 pscnv_mm_takedown(vs
->mm
, pscnv_mm_free
);
115 pscnv_mm_takedown(vs
->mm
, pscnv_vspace_free_unmap
);
116 dev_priv
->vm
->do_vspace_free(vs
);
117 pscnv_vspace_unbind(vs
);
122 pscnv_vspace_unmap_node_unlocked(struct pscnv_mm_node
*node
) {
123 struct pscnv_vspace
*vs
= node
->tag2
;
124 struct drm_nouveau_private
*dev_priv
= vs
->dev
->dev_private
;
125 struct pscnv_bo
*bo
= node
->tag
;
126 if (pscnv_vm_debug
>= 1) {
127 NV_INFO(vs
->dev
, "VM: vspace %d: Unmapping range %llx-%llx.\n", vs
->vid
, node
->start
, node
->start
+ node
->size
);
129 dev_priv
->vm
->do_unmap(vs
, node
->start
, node
->size
);
132 drm_gem_object_unreference(bo
->gem
);
139 pscnv_vspace_map(struct pscnv_vspace
*vs
, struct pscnv_bo
*bo
,
140 uint64_t start
, uint64_t end
, int back
,
141 struct pscnv_mm_node
**res
)
143 struct pscnv_mm_node
*node
;
145 struct drm_nouveau_private
*dev_priv
= vs
->dev
->dev_private
;
146 mutex_lock(&vs
->lock
);
147 ret
= dev_priv
->vm
->place_map(vs
, bo
, start
, end
, back
, &node
);
149 mutex_unlock(&vs
->lock
);
154 if (pscnv_vm_debug
>= 1)
155 NV_INFO(vs
->dev
, "VM: vspace %d: Mapping BO %x/%d at %llx-%llx.\n", vs
->vid
, bo
->cookie
, bo
->serial
, node
->start
,
156 node
->start
+ node
->size
);
157 ret
= dev_priv
->vm
->do_map(vs
, bo
, node
->start
);
159 pscnv_vspace_unmap_node_unlocked(node
);
162 mutex_unlock(&vs
->lock
);
167 pscnv_vspace_unmap_node(struct pscnv_mm_node
*node
) {
168 struct pscnv_vspace
*vs
= node
->tag2
;
170 mutex_lock(&vs
->lock
);
171 ret
= pscnv_vspace_unmap_node_unlocked(node
);
172 mutex_unlock(&vs
->lock
);
177 pscnv_vspace_unmap(struct pscnv_vspace
*vs
, uint64_t start
) {
179 mutex_lock(&vs
->lock
);
180 ret
= pscnv_vspace_unmap_node_unlocked(pscnv_mm_find_node(vs
->mm
, start
));
181 mutex_unlock(&vs
->lock
);
187 static struct vm_operations_struct pscnv_vram_ops
= {
188 .open
= drm_gem_vm_open
,
189 .close
= drm_gem_vm_close
,
192 static struct vm_operations_struct pscnv_sysram_ops
= {
193 .open
= drm_gem_vm_open
,
194 .close
= drm_gem_vm_close
,
195 .fault
= pscnv_sysram_vm_fault
,
/*
 * pscnv_mmap - driver mmap entry point; dispatches on the mmap offset.
 *
 * NOTE(review): this block is damaged — the original file's line
 * numbers are fused into the text, statements are split across lines,
 * and several original lines (declarations of bo/ret, null checks,
 * return statements, closing braces, and the function's tail beyond
 * original line 251) were dropped in extraction.  Bytes are preserved
 * as-is; comments below annotate the visible logic only.
 */
198 int pscnv_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
200 struct drm_file
*priv
= filp
->private_data
;
201 struct drm_device
*dev
= priv
->minor
->dev
;
202 struct drm_nouveau_private
*dev_priv
= dev
->dev_private
;
203 struct drm_gem_object
*obj
;
/* Offsets below 2^31 go to the legacy DRM mapping code. */
207 if (vma
->vm_pgoff
* PAGE_SIZE
< (1ull << 31))
208 return drm_mmap(filp
, vma
);
/* Offsets in [2^31, 2^32) are channel control mappings. */
210 if (vma
->vm_pgoff
* PAGE_SIZE
< (1ull << 32))
211 return pscnv_chan_mmap(filp
, vma
);
/*
 * Everything else is a GEM BO: the handle is encoded in the upper
 * 32 bits of the byte offset.
 * NOTE(review): the null check on the lookup result (original lines
 * 214-215) appears to have been dropped in extraction.
 */
213 obj
= drm_gem_object_lookup(dev
, priv
, (vma
->vm_pgoff
* PAGE_SIZE
) >> 32);
216 bo
= obj
->driver_private
;
/* Reject mappings larger than the BO; drop the lookup reference. */
218 if (vma
->vm_end
- vma
->vm_start
> bo
->size
) {
219 drm_gem_object_unreference_unlocked(obj
);
/* Dispatch on the BO's memory type. */
222 switch (bo
->flags
& PSCNV_GEM_MEMTYPE_MASK
) {
223 case PSCNV_GEM_VRAM_SMALL
:
224 case PSCNV_GEM_VRAM_LARGE
:
/* Ensure the BO has a user-mappable BAR1 window; clean up on failure. */
225 if ((ret
= dev_priv
->vm
->map_user(bo
))) {
226 drm_gem_object_unreference_unlocked(obj
);
230 vma
->vm_flags
|= VM_RESERVED
| VM_IO
| VM_PFNMAP
| VM_DONTEXPAND
;
231 vma
->vm_ops
= &pscnv_vram_ops
;
232 vma
->vm_private_data
= obj
;
/* VRAM is mapped write-combined through BAR1. */
233 vma
->vm_page_prot
= pgprot_writecombine(vm_get_page_prot(vma
->vm_flags
));
/* Insert the whole range eagerly; pscnv_vram_ops has no fault handler. */
237 return remap_pfn_range(vma
, vma
->vm_start
,
238 (dev_priv
->fb_phys
+ bo
->map1
->start
) >> PAGE_SHIFT
,
239 vma
->vm_end
- vma
->vm_start
, PAGE_SHARED
);
/* SYSRAM BOs fault pages in lazily via pscnv_sysram_ops.fault. */
240 case PSCNV_GEM_SYSRAM_SNOOP
:
241 case PSCNV_GEM_SYSRAM_NOSNOOP
:
243 vma
->vm_flags
|= VM_RESERVED
;
244 vma
->vm_ops
= &pscnv_sysram_ops
;
245 vma
->vm_private_data
= obj
;
/*
 * NOTE(review): presumably the default/unknown-memtype path — drops
 * the reference; the function's trailing return and close lie beyond
 * the visible text.
 */
251 drm_gem_object_unreference_unlocked(obj
);