/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}
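/*
 * Illustrative sketch (not part of the original file): drm_io_prot() and
 * drm_dma_prot() above are consumed by drm_mmap_locked() below when it fills
 * in vma->vm_page_prot. The hypothetical handler here shows the same idea
 * using the portable pgprot_noncached() helper, assuming a device whose MMIO
 * region starts at the made-up physical address `phys_base'.
 */
#if 0
static int example_mmap_registers(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long phys_base = 0xfd000000UL;	/* hypothetical MMIO base */
	unsigned long size = vma->vm_end - vma->vm_start;

	/* MMIO must not be cached by the CPU */
	vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	return io_remap_pfn_range(vma, vma->vm_start, phys_base >> PAGE_SHIFT,
				  size, vma->vm_page_prot);
}
#endif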
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
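/*
 * Worked example for the arithmetic above (illustrative, all values made
 * up): with map->offset = 0xe0000000 and an AGP chunk bound at
 * agpmem->bound = 0xe0040000, a fault at vma->vm_start + 0x42000 gives
 *
 *	baddr  = 0xe0000000 + 0x42000 = 0xe0042000
 *	offset = (0xe0042000 - 0xe0040000) >> PAGE_SHIFT = 2   (4 KiB pages)
 *
 * so the handler hands back agpmem->memory->pages[2].
 */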
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
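/*
 * Illustrative aside (not part of the original file): the handler above
 * resolves vmalloc'ed pages one at a time because that memory is not
 * physically contiguous. When the whole buffer can instead be mapped eagerly
 * at mmap time, remap_vmalloc_range() performs the per-page lookups itself;
 * the sketch assumes a buffer allocated with vmalloc_user() and stashed in
 * filp->private_data.
 */
#if 0
static int example_mmap_vmalloc_buf(struct file *filp, struct vm_area_struct *vma)
{
	void *buf = filp->private_data;	/* hypothetical vmalloc_user() buffer */

	return remap_vmalloc_range(vma, buf, 0 /* page offset into buf */);
}
#endif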
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
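/*
 * Worked example for the offset translation above (illustrative, values
 * made up): if the scatter-gather region starts at dev->sg->virtual =
 * 0xc0800000 and this map was registered at map->offset = 0xc0900000, then
 * map_offset spans 0x100000 bytes, i.e. 256 pages of 4 KiB, so a fault
 * three pages into the vma resolves to entry->pagelist[256 + 3].
 */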
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}
void drm_vm_close_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
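/*
 * Illustrative userspace sketch (not part of this file): the DMA buffer
 * pool is selected by mapping page offset 0, and the length has to cover
 * every DMA page the driver allocated (length >> PAGE_SHIFT must equal
 * dma->page_count), so a client typically ends up with something like the
 * following, where `fd' is an open DRM device and `total' the pool size it
 * learned from the buffer-map ioctl (error handling omitted):
 *
 *	void *bufs = mmap(NULL, total, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */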
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}
/**
 * mmap DRM memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
#else
		if (remap_pfn_range(vma, vma->vm_start,
				    (map->offset + offset) >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot))
			return -EAGAIN;
#endif

		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap. Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
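/*
 * Illustrative userspace sketch (not part of this file): every non-DMA map
 * is selected by the page-offset token that drm_ht_find_item() looks up in
 * dev->map_hash above, so a client maps register, framebuffer or SHM space
 * by passing that token as the mmap offset (with libdrm, drmMap() wraps the
 * same call). Error handling is omitted and `token' is assumed to be the
 * offset the kernel reported for the map:
 *
 *	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, (off_t)token);
 */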