/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>

#include <asm/pgtable.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(__mips__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
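
/*
 * A minimal sketch of how drm_io_prot() is consumed (this mirrors the
 * _DRM_REGISTERS/_DRM_FRAME_BUFFER path in drm_mmap_locked() below):
 *
 *	vma->vm_page_prot = drm_io_prot(map, vma);
 *	io_remap_pfn_range(vma, vma->vm_start,
 *			   (map->offset + offset) >> PAGE_SHIFT,
 *			   vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */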
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}
/*
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif
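
/*
 * Worked example for the AGP fault path above, with illustrative
 * numbers only: if map->offset = 0xf0000000 and the fault address is
 * vma->vm_start + 0x3000, then baddr = 0xf0003000; the drm_agp_mem
 * region with bound <= baddr < bound + pages * PAGE_SIZE supplies the
 * page at index (baddr - agpmem->bound) >> PAGE_SHIFT.
 */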
/*
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
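
/*
 * Note: a _DRM_SHM handle is a vmalloc() area (drm_vm_shm_close()
 * frees it with vfree()), which is why vmalloc_to_page() is the right
 * virtual-to-page translation above.
 */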
/*
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/*
 * \c fault method for DMA virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
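
/*
 * Note: dma->pagelist[] holds kernel virtual addresses of the DMA
 * pages, hence the virt_to_page() translation above; the mapping is
 * linear, so the page number is simply the fault offset in pages.
 */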
/*
 * \c fault method for scatter-gather virtual memory.
 *
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static vm_fault_t drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
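
/*
 * Caller must hold dev->struct_mutex; drm_vm_open() below wraps this
 * in the lock so it can serve as the vm_operations_struct open hook.
 */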
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}
/*
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}
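
/*
 * On Alpha, CPU-visible MMIO lives at the hose's dense memory base, so
 * map offsets are rebased by the value above before io_remap_pfn_range();
 * on all other architectures the register offset is zero.
 */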
/**
 * mmap DMA memory.
 *
 * \param file_priv DRM file private.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
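/*
 * Hypothetical userspace view, for illustration only (the offset token
 * comes from the legacy map-setup ioctl, e.g. drmAddMap(); the names
 * here are not defined in this file):
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, drm_fd, map_offset_token);
 *
 * The kernel then sees map_offset_token >> PAGE_SHIFT in vma->vm_pgoff
 * and uses it as the dev->map_hash lookup key below.
 */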
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
#endif
		/* fall through - to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through - to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
#endif