/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <linux/mem_encrypt.h>
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};
static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
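/**
 * Compute the page protection for register and framebuffer mappings.
 *
 * \param map DRM map whose type and flags select the caching mode.
 * \param vma virtual memory area.
 * \return page protection with the architecture-appropriate caching
 * attributes applied; graphics memory is never mapped encrypted.
 */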
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

	/* We don't want graphics memory to be mapped encrypted */
	tmp = pgprot_decrypted(tmp);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
			    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif

	return tmp;
}
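/**
 * Compute the page protection for DMA and scatter-gather mappings.
 *
 * \param map_type type of the map being mapped.
 * \param vma virtual memory area.
 * \return the default page protection, made uncached write-combining on
 * non-cache-coherent PowerPC.
 */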
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif

	return tmp;
}
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault descriptor, carrying the virtual memory area and the
 * access address.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if IS_ENABLED(CONFIG_AGP)
static int drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static int drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif
/**
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault descriptor, carrying the virtual memory area and the
 * access address.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault descriptor, carrying the virtual memory area and the
 * access address.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault descriptor, carrying the virtual memory area and the
 * access address.
 * \return 0 on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}
/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
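/**
 * Track a new mapping; the caller must hold drm_device::struct_mutex.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Allocate a drm_vma_entry for \p vma and add it to drm_device::vmalist.
 */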
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}
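/**
 * \c open method for all virtual memory types; takes
 * drm_device::struct_mutex and delegates to drm_vm_open_locked().
 */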
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
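/**
 * Untrack a mapping; the caller must hold drm_device::struct_mutex.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Search \p vma in drm_device::vmalist, unlink its entry, and free it.
 */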
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * registers the mapping with drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
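/**
 * Base offset added to map offsets when remapping I/O ranges: the dense
 * PCI memory base on Alpha, zero on every other architecture.
 */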
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}
/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally registers the
 * mapping via drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end,
			  (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}
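/**
 * drm_legacy_mmap - mmap a legacy DRM map
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Fails with -ENODEV if the device has been unplugged, otherwise takes
 * drm_device::struct_mutex and delegates to drm_mmap_locked(). Legacy
 * drivers typically install this as their \c mmap file operation; a
 * minimal sketch (the fops name is illustrative, not from this file):
 *
 *     static const struct file_operations foo_legacy_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .mmap = drm_legacy_mmap,
 *     };
 */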
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);
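/**
 * Flush stale mapping entries at device teardown.
 *
 * \param dev DRM device.
 *
 * Unlink and free every drm_vma_entry remaining on drm_device::vmalist.
 */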
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}
);