/**
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map)
			continue;
		if (map->offset == VM_OFFSET(vma))
			break;
	}
	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = VM_OFFSET(vma) + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif
		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem) goto vm_nopage_error;
		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
			  baddr, __va(agpmem->memory->memory[offset]), offset,
			  page_count(page));

		return page;
	}
vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif /* __OS_HAS_AGP */
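
/*
 * Worked example for the AGP nopage arithmetic above (illustrative
 * numbers, not taken from any real device): with vma->vm_start =
 * 0x40000000, VM_OFFSET(vma) = 0xe0000000 and a fault at address =
 * 0x40003000, offset = 0x3000 and baddr = 0xe0003000.  If an agpmem
 * block bound at 0xe0000000 covers baddr, the index into
 * agpmem->memory->memory[] is (baddr - agpmem->bound) >> PAGE_SHIFT = 3
 * for 4 KiB pages.
 */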
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!map) return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map) found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 &&
	    map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mappings information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map) found_maps++;
		}

		if (!found_maps) {
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	up(&dev->struct_sem);
}
/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma) return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dma->pagelist) return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] +
			     (offset & (~PAGE_MASK))));
	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}
/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *)vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry) return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!entry->pagelist) return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - dev->sg->handle;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type) *type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}
#else	/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int unused)
{
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int unused)
{
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int unused)
{
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int unused)
{
	return drm_do_vm_sg_nopage(vma, address);
}

#endif
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open	= drm_vm_open,
	.close	= drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open	= drm_vm_open,
	.close	= drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open	= drm_vm_open,
	.close	= drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open	= drm_vm_open,
	.close	= drm_vm_close,
};
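
/*
 * Note: drm_mmap() below picks one of these tables from the map type:
 * _DRM_AGP, _DRM_FRAME_BUFFER and _DRM_REGISTERS use drm_vm_ops,
 * _DRM_SHM uses drm_vm_shm_ops, and _DRM_SCATTER_GATHER uses
 * drm_vm_sg_ops, while drm_mmap_dma() installs drm_vm_dma_ops.
 */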
/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		up(&dev->struct_sem);
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
}
/**
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

	vma->vm_ops = &drm_vm_dma_ops;

#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
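
/*
 * Size check example (illustrative numbers): with PAGE_SIZE = 4096 and
 * dma->page_count = 16, a client must mmap exactly 16 << PAGE_SHIFT =
 * 65536 bytes at offset 0; any other length makes drm_mmap_dma() return
 * -EINVAL.
 */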
unsigned long drm_core_get_map_ofs(drm_map_t *map)
{
	return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);
/**
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	unsigned long offset = 0;
	struct list_head *list;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!priv->authenticated) return -EACCES;
	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 */
	if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
	    && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);
	/* A sequential search of a linked list is
	   fine here because: 1) there will only be
	   about 5-10 entries in the list and, 2) a
	   DRI client only has to do this mapping
	   once, so it doesn't have to be optimized
	   for performance, even if the list was a
	   bit longer. */
	list_for_each(list, &dev->maplist->head) {
		unsigned long off;

		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map)
			continue;
		off = dev->driver->get_map_ofs(map);
		if (off == VM_OFFSET(vma)) break;
	}
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly. With more thought
		   we could move this up higher and use
		   `protection_map' instead. */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}
	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
			}
#elif defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
			vma->vm_flags |= VM_IO;	/* not in core dump */
		}
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);
		else
			vma->vm_page_prot =
				pgprot_noncached(vma->vm_page_prot);
#endif
		offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
				       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#else
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap. Change when
		   DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
		vma->vm_flags |= VM_LOCKED;
#else
		vma->vm_flags |= VM_RESERVED;
#endif
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
		vma->vm_flags |= VM_LOCKED;
#else
		vma->vm_flags |= VM_RESERVED;
#endif
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
#else
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
EXPORT_SYMBOL(drm_mmap);
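
/*
 * Illustrative userspace sketch (not part of the driver): how a DRI
 * client typically reaches drm_mmap().  The device node path and the
 * offset/size values are assumptions for the example; the offset must
 * match a map previously registered in drm_device::maplist and the
 * length must equal the map size exactly, or drm_mmap() fails.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	void *map_drm_region(const char *node, off_t map_offset, size_t map_size)
 *	{
 *		int fd = open(node, O_RDWR);		// e.g. "/dev/dri/card0"
 *		if (fd < 0)
 *			return NULL;
 *		// mmap() on the DRM fd ends up in drm_mmap(); the offset
 *		// selects the map, a zero offset selects the DMA buffers.
 *		void *p = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, map_offset);
 *		close(fd);	// the mapping keeps its own file reference
 *		return p == MAP_FAILED ? NULL : p;
 *	}
 */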