/*
 * Copyright (c) 2009, Intel Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <vm/anon.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <vm/seg_map.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/bitmap.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/gfx_private.h>
#include "drmP.h"
#include "drm.h"
/*
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
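
/*
 * Illustrative sketch (not part of this file's API): from userland, the
 * handle scheme described above behaves roughly like the fd calls it
 * replaces.  The ioctl numbers and argument structs are the standard GEM
 * ones from drm.h; the fd and name values are hypothetical and error
 * handling is elided:
 *
 *	struct drm_gem_open oa;
 *	oa.name = shared_name;				// from a prior flink
 *	(void) ioctl(drm_fd, DRM_IOCTL_GEM_OPEN, &oa);	// "open()"
 *	// ... use oa.handle with driver ioctls ...
 *	struct drm_gem_close ca;
 *	ca.handle = oa.handle;
 *	(void) ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &ca);	// "close()"
 */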
void
idr_list_init(struct idr_list *head)
{
	struct idr_list *entry;

	/* hash buckets to accelerate lookups */
	entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
	    * sizeof (struct idr_list), KM_SLEEP);
	head->next = entry;
	for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
		INIT_LIST_HEAD(&entry[i]);
	}
}
int
idr_list_get_new_above(struct idr_list *head,
    struct drm_gem_object *obj,
    int *handlep)
{
	struct idr_list *entry;
	int key;

	entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
	key = obj->name % DRM_GEM_OBJIDR_HASHNODE;
	list_add(entry, &head->next[key], NULL);
	entry->obj = obj;
	entry->handle = obj->name;
	*handlep = obj->name;
	return (0);
}
struct drm_gem_object *
idr_list_find(struct idr_list *head,
    uint32_t name)
{
	struct idr_list *entry;
	int key;

	key = name % DRM_GEM_OBJIDR_HASHNODE;

	list_for_each(entry, &head->next[key]) {
		if (entry->handle == name)
			return (entry->obj);
	}
	return (NULL);
}
int
idr_list_remove(struct idr_list *head,
    uint32_t name)
{
	struct idr_list *entry, *temp;
	int key;

	key = name % DRM_GEM_OBJIDR_HASHNODE;
	list_for_each_safe(entry, temp, &head->next[key]) {
		if (entry->handle == name) {
			list_del(entry);
			kmem_free(entry, sizeof (*entry));
			return (0);
		}
	}
	DRM_ERROR("Failed to remove the object %d", name);
	return (-1);
}
void
idr_list_free(struct idr_list *head)
{
	struct idr_list *entry, *temp;

	for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
		list_for_each_safe(entry, temp, &head->next[key]) {
			list_del(entry);
			kmem_free(entry, sizeof (*entry));
		}
	}
	kmem_free(head->next,
	    DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
	head->next = NULL;
}
int
idr_list_empty(struct idr_list *head)
{
	int empty;

	for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
		empty = list_empty(&(head)->next[key]);
		if (!empty)
			return (0);
	}
	return (1);
}
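
/*
 * Illustrative sketch (hypothetical usage, 'obj' assumed to be an
 * initialized object): the idr_list above is a fixed array of
 * DRM_GEM_OBJIDR_HASHNODE list heads, with an object's name hashed to a
 * bucket by simple modulo, so a lookup only walks one short chain:
 *
 *	struct idr_list table;
 *	int handle;
 *
 *	idr_list_init(&table);
 *	(void) idr_list_get_new_above(&table, obj, &handle);
 *	ASSERT(idr_list_find(&table, handle) == obj);
 *	(void) idr_list_remove(&table, handle);
 *	idr_list_free(&table);
 */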
static uint32_t shfile_name = 0;
#define	SHFILE_NAME_MAX	0xffffffff

/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
extern int segkp_fromheap;
extern ulong_t *segkp_bitmap;
void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	atomic_inc(&obj->refcount);
}

void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	atomic_sub(1, &obj->refcount);
	if (obj->refcount == 0)
		drm_gem_object_free(obj);
}
void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
	drm_gem_object_reference(obj);
	atomic_inc(&obj->handlecount);
}

void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */
	atomic_sub(1, &obj->handlecount);
	if (obj->handlecount == 0)
		drm_gem_object_handle_free(obj);
	drm_gem_object_unreference(obj);
}
/*
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	mutex_init(&dev->object_name_lock, NULL, MUTEX_DRIVER, NULL);
	idr_list_init(&dev->object_name_idr);

	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	return (0);
}
/*
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	static ddi_dma_attr_t dma_attr = {
		DMA_ATTR_V0,
		0U,			/* dma_attr_addr_lo */
		0xffffffffU,		/* dma_attr_addr_hi */
		0xffffffffU,		/* dma_attr_count_max */
		4096,			/* dma_attr_align */
		0x1fffU,		/* dma_attr_burstsizes */
		1,			/* dma_attr_minxfer */
		0xffffffffU,		/* dma_attr_maxxfer */
		0xffffffffU,		/* dma_attr_seg */
		1,			/* dma_attr_sgllen, variable */
		4,			/* dma_attr_granular */
		0			/* dma_attr_flags */
	};
	static ddi_device_acc_attr_t acc_attr = {
		DDI_DEVICE_ATTR_V0,
		DDI_NEVERSWAP_ACC,
		DDI_MERGING_OK_ACC
	};
	struct drm_gem_object *obj;
	ddi_dma_cookie_t cookie;
	uint_t cookie_cnt;
	drm_local_map_t *map;

	pgcnt_t real_pgcnt, pgcnt = btopr(size);
	uint32_t paddr, cookie_end;
	int i, n;

	obj = kmem_zalloc(sizeof (struct drm_gem_object), KM_NOSLEEP);
	if (obj == NULL)
		return (NULL);

	obj->dev = dev;
	obj->flink = 0;
	obj->size = size;

	if (shfile_name == SHFILE_NAME_MAX) {
		DRM_ERROR("No name space for object");
		goto err1;
	} else {
		obj->name = ++shfile_name;
	}

	dma_attr.dma_attr_sgllen = (int)pgcnt;

	if (ddi_dma_alloc_handle(dev->dip, &dma_attr,
	    DDI_DMA_DONTWAIT, NULL, &obj->dma_hdl)) {
		DRM_ERROR("drm_gem_object_alloc: "
		    "ddi_dma_alloc_handle failed");
		goto err1;
	}
	if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), &acc_attr,
	    IOMEM_DATA_UC_WR_COMBINE, DDI_DMA_DONTWAIT, NULL,
	    &obj->kaddr, &obj->real_size, &obj->acc_hdl)) {
		DRM_ERROR("drm_gem_object_alloc: "
		    "ddi_dma_mem_alloc failed");
		goto err2;
	}
	if (ddi_dma_addr_bind_handle(obj->dma_hdl, NULL,
	    obj->kaddr, obj->real_size, DDI_DMA_RDWR,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_cnt)
	    != DDI_DMA_MAPPED) {
		DRM_ERROR("drm_gem_object_alloc: "
		    "ddi_dma_addr_bind_handle failed");
		goto err3;
	}

	real_pgcnt = btopr(obj->real_size);

	obj->pfnarray = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
	if (obj->pfnarray == NULL) {
		goto err4;
	}
	for (n = 0, i = 1; ; i++) {
		for (paddr = cookie.dmac_address,
		    cookie_end = cookie.dmac_address + cookie.dmac_size;
		    paddr < cookie_end;
		    paddr += PAGESIZE) {
			obj->pfnarray[n++] = btop(paddr);
			if (n >= real_pgcnt)
				goto addmap;
		}
		if (i >= cookie_cnt)
			break;
		ddi_dma_nextcookie(obj->dma_hdl, &cookie);
	}

addmap:
	map = drm_alloc(sizeof (struct drm_local_map), DRM_MEM_MAPS);
	if (map == NULL) {
		goto err5;
	}

	map->handle = obj;
	map->offset = (uintptr_t)map->handle;
	map->offset &= 0xffffffffUL;
	map->dev_addr = map->handle;
	map->size = obj->real_size;
	map->type = _DRM_TTM;
	map->flags = _DRM_WRITE_COMBINING | _DRM_REMOVABLE;
	map->drm_umem_cookie =
	    gfxp_umem_cookie_init(obj->kaddr, obj->real_size);
	if (map->drm_umem_cookie == NULL) {
		goto err6;
	}

	obj->map = map;

	atomic_set(&obj->refcount, 1);
	atomic_set(&obj->handlecount, 1);
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto err7;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);

	return (obj);

err7:
	gfxp_umem_cookie_destroy(map->drm_umem_cookie);
err6:
	drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
err5:
	kmem_free(obj->pfnarray, real_pgcnt * sizeof (pfn_t));
err4:
	(void) ddi_dma_unbind_handle(obj->dma_hdl);
err3:
	ddi_dma_mem_free(&obj->acc_hdl);
err2:
	ddi_dma_free_handle(&obj->dma_hdl);
err1:
	kmem_free(obj, sizeof (struct drm_gem_object));

	return (NULL);
}
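
/*
 * Illustrative sketch (hypothetical driver code, not defined in this
 * file): a driver "create" ioctl would typically pair
 * drm_gem_object_alloc() with drm_gem_handle_create(), then drop the
 * local reference so the handle holds the only one.  'args' and
 * 'fpriv' stand in for that ioctl's context:
 *
 *	struct drm_gem_object *obj;
 *	int handle, ret;
 *
 *	obj = drm_gem_object_alloc(dev, ptob(btopr(args.size)));
 *	if (obj == NULL)
 *		return (ENOMEM);
 *	ret = drm_gem_handle_create(fpriv, obj, &handle);
 *	spin_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	spin_unlock(&dev->struct_mutex);
 *	if (ret == 0)
 *		args.handle = handle;
 */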
/*
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;
	int err;

	/*
	 * This is gross.  The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_list_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		DRM_ERROR("obj %d is not in the list, failed to close",
		    handle);
		return (EINVAL);
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	err = idr_list_remove(&filp->object_idr, handle);
	if (err == -1)
		DRM_ERROR("%s", __func__);

	spin_unlock(&filp->table_lock);

	spin_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	return (0);
}
/*
 * Create a handle for this object.  This adds a handle reference
 * to the object, which includes a regular reference count.  Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
    struct drm_gem_object *obj,
    int *handlep)
{
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */

	/* ensure there is space available to allocate a handle */

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_list_get_new_above(&file_priv->object_idr, obj, handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret != 0) {
		DRM_ERROR("Failed to create handle");
		return (ret);
	}

	drm_gem_object_handle_reference(obj);
	return (0);
}
/* Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp,
    int handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_list_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		DRM_ERROR("object_lookup failed, handle %d", handle);
		return (NULL);
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return (obj);
}
/*
 * Releases the handle to an mm object.
 */
/*ARGSUSED*/
int
drm_gem_close_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_gem_close args;
	int ret;

	if (!(dev->driver->use_gem == 1))
		return (ENODEV);

	DRM_COPYFROM_WITH_RETURN(&args,
	    (void *)data, sizeof (args));

	ret = drm_gem_handle_delete(fpriv, args.handle);

	return (ret);
}
/*
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
/*ARGSUSED*/
int
drm_gem_flink_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_gem_flink args;
	struct drm_gem_object *obj;
	int ret, handle;

	if (!(dev->driver->use_gem == 1))
		return (ENODEV);

	DRM_COPYFROM_WITH_RETURN(&args,
	    (void *)data, sizeof (args));
	obj = drm_gem_object_lookup(fpriv, args.handle);
	if (obj == NULL)
		return (EINVAL);
	handle = args.handle;
	spin_lock(&dev->object_name_lock);
	if (!obj->flink) {
		/* only create a node in object_name_idr, don't update anything */
		ret = idr_list_get_new_above(&dev->object_name_idr,
		    obj, &handle);
		obj->flink = obj->name;
		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	}
	/*
	 * Leave the reference from the lookup around as the
	 * name table now holds one.
	 */
	args.name = obj->name;

	spin_unlock(&dev->object_name_lock);
	ret = DRM_COPY_TO_USER((void *)data, &args, sizeof (args));
	if (ret != 0)
		DRM_ERROR(" gem flink error! %d", ret);

	spin_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	return (ret);
}
/*
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
/*ARGSUSED*/
int
drm_gem_open_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_gem_open args;
	struct drm_gem_object *obj;
	int ret;
	int handle;

	if (!(dev->driver->use_gem == 1)) {
		DRM_ERROR("GEM is not supported");
		return (ENODEV);
	}
	DRM_COPYFROM_WITH_RETURN(&args,
	    (void *)data, sizeof (args));

	spin_lock(&dev->object_name_lock);

	obj = idr_list_find(&dev->object_name_idr, args.name);

	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj) {
		DRM_ERROR("Can't find the obj %d", args.name);
		return (ENOENT);
	}

	ret = drm_gem_handle_create(fpriv, obj, &handle);
	spin_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	spin_unlock(&dev->struct_mutex);

	args.handle = args.name;
	args.size = obj->size;

	ret = DRM_COPY_TO_USER((void *)data, &args, sizeof (args));
	if (ret != 0)
		DRM_ERROR(" gem open error! %d", ret);

	return (ret);
}
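
/*
 * Illustrative sketch (userland perspective; the IPC helpers are
 * hypothetical): flink and open together implement buffer sharing
 * between two clients of the same device.  Process A publishes a
 * global name, process B redeems it for its own handle:
 *
 *	// Process A
 *	struct drm_gem_flink fl;
 *	fl.handle = my_handle;
 *	(void) ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &fl);
 *	send_to_b(fl.name);		// hypothetical IPC
 *
 *	// Process B
 *	struct drm_gem_open op;
 *	op.name = recv_from_a();	// hypothetical IPC
 *	(void) ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
 *	// op.handle and op.size now describe the shared object
 */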
/*
 * Called at device open time, sets up the structure for handling
 * refcounting of mm objects.
 */
void
drm_gem_open(struct drm_file *file_private)
{
	idr_list_init(&file_private->object_idr);
	mutex_init(&file_private->table_lock, NULL, MUTEX_DRIVER, NULL);
}
/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static void
drm_gem_object_release_handle(struct drm_gem_object *obj)
{
	drm_gem_object_handle_unreference(obj);
}
/*
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	struct idr_list *entry;

	spin_lock(&dev->struct_mutex);

	idr_list_for_each(entry, &file_private->object_idr)
		drm_gem_object_release_handle(entry->obj);

	idr_list_free(&file_private->object_idr);
	spin_unlock(&dev->struct_mutex);
}
/*
 * Called after the last reference to the object has been lost.
 */
void
drm_gem_object_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_local_map *map = obj->map;

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	gfxp_umem_cookie_destroy(map->drm_umem_cookie);
	drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);

	kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));

	(void) ddi_dma_unbind_handle(obj->dma_hdl);
	ddi_dma_mem_free(&obj->acc_hdl);
	ddi_dma_free_handle(&obj->dma_hdl);

	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kmem_free(obj, sizeof (struct drm_gem_object));
}
/*
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object.  Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
void
drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	int err;
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->flink) {
		err = idr_list_remove(&dev->object_name_idr, obj->name);
		if (err == -1)
			DRM_ERROR("%s", __func__);
		obj->flink = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 */
		drm_gem_object_unreference(obj);
	} else {
		spin_unlock(&dev->object_name_lock);
	}
}