 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

bool drm_dev_needs_global_mutex(struct drm_device *dev)
	/*
	 * The deprecated ->load callback must be called after the driver is
	 * already registered. This means such drivers rely on the BKL to make
	 * sure an open can't proceed until the driver is actually fully set up.
	 * Similar hilarity holds for the unload callback.
	 */
	if (dev->driver->load || dev->driver->unload)
		return true;
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl(), and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no
 * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
 * that require 32/64 bit compatibility support must provide their own
 * &file_operations.compat_ioctl handler that processes private ioctls and calls
 * drm_compat_ioctl() for core ioctls.
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used by the KMS API to send
 * vblank and page flip completion events, but drivers can also use them for
 * their own needs, e.g. to signal completion of rendering.
 *
 * For the driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the main starting points.
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. For GEM-based drivers this is drm_gem_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
 * following is an example &file_operations structure::
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .mmap = drm_gem_mmap,
 *     };
 * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
 * DMA based drivers there is the DEFINE_DRM_GEM_DMA_FOPS() macro to make this
 * simpler.
 *
 * The driver's &file_operations must be stored in &drm_driver.fops.
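 *
 * As a rough illustration (the name example_gem_fops is purely illustrative),
 * a GEM based driver can replace the open-coded structure above with::
 *
 *     DEFINE_DRM_GEM_FOPS(example_gem_fops);
 *
 * which expands to an equivalent &struct file_operations named
 * example_gem_fops that can then be assigned to &drm_driver.fops.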
 *
 * For driver-private IOCTL handling see the more detailed discussion in
 * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context and
 * can be used by the caller freely. Note that the context keeps a pointer to
 * @minor, so it must be freed before @minor is.
 *
 * Returns:
 * Pointer to newly allocated context, ERR_PTR on failure.
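 *
 * A condensed sketch of the pairing used by callers in this file (error
 * handling trimmed)::
 *
 *     struct drm_file *file = drm_file_alloc(minor);
 *
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *
 *     // ... link the context into the device, hand it to userspace ...
 *
 *     drm_file_free(file);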
struct drm_file *drm_file_alloc(struct drm_minor *minor)
	static atomic64_t ident = ATOMIC64_INIT(0);
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	/* Get a unique identifier for fdinfo: */
	file->client_id = atomic64_inc_return(&ident);
	rcu_assign_pointer(file->pid, get_pid(task_tgid(current)));

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	spin_lock_init(&file->master_lookup_lock);
	mutex_init(&file->event_read_lock);
	mutex_init(&file->client_name_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(rcu_access_pointer(file->pid));
static void drm_events_release(struct drm_file *file_priv)
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This destroys and deallocates a DRM file context previously allocated via
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
void drm_file_free(struct drm_file *file)
	struct drm_device *dev;

	dev = file->minor->dev;

	drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
		     current->comm, task_pid_nr(current),
		     (long)old_encode_dev(file->minor->kdev->devt),
		     atomic_read(&dev->open_count));

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	WARN_ON(!list_empty(&file->event_list));

	put_pid(rcu_access_pointer(file->pid));

	mutex_destroy(&file->client_name_lock);
	kfree(file->client_name);
static void drm_close_helper(struct file *filp)
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(file_priv);
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
static int drm_cpu_valid(void)
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;	/* No cmpxchg before v9 sparc. */
 * Called whenever a process opens a drm node
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it to the doubly linked list in \p dev.
int drm_open_helper(struct file *filp, struct drm_minor *minor)
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;
	if (WARN_ON_ONCE(!(filp->f_op->fop_flags & FOP_UNSIGNED_OFFSET)))
		return -EINVAL;

	drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n",
		     current->comm, task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
	}

	filp->private_data = priv;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

	return 0;
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * Returns:
 * 0 on success or negative errno value on failure.
int drm_open(struct inode *inode, struct file *filp)
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;

	minor = drm_minor_acquire(&drm_minors_xa, iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	atomic_fetch_inc(&dev->open_count);

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;

EXPORT_SYMBOL(drm_open);
static void drm_lastclose(struct drm_device *dev)
	drm_client_dev_restore(dev);

	if (dev_is_pci(dev->dev))
		vga_switcheroo_process_delayed_switch();
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file. If this
 * is the last open file for the DRM device, it also restores the active
 * in-kernel DRM client.
 *
 * Returns:
 * Always succeeds and returns 0.
int drm_release(struct inode *inode, struct file *filp)
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	drm_dbg_core(dev, "open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;

EXPORT_SYMBOL(drm_release);
void drm_file_update_pid(struct drm_file *filp)
	struct drm_device *dev;
	struct pid *pid, *old;

	/*
	 * Master nodes need to keep the original ownership in order for
	 * drm_master_check_perm to keep working correctly. (See comment in
	 * drm_auth.c.)
	 */
	if (filp->was_master)
		return;

	pid = task_tgid(current);

	/*
	 * Quick unlocked check since the model is a single handover followed by
	 * exclusive repeated use.
	 */
	if (pid == rcu_access_pointer(filp->pid))
		return;

	dev = filp->minor->dev;
	mutex_lock(&dev->filelist_mutex);
	old = rcu_replace_pointer(filp->pid, pid, 1);
	mutex_unlock(&dev->filelist_mutex);
 * drm_release_noglobal - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function may be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file prior to taking
 * the drm_global_mutex. If this is the last open file for the DRM device, it
 * then restores the active in-kernel DRM client.
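 *
 * A rough sketch of wiring it up (only the two relevant members are shown,
 * the rest follows the example in the DOC: file operations section)::
 *
 *     static const struct file_operations example_drm_fops = {
 *             .open = drm_open,
 *             .release = drm_release_noglobal,
 *             // remaining members as in the file operations example above
 *     };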
 *
 * Returns:
 * Always succeeds and returns 0.
int drm_release_noglobal(struct inode *inode, struct file *filp)
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;

EXPORT_SYMBOL(drm_release_noglobal);
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method if they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
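 *
 * As a rough userspace-side sketch (not part of the kernel API; drm_fd stands
 * for a previously opened DRM file descriptor), reading and dispatching
 * events could look like::
 *
 *     char buf[4096]; // matches the per-file event space
 *     ssize_t n = read(drm_fd, buf, sizeof(buf));
 *     ssize_t off = 0;
 *
 *     while (n > 0 && off < n) {
 *             struct drm_event *e = (struct drm_event *)(buf + off);
 *
 *             // dispatch on e->type, e.g. DRM_EVENT_FLIP_COMPLETE
 *             off += e->length;
 *     }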
 *
 * Returns:
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible_poll(&file_priv->event_wait,
							   EPOLLIN | EPOLLRDNORM);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;

EXPORT_SYMBOL(drm_read);
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their &file_operations.poll method
 * if they use DRM events for asynchronous signalling to userspace. Since
 * events are used by the KMS API for vblank and page flip completion this means
 * all modern display drivers must use it.
 *
 * See also drm_read().
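 *
 * As a rough userspace-side sketch (not part of the kernel API; drm_fd stands
 * for a previously opened DRM file descriptor)::
 *
 *     struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *             // events can now be consumed with read(), see drm_read()
 *     }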
 *
 * Returns:
 * Mask of POLL flags indicating the current status of the file.
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
	struct drm_file *file_priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;

EXPORT_SYMBOL(drm_poll);
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;

EXPORT_SYMBOL(drm_event_reserve_init_locked);
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
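 *
 * A minimal sketch of the usual lifecycle (the struct and variable names are
 * purely illustrative)::
 *
 *     struct example_event {
 *             struct drm_pending_event base; // must be the first member
 *             struct drm_event_vblank event;
 *     };
 *
 *     struct example_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);
 *
 *     if (!ev)
 *             return -ENOMEM;
 *
 *     ret = drm_event_reserve_init(dev, file_priv, &ev->base, &ev->event.base);
 *     if (ret) {
 *             kfree(ev);
 *             return ret;
 *     }
 *
 *     // later, once the asynchronous work completes:
 *     drm_send_event(dev, &ev->base);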
 *
 * Callers which already hold &drm_device.event_lock should use
 * drm_event_reserve_init_locked() instead.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;

EXPORT_SYMBOL(drm_event_reserve_init);
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space. It is used to cancel an event when the
 * nonblocking operation could not be submitted and needed to be aborted.
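 *
 * Continuing the sketch shown at drm_event_reserve_init(): if submitting the
 * nonblocking work fails after the event has been reserved, the driver would
 * typically call::
 *
 *     drm_event_cancel_free(dev, &ev->base);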
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);

EXPORT_SYMBOL(drm_event_cancel_free);
static void drm_send_event_helper(struct drm_device *dev,
				  struct drm_pending_event *e, ktime_t timestamp)
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		if (timestamp)
			dma_fence_signal_timestamp(e->fence, timestamp);
		else
			dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
				   EPOLLIN | EPOLLRDNORM);
 * drm_send_event_timestamp_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: timestamp to set for the fence event, in the kernel's
 *	CLOCK_MONOTONIC time domain
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
void drm_send_event_timestamp_locked(struct drm_device *dev,
				     struct drm_pending_event *e, ktime_t timestamp)
	drm_send_event_helper(dev, e, timestamp);

EXPORT_SYMBOL(drm_send_event_timestamp_locked);
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
	drm_send_event_helper(dev, e, 0);

EXPORT_SYMBOL(drm_send_event_locked);
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock, see drm_send_event_locked() for callers which already
 * hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_helper(dev, e, 0);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);

EXPORT_SYMBOL(drm_send_event);
static void print_size(struct drm_printer *p, const char *stat,
		       const char *region, u64 sz)
	const char *units[] = {"", " KiB", " MiB"};
	unsigned u;

	for (u = 0; u < ARRAY_SIZE(units) - 1; u++) {
		if (sz == 0 || !IS_ALIGNED(sz, SZ_1K))
			break;
		sz = div_u64(sz, SZ_1K);
	}

	drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
 * drm_print_memory_stats - A helper to print memory stats
 * @p: The printer to print output to
 * @stats: The collected memory stats
 * @supported_status: Bitmask of optional stats which are available
 * @region: The memory region
void drm_print_memory_stats(struct drm_printer *p,
			    const struct drm_memory_stats *stats,
			    enum drm_gem_object_status supported_status,
			    const char *region)
	print_size(p, "total", region, stats->private + stats->shared);
	print_size(p, "shared", region, stats->shared);
	print_size(p, "active", region, stats->active);

	if (supported_status & DRM_GEM_OBJECT_RESIDENT)
		print_size(p, "resident", region, stats->resident);

	if (supported_status & DRM_GEM_OBJECT_PURGEABLE)
		print_size(p, "purgeable", region, stats->purgeable);

EXPORT_SYMBOL(drm_print_memory_stats);
 * drm_show_memory_stats - Helper to collect and show standard fdinfo memory stats
 * @p: the printer to print output to
 * @file: the DRM file
 *
 * Helper to iterate over GEM objects with a handle allocated in the specified
 * file.
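 *
 * A rough sketch of how a driver might call this from its
 * &drm_driver.show_fdinfo callback (the function name is illustrative)::
 *
 *     static void example_show_fdinfo(struct drm_printer *p, struct drm_file *file)
 *     {
 *             drm_show_memory_stats(p, file);
 *     }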
void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
	struct drm_gem_object *obj;
	struct drm_memory_stats status = {};
	enum drm_gem_object_status supported_status = 0;
	int id;

	spin_lock(&file->table_lock);
	idr_for_each_entry (&file->object_idr, obj, id) {
		enum drm_gem_object_status s = 0;
		size_t add_size = (obj->funcs && obj->funcs->rss) ?
			obj->funcs->rss(obj) : obj->size;

		if (obj->funcs && obj->funcs->status) {
			s = obj->funcs->status(obj);
			supported_status = DRM_GEM_OBJECT_RESIDENT |
					DRM_GEM_OBJECT_PURGEABLE;
		}

		if (drm_gem_object_is_shared_for_memory_stats(obj)) {
			status.shared += obj->size;
		} else {
			status.private += obj->size;
		}

		if (s & DRM_GEM_OBJECT_RESIDENT) {
			status.resident += add_size;
		} else {
			/* If already purged or not yet backed by pages, don't
			 * count it as purgeable:
			 */
			s &= ~DRM_GEM_OBJECT_PURGEABLE;
		}

		if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
			status.active += add_size;

			/* If still active, don't count as purgeable: */
			s &= ~DRM_GEM_OBJECT_PURGEABLE;
		}

		if (s & DRM_GEM_OBJECT_PURGEABLE)
			status.purgeable += add_size;
	}
	spin_unlock(&file->table_lock);

	drm_print_memory_stats(p, &status, supported_status, "memory");

EXPORT_SYMBOL(drm_show_memory_stats);
 * drm_show_fdinfo - helper for drm file fops
 * @m: seq_file to print to
 * @f: the device file instance
 *
 * Helper to implement fdinfo, for userspace to query usage stats, etc, of a
 * process using the GPU. See also &drm_driver.show_fdinfo.
 *
 * For text output format description please see Documentation/gpu/drm-usage-stats.rst
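 *
 * As a purely illustrative sketch (all values are made up), the resulting
 * fdinfo text for a PCI device could look like::
 *
 *     drm-driver:          example
 *     drm-client-id:       42
 *     drm-pdev:            0000:01:00.0
 *     drm-total-memory:    8192 KiB
 *     drm-shared-memory:   0
 *     drm-active-memory:   1024 KiB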
void drm_show_fdinfo(struct seq_file *m, struct file *f)
	struct drm_file *file = f->private_data;
	struct drm_device *dev = file->minor->dev;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "drm-driver:\t%s\n", dev->driver->name);
	drm_printf(&p, "drm-client-id:\t%llu\n", file->client_id);

	if (dev_is_pci(dev->dev)) {
		struct pci_dev *pdev = to_pci_dev(dev->dev);

		drm_printf(&p, "drm-pdev:\t%04x:%02x:%02x.%d\n",
			   pci_domain_nr(pdev->bus), pdev->bus->number,
			   PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	}

	mutex_lock(&file->client_name_lock);
	if (file->client_name)
		drm_printf(&p, "drm-client-name:\t%s\n", file->client_name);
	mutex_unlock(&file->client_name_lock);

	if (dev->driver->show_fdinfo)
		dev->driver->show_fdinfo(&p, file);

EXPORT_SYMBOL(drm_show_fdinfo);
 * mock_drm_getfile - Create a new struct file for the drm device
 * @minor: drm minor to wrap (e.g. #drm_device.primary)
 * @flags: file creation mode (O_RDWR etc)
 *
 * This creates a new struct file that wraps a DRM file context around a
 * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
 * invoking userspace. The struct file may be operated on using its f_op
 * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
 * to userspace facing functions as an internal/anonymous client.
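 *
 * A minimal usage sketch for a kernel selftest (error handling mostly elided,
 * the surrounding test code is assumed)::
 *
 *     struct file *file;
 *
 *     file = mock_drm_getfile(dev->primary, O_RDWR);
 *     if (IS_ERR(file))
 *             return PTR_ERR(file);
 *
 *     // ... exercise file->f_op or hand the file to the code under test ...
 *
 *     fput(file);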
 *
 * Returns:
 * Pointer to newly created struct file, ERR_PTR on failure.
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);

	/* Everyone shares a single global address space */
	file->f_mapping = dev->anon_inode->i_mapping;

EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);