/**
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm_sarea.h"
#include <linux/poll.h>
static int drm_open_helper(struct inode *inode, struct file *filp,
			   drm_device_t * dev);

static int drm_setup(drm_device_t * dev)
{
	drm_local_map_t *map;
	int i;
	int ret;
	if (dev->driver->firstopen) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	dev->magicfree.next = NULL;
	/* prebuild the SAREA */
	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;
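	/*
	 * The SAREA created here is the per-device shared memory area that is
	 * mapped by both the kernel and DRI clients; _DRM_CONTAINS_LOCK marks
	 * it as the map holding the hardware lock used by drm_release() below.
	 */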
	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	atomic_set(&dev->buf_alloc, 0);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
		i = drm_dma_setup(dev);
		if (i < 0)
			return i;
	}

	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);
	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
	INIT_LIST_HEAD(&dev->magicfree);

	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
	if (dev->ctxlist == NULL)
		return -ENOMEM;
	memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
	INIT_LIST_HEAD(&dev->ctxlist->head);
	dev->sigdata.lock = dev->lock.hw_lock = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_reserved = 0;
	dev->queuelist = NULL;
	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->last_context = 0;
	dev->last_checked = 0;
	init_waitqueue_head(&dev->context_wait);

	dev->buf_async = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);
	/*
	 * The kernel's context could be created here, but is now created
	 * in drm_dma_enqueue.  This is more resource-efficient for
	 * hardware that does not do DMA, but may mean that
	 * drm_select_queue fails between the time the interrupt is
	 * initialized and the time the queues are initialized.
	 */

	return 0;
}
/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches the DRM device with the same minor number, calls open_helper(), and
 * increments the device open count.  If the open count was previously at zero,
 * i.e., it's the first time the device is open, then calls setup().
 */
int drm_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev = NULL;
	int minor = iminor(inode);
	int retcode = 0;

	if (!((minor >= 0) && (minor < drm_cards_limit)))
		return -ENODEV;

	if (!drm_heads[minor])
		return -ENODEV;

	if (!(dev = drm_heads[minor]->dev))
		return -ENODEV;

	retcode = drm_open_helper(inode, filp, dev);
	if (!retcode) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return drm_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	return retcode;
}
EXPORT_SYMBOL(drm_open);
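/*
 * Usage sketch (userspace side): a DRI client reaches drm_open() simply by
 * opening the device node for its card, typically something like
 *
 *	fd = open("/dev/dri/card0", O_RDWR);
 *
 * where the exact path depends on the udev/devfs configuration.  The first
 * successful open takes open_count from 0 to 1 and runs drm_setup(); later
 * opens only add another drm_file to the device's list.
 */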
/**
 * File \c open operation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 *
 * Puts the dev->fops corresponding to the device minor number into
 * \p filp, calls the \c open method, and restores the file operations on
 * failure.
 */
int drm_stub_open(struct inode *inode, struct file *filp)
{
	drm_device_t *dev = NULL;
	int minor = iminor(inode);
	int err = -ENODEV;
	const struct file_operations *old_fops;

	if (!((minor >= 0) && (minor < drm_cards_limit)))
		return -ENODEV;

	if (!drm_heads[minor])
		return -ENODEV;

	if (!(dev = drm_heads[minor]->dev))
		return -ENODEV;

	old_fops = filp->f_op;
	filp->f_op = fops_get(&dev->driver->fops);
	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
		fops_put(filp->f_op);
		filp->f_op = fops_get(old_fops);
	}
	fops_put(old_fops);

	return err;
}
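/*
 * Note on the fops swap above: the DRM character device is registered with a
 * minimal stub file_operations whose .open is drm_stub_open().  It installs
 * the driver's own fops into filp so that every later operation on this fd
 * goes straight to the driver, and only falls back to the old fops if the
 * driver's open method fails.
 */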
/**
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__i386__)
	if (boot_cpu_data.x86 == 3)
		return 0;	/* No cmpxchg on a 386 */
#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}
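/*
 * Background: the hardware lock lives in the SAREA, shared with userspace,
 * and drm_lock_take() acquires it with a compare-and-exchange on that shared
 * word, roughly
 *
 *	prev = cmpxchg(lock, old, context | _DRM_LOCK_HELD);
 *
 * which is why CPUs lacking a usable cmpxchg are rejected here.
 */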
/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it into the double linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
			   drm_device_t * dev)
{
	int minor = iminor(inode);
	drm_file_t *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);

	priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
	if (!priv)
		return -ENOMEM;

	memset(priv, 0, sizeof(*priv));
	filp->private_data = priv;
	priv->uid = current->euid;
	priv->pid = current->pid;
	priv->head = drm_heads[minor];
	priv->ioctl_count = 0;
	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
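	/*
	 * Other clients start out unauthenticated; they become authenticated
	 * through the magic handshake (DRM_IOCTL_GET_MAGIC on the client,
	 * DRM_IOCTL_AUTH_MAGIC on the master).
	 */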
	priv->lock_count = 0;

	if (dev->driver->open) {
		ret = dev->driver->open(dev, priv);
		if (ret < 0)
			goto out_free;
	}

	mutex_lock(&dev->struct_mutex);
	if (!dev->file_last) {
		priv->next = NULL;
		priv->prev = NULL;
		dev->file_first = priv;
		dev->file_last = priv;
		/* first opener automatically becomes master */
		priv->master = 1;
	} else {
		priv->next = NULL;
		priv->prev = dev->file_last;
		dev->file_last->next = priv;
		dev->file_last = priv;
	}
	mutex_unlock(&dev->struct_mutex);
#ifdef __alpha__
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = pci_bus_b(pci_root_buses.next);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
      out_free:
	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
	filp->private_data = NULL;
	return ret;
}
int drm_fasync(int fd, struct file *filp, int on)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int retcode;

	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
		  (long)old_encode_dev(priv->head->device));
	retcode = fasync_helper(fd, filp, on, &dev->buf_async);
	if (retcode < 0)
		return retcode;
	return 0;
}

EXPORT_SYMBOL(drm_fasync);
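/*
 * Usage sketch (userspace side): SIGIO delivery on a DRM fd is enabled with
 * the usual fcntl() sequence, e.g.
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * which makes the VFS call drm_fasync() above to record the fd in
 * dev->buf_async via fasync_helper().
 */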
/**
 * Release file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers.  Unlink the file private
 * data from its list and free it.  Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */
int drm_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int retcode = 0;

	dev = priv->head->dev;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	if (dev->driver->preclose)
		dev->driver->preclose(dev, filp);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  current->pid, (long)old_encode_dev(priv->head->device),
		  dev->open_count);
	if (priv->lock_count && dev->lock.hw_lock &&
	    _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
	    dev->lock.filp == filp) {
		DRM_DEBUG("File %p released, freeing lock for context %d\n",
			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		if (dev->driver->reclaim_buffers_locked)
			dev->driver->reclaim_buffers_locked(dev, filp);

		drm_lock_free(dev, &dev->lock.hw_lock->lock,
			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

		/* FIXME: may require heavy-handed reset of
		   hardware at this point, possibly
		   processed via a callback to the X
		   server. */
	} else if (dev->driver->reclaim_buffers_locked && priv->lock_count
		   && dev->lock.hw_lock) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE(entry, current);

		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			__set_current_state(TASK_INTERRUPTIBLE);
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  DRM_KERNEL_CONTEXT)) {
				dev->lock.filp = filp;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			schedule();
			if (signal_pending(current)) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&dev->lock.lock_queue, &entry);
		if (!retcode) {
			dev->driver->reclaim_buffers_locked(dev, filp);
			drm_lock_free(dev, &dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT);
		}
	}
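	/*
	 * The branch above is the usual wait-queue pattern: sleep on
	 * lock_queue until the hardware lock can be taken for
	 * DRM_KERNEL_CONTEXT, reclaim the client's buffers while holding it,
	 * then release it again.  A pending signal or device teardown aborts
	 * the wait with an error in retcode.
	 */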
	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked) {
		dev->driver->reclaim_buffers(dev, filp);
	}

	drm_fasync(-1, filp, 0);
	mutex_lock(&dev->ctxlist_mutex);
	if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
		drm_ctx_list_t *pos, *n;

		list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
			if (pos->tag == priv &&
			    pos->handle != DRM_KERNEL_CONTEXT) {
				if (dev->driver->context_dtor)
					dev->driver->context_dtor(dev,
								  pos->handle);

				drm_ctxbitmap_free(dev, pos->handle);

				list_del(&pos->head);
				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
			}
		}
	}
	mutex_unlock(&dev->ctxlist_mutex);
	mutex_lock(&dev->struct_mutex);
	if (priv->remove_auth_on_close == 1) {
		drm_file_t *temp = dev->file_first;
		while (temp) {
			temp->authenticated = 0;
			temp = temp->next;
		}
	}
	if (priv->prev) {
		priv->prev->next = priv->next;
	} else {
		dev->file_first = priv->next;
	}
	if (priv->next) {
		priv->next->prev = priv->prev;
	} else {
		dev->file_last = priv->prev;
	}
	mutex_unlock(&dev->struct_mutex);
	if (dev->driver->postclose)
		dev->driver->postclose(dev, priv);
	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	spin_lock(&dev->count_lock);
	if (!--dev->open_count) {
		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
			DRM_ERROR("Device busy: %d %d\n",
				  atomic_read(&dev->ioctl_count), dev->blocked);
			spin_unlock(&dev->count_lock);
			return -EBUSY;
		}
		spin_unlock(&dev->count_lock);
		return drm_lastclose(dev);
	}
	spin_unlock(&dev->count_lock);

	return retcode;
}

EXPORT_SYMBOL(drm_release);
/** No-op. */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	return 0;
}

EXPORT_SYMBOL(drm_poll);