/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 *  2001-11-16  Torsten Duwe <duwe@caldera.de>
 *              added context constructor/destructor hooks,
 *              needed by SiS driver's memory management.
 */
/* ================================================================
 * Old-style context support -- only used by gamma.
 */

/* The drm_read and drm_write_string code (especially that which manages
   the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
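
/* Ring-buffer layout used by the read/write_string pair: dev->buf is a
 * fixed ring of DRM_BSZ bytes ending at dev->buf_end, with dev->buf_rp as
 * the read cursor and dev->buf_wp as the write cursor.  The ring is empty
 * when the cursors are equal, and the writer always leaves one byte unused
 * so that "full" and "empty" stay distinguishable.  Hence
 *      (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ
 * is the space still free for the writer, and DRM_BSZ minus that is the
 * number of bytes available to the reader.
 */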

ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count,
                        loff_t *off)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        int           left;
        int           avail;
        int           send;
        int           cur;

        DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);

        while (dev->buf_rp == dev->buf_wp) {
                DRM_DEBUG("  sleeping\n");
                if (filp->f_flags & O_NONBLOCK) {
                        return -EAGAIN;
                }
                interruptible_sleep_on(&dev->buf_readers);
                if (signal_pending(current)) {
                        DRM_DEBUG("  interrupted\n");
                        return -ERESTARTSYS;
                }
                DRM_DEBUG("  awake\n");
        }

        left  = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
        avail = DRM_BSZ - left;
        send  = DRM_MIN(avail, count);

        while (send) {
                if (dev->buf_wp > dev->buf_rp) {
                        cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
                } else {
                        cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
                }
                if (copy_to_user(buf, dev->buf_rp, cur))
                        return -EFAULT;
                dev->buf_rp += cur;
                if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
                send -= cur;
        }

        wake_up_interruptible(&dev->buf_writers);
        return DRM_MIN(avail, count);
}

/* In an incredibly convoluted setup, the kernel module actually calls
 * back into the X server to perform context switches on behalf of the
 * 3D clients.
 */

int DRM(write_string)(drm_device_t *dev, const char *s)
{
        int left  = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
        int send  = strlen(s);
        int count;

        DRM_DEBUG("%d left, %d to send (%p, %p)\n",
                  left, send, dev->buf_rp, dev->buf_wp);

        if (left == 1 || dev->buf_wp != dev->buf_rp) {
                DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
                          left,
                          dev->buf_wp,
                          dev->buf_rp);
        }

        while (send) {
                if (dev->buf_wp >= dev->buf_rp) {
                        count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
                        if (count == left) --count; /* Leave a hole */
                } else {
                        count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
                }
                strncpy(dev->buf_wp, s, count);
                dev->buf_wp += count;
                if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
                send -= count;
        }

        if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);

        DRM_DEBUG("waking\n");
        wake_up_interruptible(&dev->buf_readers);
        return 0;
}
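
/* The consumer of these strings (the X server, per the comment above) uses
 * gamma_fops_read above and gamma_fops_poll below; poll reports readability
 * as soon as the ring buffer is non-empty.
 */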

unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;

        poll_wait(filp, &dev->buf_readers, wait);
        if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
        return 0;
}
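
/* Context-switch handshake: DRM(context_switch) queues the string
 * "C <old> <new>" for the X server, which is expected to perform the
 * hardware context switch and then invoke the new-context ioctl, landing
 * in DRM(newctx) and DRM(context_switch_complete) below to finish the
 * switch.
 */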

int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
        char         buf[64];
        drm_queue_t *q;

        if (test_and_set_bit(0, &dev->context_flag)) {
                DRM_ERROR("Reentering -- FIXME\n");
                return -EBUSY;
        }

        DRM_DEBUG("Context switch from %d to %d\n", old, new);

        if (new >= dev->queue_count) {
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        if (new == dev->last_context) {
                clear_bit(0, &dev->context_flag);
                return 0;
        }

        q = dev->queuelist[new];
        atomic_inc(&q->use_count);
        if (atomic_read(&q->use_count) == 1) {
                atomic_dec(&q->use_count);
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        /* This causes the X server to wake up & do a bunch of hardware
           interaction to actually effect the context switch. */
        sprintf(buf, "C %d %d\n", old, new);
        DRM(write_string)(dev, buf);

        atomic_dec(&q->use_count);

        return 0;
}

int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
        drm_device_dma_t *dma = dev->dma;

        dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
        dev->last_switch  = jiffies;

        if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                DRM_ERROR("Lock isn't held after context switch\n");
        }

        if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
                if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
                                   DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("Cannot free lock\n");
                }
        }

        clear_bit(0, &dev->context_flag);
        wake_up_interruptible(&dev->context_wait);

        return 0;
}
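
/* Queue lifecycle convention used below: a queue with use_count == 0 (and
 * finalization == 0) is free.  Callers probe a queue with
 * atomic_inc(&q->use_count) and treat a resulting value of 1 as "was not
 * in use", dropping the reference again before returning an error.
 */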

static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
        if (atomic_read(&q->use_count) != 1
            || atomic_read(&q->finalization)
            || atomic_read(&q->block_count)) {
                DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
                          atomic_read(&q->use_count),
                          atomic_read(&q->finalization),
                          atomic_read(&q->block_count));
        }

        atomic_set(&q->finalization,  0);
        atomic_set(&q->block_count,   0);
        atomic_set(&q->block_read,    0);
        atomic_set(&q->block_write,   0);
        atomic_set(&q->total_queued,  0);
        atomic_set(&q->total_flushed, 0);
        atomic_set(&q->total_locks,   0);

        init_waitqueue_head(&q->write_queue);
        init_waitqueue_head(&q->read_queue);
        init_waitqueue_head(&q->flush_queue);

        q->flags = ctx->flags;

        DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);

        return 0;
}

/*
PRE:  1) dev->queuelist[0..dev->queue_count] is allocated and will not
         disappear (so all deallocation must be done after IOCTLs are off)
      2) dev->queue_count < dev->queue_slots
      3) dev->queuelist[i].use_count == 0 and
         dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
      2) dev->queue_count < dev->queue_slots */

static int DRM(alloc_queue)(drm_device_t *dev)
{
        drm_queue_t *queue;
        int          oldslots;
        int          newslots;
        int          i;

                                /* Check for a free queue */
        for (i = 0; i < dev->queue_count; i++) {
                atomic_inc(&dev->queuelist[i]->use_count);
                if (atomic_read(&dev->queuelist[i]->use_count) == 1
                    && !atomic_read(&dev->queuelist[i]->finalization)) {
                        DRM_DEBUG("%d (free)\n", i);
                        return i;
                }
                atomic_dec(&dev->queuelist[i]->use_count);
        }

                                /* Allocate a new queue */
        down(&dev->struct_sem);

        queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
        memset(queue, 0, sizeof(*queue));
        atomic_set(&queue->use_count, 1);

        ++dev->queue_count;
        if (dev->queue_count >= dev->queue_slots) {
                oldslots = dev->queue_slots * sizeof(*dev->queuelist);
                if (!dev->queue_slots) dev->queue_slots = 1;
                dev->queue_slots *= 2;
                newslots = dev->queue_slots * sizeof(*dev->queuelist);

                dev->queuelist = DRM(realloc)(dev->queuelist,
                                              oldslots,
                                              newslots,
                                              DRM_MEM_QUEUES);
                if (!dev->queuelist) {
                        up(&dev->struct_sem);
                        DRM_DEBUG("out of memory\n");
                        return -ENOMEM;
                }
        }
        dev->queuelist[dev->queue_count-1] = queue;

        up(&dev->struct_sem);
        DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
        return dev->queue_count - 1;
}
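
/* Note on growth: queue_slots doubles (starting from 1) whenever
 * queue_count reaches it, and the queue list is resized with DRM(realloc)
 * while dev->struct_sem is held.
 */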

int DRM(resctx)(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_ctx_res_t __user *argp = (void __user *)arg;
        drm_ctx_res_t res;
        drm_ctx_t     ctx;
        int           i;

        DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
        if (copy_from_user(&res, argp, sizeof(res)))
                return -EFAULT;
        if (res.count >= DRM_RESERVED_CONTEXTS) {
                memset(&ctx, 0, sizeof(ctx));
                for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
                        ctx.handle = i;
                        if (copy_to_user(&res.contexts[i],
                                         &ctx, sizeof(ctx)))
                                return -EFAULT;
                }
        }
        res.count = DRM_RESERVED_CONTEXTS;
        if (copy_to_user(argp, &res, sizeof(res)))
                return -EFAULT;
        return 0;
}
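
/* Context handle 0 is DRM_KERNEL_CONTEXT and is reserved for the kernel;
 * if DRM(alloc_queue) hands it out, DRM(addctx) initializes that queue for
 * the kernel and immediately allocates another handle for the caller.
 */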

int DRM(addctx)(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_ctx_t     ctx;
        drm_ctx_t __user *argp = (void __user *)arg;

        if (copy_from_user(&ctx, argp, sizeof(ctx)))
                return -EFAULT;
        if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
                                /* Init kernel's context and get a new one. */
                DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
                ctx.handle = DRM(alloc_queue)(dev);
        }
        DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
        DRM_DEBUG("%d\n", ctx.handle);
        if (copy_to_user(argp, &ctx, sizeof(ctx)))
                return -EFAULT;
        return 0;
}

int DRM(modctx)(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_ctx_t     ctx;
        drm_queue_t  *q;

        if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
                return -EFAULT;

        DRM_DEBUG("%d\n", ctx.handle);

        if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
        q = dev->queuelist[ctx.handle];

        atomic_inc(&q->use_count);
        if (atomic_read(&q->use_count) == 1) {
                                /* No longer in use */
                atomic_dec(&q->use_count);
                return -EINVAL;
        }

        if (DRM_BUFCOUNT(&q->waitlist)) {
                atomic_dec(&q->use_count);
                return -EBUSY;
        }

        q->flags = ctx.flags;

        atomic_dec(&q->use_count);
        return 0;
}

int DRM(getctx)(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_ctx_t __user *argp = (void __user *)arg;
        drm_ctx_t     ctx;
        drm_queue_t  *q;

        if (copy_from_user(&ctx, argp, sizeof(ctx)))
                return -EFAULT;

        DRM_DEBUG("%d\n", ctx.handle);

        if (ctx.handle >= dev->queue_count) return -EINVAL;
        q = dev->queuelist[ctx.handle];

        atomic_inc(&q->use_count);
        if (atomic_read(&q->use_count) == 1) {
                                /* No longer in use */
                atomic_dec(&q->use_count);
                return -EINVAL;
        }

        ctx.flags = q->flags;
        atomic_dec(&q->use_count);

        if (copy_to_user(argp, &ctx, sizeof(ctx)))
                return -EFAULT;

        return 0;
}

int DRM(switchctx)(struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_ctx_t     ctx;

        if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
                return -EFAULT;
        DRM_DEBUG("%d\n", ctx.handle);
        return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}

int DRM(newctx)(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_ctx_t     ctx;

        if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
                return -EFAULT;
        DRM_DEBUG("%d\n", ctx.handle);
        DRM(context_switch_complete)(dev, ctx.handle);

        return 0;
}

int DRM(rmctx)(struct inode *inode, struct file *filp,
               unsigned int cmd, unsigned long arg)
{
        drm_file_t   *priv = filp->private_data;
        drm_device_t *dev  = priv->dev;
        drm_ctx_t     ctx;
        drm_queue_t  *q;
        drm_buf_t    *buf;

        if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
                return -EFAULT;
        DRM_DEBUG("%d\n", ctx.handle);

        if (ctx.handle >= dev->queue_count) return -EINVAL;
        q = dev->queuelist[ctx.handle];

        atomic_inc(&q->use_count);
        if (atomic_read(&q->use_count) == 1) {
                                /* No longer in use */
                atomic_dec(&q->use_count);
                return -EINVAL;
        }

        atomic_inc(&q->finalization); /* Mark queue in finalization state */
        atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
                                         finalization) */

        while (test_and_set_bit(0, &dev->interrupt_flag)) {
                schedule();
                if (signal_pending(current)) {
                        clear_bit(0, &dev->interrupt_flag);
                        return -EINTR;
                }
        }
                                /* Remove queued buffers */
        while ((buf = DRM(waitlist_get)(&q->waitlist))) {
                DRM(free_buffer)(dev, buf);
        }
        clear_bit(0, &dev->interrupt_flag);

                                /* Wakeup blocked processes */
        wake_up_interruptible(&q->read_queue);
        wake_up_interruptible(&q->write_queue);
        wake_up_interruptible(&q->flush_queue);

                                /* Finalization over.  Queue is made
                                   available when both use_count and
                                   finalization become 0, which won't
                                   happen until all the waiting processes
                                   stop waiting. */
        atomic_dec(&q->finalization);
        return 0;
}