// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_bo_api.h>

#include "vmwgfx_drv.h"

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
        (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
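
/*
 * Worked example of the sizing above (a sketch: the exact numbers depend on
 * sizeof(SVGACBHeader), which is defined elsewhere): if the header is at
 * most 64 bytes, ALIGN() rounds it up to VMW_CMDBUF_INLINE_ALIGN = 64, so
 * VMW_CMDBUF_INLINE_SIZE becomes 1024 - 64 = 960 bytes. Header plus payload
 * then occupy exactly 1 KiB, and four such allocations tile a 4 KiB page.
 */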

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block submission of new command buffers to
 * hardware for this context.
 */
struct vmw_cmdbuf_context {
        struct list_head submitted;
        struct list_head hw_submitted;
        struct list_head preempted;
        unsigned num_hw_submitted;
        bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
        struct mutex cur_mutex;
        struct mutex space_mutex;
        struct mutex error_mutex;
        struct work_struct work;
        struct vmw_private *dev_priv;
        struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
        struct list_head error;
        struct drm_mm mm;
        struct ttm_buffer_object *cmd_space;
        struct ttm_bo_kmap_obj map_obj;
        u8 *map;
        struct vmw_cmdbuf_header *cur;
        size_t cur_pos;
        size_t default_size;
        unsigned max_hw_submitted;
        spinlock_t lock;
        struct dma_pool *headers;
        struct dma_pool *dheaders;
        wait_queue_head_t alloc_queue;
        wait_queue_head_t idle_queue;
        bool irq_on;
        bool using_mob;
        bool has_pool;
        dma_addr_t handle;
        size_t size;
        u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
        struct vmw_cmdbuf_man *man;
        SVGACBHeader *cb_header;
        SVGACBContext cb_context;
        struct list_head list;
        struct drm_mm_node node;
        dma_addr_t handle;
        u8 *cmd;
        size_t size;
        size_t reserved;
        bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
        SVGACBHeader cb_header;
        u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
        size_t page_size;
        struct drm_mm_node *node;
        bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
        for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
             ++(_i), ++(_ctx))
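
/*
 * Usage sketch for the iterator above, as it is used throughout this file
 * (e.g. in vmw_cmdbuf_man_process()):
 *
 *      struct vmw_cmdbuf_context *ctx;
 *      int i;
 *
 *      for_each_cmdbuf_ctx(man, i, ctx)
 *              vmw_cmdbuf_ctx_init(ctx);
 */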

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
        if (interruptible) {
                if (mutex_lock_interruptible(&man->cur_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->cur_mutex);
        }

        return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
        mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_dheader *dheader;

        if (WARN_ON_ONCE(!header->inline_space))
                return;

        dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
                               cb_header);
        dma_pool_free(header->man->dheaders, dheader, header->handle);
        kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        lockdep_assert_held_once(&man->lock);

        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }

        drm_mm_remove_node(&header->node);
        wake_up_all(&man->alloc_queue);
        if (header->cb_header)
                dma_pool_free(man->headers, header->cb_header,
                              header->handle);
        kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;

        /* Avoid locking if inline_space */
        if (header->inline_space) {
                vmw_cmdbuf_header_inline_free(header);
                return;
        }
        spin_lock(&man->lock);
        __vmw_cmdbuf_header_free(header);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
        struct vmw_cmdbuf_man *man = header->man;
        u32 val;

        val = upper_32_bits(header->handle);
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

        val = lower_32_bits(header->handle);
        val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
        vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

        return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
        INIT_LIST_HEAD(&ctx->hw_submitted);
        INIT_LIST_HEAD(&ctx->submitted);
        INIT_LIST_HEAD(&ctx->preempted);
        ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
                                  struct vmw_cmdbuf_context *ctx)
{
        while (ctx->num_hw_submitted < man->max_hw_submitted &&
               !list_empty(&ctx->submitted) &&
               !ctx->block_submission) {
                struct vmw_cmdbuf_header *entry;
                SVGACBStatus status;

                entry = list_first_entry(&ctx->submitted,
                                         struct vmw_cmdbuf_header,
                                         list);

                status = vmw_cmdbuf_header_submit(entry);

                /* This should never happen */
                if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        break;
                }

                list_del(&entry->list);
                list_add_tail(&entry->list, &ctx->hw_submitted);
                ctx->num_hw_submitted++;
        }
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context's submitted queue is still not
 * empty on exit.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_context *ctx,
                                   int *notempty)
{
        struct vmw_cmdbuf_header *entry, *next;

        vmw_cmdbuf_ctx_submit(man, ctx);

        list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
                SVGACBStatus status = entry->cb_header->status;

                if (status == SVGA_CB_STATUS_NONE)
                        break;

                list_del(&entry->list);
                wake_up_all(&man->idle_queue);
                ctx->num_hw_submitted--;
                switch (status) {
                case SVGA_CB_STATUS_COMPLETED:
                        __vmw_cmdbuf_header_free(entry);
                        break;
                case SVGA_CB_STATUS_COMMAND_ERROR:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &man->error);
                        schedule_work(&man->work);
                        break;
                case SVGA_CB_STATUS_PREEMPTED:
                        entry->cb_header->status = SVGA_CB_STATUS_NONE;
                        list_add_tail(&entry->list, &ctx->preempted);
                        break;
                case SVGA_CB_STATUS_CB_HEADER_ERROR:
                        WARN_ONCE(true, "Command buffer header error.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                default:
                        WARN_ONCE(true, "Undefined command buffer status.\n");
                        __vmw_cmdbuf_header_free(entry);
                        break;
                }
        }

        vmw_cmdbuf_ctx_submit(man, ctx);
        if (!list_empty(&ctx->submitted))
                (*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
        int notempty;
        struct vmw_cmdbuf_context *ctx;
        int i;

retry:
        notempty = 0;
        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_process(man, ctx, &notempty);

        if (man->irq_on && !notempty) {
                vmw_generic_waiter_remove(man->dev_priv,
                                          SVGA_IRQFLAG_COMMAND_BUFFER,
                                          &man->dev_priv->cmdbuf_waiters);
                man->irq_on = false;
        } else if (!man->irq_on && notempty) {
                vmw_generic_waiter_add(man->dev_priv,
                                       SVGA_IRQFLAG_COMMAND_BUFFER,
                                       &man->dev_priv->cmdbuf_waiters);
                man->irq_on = true;

                /* Rerun in case we just missed an irq. */
                goto retry;
        }
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context.
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
                               struct vmw_cmdbuf_header *header,
                               SVGACBContext cb_context)
{
        if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
                header->cb_header->dxContext = 0;
        header->cb_context = cb_context;
        list_add_tail(&header->list, &man->ctx[cb_context].submitted);

        vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
        struct vmw_cmdbuf_man *man =
                container_of(work, struct vmw_cmdbuf_man, work);
        struct vmw_cmdbuf_header *entry, *next;
        uint32_t dummy;
        bool send_fence = false;
        struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
        int i;
        struct vmw_cmdbuf_context *ctx;
        bool global_block = false;

        for_each_cmdbuf_ctx(man, i, ctx)
                INIT_LIST_HEAD(&restart_head[i]);

        mutex_lock(&man->error_mutex);
        spin_lock(&man->lock);
        list_for_each_entry_safe(entry, next, &man->error, list) {
                SVGACBHeader *cb_hdr = entry->cb_header;
                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
                        (entry->cmd + cb_hdr->errorOffset);
                u32 error_cmd_size, new_start_offset;
                const char *cmd_name;

                list_del_init(&entry->list);
                global_block = true;

                if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
                        DRM_ERROR("Unknown command causing device error.\n");
                        DRM_ERROR("Command buffer offset is %lu\n",
                                  (unsigned long) cb_hdr->errorOffset);
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
                DRM_ERROR("Command buffer offset is %lu\n",
                          (unsigned long) cb_hdr->errorOffset);
                DRM_ERROR("Command size is %lu\n",
                          (unsigned long) error_cmd_size);

                new_start_offset = cb_hdr->errorOffset + error_cmd_size;

                if (new_start_offset >= cb_hdr->length) {
                        __vmw_cmdbuf_header_free(entry);
                        send_fence = true;
                        continue;
                }

                if (man->using_mob)
                        cb_hdr->ptr.mob.mobOffset += new_start_offset;
                else
                        cb_hdr->ptr.pa += (u64) new_start_offset;

                entry->cmd += new_start_offset;
                cb_hdr->length -= new_start_offset;
                cb_hdr->errorOffset = 0;
                cb_hdr->offset = 0;

                list_add_tail(&entry->list, &restart_head[entry->cb_context]);
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                man->ctx[i].block_submission = true;

        spin_unlock(&man->lock);

        /* Preempt all contexts */
        if (global_block && vmw_cmdbuf_preempt(man, 0))
                DRM_ERROR("Failed preempting command buffer contexts\n");

        spin_lock(&man->lock);
        for_each_cmdbuf_ctx(man, i, ctx) {
                /* Move preempted command buffers to the preempted queue. */
                vmw_cmdbuf_ctx_process(man, ctx, &dummy);

                /*
                 * Add the preempted queue after the command buffer
                 * that caused an error.
                 */
                list_splice_init(&ctx->preempted, restart_head[i].prev);

                /*
                 * Finally add all command buffers first in the submitted
                 * queue, to rerun them.
                 */
                ctx->block_submission = false;
                list_splice_init(&restart_head[i], &ctx->submitted);
        }

        vmw_cmdbuf_man_process(man);
        spin_unlock(&man->lock);

        if (global_block && vmw_cmdbuf_startstop(man, 0, true))
                DRM_ERROR("Failed restarting command buffer contexts\n");

        /* Send a new fence in case one was removed */
        if (send_fence) {
                vmw_fifo_send_fence(man->dev_priv, &dummy);
                wake_up_all(&man->idle_queue);
        }

        mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 *
 * Returns true if idle, false otherwise.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
                                bool check_preempted)
{
        struct vmw_cmdbuf_context *ctx;
        bool idle = false;
        int i;

        spin_lock(&man->lock);
        vmw_cmdbuf_man_process(man);
        for_each_cmdbuf_ctx(man, i, ctx) {
                if (!list_empty(&ctx->submitted) ||
                    !list_empty(&ctx->hw_submitted) ||
                    (check_preempted && !list_empty(&ctx->preempted)))
                        goto out_unlock;
        }

        idle = list_empty(&man->error);

out_unlock:
        spin_unlock(&man->lock);

        return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        WARN_ON(!mutex_is_locked(&man->cur_mutex));

        if (!cur)
                return;

        spin_lock(&man->lock);
        if (man->cur_pos == 0) {
                __vmw_cmdbuf_header_free(cur);
                goto out_unlock;
        }

        man->cur->cb_header->length = man->cur_pos;
        vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
        spin_unlock(&man->lock);
        man->cur = NULL;
        man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the lock.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
                         bool interruptible)
{
        int ret = vmw_cmdbuf_cur_lock(man, interruptible);

        if (ret)
                return ret;

        __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);

        return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
                    unsigned long timeout)
{
        int ret;

        ret = vmw_cmdbuf_cur_flush(man, interruptible);
        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                ret = wait_event_interruptible_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        } else {
                ret = wait_event_timeout
                        (man->idle_queue, vmw_cmdbuf_man_idle(man, true),
                         timeout);
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);
        if (ret == 0) {
                if (!vmw_cmdbuf_man_idle(man, true))
                        ret = -EBUSY;
                else
                        ret = 0;
        }
        if (ret > 0)
                ret = 0;

        return ret;
}

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if succeeded,
 * in which case @info->node holds the allocated range. Returns false if there
 * is currently no space, so that the caller can wait and retry.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_alloc_info *info)
{
        int ret;

        if (info->done)
                return true;

        memset(info->node, 0, sizeof(*info->node));
        spin_lock(&man->lock);
        ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        if (ret) {
                vmw_cmdbuf_man_process(man);
                ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
        }

        spin_unlock(&man->lock);
        info->done = !ret;

        return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
                                  struct drm_mm_node *node,
                                  size_t size,
                                  bool interruptible)
{
        struct vmw_cmdbuf_alloc_info info;

        info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
        info.node = node;
        info.done = false;

        /*
         * To prevent starvation of large requests, only one allocating call
         * at a time waiting for space.
         */
        if (interruptible) {
                if (mutex_lock_interruptible(&man->space_mutex))
                        return -ERESTARTSYS;
        } else {
                mutex_lock(&man->space_mutex);
        }

        /* Try to allocate space without waiting. */
        if (vmw_cmdbuf_try_alloc(man, &info))
                goto out_unlock;

        vmw_generic_waiter_add(man->dev_priv,
                               SVGA_IRQFLAG_COMMAND_BUFFER,
                               &man->dev_priv->cmdbuf_waiters);

        if (interruptible) {
                int ret;

                ret = wait_event_interruptible
                        (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
                if (ret) {
                        vmw_generic_waiter_remove
                                (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
                                 &man->dev_priv->cmdbuf_waiters);
                        mutex_unlock(&man->space_mutex);
                        return ret;
                }
        } else {
                wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
        }
        vmw_generic_waiter_remove(man->dev_priv,
                                  SVGA_IRQFLAG_COMMAND_BUFFER,
                                  &man->dev_priv->cmdbuf_waiters);

out_unlock:
        mutex_unlock(&man->space_mutex);

        return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
                                 struct vmw_cmdbuf_header *header,
                                 size_t size,
                                 bool interruptible)
{
        SVGACBHeader *cb_hdr;
        size_t offset;
        int ret;

        if (!man->has_pool)
                return -ENOMEM;

        ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

        if (ret)
                return ret;

        header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
                                            &header->handle);
        if (!header->cb_header) {
                ret = -ENOMEM;
                goto out_no_cb_header;
        }

        header->size = header->node.size << PAGE_SHIFT;
        cb_hdr = header->cb_header;
        offset = header->node.start << PAGE_SHIFT;
        header->cmd = man->map + offset;
        if (man->using_mob) {
                cb_hdr->flags = SVGA_CB_FLAG_MOB;
                cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
                cb_hdr->ptr.mob.mobOffset = offset;
        } else {
                cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
        }

        return 0;

out_no_cb_header:
        spin_lock(&man->lock);
        drm_mm_remove_node(&header->node);
        spin_unlock(&man->lock);

        return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
                                   struct vmw_cmdbuf_header *header,
                                   int size)
{
        struct vmw_cmdbuf_dheader *dheader;
        SVGACBHeader *cb_hdr;

        if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
                return -ENOMEM;

        dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
                                  &header->handle);
        if (!dheader)
                return -ENOMEM;

        header->inline_space = true;
        header->size = VMW_CMDBUF_INLINE_SIZE;
        cb_hdr = &dheader->cb_header;
        header->cb_header = cb_hdr;
        header->cmd = dheader->cmd;
        cb_hdr->status = SVGA_CB_STATUS_NONE;
        cb_hdr->flags = SVGA_CB_FLAG_NONE;
        cb_hdr->ptr.pa = (u64)header->handle +
                (u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

        return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
                       size_t size, bool interruptible,
                       struct vmw_cmdbuf_header **p_header)
{
        struct vmw_cmdbuf_header *header;
        int ret = 0;

        *p_header = NULL;

        header = kzalloc(sizeof(*header), GFP_KERNEL);
        if (!header)
                return ERR_PTR(-ENOMEM);

        if (size <= VMW_CMDBUF_INLINE_SIZE)
                ret = vmw_cmdbuf_space_inline(man, header, size);
        else
                ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

        if (ret) {
                kfree(header);
                return ERR_PTR(ret);
        }

        header->man = man;
        INIT_LIST_HEAD(&header->list);
        header->cb_header->status = SVGA_CB_STATUS_NONE;
        *p_header = header;

        return header->cmd;
}
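
/*
 * Usage sketch for vmw_cmdbuf_alloc() (illustrative only; error handling
 * trimmed, and the command payload "my_commands" is hypothetical):
 *
 *      struct vmw_cmdbuf_header *header;
 *      void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);
 *
 *      if (IS_ERR(cmd))
 *              return PTR_ERR(cmd);
 *      memcpy(cmd, my_commands, size);
 *      vmw_cmdbuf_commit(man, size, header, true);
 *
 * Completed buffers are freed by the manager (see vmw_cmdbuf_ctx_process());
 * a header that is never committed must be freed with
 * vmw_cmdbuf_header_free().
 */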

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
                                    size_t size,
                                    int ctx_id,
                                    bool interruptible)
{
        struct vmw_cmdbuf_header *cur;
        void *ret;

        if (vmw_cmdbuf_cur_lock(man, interruptible))
                return ERR_PTR(-ERESTARTSYS);

        cur = man->cur;
        if (cur && (size + man->cur_pos > cur->size ||
                    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
                     ctx_id != cur->cb_header->dxContext)))
                __vmw_cmdbuf_cur_flush(man);

        if (!man->cur) {
                ret = vmw_cmdbuf_alloc(man,
                                       max_t(size_t, size, man->default_size),
                                       interruptible, &man->cur);
                if (IS_ERR(ret)) {
                        vmw_cmdbuf_cur_unlock(man);
                        return ret;
                }

                cur = man->cur;
        }

        if (ctx_id != SVGA3D_INVALID_ID) {
                cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                cur->cb_header->dxContext = ctx_id;
        }

        cur->reserved = size;

        return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
                                  size_t size, bool flush)
{
        struct vmw_cmdbuf_header *cur = man->cur;

        WARN_ON(!mutex_is_locked(&man->cur_mutex));

        WARN_ON(size > cur->reserved);
        man->cur_pos += size;
        if (!size)
                cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
                         int ctx_id, bool interruptible,
                         struct vmw_cmdbuf_header *header)
{
        if (!header)
                return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

        if (size > header->size)
                return ERR_PTR(-EINVAL);

        if (ctx_id != SVGA3D_INVALID_ID) {
                header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
                header->cb_header->dxContext = ctx_id;
        }

        header->reserved = size;
        return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
                       struct vmw_cmdbuf_header *header, bool flush)
{
        if (!header) {
                vmw_cmdbuf_commit_cur(man, size, flush);
                return;
        }

        (void) vmw_cmdbuf_cur_lock(man, false);
        __vmw_cmdbuf_cur_flush(man);
        WARN_ON(size > header->reserved);
        man->cur = header;
        man->cur_pos = size;
        if (!size)
                header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
        if (flush)
                __vmw_cmdbuf_cur_flush(man);
        vmw_cmdbuf_cur_unlock(man);
}
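
/*
 * Usage sketch for the reserve/commit pair above on the shared "current"
 * buffer (@header == NULL). Illustrative only; struct my_cmd and its field
 * are hypothetical:
 *
 *      struct my_cmd *cmd;
 *
 *      cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
 *                               true, NULL);
 *      if (IS_ERR(cmd))
 *              return PTR_ERR(cmd);
 *      cmd->field = value;
 *      vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);
 *
 * Committing with @size == 0 backs out of a reservation.
 */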

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
                                          const void *command,
                                          size_t size)
{
        struct vmw_cmdbuf_header *header;
        int status;
        void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        memcpy(cmd, command, size);
        header->cb_header->length = size;
        header->cb_context = SVGA_CB_CONTEXT_DEVICE;
        spin_lock(&man->lock);
        status = vmw_cmdbuf_header_submit(header);
        spin_unlock(&man->lock);
        vmw_cmdbuf_header_free(header);

        if (status != SVGA_CB_STATUS_COMPLETED) {
                DRM_ERROR("Device context command failed with status %d\n",
                          status);
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to preempt.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
        struct {
                uint32 id;
                SVGADCCmdPreempt body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_PREEMPT;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;
        cmd.body.ignoreIDZero = 0;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start / stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
                                bool enable)
{
        struct {
                uint32 id;
                SVGADCCmdStartStop body;
        } __packed cmd;

        cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
        cmd.body.enable = (enable) ? 1 : 0;
        cmd.body.context = SVGA_CB_CONTEXT_0 + context;

        return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 * @default_size: The default size of the command buffer for small kernel
 * submissions.
 *
 * Set the size and allocate the main command buffer space pool,
 * as well as the default size of the command buffer for
 * small kernel submissions. If successful, this enables large command
 * submissions. Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
                             size_t size, size_t default_size)
{
        struct vmw_private *dev_priv = man->dev_priv;
        bool dummy;
        int ret;

        if (man->has_pool)
                return -EINVAL;

        /* First, try to allocate a huge chunk of DMA memory */
        size = PAGE_ALIGN(size);
        man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
                                      &man->handle, GFP_KERNEL);
        if (man->map) {
                man->using_mob = false;
        } else {
                /*
                 * DMA memory failed. If we can have command buffers in a
                 * MOB, try to use that instead. Note that this will
                 * actually call into the already enabled manager, when
                 * binding the MOB.
                 */
                if (!(dev_priv->capabilities & SVGA_CAP_DX))
                        return -ENOMEM;

                ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
                                    &vmw_mob_ne_placement, 0, false,
                                    &man->cmd_space);
                if (ret)
                        return ret;

                man->using_mob = true;
                ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
                                  &man->map_obj);
                if (ret)
                        goto out_no_map;

                man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
        }

        man->size = size;
        drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

        man->has_pool = true;

        /*
         * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
         * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
         * needs to wait for space and we block on further command
         * submissions to be able to free up space.
         */
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        DRM_INFO("Using command buffers with %s pool.\n",
                 (man->using_mob) ? "MOB" : "DMA");

        return 0;

out_no_map:
        if (man->using_mob)
                ttm_bo_unref(&man->cmd_space);

        return ret;
}

/**
 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
        struct vmw_cmdbuf_man *man;
        struct vmw_cmdbuf_context *ctx;
        unsigned int i;
        int ret;

        if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
                return ERR_PTR(-ENOSYS);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return ERR_PTR(-ENOMEM);

        man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
                2 : 1;
        man->headers = dma_pool_create("vmwgfx cmdbuf",
                                       &dev_priv->dev->pdev->dev,
                                       sizeof(SVGACBHeader),
                                       64, PAGE_SIZE);
        if (!man->headers) {
                ret = -ENOMEM;
                goto out_no_pool;
        }

        man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
                                        &dev_priv->dev->pdev->dev,
                                        sizeof(struct vmw_cmdbuf_dheader),
                                        64, PAGE_SIZE);
        if (!man->dheaders) {
                ret = -ENOMEM;
                goto out_no_dpool;
        }

        for_each_cmdbuf_ctx(man, i, ctx)
                vmw_cmdbuf_ctx_init(ctx);

        INIT_LIST_HEAD(&man->error);
        spin_lock_init(&man->lock);
        mutex_init(&man->cur_mutex);
        mutex_init(&man->space_mutex);
        mutex_init(&man->error_mutex);
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        init_waitqueue_head(&man->alloc_queue);
        init_waitqueue_head(&man->idle_queue);
        man->dev_priv = dev_priv;
        man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
        INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
                               &dev_priv->error_waiters);
        ret = vmw_cmdbuf_startstop(man, 0, true);
        if (ret) {
                DRM_ERROR("Failed starting command buffer contexts\n");
                vmw_cmdbuf_man_destroy(man);
                return ERR_PTR(ret);
        }

        return man;

out_no_dpool:
        dma_pool_destroy(man->headers);
out_no_pool:
        kfree(man);

        return ERR_PTR(ret);
}
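
/*
 * Lifecycle sketch for the manager (illustrative; this mirrors how the
 * functions in this file are meant to be sequenced, with error handling
 * trimmed):
 *
 *      man = vmw_cmdbuf_man_create(dev_priv);  // inline submissions only
 *      vmw_cmdbuf_set_pool_size(man, size, default_size);
 *                                              // enables large submissions
 *      ...
 *      vmw_cmdbuf_remove_pool(man);            // before MOB teardown
 *      vmw_cmdbuf_man_destroy(man);
 */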

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
        if (!man->has_pool)
                return;

        man->has_pool = false;
        man->default_size = VMW_CMDBUF_INLINE_SIZE;
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);
        if (man->using_mob) {
                (void) ttm_bo_kunmap(&man->map_obj);
                ttm_bo_unref(&man->cmd_space);
        } else {
                dma_free_coherent(&man->dev_priv->dev->pdev->dev,
                                  man->size, man->map, man->handle);
        }
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
        WARN_ON_ONCE(man->has_pool);
        (void) vmw_cmdbuf_idle(man, false, 10*HZ);

        if (vmw_cmdbuf_startstop(man, 0, false))
                DRM_ERROR("Failed stopping command buffer contexts.\n");

        vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
                                  &man->dev_priv->error_waiters);
        (void) cancel_work_sync(&man->work);
        dma_pool_destroy(man->dheaders);
        dma_pool_destroy(man->headers);
        mutex_destroy(&man->cur_mutex);
        mutex_destroy(&man->space_mutex);
        mutex_destroy(&man->error_mutex);
        kfree(man);
}