/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_placement.h"
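/*
 * vmw_fifo_init - Initialize the device command FIFO.
 *
 * Allocates the static and last-command bounce buffers, enables the
 * SVGA device, programs the FIFO min/max/next_cmd/stop registers and
 * reads back the FIFO capabilities. Finishes by sending a first fence
 * so that the fence sequence starts out valid.
 */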
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;
	int ret;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->last_data_size = 0;
	fifo->last_buffer_add = false;
	fifo->last_buffer = vmalloc(fifo->last_buffer_size);
	if (unlikely(fifo->last_buffer == NULL)) {
		ret = -ENOMEM;
		goto out_err;
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	dev_priv->fence_seq = dev_priv->last_read_sequence;
	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

	return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
	vfree(fifo->static_buffer);
	fifo->static_buffer = NULL;
	return ret;
}
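/*
 * vmw_fifo_ping_host - Wake up the device if the FIFO is idle.
 *
 * If the device does not already consider the FIFO busy, set the busy
 * flag and write the given reason to SVGA_REG_SYNC so the host starts
 * processing FIFO commands.
 */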
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}
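/*
 * vmw_fifo_release - Tear down the command FIFO.
 *
 * Waits for the device to go idle, saves the last read fence sequence,
 * restores the config-done and enable states captured at init time, and
 * frees all bounce buffers.
 */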
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);

	mutex_unlock(&dev_priv->hw_mutex);

	if (likely(fifo->last_buffer != NULL)) {
		vfree(fifo->last_buffer);
		fifo->last_buffer = NULL;
	}

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}
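/*
 * vmw_fifo_is_full - Check whether @bytes of command space is available.
 *
 * The FIFO is a ring between min and max; free space is the gap between
 * next_cmd (producer) and stop (consumer), computed modulo the ring size.
 */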
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}
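/*
 * vmw_fifo_wait_noirq - Poll for FIFO space without interrupt support.
 *
 * Used when the device lacks SVGA_CAP_IRQMASK. Sleeps in one-jiffy
 * steps until space frees up, the timeout expires or, if interruptible,
 * a signal is received.
 */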
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
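/*
 * vmw_fifo_wait - Wait until at least @bytes of FIFO space is free.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, then either polls (no IRQ
 * support) or enables the FIFO-progress interrupt and sleeps on the
 * fifo_queue waitqueue until vmw_fifo_is_full() clears or the timeout
 * expires.
 */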
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
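/*
 * vmw_fifo_reserve - Reserve @bytes of command space.
 *
 * Returns a pointer the caller can write commands to, holding the FIFO
 * rwsem for writing until vmw_fifo_commit(). If the request fits
 * contiguously in the ring and the device supports SVGA_FIFO_CAP_RESERVE
 * (or the request is a single dword), space is reserved in place;
 * otherwise the data is staged in a static or vmalloc'd bounce buffer
 * and copied in at commit time. Returns NULL on failure.
 */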
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	down_write(&fifo_state->rwsem);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	up_write(&fifo_state->rwsem);
	return NULL;
}
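/*
 * vmw_fifo_res_copy - Copy a bounce buffer into reserved FIFO space,
 * splitting the copy at the ring wrap point if necessary.
 */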
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}
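/*
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO one dword at
 * a time, advancing SVGA_FIFO_NEXT_CMD after every write. Used when the
 * device does not support reserved FIFO space.
 */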
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
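/*
 * vmw_fifo_commit - Commit @bytes of previously reserved command space.
 *
 * Flushes any bounce buffer into the FIFO, advances next_cmd with ring
 * wraparound, clears the reserved count, pings the host and drops the
 * FIFO rwsem taken in vmw_fifo_reserve().
 */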
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}
	}

	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	up_write(&fifo_state->rwsem);
}
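/*
 * vmw_fifo_send_fence - Emit an SVGA_CMD_FENCE command.
 *
 * Assigns the next non-zero fence sequence and writes a fence command
 * to the FIFO. If the FIFO is out of space, falls back to waiting on
 * the current sequence; if the device lacks SVGA_FIFO_CAP_FENCE, the
 * command is dropped and fencing is emulated by the code in
 * vmwgfx_irq.c.
 */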
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		down_write(&fifo_state->rwsem);
		*sequence = dev_priv->fence_seq;
		up_write(&fifo_state->rwsem);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*sequence = dev_priv->fence_seq++;
	} while (*sequence == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*sequence, &cmd_fence->fence);
	fifo_state->last_buffer_add = true;
	vmw_fifo_commit(dev_priv, bytes);
	fifo_state->last_buffer_add = false;

out_err:
	return ret;
}
/**
 * Map the first page of the FIFO read-only to user-space.
 */
static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (address != vma->vm_start)
		return VM_FAULT_SIGBUS;

	ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
	if (likely(ret == -EBUSY || ret == 0))
		return VM_FAULT_NOPAGE;
	else if (ret == -ENOMEM)
		return VM_FAULT_OOM;

	return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct vmw_fifo_vm_ops = {
	.fault = vmw_fifo_vm_fault,
	.open = NULL,
	.close = NULL
};
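/*
 * vmw_fifo_mmap - mmap() handler exposing the first FIFO page.
 *
 * Only a single read-only, uncached page at the start of the MMIO
 * region may be mapped; anything else is rejected with -EINVAL.
 */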
int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);

	if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
	    (vma->vm_end - vma->vm_start) != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
					vma->vm_page_prot);
	vma->vm_ops = &vmw_fifo_vm_ops;
	return 0;
}