[AROS.git] / workbench / hidds / vmwaresvga / vmwaresvga_hardware.c
blob 509e0d480c59ff36c397c8c7e2aee372a0f0fdeb
/*
    Copyright © 1995-2019, The AROS Development Team. All rights reserved.
    $Id$

    Desc: VMWare SVGA hardware functions
    Lang: English
*/

#ifdef DEBUG
#undef DEBUG
#endif
#define DEBUG 0 /* no SysBase */
#include <aros/debug.h>

#include <asm/io.h>

#include <limits.h>
#include <exec/ports.h>
#include <devices/timer.h>
#include <proto/exec.h>

#include "vmwaresvga_intern.h"

#if (DEBUG > 0)
#define DIRQ(x) x
#define DDMG(x)
#define DFIFO(x)
#define DFIFOINF(x) x
#define DFIFOBUF(x) x
#define DFENCE(x) x
#define DREFRESH(x)
#define DCURSOR(x)
#else
#define DIRQ(x)
#define DDMG(x)
#define DFIFO(x)
#define DFIFOINF(x)
#define DFIFOBUF(x)
#define DFENCE(x)
#define DREFRESH(x)
#define DCURSOR(x)
#endif

ULONG vmwareReadReg(struct HWData *data, ULONG reg)
{
    outl(reg, data->indexReg);
    return inl(data->valueReg);
}

VOID vmwareWriteReg(struct HWData *data, ULONG reg, ULONG val)
{
    outl(reg, data->indexReg);
    outl(val, data->valueReg);
}

VOID vmwareHandlerIRQ(struct HWData *data, void *unused)
{
    UWORD port = (UWORD)((IPTR)data->iobase + SVGA_IRQSTATUS_PORT);
    ULONG irqFlags = inl(port);
    outl(irqFlags, port);

    DIRQ(bug("[VMWareSVGA] %s(%08x)\n", __func__, irqFlags);)
    if (!irqFlags)
    {
        D(bug("[VMWareSVGA] %s: Spurious IRQ!\n", __func__);)
    }
    else
    {
        /* IRQ flags were acknowledged above; nothing further to do here yet */
    }
}

#undef SysBase
extern struct ExecBase *SysBase;

APTR VMWareSVGA_MemAlloc(struct HWData *data, ULONG size)
{
    D(bug("[VMWareSVGA:HW] %s(%d)\n", __func__, size);)
    return AllocMem(size, MEMF_CLEAR|MEMF_31BIT);
}

VOID VMWareSVGA_MemFree(struct HWData *data, APTR addr, ULONG size)
{
    D(bug("[VMWareSVGA:HW] %s(0x%p)\n", __func__, addr);)
    FreeMem(addr, size);
}

/**********/

ULONG getVMWareSVGAID(struct HWData *data)
{
    ULONG id;

    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_2);
    id = vmwareReadReg(data, SVGA_REG_ID);
    if (id == SVGA_ID_2)
        return id;
    vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_1);
    id = vmwareReadReg(data, SVGA_REG_ID);
    if (id == SVGA_ID_1)
        return id;
    if (id == SVGA_ID_0)
        return id;
    return SVGA_ID_INVALID;
}

BOOL hasCapVMWareSVGAFIFO(struct HWData *data, ULONG fifocap)
{
    volatile ULONG *fifo = data->mmiobase;

    if ((data->capabilities & SVGA_CAP_EXTENDED_FIFO) &&
        (fifo[SVGA_FIFO_MIN] > (fifocap << 2)))
    {
        return TRUE;
    }
    return FALSE;
}

VOID initVMWareSVGAFIFO(struct HWData *data)
{
    volatile ULONG *fifo = data->mmiobase;

    DFIFOINF(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (!data->fifomin)
    {
        if (data->capabilities & SVGA_CAP_EXTENDED_FIFO)
        {
            DFIFOINF(bug("[VMWareSVGA:HW] %s: Extended FIFO Capabilities\n", __func__);)
            data->fifomin = vmwareReadReg(data, SVGA_REG_MEM_REGS);
            data->fifocapabilities = fifo[SVGA_FIFO_CAPABILITIES];
            DFIFOINF(
                if (data->fifocapabilities & SVGA_FIFO_CAP_FENCE)
                    bug("[VMWareSVGA] %s: Fence\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_ACCELFRONT)
                    bug("[VMWareSVGA] %s: Accelerate Front\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_PITCHLOCK)
                    bug("[VMWareSVGA] %s: Pitchlock\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_VIDEO)
                    bug("[VMWareSVGA] %s: Video\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3)
                    bug("[VMWareSVGA] %s: Cursor-Bypass3\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_ESCAPE)
                    bug("[VMWareSVGA] %s: Escape\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_RESERVE)
                    bug("[VMWareSVGA] %s: FIFO Reserve\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_SCREEN_OBJECT)
                    bug("[VMWareSVGA] %s: Screen-Object\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_GMR2)
                    bug("[VMWareSVGA] %s: GMR2\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_3D_HWVERSION_REVISED)
                    bug("[VMWareSVGA] %s: 3DHWVersion-Revised\n", __func__);
                if (data->fifocapabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)
                    bug("[VMWareSVGA] %s: Screen-Object2\n", __func__);
            )
        }
        else
            data->fifomin = SVGA_FIFO_CAPABILITIES;
    }
    DFIFOINF(bug("[VMWareSVGA:HW] %s: FIFO Min Regs = %d\n", __func__, data->fifomin));

    data->mmiosize = vmwareReadReg(data, SVGA_REG_MEM_SIZE);

    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 0); // Stop vmware from reading the fifo

    fifo[SVGA_FIFO_MIN] = (data->fifomin << VMWFIFO_CMD_SIZESHIFT);
    fifo[SVGA_FIFO_MAX] = data->mmiosize;
    fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
    fifo[SVGA_FIFO_STOP] = fifo[SVGA_FIFO_MIN];

    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 1);

    data->fifocmdbuf.buffer = AllocMem(VMW_COMMAND_SIZE, MEMF_CLEAR|MEMF_ANY);
    InitSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
    data->fifocmdbuf.size = VMW_COMMAND_SIZE;
    data->fifocmdbuf.used = 0;
    data->fifocmdbuf.reserved = 0;
    DFIFOINF(
        bug("[VMWareSVGA:HW] %s: FIFO Cmd @ 0x%p initialised\n", __func__, &data->fifocmdbuf);
        bug("[VMWareSVGA:HW] %s: Cmd bounce-buffer @ 0x%p\n", __func__, data->fifocmdbuf.buffer);
    )

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_GUEST_3D_HWVERSION))
    {
        DFIFOINF(bug("[VMWareSVGA:HW] %s: Setting GUEST_3D_HWVERSION = %d\n", __func__, SVGA3D_HWVERSION_CURRENT);)
        fifo[SVGA_FIFO_GUEST_3D_HWVERSION] = SVGA3D_HWVERSION_CURRENT;
    }
}

void waitVMWareSVGAFIFO(struct HWData *data)
{
    bug("[VMWareSVGA:HW] %s()\n", __func__);
    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE_GOAL) &&
        (data->capabilities & SVGA_CAP_IRQMASK)) {
#if (0)
        /*
         * On hosts which support interrupts, we can sleep until the
         * FIFO_PROGRESS interrupt occurs. This is the most efficient
         * thing to do when the FIFO fills up.
         *
         * As with the IRQ-based SVGA_SyncToFence(), this will only work
         * on Workstation 6.5 virtual machines and later.
         */
        vmwareWriteReg(data, SVGA_REG_IRQMASK, SVGA_IRQFLAG_FIFO_PROGRESS);
        // TODO: wait for the irq...
        vmwareWriteReg(data, SVGA_REG_IRQMASK, 0);
#endif
    } else {
        /*
         * Fallback implementation: Perform one iteration of the
         * legacy-style sync. This synchronously processes FIFO commands
         * for an arbitrary amount of time, then returns control back to
         * the guest CPU.
         */
        syncVMWareSVGAFIFO(data);
        vmwareReadReg(data, SVGA_REG_BUSY);
    }
}

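/*
 * A minimal sketch of how the disabled IRQ wait above might be wired up
 * with exec signals. It assumes hypothetical fifoProgressTask and
 * fifoProgressSigBit fields in HWData (not present in this file) and that
 * vmwareHandlerIRQ() would Signal() the waiting task when it sees
 * SVGA_IRQFLAG_FIFO_PROGRESS. Illustration only, not the driver's actual
 * mechanism.
 */
#if (0)
static void waitVMWareSVGAFIFOIRQ(struct HWData *data)
{
    BYTE sigbit = AllocSignal(-1);                      /* grab any free signal bit */
    if (sigbit < 0)
        return;

    data->fifoProgressTask = FindTask(NULL);            /* hypothetical field */
    data->fifoProgressSigBit = sigbit;                  /* hypothetical field */

    vmwareWriteReg(data, SVGA_REG_IRQMASK, SVGA_IRQFLAG_FIFO_PROGRESS);
    Wait(1L << sigbit);                                 /* sleep until the IRQ handler Signal()s us */
    vmwareWriteReg(data, SVGA_REG_IRQMASK, 0);

    FreeSignal(sigbit);
}
#endif
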
APTR reserveVMWareSVGAFIFO(struct HWData *data, ULONG size)
{
    volatile ULONG *fifo = data->mmiobase;
    ULONG max = fifo[SVGA_FIFO_MAX];
    ULONG min = fifo[SVGA_FIFO_MIN];
    ULONG cmdNext = fifo[SVGA_FIFO_NEXT_CMD];
    BOOL canreserve = FALSE;

    DFIFOBUF(bug("[VMWareSVGA:HW] %s(%d)\n", __func__, size);)

    if (data->fifocapabilities & SVGA_FIFO_CAP_RESERVE)
    {
        DFIFOBUF(bug("[VMWareSVGA:HW] %s: reserve supported\n", __func__);)
        canreserve = TRUE;
    }

    if (size > VMW_COMMAND_SIZE || (size > (max - min))) {
        bug("[VMWareSVGA:HW] %s: FIFO command too large!\n", __func__);
        return NULL;
    }

    if (size % VMWFIFO_CMD_SIZE) {
        bug("[VMWareSVGA:HW] %s: size of %d not 32bit-aligned!!\n", __func__, size);
        return NULL;
    }

    ObtainSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
    data->fifocmdbuf.reserved = size;

    while (1) {
        ULONG stop = fifo[SVGA_FIFO_STOP];
        BOOL reserveInPlace = FALSE;
        BOOL needBounce = FALSE;

        /*
         * Find a strategy for dealing with "size" of data:
         * - reserve in place, if there's room and the FIFO supports it
         * - reserve in bounce buffer, if there's room in FIFO but not
         *   contiguous or FIFO can't safely handle reservations
         * - otherwise, sync the FIFO and try again.
         */

        if (cmdNext >= stop) {
            /* There is no valid FIFO data between cmdNext and max */

            if (cmdNext + size < max ||
                (cmdNext + size == max && stop > min)) {
                /*
                 * Fastest path 1: There is already enough contiguous space
                 * between cmdNext and max (the end of the buffer).
                 *
                 * Note the edge case: If the "<" path succeeds, we can
                 * quickly return without performing any other tests. If
                 * we end up on the "==" path, we're writing exactly up to
                 * the top of the FIFO and we still need to make sure that
                 * there is at least one unused DWORD at the bottom, in
                 * order to be sure we don't fill the FIFO entirely.
                 *
                 * If the "==" test succeeds, but stop <= min (the FIFO
                 * would be completely full if we were to reserve this
                 * much space) we'll end up hitting the FIFOFull path below.
                 */
                reserveInPlace = TRUE;
            } else if ((max - cmdNext) + (stop - min) <= size) {
                /*
                 * We have to split the FIFO command into two pieces,
                 * but there still isn't enough total free space in
                 * the FIFO to store it.
                 *
                 * Note the "<=". We need to keep at least one DWORD
                 * of the FIFO free at all times, or we won't be able
                 * to tell the difference between full and empty.
                 */
                waitVMWareSVGAFIFO(data);
            } else {
                /*
                 * Data fits in FIFO but only if we split it.
                 * Need to bounce to guarantee contiguous buffer.
                 */
                needBounce = TRUE;
            }
        } else {
            /* There is FIFO data between cmdNext and max */

            if (cmdNext + size < stop) {
                /*
                 * Fastest path 2: There is already enough contiguous space
                 * between cmdNext and stop.
                 */
                reserveInPlace = TRUE;
            } else {
                /*
                 * There isn't enough room between cmdNext and stop.
                 * The FIFO is too full to accept this command.
                 */
                waitVMWareSVGAFIFO(data);
            }
        }

        /*
         * If we decided we can write directly to the FIFO, make sure
         * the VMX can safely support this.
         */
        if (reserveInPlace) {
            if (canreserve || size <= sizeof(ULONG)) {
                data->bbused = FALSE;
                if (canreserve) {
                    fifo[SVGA_FIFO_RESERVED] = size;
                }
                ReleaseSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
                return cmdNext + (UBYTE *)fifo;
            } else {
                /*
                 * Need to bounce because we can't trust the VMX to safely
                 * handle uncommitted data in FIFO.
                 */
                needBounce = TRUE;
            }
        }

        /*
         * If we reach here, either we found a full FIFO, called
         * waitVMWareSVGAFIFO to make more room, and want to try again, or we
         * decided to use a bounce buffer instead.
         */
        if (needBounce) {
            data->bbused = TRUE;
            ReleaseSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
            return data->fifocmdbuf.buffer;
        }
    } /* while (1) */
}

VOID commitVMWareSVGAFIFO(struct HWData *data, ULONG size)
{
    volatile ULONG *fifo = data->mmiobase;
    ULONG max = fifo[SVGA_FIFO_MAX];
    ULONG min = fifo[SVGA_FIFO_MIN];
    ULONG cmdNext = fifo[SVGA_FIFO_NEXT_CMD];
    BOOL canreserve = FALSE;

    DFIFOBUF(bug("[VMWareSVGA:HW] %s(%d)\n", __func__, size);)

    if (data->fifocapabilities & SVGA_FIFO_CAP_RESERVE)
    {
        DFIFOBUF(bug("[VMWareSVGA:HW] %s: reserve supported\n", __func__);)
        canreserve = TRUE;
    }

    if (data->fifocmdbuf.reserved == 0) {
        bug("[VMWareSVGA:HW] %s: COMMIT called before RESERVE!!\n", __func__);
        return;
    }

    ObtainSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);

    data->fifocmdbuf.used += data->fifocmdbuf.reserved;
    data->fifocmdbuf.reserved = 0;

    if (data->bbused) {
        /*
         * Slow paths: copy out of a bounce buffer.
         */
        UBYTE *buffer = data->fifocmdbuf.buffer;

        if (canreserve) {
            /*
             * Slow path: bulk copy out of a bounce buffer in two chunks.
             *
             * Note that the second chunk may be zero-length if the reserved
             * size was large enough to wrap around but the commit size was
             * small enough that everything fit contiguously into the FIFO.
             *
             * Note also that we didn't need to tell the FIFO about the
             * reservation in the bounce buffer, but we do need to tell it
             * about the data we're bouncing from there into the FIFO.
             */
            ULONG chunkSize;
            if (size > (max - cmdNext))
                chunkSize = max - cmdNext;
            else
                chunkSize = size;
            bug("[VMWareSVGA:HW] %s: chunk size %d, size %d\n", __func__, chunkSize, size);
            fifo[SVGA_FIFO_RESERVED] = size;
            memcpy(cmdNext + (UBYTE *) fifo, buffer, chunkSize);
            memcpy(min + (UBYTE *) fifo, buffer + chunkSize, size - chunkSize);
        } else {
            /*
             * Slowest path: copy one ULONG at a time, updating NEXT_CMD as
             * we go, so that we bound how much data the guest has written
             * and the host doesn't know to checkpoint.
             */
            ULONG *dword = (ULONG *)buffer;

            DFIFOBUF(
                SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)buffer;
                bug("[VMWareSVGA:HW] %s: copying %d bytes (cmd %d, size %d)\n", __func__, size, header->id, header->size);
            )

            while (size > 0) {
                fifo[cmdNext >> VMWFIFO_CMD_SIZESHIFT] = *dword++;
                cmdNext += VMWFIFO_CMD_SIZE;
                if (cmdNext == max) {
                    cmdNext = min;
                }
                fifo[SVGA_FIFO_NEXT_CMD] = cmdNext;
                size -= VMWFIFO_CMD_SIZE;
            }
        }
    }

    /*
     * Atomically update NEXT_CMD, if we didn't already
     */
    if (!data->bbused || canreserve) {
        cmdNext += size;
        if (cmdNext >= max) {
            cmdNext -= max - min;
        }
        fifo[SVGA_FIFO_NEXT_CMD] = cmdNext;
    }

    /*
     * Clear the reservation in the FIFO.
     */
    if (canreserve) {
        fifo[SVGA_FIFO_RESERVED] = 0;
    }
    ReleaseSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
}

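/*
 * A minimal usage sketch for the reserve/commit pair above, assuming a
 * hypothetical two-word command laid out as { ULONG cmd; ULONG arg; }.
 * reserveVMWareSVGAFIFO() hands back a pointer either directly into the
 * FIFO or into the bounce buffer; in both cases the caller just fills it
 * and commits the same number of bytes. Illustration only.
 */
#if (0)
static void exampleSubmitCommand(struct HWData *data, ULONG cmd, ULONG arg)
{
    ULONG *body = (ULONG *)reserveVMWareSVGAFIFO(data, 2 * VMWFIFO_CMD_SIZE);
    if (!body)
        return;                 /* command was too large or mis-aligned */

    body[0] = cmd;              /* fill the reserved space ... */
    body[1] = arg;

    commitVMWareSVGAFIFO(data, 2 * VMWFIFO_CMD_SIZE);   /* ... then hand it to the host */
}
#endif
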
VOID flushVMWareSVGAFIFO(struct HWData *data, ULONG *fence)
{
    DFIFOBUF(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (data->fifocmdbuf.reserved)
    {
        *fence = fenceVMWareSVGAFIFO(data);
        commitVMWareSVGAFIFO(data, data->fifocmdbuf.reserved);
    }
}

ULONG fenceVMWareSVGAFIFO(struct HWData *data)
{
    ULONG fence = 1;

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE) && (data->fifocapabilities & SVGA_FIFO_CAP_FENCE))
    {
        fence = data->fence++;
        if (fence == 0) fence = 1;

        DFENCE(bug("[VMWareSVGA:HW] %s: inserting fence #%d\n", __func__, fence);)

        writeVMWareSVGAFIFO(data, SVGA_CMD_FENCE);
        writeVMWareSVGAFIFO(data, fence);
        syncVMWareSVGAFIFO(data);
    }
    return fence;
}

VOID syncfenceVMWareSVGAFIFO(struct HWData *data, ULONG fence)
{
    volatile ULONG *fifo;

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE) && (data->fifocapabilities & SVGA_FIFO_CAP_FENCE))
    {
        fifo = data->mmiobase;
        if ((LONG)(fifo[SVGA_FIFO_FENCE] - fence) < 0)
        {
            DFENCE(bug("[VMWareSVGA:HW] %s: fence #%d hasn't been reached yet...\n", __func__, fence);)
        }
    }
}

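/*
 * A short sketch of how the fence primitives above are meant to be paired:
 * insert a fence after queuing work, keep the returned id, and later pass
 * it to syncfenceVMWareSVGAFIFO() to check whether the host has consumed
 * the stream up to that point (on hosts without SVGA_FIFO_CAP_FENCE the
 * id stays 1 and the check degenerates to a no-op). Illustration only.
 */
#if (0)
static void exampleFencedUpdate(struct HWData *data, struct Box *box)
{
    ULONG fence;

    refreshAreaVMWareSVGA(data, box);       /* queue an update command */
    fence = fenceVMWareSVGAFIFO(data);      /* mark this point in the command stream */

    /* ... do other work ... */

    syncfenceVMWareSVGAFIFO(data, fence);   /* has the host reached the fence yet? */
}
#endif
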
VOID syncVMWareSVGAFIFO(struct HWData *data)
{
    DFIFO(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    vmwareWriteReg(data, SVGA_REG_SYNC, 1);
}

VOID writeVMWareSVGAFIFO(struct HWData *data, ULONG val)
{
    volatile ULONG *fifo;

    DFIFO(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    fifo = data->mmiobase;
    if (
        (fifo[SVGA_FIFO_NEXT_CMD] + VMWFIFO_CMD_SIZE == fifo[SVGA_FIFO_STOP]) ||
        (
            (fifo[SVGA_FIFO_NEXT_CMD] == (fifo[SVGA_FIFO_MAX] - VMWFIFO_CMD_SIZE)) &&
            (fifo[SVGA_FIFO_STOP] == fifo[SVGA_FIFO_MIN])
        )
       )
        syncVMWareSVGAFIFO(data);

    fifo[fifo[SVGA_FIFO_NEXT_CMD] >> VMWFIFO_CMD_SIZESHIFT] = val;
    fifo[SVGA_FIFO_NEXT_CMD] += VMWFIFO_CMD_SIZE;

    if (fifo[SVGA_FIFO_NEXT_CMD] == fifo[SVGA_FIFO_MAX])
        fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
}

BOOL initVMWareSVGAHW(struct HWData *data, OOP_Object *device)
{
    ULONG id;

    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    id = getVMWareSVGAID(data);
    if (id == SVGA_ID_INVALID)
        return FALSE;

    data->maskPool = CreatePool(MEMF_ANY, (32 << 3), (32 << 2));

    if (id >= SVGA_ID_1)
    {
        volatile ULONG *fifo = data->mmiobase;
        data->capabilities = vmwareReadReg(data, SVGA_REG_CAPABILITIES);
    }

    if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
    {
        data->bitsperpixel = vmwareReadReg(data, SVGA_REG_HOST_BITS_PER_PIXEL);
        vmwareWriteReg(data, SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);
    }
    data->bitsperpixel = vmwareReadReg(data, SVGA_REG_BITS_PER_PIXEL);

    data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
    data->maxwidth = vmwareReadReg(data, SVGA_REG_MAX_WIDTH);
    data->maxheight = vmwareReadReg(data, SVGA_REG_MAX_HEIGHT);
    data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
    data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
    data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);
    data->bytesperpixel = 1;

    if (data->depth > 16)
        data->bytesperpixel = 4;
    else if (data->depth > 8)
        data->bytesperpixel = 2;

    if (data->capabilities & SVGA_CAP_MULTIMON)
    {
        data->displaycount = vmwareReadReg(data, SVGA_REG_NUM_DISPLAYS);
    }
    else
    {
        data->displaycount = 1;
    }

    data->vramsize = vmwareReadReg(data, SVGA_REG_VRAM_SIZE);
    data->vrambase = (APTR)(IPTR)vmwareReadReg(data, SVGA_REG_FB_START);
    data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);

    bug("[VMWareSVGA:HW] %s: VRAM at 0x%08x size %d\n", __func__, data->vrambase, data->vramsize);
    bug("[VMWareSVGA:HW] %s: no.displays: %d\n", __func__, data->displaycount);
    bug("[VMWareSVGA:HW] %s: depth: %d\n", __func__, data->depth);
    bug("[VMWareSVGA:HW] %s: bpp  : %d\n", __func__, data->bitsperpixel);

    VMWareSVGA_RestartRenderTask(data);

    return TRUE;
}

VOID getModeCfgVMWareSVGA(struct HWData *data)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    data->fboffset = vmwareReadReg(data, SVGA_REG_FB_OFFSET);
    data->bytesperline = vmwareReadReg(data, SVGA_REG_BYTES_PER_LINE);
    data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
    data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
    data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
    data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);
    data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);

    data->display_width = vmwareReadReg(data, SVGA_REG_WIDTH);
    data->display_height = vmwareReadReg(data, SVGA_REG_HEIGHT);
    D(bug("[VMWareSVGA:HW] %s: %dx%d\n", __func__, data->display_width, data->display_height));
}

VOID enableVMWareSVGA(struct HWData *data)
{
    vmwareWriteReg(data, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
}

VOID disableVMWareSVGA(struct HWData *data)
{
    vmwareWriteReg(data, SVGA_REG_ENABLE, (SVGA_REG_ENABLE_HIDE|SVGA_REG_ENABLE_ENABLE));
}

VOID initDisplayVMWareSVGA(struct HWData *data)
{
    enableVMWareSVGA(data);
    getModeCfgVMWareSVGA(data);
    initVMWareSVGAFIFO(data);
}

VOID setModeVMWareSVGA(struct HWData *data, ULONG width, ULONG height)
{
    D(bug("[VMWareSVGA:HW] %s(%dx%d)\n", __func__, width, height));

    disableVMWareSVGA(data);

    vmwareWriteReg(data, SVGA_REG_WIDTH, width);
    vmwareWriteReg(data, SVGA_REG_HEIGHT, height);

    if (data->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
    {
        D(bug("[VMWareSVGA:HW] %s: Adjusting Display Topology\n", __func__);)

        vmwareWriteReg(data, SVGA_REG_DISPLAY_ID, 0);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_IS_PRIMARY, TRUE);

        vmwareWriteReg(data, SVGA_REG_DISPLAY_POSITION_X, 0);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_POSITION_Y, 0);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_WIDTH, width);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_HEIGHT, height);
    }

    if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
        vmwareWriteReg(data, SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);

    initDisplayVMWareSVGA(data);
}

VOID refreshAreaVMWareSVGA(struct HWData *data, struct Box *box)
{
    DREFRESH(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_UPDATE);
    writeVMWareSVGAFIFO(data, box->x1);
    writeVMWareSVGAFIFO(data, box->y1);
    writeVMWareSVGAFIFO(data, box->x2 - box->x1 + 1);
    writeVMWareSVGAFIFO(data, box->y2 - box->y1 + 1);
}

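/*
 * Worked example of the Box convention used above: corners are inclusive,
 * so SVGA_CMD_UPDATE receives a width/height of x2 - x1 + 1 and
 * y2 - y1 + 1. A box covering pixels (10,10)..(19,19) is therefore sent
 * as a 10x10 update (19 - 10 + 1 = 10). A minimal sketch:
 */
#if (0)
static void exampleRefreshTenByTen(struct HWData *data)
{
    struct Box box;
    box.x1 = 10; box.y1 = 10;   /* inclusive top-left corner */
    box.x2 = 19; box.y2 = 19;   /* inclusive bottom-right corner */
    refreshAreaVMWareSVGA(data, &box);
    syncVMWareSVGAFIFO(data);   /* nudge the host to process the update */
}
#endif
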
VOID rectFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_FILL);
    writeVMWareSVGAFIFO(data, color);
    writeVMWareSVGAFIFO(data, x);
    writeVMWareSVGAFIFO(data, y);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    syncVMWareSVGAFIFO(data);
}

VOID ropFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height, ULONG mode)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_FILL);
    writeVMWareSVGAFIFO(data, color);
    writeVMWareSVGAFIFO(data, x);
    writeVMWareSVGAFIFO(data, y);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    writeVMWareSVGAFIFO(data, mode);
    syncVMWareSVGAFIFO(data);
}

VOID ropCopyVMWareSVGA(struct HWData *data, LONG sx, LONG sy, LONG dx, LONG dy, ULONG width, ULONG height, ULONG mode)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_COPY);
    writeVMWareSVGAFIFO(data, sx);
    writeVMWareSVGAFIFO(data, sy);
    writeVMWareSVGAFIFO(data, dx);
    writeVMWareSVGAFIFO(data, dy);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    writeVMWareSVGAFIFO(data, mode);
    syncVMWareSVGAFIFO(data);
}

VOID defineCursorVMWareSVGA(struct HWData *data, struct MouseData *mouse)
{
    ULONG size = mouse->width * mouse->height;
    ULONG *xorMask, *xorBuffer;
    ULONG *image = mouse->shape;
    int i;

    DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    xorMask = (ULONG *)AllocVecPooled(data->maskPool, size << 2);
    if (xorMask)
    {
        DCURSOR(bug("[VMWareSVGA:HW] %s: xormask @ 0x%p\n", __func__, xorMask);)
        xorBuffer = xorMask;
        for (i = 0; i < size; i++)
        {
            ULONG pixel = *image++;
            *xorBuffer = pixel;
            xorBuffer++;
        }

        if (data->capabilities & SVGA_CAP_ALPHA_CURSOR)
        {
            DCURSOR(bug("[VMWareSVGA:HW] %s: rendering Alpha Cursor\n", __func__);)
            writeVMWareSVGAFIFO(data, SVGA_CMD_DEFINE_ALPHA_CURSOR);
            writeVMWareSVGAFIFO(data, VMWCURSOR_ID); // id
            writeVMWareSVGAFIFO(data, 0); // hotspot x
            writeVMWareSVGAFIFO(data, 0); // hotspot y
            writeVMWareSVGAFIFO(data, mouse->width); // width
            writeVMWareSVGAFIFO(data, mouse->height); // height

            xorBuffer = xorMask;
            for (i = 0; i < size; i++)
            {
                writeVMWareSVGAFIFO(data, *xorBuffer++);
            }

            syncVMWareSVGAFIFO(data);
        }
        else
        {
#if (0)
            xandBuffer = (ULONG *)AllocVecPooled(data->maskPool, size << 2);
            if (xandBuffer)
            {
                DCURSOR(bug("[VMWareSVGA:HW] %s: rendering masked Cursor\n", __func__);)
                writeVMWareSVGAFIFO(data, SVGA_CMD_DEFINE_CURSOR);
                writeVMWareSVGAFIFO(data, VMWCURSOR_ID); // id
                writeVMWareSVGAFIFO(data, 0); // hotspot x
                writeVMWareSVGAFIFO(data, 0); // hotspot y
                writeVMWareSVGAFIFO(data, mouse->width); // width
                writeVMWareSVGAFIFO(data, mouse->height); // height

                for (i = 0; i < size; i++)
                {
                    writeVMWareSVGAFIFO(data, *xandBuffer++);
                }

                xorBuffer = xorMask;
                for (i = 0; i < size; i++)
                {
                    writeVMWareSVGAFIFO(data, *xorBuffer++);
                }

                syncVMWareSVGAFIFO(data);
                FreeVecPooled(data->maskPool, xandBuffer);
            }
#endif
        }
        FreeVecPooled(data->maskPool, xorMask);
    }
}

VOID displayCursorVMWareSVGA(struct HWData *data, LONG mode)
{
    volatile ULONG *fifo = data->mmiobase;

    DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_CURSOR_COUNT) && (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3))
    {
        ULONG count;
        fifo[SVGA_FIFO_CURSOR_ON] = mode;
        count = fifo[SVGA_FIFO_CURSOR_COUNT];
        fifo[SVGA_FIFO_CURSOR_COUNT] = ++count;
    }
    else
    {
        if (data->capabilities & SVGA_CAP_CURSOR_BYPASS_2)
        {
            vmwareWriteReg(data, SVGA_REG_CURSOR_ID, VMWCURSOR_ID);
            vmwareWriteReg(data, SVGA_REG_CURSOR_ON, mode);
        }
        else
        {
            writeVMWareSVGAFIFO(data, SVGA_CMD_DISPLAY_CURSOR);
            writeVMWareSVGAFIFO(data, VMWCURSOR_ID);
            writeVMWareSVGAFIFO(data, mode);
            syncVMWareSVGAFIFO(data);
        }
    }
}

VOID moveCursorVMWareSVGA(struct HWData *data, LONG x, LONG y)
{
    volatile ULONG *fifo = data->mmiobase;

    DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_CURSOR_COUNT) && (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3))
    {
        ULONG count;
        fifo[SVGA_FIFO_CURSOR_ON] = SVGA_CURSOR_ON_SHOW;
        fifo[SVGA_FIFO_CURSOR_X] = x;
        fifo[SVGA_FIFO_CURSOR_Y] = y;
        count = fifo[SVGA_FIFO_CURSOR_COUNT];
        fifo[SVGA_FIFO_CURSOR_COUNT] = ++count;
    }
    else
    {
        if (data->capabilities & SVGA_CAP_CURSOR_BYPASS_2)
        {
            vmwareWriteReg(data, SVGA_REG_CURSOR_ID, VMWCURSOR_ID);
            vmwareWriteReg(data, SVGA_REG_CURSOR_X, x);
            vmwareWriteReg(data, SVGA_REG_CURSOR_Y, y);
            vmwareWriteReg(data, SVGA_REG_CURSOR_ON, SVGA_CURSOR_ON_SHOW);
        }
        else
        {
            writeVMWareSVGAFIFO(data, SVGA_CMD_MOVE_CURSOR);
            writeVMWareSVGAFIFO(data, x);
            writeVMWareSVGAFIFO(data, y);
            syncVMWareSVGAFIFO(data);
        }
    }
}

VOID VMWareSVGA_Damage_Reset(struct HWData *hwdata)
{
    DDMG(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    ObtainSemaphore(&hwdata->damage_control);

    hwdata->delta_damage.x1 = INT_MAX;
    hwdata->delta_damage.y1 = INT_MAX;
    hwdata->delta_damage.x2 = INT_MIN;
    hwdata->delta_damage.y2 = INT_MIN;

    ReleaseSemaphore(&hwdata->damage_control);
}

VOID VMWareSVGA_Damage_DeltaAdd(struct HWData *hwdata, struct Box *box)
{
    ULONG tmpval;

    DDMG(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (box->x1 > box->x2)
    {
        tmpval = box->x2;
        box->x2 = box->x1;
        box->x1 = tmpval;
    }
    if (box->y1 > box->y2)
    {
        tmpval = box->y2;
        box->y2 = box->y1;
        box->y1 = tmpval;
    }

    ObtainSemaphore(&hwdata->damage_control);
    if (box->x1 < hwdata->delta_damage.x1)
    {
        hwdata->delta_damage.x1 = box->x1;
    }
    if (box->y1 < hwdata->delta_damage.y1)
    {
        hwdata->delta_damage.y1 = box->y1;
    }
    if (box->x2 > hwdata->delta_damage.x2)
    {
        hwdata->delta_damage.x2 = box->x2;
    }
    if (box->y2 > hwdata->delta_damage.y2)
    {
        hwdata->delta_damage.y2 = box->y2;
    }
#if defined(VMWAREGFX_IMMEDIATEDRAW)
    if (hwdata->shown)
    {
        refreshAreaVMWareSVGA(hwdata, &hwdata->delta_damage);
        VMWareSVGA_Damage_Reset(hwdata);
        syncVMWareSVGAFIFO(hwdata);
    }
#endif
    ReleaseSemaphore(&hwdata->damage_control);
}

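/*
 * A minimal sketch of the intended damage flow, assuming some blitting
 * code (hypothetical here) has just modified a rectangle of the
 * framebuffer: record the area with VMWareSVGA_Damage_DeltaAdd() and,
 * unless VMWAREGFX_IMMEDIATEDRAW is defined, let the render task below
 * batch the refresh on its next 20ms tick.
 */
#if (0)
static void exampleAfterBlit(struct HWData *hwdata, LONG x, LONG y, LONG w, LONG h)
{
    struct Box box;
    box.x1 = x;         box.y1 = y;
    box.x2 = x + w - 1; box.y2 = y + h - 1;     /* inclusive corners */
    VMWareSVGA_Damage_DeltaAdd(hwdata, &box);   /* merged into delta_damage */
}
#endif
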
#if !defined(VMWAREGFX_IMMEDIATEDRAW)
VOID VMWareSVGA_RenderTask(struct HWData *hwdata)
{
    struct MsgPort render_thread_message_port;
    struct timerequest *timer_request;
    struct IORequest *timer_request_as_io_request;
    ULONG request_size = sizeof(struct timerequest);
    BYTE running;

    render_thread_message_port.mp_Flags = PA_SIGNAL;
    render_thread_message_port.mp_Node.ln_Type = NT_MSGPORT;
    render_thread_message_port.mp_MsgList.lh_TailPred = (struct Node *)&render_thread_message_port.mp_MsgList;
    render_thread_message_port.mp_MsgList.lh_Tail = 0;
    render_thread_message_port.mp_MsgList.lh_Head = (struct Node *)&render_thread_message_port.mp_MsgList.lh_Tail;

    render_thread_message_port.mp_SigBit = AllocSignal(-1);
    render_thread_message_port.mp_SigTask = FindTask(0);

    timer_request = AllocMem(request_size, MEMF_CLEAR | MEMF_PUBLIC);
    timer_request_as_io_request = (void *)timer_request;

    timer_request_as_io_request->io_Message.mn_Node.ln_Type = NT_MESSAGE;
    timer_request_as_io_request->io_Message.mn_ReplyPort = &render_thread_message_port;
    timer_request_as_io_request->io_Message.mn_Length = request_size;

    OpenDevice("timer.device", UNIT_MICROHZ, timer_request_as_io_request, 0);

    timer_request->tr_node.io_Command = TR_ADDREQUEST;
    timer_request->tr_time.tv_secs = 0;
    timer_request->tr_time.tv_micro = 20000;
    SendIO(timer_request_as_io_request);

    running = 1;
    while (running) // TODO: If GFX driver hot-swap is ever implemented, unlock this condition and let the RenderTask terminate
    {
        if (!vmwareReadReg(hwdata, SVGA_REG_BUSY))
        {
            ObtainSemaphore(&hwdata->damage_control);
            struct Box *damage = &hwdata->delta_damage;

            if ((damage->x2 > damage->x1) && (damage->y2 > damage->y1))
            {
                refreshAreaVMWareSVGA(hwdata, &hwdata->delta_damage);
                VMWareSVGA_Damage_Reset(hwdata);
                syncVMWareSVGAFIFO(hwdata);
            }
            ReleaseSemaphore(&hwdata->damage_control);
        }

        WaitIO(timer_request_as_io_request);
        GetMsg(&render_thread_message_port); // TODO: Did we have to reply to this? Oh memory ...
        timer_request->tr_node.io_Command = TR_ADDREQUEST;
        timer_request->tr_time.tv_secs = 0;
        timer_request->tr_time.tv_micro = 20000; // TODO: This should be adaptive. We would need to know the CPU load and increase the delay to avoid burning all of the CPU time
        SendIO(timer_request_as_io_request);
    }

    CloseDevice(timer_request_as_io_request);
}
#endif

VOID VMWareSVGA_RestartRenderTask(struct HWData *hwdata)
{
    // TODO: Clean up and add defenses here

    InitSemaphore(&hwdata->damage_control);
#if !defined(VMWAREGFX_IMMEDIATEDRAW)
    hwdata->render_task = NewCreateTask(TASKTAG_PC,
                                        VMWareSVGA_RenderTask,
                                        TASKTAG_NAME, "VMWare Render Task",
                                        TASKTAG_PRI, 1,
                                        TASKTAG_ARG1, hwdata,
                                        TAG_DONE);
#endif
}