Use 32-bit allocations; mark the FIFO command structure as volatile.
[AROS.git] / workbench / hidds / vmwaresvga / vmwaresvga_hardware.c
/*
    Copyright © 1995-2019, The AROS Development Team. All rights reserved.
    $Id$

    Desc: VMWare SVGA hardware functions
    Lang: English
*/
#ifdef DEBUG
#undef DEBUG
#endif
#define DEBUG 0 /* no SysBase */
#include <aros/debug.h>
#include <asm/io.h>

#include <limits.h>
#include <exec/ports.h>
#include <devices/timer.h>
#include <proto/exec.h>

#include "vmwaresvga_intern.h"
#if (DEBUG > 0)
#define DIRQ(x) x
#define DDMG(x)
#define DFIFO(x)
#define DFIFOINF(x) x
#define DFIFOBUF(x) x
#define DFENCE(x) x
#define DREFRESH(x)
#define DCURSOR(x)
#else
#define DIRQ(x)
#define DDMG(x)
#define DFIFO(x)
#define DFIFOINF(x)
#define DFIFOBUF(x)
#define DFENCE(x)
#define DREFRESH(x)
#define DCURSOR(x)
#endif
ULONG vmwareReadReg(struct HWData *data, ULONG reg)
{
    outl(reg, data->indexReg);
    return inl(data->valueReg);
}
VOID vmwareWriteReg(struct HWData *data, ULONG reg, ULONG val)
{
    outl(reg, data->indexReg);
    outl(val, data->valueReg);
}
VOID vmwareHandlerIRQ(struct HWData *data, void *unused)
{
    UWORD port = (UWORD)((IPTR)data->iobase + SVGA_IRQSTATUS_PORT);
    ULONG irqFlags = inl(port);
    outl(irqFlags, port);

    DIRQ(bug("[VMWareSVGA] %s(%08x)\n", __func__, irqFlags);)
    if (!irqFlags)
    {
        D(bug("[VMWareSVGA] %s: Spurious IRQ!\n", __func__);)
    }
    else
    {
    }
}
#undef SysBase
extern struct ExecBase *SysBase;
APTR VMWareSVGA_MemAlloc(struct HWData *data, ULONG size)
{
    D(bug("[VMWareSVGA:HW] %s(%d)\n", __func__, size);)

    return AllocMem(size, MEMF_CLEAR|MEMF_31BIT);
}
VOID VMWareSVGA_MemFree(struct HWData *data, APTR addr, ULONG size)
{
    D(bug("[VMWareSVGA:HW] %s(0x%p)\n", __func__, addr);)

    FreeMem(addr, size);
}

/**********/
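/*
 * Illustrative sketch, not part of the original driver: exec.library's
 * FreeMem() requires the original allocation size, which is why
 * VMWareSVGA_MemFree() takes "size" alongside the pointer. Callers must
 * keep the two paired; "exampleScratch" below is a hypothetical helper.
 */
#if (0) /* example only */
static VOID exampleScratch(struct HWData *data)
{
    ULONG len = 4096;
    APTR buf = VMWareSVGA_MemAlloc(data, len);
    if (buf)
        VMWareSVGA_MemFree(data, buf, len); /* same size as allocated */
}
#endif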
ULONG getVMWareSVGAID(struct HWData *data)
{
    ULONG id;

    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_2);
    id = vmwareReadReg(data, SVGA_REG_ID);
    if (id == SVGA_ID_2)
        return id;
    vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_1);
    id = vmwareReadReg(data, SVGA_REG_ID);
    if (id == SVGA_ID_1)
        return id;
    if (id == SVGA_ID_0)
        return id;
    return SVGA_ID_INVALID;
}
BOOL hasCapVMWareSVGAFIFO(struct HWData *data, ULONG fifocap)
{
    volatile ULONG *fifo = data->mmiobase;

    if ((data->capabilities & SVGA_CAP_EXTENDED_FIFO) &&
        (fifo[SVGA_FIFO_MIN] > (fifocap << 2)))
    {
        return TRUE;
    }
    return FALSE;
}
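/*
 * Illustrative sketch, not part of the original driver:
 * hasCapVMWareSVGAFIFO() reports whether a given extended-FIFO register
 * actually lies inside the register area, i.e. below the FIFO_MIN byte
 * offset. Guarding register access this way is how the rest of this file
 * uses it; "example_checkFence" is a hypothetical helper.
 */
#if (0) /* example only */
static VOID example_checkFence(struct HWData *data)
{
    volatile ULONG *fifo = data->mmiobase;

    /* Only touch SVGA_FIFO_FENCE if the host actually exposes it */
    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE))
    {
        ULONG lastFence = fifo[SVGA_FIFO_FENCE];
        D(bug("[VMWareSVGA:HW] example: last fence = %d\n", lastFence);)
    }
}
#endif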
VOID initVMWareSVGAFIFO(struct HWData *data)
{
    volatile ULONG *fifo = data->mmiobase;

    DFIFOINF(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (!data->fifomin)
    {
        if (data->capabilities & SVGA_CAP_EXTENDED_FIFO)
        {
            DFIFOINF(bug("[VMWareSVGA:HW] %s: Extended FIFO Capabilities\n", __func__);)
            data->fifomin = vmwareReadReg(data, SVGA_REG_MEM_REGS);
            data->fifocapabilities = fifo[SVGA_FIFO_CAPABILITIES];
            DFIFOINF(
              if (data->fifocapabilities & SVGA_FIFO_CAP_FENCE)
                  bug("[VMWareSVGA] %s: Fence\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_ACCELFRONT)
                  bug("[VMWareSVGA] %s: Accelerate Front\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_PITCHLOCK)
                  bug("[VMWareSVGA] %s: Pitchlock\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_VIDEO)
                  bug("[VMWareSVGA] %s: Video\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3)
                  bug("[VMWareSVGA] %s: Cursor-Bypass3\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_ESCAPE)
                  bug("[VMWareSVGA] %s: Escape\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_RESERVE)
                  bug("[VMWareSVGA] %s: FIFO Reserve\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_SCREEN_OBJECT)
                  bug("[VMWareSVGA] %s: Screen-Object\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_GMR2)
                  bug("[VMWareSVGA] %s: GMR2\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_3D_HWVERSION_REVISED)
                  bug("[VMWareSVGA] %s: 3DHWVersion-Revised\n", __func__);
              if (data->fifocapabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)
                  bug("[VMWareSVGA] %s: Screen-Object2\n", __func__);
            )
        }
        else
            data->fifomin = SVGA_FIFO_CAPABILITIES;
    }
    DFIFOINF(bug("[VMWareSVGA:HW] %s: FIFO Min Regs = %d\n", __func__, data->fifomin));

    data->mmiosize = vmwareReadReg(data, SVGA_REG_MEM_SIZE);

    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 0); // Stop vmware from reading the fifo

    fifo[SVGA_FIFO_MIN] = (data->fifomin << VMWFIFO_CMD_SIZESHIFT);
    fifo[SVGA_FIFO_MAX] = data->mmiosize;
    fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
    fifo[SVGA_FIFO_STOP] = fifo[SVGA_FIFO_MIN];

    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 1);

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_GUEST_3D_HWVERSION))
    {
        DFIFOINF(bug("[VMWareSVGA:HW] %s: Setting GUEST_3D_HWVERSION = %d\n", __func__, SVGA3D_HWVERSION_CURRENT);)
        fifo[SVGA_FIFO_GUEST_3D_HWVERSION] = SVGA3D_HWVERSION_CURRENT;
    }

    data->fifocmdbuf.buffer = AllocMem(VMW_COMMAND_SIZE, MEMF_CLEAR|MEMF_ANY);
    bug("[VMWareSVGA:HW] %s: FIFO Cmd bounce-buffer @ 0x%p\n", __func__, data->fifocmdbuf.buffer);
    InitSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
}
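/*
 * FIFO layout sketch (a summary of the reserve/commit logic below, not
 * from the original source): the first SVGA_FIFO_MIN bytes of the MMIO
 * area hold the FIFO registers; the remainder is a byte-addressed ring
 * buffer. NEXT_CMD is where the guest writes, STOP is where the host
 * reads, and one DWORD is always left unused so that a full ring can be
 * distinguished from an empty one.
 *
 *   [ registers | ring buffer: MIN ... STOP ... NEXT_CMD ... MAX )
 */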
void waitVMWareSVGAFIFO(struct HWData *data)
{
    bug("[VMWareSVGA:HW] %s()\n", __func__);

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE_GOAL) &&
        (data->capabilities & SVGA_CAP_IRQMASK)) {
#if (0)
        /*
         * On hosts which support interrupts, we can sleep until the
         * FIFO_PROGRESS interrupt occurs. This is the most efficient
         * thing to do when the FIFO fills up.
         *
         * As with the IRQ-based SVGA_SyncToFence(), this will only work
         * on Workstation 6.5 virtual machines and later.
         */
        vmwareWriteReg(data, SVGA_REG_IRQMASK, SVGA_IRQFLAG_FIFO_PROGRESS);
        // TODO: wait for the irq...
        vmwareWriteReg(data, SVGA_REG_IRQMASK, 0);
#endif
    } else {
        /*
         * Fallback implementation: Perform one iteration of the
         * legacy-style sync. This synchronously processes FIFO commands
         * for an arbitrary amount of time, then returns control back to
         * the guest CPU.
         */
        syncVMWareSVGAFIFO(data);
        vmwareReadReg(data, SVGA_REG_BUSY);
    }
}
APTR reserveVMWareSVGAFIFO(struct HWData *data, ULONG size)
{
    volatile ULONG *fifo = data->mmiobase;
    ULONG max = fifo[SVGA_FIFO_MAX];
    ULONG min = fifo[SVGA_FIFO_MIN];
    ULONG cmdNext = fifo[SVGA_FIFO_NEXT_CMD];
    BOOL canreserve = FALSE;

    DFIFOBUF(bug("[VMWareSVGA:HW] %s(%d)\n", __func__, size);)

    if (data->fifocapabilities & SVGA_FIFO_CAP_RESERVE)
    {
        DFIFOBUF(bug("[VMWareSVGA:HW] %s: reserve supported\n", __func__);)
        canreserve = TRUE;
    }

    if (size > VMW_COMMAND_SIZE || (size > (max - min))) {
        bug("[VMWareSVGA:HW] %s: FIFO command too large\n", __func__);
        return NULL;
    }

    if (size % VMWFIFO_CMD_SIZE) {
        bug("[VMWareSVGA:HW] %s: size of %d not 32bit-aligned!!\n", __func__, size);
        return NULL;
    }

    ObtainSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
    data->fifocmdbuf.reserved = size;

    while (1) {
        ULONG stop = fifo[SVGA_FIFO_STOP];
        BOOL reserveInPlace = FALSE;
        BOOL needBounce = FALSE;

        /*
         * Find a strategy for dealing with "size" of data:
         * - reserve in place, if there's room and the FIFO supports it
         * - reserve in bounce buffer, if there's room in FIFO but not
         *   contiguous or FIFO can't safely handle reservations
         * - otherwise, sync the FIFO and try again.
         */

        if (cmdNext >= stop) {
            /* There is no valid FIFO data between cmdNext and max */
            if (cmdNext + size < max ||
                (cmdNext + size == max && stop > min)) {
                /*
                 * Fastest path 1: There is already enough contiguous space
                 * between cmdNext and max (the end of the buffer).
                 *
                 * Note the edge case: If the "<" path succeeds, we can
                 * quickly return without performing any other tests. If
                 * we end up on the "==" path, we're writing exactly up to
                 * the top of the FIFO and we still need to make sure that
                 * there is at least one unused DWORD at the bottom, in
                 * order to be sure we don't fill the FIFO entirely.
                 *
                 * If the "==" test succeeds, but stop <= min (the FIFO
                 * would be completely full if we were to reserve this
                 * much space) we'll end up hitting the FIFOFull path below.
                 */
                reserveInPlace = TRUE;
            } else if ((max - cmdNext) + (stop - min) <= size) {
                /*
                 * We have to split the FIFO command into two pieces,
                 * but there still isn't enough total free space in
                 * the FIFO to store it.
                 *
                 * Note the "<=". We need to keep at least one DWORD
                 * of the FIFO free at all times, or we won't be able
                 * to tell the difference between full and empty.
                 */
                waitVMWareSVGAFIFO(data);
            } else {
                /*
                 * Data fits in FIFO but only if we split it.
                 * Need to bounce to guarantee a contiguous buffer.
                 */
                needBounce = TRUE;
            }
        } else {
            /* There is FIFO data between cmdNext and max */
            if (cmdNext + size < stop) {
                /*
                 * Fastest path 2: There is already enough contiguous space
                 * between cmdNext and stop.
                 */
                reserveInPlace = TRUE;
            } else {
                /*
                 * There isn't enough room between cmdNext and stop.
                 * The FIFO is too full to accept this command.
                 */
                waitVMWareSVGAFIFO(data);
            }
        }

        /*
         * If we decided we can write directly to the FIFO, make sure
         * the VMX can safely support this.
         */
        if (reserveInPlace) {
            if (canreserve || size <= sizeof(ULONG)) {
                data->bbused = FALSE;
                if (canreserve) {
                    fifo[SVGA_FIFO_RESERVED] = size;
                }
                ReleaseSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
                return cmdNext + (UBYTE *)fifo;
            } else {
                /*
                 * Need to bounce because we can't trust the VMX to safely
                 * handle uncommitted data in FIFO.
                 */
                needBounce = TRUE;
            }
        }

        /*
         * If we reach here, either we found a full FIFO, called
         * waitVMWareSVGAFIFO to make more room, and want to try again, or we
         * decided to use a bounce buffer instead.
         */
        if (needBounce) {
            data->bbused = TRUE;
            ReleaseSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
            return data->fifocmdbuf.buffer;
        }
    } /* while (1) */
}
VOID commitVMWareSVGAFIFO(struct HWData *data, ULONG size)
{
    volatile ULONG *fifo = data->mmiobase;
    ULONG max = fifo[SVGA_FIFO_MAX];
    ULONG min = fifo[SVGA_FIFO_MIN];
    ULONG cmdNext = fifo[SVGA_FIFO_NEXT_CMD];
    BOOL canreserve = FALSE;

    DFIFOBUF(bug("[VMWareSVGA:HW] %s(%d)\n", __func__, size);)

    if (data->fifocapabilities & SVGA_FIFO_CAP_RESERVE)
    {
        DFIFOBUF(bug("[VMWareSVGA:HW] %s: reserve supported\n", __func__);)
        canreserve = TRUE;
    }

    if (data->fifocmdbuf.reserved == 0) {
        bug("[VMWareSVGA:HW] %s: COMMIT called before RESERVE!!\n", __func__);
        return;
    }

    ObtainSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);

    data->fifocmdbuf.used += data->fifocmdbuf.reserved;
    data->fifocmdbuf.reserved = 0;

    if (data->bbused) {
        /*
         * Slow paths: copy out of a bounce buffer.
         */
        UBYTE *buffer = data->fifocmdbuf.buffer;

        if (canreserve) {
            /*
             * Slow path: bulk copy out of a bounce buffer in two chunks.
             *
             * Note that the second chunk may be zero-length if the reserved
             * size was large enough to wrap around but the commit size was
             * small enough that everything fit contiguously into the FIFO.
             *
             * Note also that we didn't need to tell the FIFO about the
             * reservation in the bounce buffer, but we do need to tell it
             * about the data we're bouncing from there into the FIFO.
             */
            ULONG chunkSize;
            if (size > (max - cmdNext))
                chunkSize = max - cmdNext;
            else
                chunkSize = size;
            bug("[VMWareSVGA:HW] %s: chunk size %d, size %d\n", __func__, chunkSize, size);
            fifo[SVGA_FIFO_RESERVED] = size;
            memcpy(cmdNext + (UBYTE *)fifo, buffer, chunkSize);
            memcpy(min + (UBYTE *)fifo, buffer + chunkSize, size - chunkSize);
        } else {
            /*
             * Slowest path: copy one ULONG at a time, updating NEXT_CMD as
             * we go, so that we bound how much uncommitted data the guest
             * has written and the host can process commands while we copy.
             */
            ULONG *dword = (ULONG *)buffer;

            bug("[VMWareSVGA:HW] %s: copying %d bytes\n", __func__, size);

            while (size > 0) {
                fifo[cmdNext >> VMWFIFO_CMD_SIZESHIFT] = *dword++;
                cmdNext += VMWFIFO_CMD_SIZE;
                if (cmdNext == max) {
                    cmdNext = min;
                }
                fifo[SVGA_FIFO_NEXT_CMD] = cmdNext;
                size -= VMWFIFO_CMD_SIZE;
            }
        }
    }

    /*
     * Atomically update NEXT_CMD, if we didn't already
     */
    if (!data->bbused || canreserve) {
        cmdNext += size;
        if (cmdNext >= max) {
            cmdNext -= max - min;
        }
        fifo[SVGA_FIFO_NEXT_CMD] = cmdNext;
    }

    /*
     * Clear the reservation in the FIFO.
     */
    if (canreserve) {
        fifo[SVGA_FIFO_RESERVED] = 0;
    }
    ReleaseSemaphore((struct SignalSemaphore *)&data->fifocmdbuf.fifocmdsema);
}
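/*
 * Illustrative sketch, not part of the original driver: the intended
 * pairing of reserveVMWareSVGAFIFO()/commitVMWareSVGAFIFO(). The command
 * id and argument layout follow refreshAreaVMWareSVGA() below;
 * "exampleUpdate" itself is a hypothetical helper. Sizes are in bytes
 * and must be multiples of VMWFIFO_CMD_SIZE, per the alignment check in
 * reserveVMWareSVGAFIFO().
 */
#if (0) /* example only */
static VOID exampleUpdate(struct HWData *data, ULONG x, ULONG y, ULONG w, ULONG h)
{
    /* 5 dwords: command id + 4 arguments */
    ULONG *cmd = (ULONG *)reserveVMWareSVGAFIFO(data, 5 * VMWFIFO_CMD_SIZE);
    if (cmd)
    {
        cmd[0] = SVGA_CMD_UPDATE;
        cmd[1] = x;
        cmd[2] = y;
        cmd[3] = w;
        cmd[4] = h;
        /* Publish the command; copies from the bounce buffer if one was used */
        commitVMWareSVGAFIFO(data, 5 * VMWFIFO_CMD_SIZE);
    }
}
#endif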
VOID flushVMWareSVGAFIFO(struct HWData *data, ULONG *fence)
{
    DFIFOBUF(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (data->fifocmdbuf.reserved)
    {
        *fence = fenceVMWareSVGAFIFO(data);
        commitVMWareSVGAFIFO(data, data->fifocmdbuf.reserved);
    }
}
ULONG fenceVMWareSVGAFIFO(struct HWData *data)
{
    ULONG fence = 1;

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE) && (data->fifocapabilities & SVGA_FIFO_CAP_FENCE))
    {
        fence = data->fence++;
        if (fence == 0) fence = 1;

        DFENCE(bug("[VMWareSVGA:HW] %s: inserting fence #%d\n", __func__, fence);)

        writeVMWareSVGAFIFO(data, SVGA_CMD_FENCE);
        writeVMWareSVGAFIFO(data, fence);
        syncVMWareSVGAFIFO(data);
    }
    return fence;
}
VOID syncfenceVMWareSVGAFIFO(struct HWData *data, ULONG fence)
{
    volatile ULONG *fifo;

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE) && (data->fifocapabilities & SVGA_FIFO_CAP_FENCE))
    {
        fifo = data->mmiobase;
        if ((LONG)(fifo[SVGA_FIFO_FENCE] - fence) < 0)
        {
            DFENCE(bug("[VMWareSVGA:HW] %s: fence #%d hasn't been reached yet...\n", __func__, fence);)
        }
    }
}
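/*
 * Illustrative sketch, not part of the original driver: fences mark a
 * point in the command stream. fenceVMWareSVGAFIFO() inserts the marker,
 * and the signed-difference test in syncfenceVMWareSVGAFIFO() reports
 * (wrap-safely) whether the host has processed past it. Note that it
 * only reports; it does not block. "exampleCheckIdle" is hypothetical.
 */
#if (0) /* example only */
static VOID exampleCheckIdle(struct HWData *data)
{
    ULONG fence = fenceVMWareSVGAFIFO(data); /* mark current end of stream */
    syncfenceVMWareSVGAFIFO(data, fence);    /* check/report progress */
}
#endif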
VOID syncVMWareSVGAFIFO(struct HWData *data)
{
    DFIFO(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    vmwareWriteReg(data, SVGA_REG_SYNC, 1);
}
VOID writeVMWareSVGAFIFO(struct HWData *data, ULONG val)
{
    volatile ULONG *fifo;

    DFIFO(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    fifo = data->mmiobase;
    if ((fifo[SVGA_FIFO_NEXT_CMD] + VMWFIFO_CMD_SIZE == fifo[SVGA_FIFO_STOP]) ||
        ((fifo[SVGA_FIFO_NEXT_CMD] == (fifo[SVGA_FIFO_MAX] - VMWFIFO_CMD_SIZE)) &&
         (fifo[SVGA_FIFO_STOP] == fifo[SVGA_FIFO_MIN])))
    {
        syncVMWareSVGAFIFO(data);
    }

    fifo[fifo[SVGA_FIFO_NEXT_CMD] >> VMWFIFO_CMD_SIZESHIFT] = val;
    fifo[SVGA_FIFO_NEXT_CMD] += VMWFIFO_CMD_SIZE;

    if (fifo[SVGA_FIFO_NEXT_CMD] == fifo[SVGA_FIFO_MAX])
        fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
}
BOOL initVMWareSVGAHW(struct HWData *data, OOP_Object *device)
{
    ULONG id;

    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    id = getVMWareSVGAID(data);
    if (id == SVGA_ID_INVALID)
        return FALSE;

    data->maskPool = CreatePool(MEMF_ANY, (32 << 3), (32 << 2));

    if (id >= SVGA_ID_1)
    {
        volatile ULONG *fifo = data->mmiobase;
        data->capabilities = vmwareReadReg(data, SVGA_REG_CAPABILITIES);
    }

    if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
    {
        data->bitsperpixel = vmwareReadReg(data, SVGA_REG_HOST_BITS_PER_PIXEL);
        vmwareWriteReg(data, SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);
    }
    else
        data->bitsperpixel = vmwareReadReg(data, SVGA_REG_BITS_PER_PIXEL);

    data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
    data->maxwidth = vmwareReadReg(data, SVGA_REG_MAX_WIDTH);
    data->maxheight = vmwareReadReg(data, SVGA_REG_MAX_HEIGHT);
    data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
    data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
    data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);
    data->bytesperpixel = 1;

    if (data->depth > 16)
        data->bytesperpixel = 4;
    else if (data->depth > 8)
        data->bytesperpixel = 2;

    if (data->capabilities & SVGA_CAP_MULTIMON)
    {
        data->displaycount = vmwareReadReg(data, SVGA_REG_NUM_DISPLAYS);
    }
    else
    {
        data->displaycount = 1;
    }

    data->vramsize = vmwareReadReg(data, SVGA_REG_VRAM_SIZE);
    data->vrambase = (APTR)(IPTR)vmwareReadReg(data, SVGA_REG_FB_START);
    data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);

    bug("[VMWareSVGA:HW] %s: VRAM at 0x%08x size %d\n", __func__, data->vrambase, data->vramsize);
    bug("[VMWareSVGA:HW] %s: no.displays: %d\n", __func__, data->displaycount);
    bug("[VMWareSVGA:HW] %s: depth: %d\n", __func__, data->depth);
    bug("[VMWareSVGA:HW] %s: bpp  : %d\n", __func__, data->bitsperpixel);

    VMWareSVGA_RestartRenderTask(data);

    return TRUE;
}
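/*
 * Illustrative sketch, not part of the original driver: the bring-up
 * order the rest of this file expects. initVMWareSVGAHW() negotiates the
 * device id and reads the static configuration; setModeVMWareSVGA() then
 * programs a mode and, via initDisplayVMWareSVGA(), enables the display,
 * reads back the mode config and initialises the command FIFO.
 * "exampleBringUp" and the 800x600 resolution are hypothetical.
 */
#if (0) /* example only */
static BOOL exampleBringUp(struct HWData *data, OOP_Object *device)
{
    if (!initVMWareSVGAHW(data, device))
        return FALSE;
    setModeVMWareSVGA(data, 800, 600); /* any supported resolution */
    return TRUE;
}
#endif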
VOID getModeCfgVMWareSVGA(struct HWData *data)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    data->fboffset = vmwareReadReg(data, SVGA_REG_FB_OFFSET);
    data->bytesperline = vmwareReadReg(data, SVGA_REG_BYTES_PER_LINE);
    data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
    data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
    data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
    data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);
    data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);

    data->display_width = vmwareReadReg(data, SVGA_REG_WIDTH);
    data->display_height = vmwareReadReg(data, SVGA_REG_HEIGHT);
    D(bug("[VMWareSVGA:HW] %s: %dx%d\n", __func__, data->display_width, data->display_height));
}
VOID enableVMWareSVGA(struct HWData *data)
{
    vmwareWriteReg(data, SVGA_REG_ENABLE, 1);
}

VOID disableVMWareSVGA(struct HWData *data)
{
    vmwareWriteReg(data, SVGA_REG_ENABLE, 0);
}
VOID initDisplayVMWareSVGA(struct HWData *data)
{
    enableVMWareSVGA(data);
    getModeCfgVMWareSVGA(data);
    initVMWareSVGAFIFO(data);
}
VOID setModeVMWareSVGA(struct HWData *data, ULONG width, ULONG height)
{
    D(bug("[VMWareSVGA:HW] %s(%dx%d)\n", __func__, width, height));

    disableVMWareSVGA(data);

    vmwareWriteReg(data, SVGA_REG_WIDTH, width);
    vmwareWriteReg(data, SVGA_REG_HEIGHT, height);

    if (data->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
    {
        D(bug("[VMWareSVGA:HW] %s: Adjusting Display Topology\n", __func__);)

        vmwareWriteReg(data, SVGA_REG_DISPLAY_ID, 0);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_IS_PRIMARY, TRUE);

        vmwareWriteReg(data, SVGA_REG_DISPLAY_POSITION_X, 0);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_POSITION_Y, 0);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_WIDTH, width);
        vmwareWriteReg(data, SVGA_REG_DISPLAY_HEIGHT, height);
    }

    if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
        vmwareWriteReg(data, SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);

    initDisplayVMWareSVGA(data);
}
VOID refreshAreaVMWareSVGA(struct HWData *data, struct Box *box)
{
    DREFRESH(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_UPDATE);
    writeVMWareSVGAFIFO(data, box->x1);
    writeVMWareSVGAFIFO(data, box->y1);
    writeVMWareSVGAFIFO(data, box->x2 - box->x1 + 1);
    writeVMWareSVGAFIFO(data, box->y2 - box->y1 + 1);
}
VOID rectFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_FILL);
    writeVMWareSVGAFIFO(data, color);
    writeVMWareSVGAFIFO(data, x);
    writeVMWareSVGAFIFO(data, y);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    syncVMWareSVGAFIFO(data);
}

VOID ropFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height, ULONG mode)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_FILL);
    writeVMWareSVGAFIFO(data, color);
    writeVMWareSVGAFIFO(data, x);
    writeVMWareSVGAFIFO(data, y);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    writeVMWareSVGAFIFO(data, mode);
    syncVMWareSVGAFIFO(data);
}

VOID ropCopyVMWareSVGA(struct HWData *data, LONG sx, LONG sy, LONG dx, LONG dy, ULONG width, ULONG height, ULONG mode)
{
    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_COPY);
    writeVMWareSVGAFIFO(data, sx);
    writeVMWareSVGAFIFO(data, sy);
    writeVMWareSVGAFIFO(data, dx);
    writeVMWareSVGAFIFO(data, dy);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    writeVMWareSVGAFIFO(data, mode);
    syncVMWareSVGAFIFO(data);
}
VOID defineCursorVMWareSVGA(struct HWData *data, struct MouseData *mouse)
{
    ULONG size = mouse->width * mouse->height;
    ULONG *xorMask, *xorBuffer;
    ULONG *image = mouse->shape;
    int i;

    DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    xorMask = (ULONG *)AllocVecPooled(data->maskPool, size << 2);
    if (xorMask)
    {
        DCURSOR(bug("[VMWareSVGA:HW] %s: xormask @ 0x%p\n", __func__, xorMask);)
        xorBuffer = xorMask;
        for (i = 0; i < size; i++)
        {
            ULONG pixel = *image++;
            *xorBuffer = pixel;
            xorBuffer++;
        }

        if (data->capabilities & SVGA_CAP_ALPHA_CURSOR)
        {
            DCURSOR(bug("[VMWareSVGA:HW] %s: rendering Alpha Cursor\n", __func__);)
            writeVMWareSVGAFIFO(data, SVGA_CMD_DEFINE_ALPHA_CURSOR);
            writeVMWareSVGAFIFO(data, VMWCURSOR_ID); // id
            writeVMWareSVGAFIFO(data, 0); // hotspot x
            writeVMWareSVGAFIFO(data, 0); // hotspot y
            writeVMWareSVGAFIFO(data, mouse->width); // width
            writeVMWareSVGAFIFO(data, mouse->height); // height

            xorBuffer = xorMask;
            for (i = 0; i < size; i++)
            {
                writeVMWareSVGAFIFO(data, *xorBuffer++);
            }

            syncVMWareSVGAFIFO(data);
        }
        else
        {
#if (0)
            ULONG *xandBuffer;

            xandBuffer = (ULONG *)AllocVecPooled(data->maskPool, size << 2);
            if (xandBuffer)
            {
                DCURSOR(bug("[VMWareSVGA:HW] %s: rendering masked Cursor\n", __func__);)
                writeVMWareSVGAFIFO(data, SVGA_CMD_DEFINE_CURSOR);
                writeVMWareSVGAFIFO(data, VMWCURSOR_ID); // id
                writeVMWareSVGAFIFO(data, 0); // hotspot x
                writeVMWareSVGAFIFO(data, 0); // hotspot y
                writeVMWareSVGAFIFO(data, mouse->width); // width
                writeVMWareSVGAFIFO(data, mouse->height); // height

                for (i = 0; i < size; i++)
                {
                    writeVMWareSVGAFIFO(data, *xandBuffer++);
                }

                xorBuffer = xorMask;
                for (i = 0; i < size; i++)
                {
                    writeVMWareSVGAFIFO(data, *xorBuffer++);
                }

                syncVMWareSVGAFIFO(data);
                FreeVecPooled(data->maskPool, xandBuffer);
            }
#endif
        }
        FreeVecPooled(data->maskPool, xorMask);
    }
}
VOID displayCursorVMWareSVGA(struct HWData *data, LONG mode)
{
    volatile ULONG *fifo = data->mmiobase;

    DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_CURSOR_COUNT) && (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3))
    {
        ULONG count;
        fifo[SVGA_FIFO_CURSOR_ON] = mode;
        count = fifo[SVGA_FIFO_CURSOR_COUNT];
        fifo[SVGA_FIFO_CURSOR_COUNT] = ++count;
    }
    else
    {
        if (data->capabilities & SVGA_CAP_CURSOR_BYPASS_2)
        {
            vmwareWriteReg(data, SVGA_REG_CURSOR_ID, VMWCURSOR_ID);
            vmwareWriteReg(data, SVGA_REG_CURSOR_ON, mode);
        }
        else
        {
            writeVMWareSVGAFIFO(data, SVGA_CMD_DISPLAY_CURSOR);
            writeVMWareSVGAFIFO(data, VMWCURSOR_ID);
            writeVMWareSVGAFIFO(data, mode);
            syncVMWareSVGAFIFO(data);
        }
    }
}
VOID moveCursorVMWareSVGA(struct HWData *data, LONG x, LONG y)
{
    volatile ULONG *fifo = data->mmiobase;

    DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_CURSOR_COUNT) && (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3))
    {
        ULONG count;
        fifo[SVGA_FIFO_CURSOR_ON] = SVGA_CURSOR_ON_SHOW;
        fifo[SVGA_FIFO_CURSOR_X] = x;
        fifo[SVGA_FIFO_CURSOR_Y] = y;
        count = fifo[SVGA_FIFO_CURSOR_COUNT];
        fifo[SVGA_FIFO_CURSOR_COUNT] = ++count;
    }
    else
    {
        if (data->capabilities & SVGA_CAP_CURSOR_BYPASS_2)
        {
            vmwareWriteReg(data, SVGA_REG_CURSOR_ID, VMWCURSOR_ID);
            vmwareWriteReg(data, SVGA_REG_CURSOR_X, x);
            vmwareWriteReg(data, SVGA_REG_CURSOR_Y, y);
            vmwareWriteReg(data, SVGA_REG_CURSOR_ON, SVGA_CURSOR_ON_SHOW);
        }
        else
        {
            writeVMWareSVGAFIFO(data, SVGA_CMD_MOVE_CURSOR);
            writeVMWareSVGAFIFO(data, x);
            writeVMWareSVGAFIFO(data, y);
            syncVMWareSVGAFIFO(data);
        }
    }
}
VOID VMWareSVGA_Damage_Reset(struct HWData *hwdata)
{
    DDMG(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    ObtainSemaphore(&hwdata->damage_control);

    hwdata->delta_damage.x1 = INT_MAX;
    hwdata->delta_damage.y1 = INT_MAX;
    hwdata->delta_damage.x2 = INT_MIN;
    hwdata->delta_damage.y2 = INT_MIN;

    ReleaseSemaphore(&hwdata->damage_control);
}
VOID VMWareSVGA_Damage_DeltaAdd(struct HWData *hwdata, struct Box *box)
{
    ULONG tmpval;

    DDMG(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (box->x1 > box->x2)
    {
        tmpval = box->x2;
        box->x2 = box->x1;
        box->x1 = tmpval;
    }
    if (box->y1 > box->y2)
    {
        tmpval = box->y2;
        box->y2 = box->y1;
        box->y1 = tmpval;
    }

    ObtainSemaphore(&hwdata->damage_control);
    if (box->x1 < hwdata->delta_damage.x1)
    {
        hwdata->delta_damage.x1 = box->x1;
    }
    if (box->y1 < hwdata->delta_damage.y1)
    {
        hwdata->delta_damage.y1 = box->y1;
    }
    if (box->x2 > hwdata->delta_damage.x2)
    {
        hwdata->delta_damage.x2 = box->x2;
    }
    if (box->y2 > hwdata->delta_damage.y2)
    {
        hwdata->delta_damage.y2 = box->y2;
    }
#if defined(VMWAREGFX_IMMEDIATEDRAW)
    if (hwdata->shown)
    {
        refreshAreaVMWareSVGA(hwdata, &hwdata->delta_damage);
        VMWareSVGA_Damage_Reset(hwdata);
        syncVMWareSVGAFIFO(hwdata);
    }
#endif
    ReleaseSemaphore(&hwdata->damage_control);
}
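/*
 * Illustrative sketch, not part of the original driver: how damage
 * tracking is fed. A caller just passes a box; VMWareSVGA_Damage_DeltaAdd()
 * grows the pending rectangle, which either the render task below or the
 * VMWAREGFX_IMMEDIATEDRAW path eventually flushes to the host with
 * SVGA_CMD_UPDATE. "exampleMarkDirty" is a hypothetical helper;
 * coordinates are inclusive.
 */
#if (0) /* example only */
static VOID exampleMarkDirty(struct HWData *hwdata, LONG x, LONG y, LONG w, LONG h)
{
    struct Box dirty;

    dirty.x1 = x;
    dirty.y1 = y;
    dirty.x2 = x + w - 1;
    dirty.y2 = y + h - 1;

    VMWareSVGA_Damage_DeltaAdd(hwdata, &dirty);
}
#endif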
#if !defined(VMWAREGFX_IMMEDIATEDRAW)
VOID VMWareSVGA_RenderTask(struct HWData *hwdata)
{
    struct MsgPort render_thread_message_port;
    struct timerequest *timer_request;
    struct IORequest *timer_request_as_io_request;
    ULONG request_size = sizeof(struct timerequest);
    BYTE running;

    render_thread_message_port.mp_Flags = PA_SIGNAL;
    render_thread_message_port.mp_Node.ln_Type = NT_MSGPORT;
    render_thread_message_port.mp_MsgList.lh_TailPred = (struct Node *)&render_thread_message_port.mp_MsgList;
    render_thread_message_port.mp_MsgList.lh_Tail = 0;
    render_thread_message_port.mp_MsgList.lh_Head = (struct Node *)&render_thread_message_port.mp_MsgList.lh_Tail;

    render_thread_message_port.mp_SigBit = AllocSignal(-1);
    render_thread_message_port.mp_SigTask = FindTask(0);

    timer_request = AllocMem(request_size, MEMF_CLEAR | MEMF_PUBLIC);
    timer_request_as_io_request = (void *)timer_request;

    timer_request_as_io_request->io_Message.mn_Node.ln_Type = NT_MESSAGE;
    timer_request_as_io_request->io_Message.mn_ReplyPort = &render_thread_message_port;
    timer_request_as_io_request->io_Message.mn_Length = request_size;

    OpenDevice("timer.device", UNIT_MICROHZ, timer_request_as_io_request, 0);

    timer_request->tr_node.io_Command = TR_ADDREQUEST;
    timer_request->tr_time.tv_secs = 0;
    timer_request->tr_time.tv_micro = 20000;
    SendIO(timer_request_as_io_request);

    running = 1;
    while (running) // TODO: If GFX driver hot-swap is ever implemented, unlock this condition and let the RenderTask terminate
    {
        if (!vmwareReadReg(hwdata, SVGA_REG_BUSY))
        {
            ObtainSemaphore(&hwdata->damage_control);
            struct Box *damage = &hwdata->delta_damage;

            /* Coordinates are inclusive, so a single row or column
               (x1 == x2 or y1 == y2) is still valid damage */
            if ((damage->x2 >= damage->x1) && (damage->y2 >= damage->y1))
            {
                refreshAreaVMWareSVGA(hwdata, &hwdata->delta_damage);
                VMWareSVGA_Damage_Reset(hwdata);
                syncVMWareSVGAFIFO(hwdata);
            }
            ReleaseSemaphore(&hwdata->damage_control);
        }

        WaitIO(timer_request_as_io_request);
        GetMsg(&render_thread_message_port); // TODO: Did we have to reply to this? Oh memory ...

        timer_request->tr_node.io_Command = TR_ADDREQUEST;
        timer_request->tr_time.tv_secs = 0;
        timer_request->tr_time.tv_micro = 20000; // TODO: This should be adaptive. We would need to know the CPU load and increase the delay to avoid burning all of the CPU time
        SendIO(timer_request_as_io_request);
    }

    CloseDevice(timer_request_as_io_request);
}
#endif
VOID VMWareSVGA_RestartRenderTask(struct HWData *hwdata)
{
    // TODO: CleanUp and add defenses here

    InitSemaphore(&hwdata->damage_control);
#if !defined(VMWAREGFX_IMMEDIATEDRAW)
    hwdata->render_task = NewCreateTask(TASKTAG_PC,
                                        VMWareSVGA_RenderTask,
                                        TASKTAG_NAME, "VMWare Render Task",
                                        TASKTAG_PRI, 1,
                                        TASKTAG_ARG1, hwdata,
                                        TAG_DONE);
#endif
}