wip commit.
[AROS.git] / workbench / hidds / vmwaresvga / vmwaresvga_hardware.c
blobf16ea9a1becc2f098593874cef3fa312c909a171
1 /*
2 Copyright © 1995-2019, The AROS Development Team. All rights reserved.
3 $Id$
5 Desc: vmware svga hardware functions
6 Lang: English
7 */
9 #ifdef DEBUG
10 #undef DEBUG
11 #endif
12 #define DEBUG 0 /* no SysBase */
13 #include <aros/debug.h>
15 #include <asm/io.h>
17 #include <limits.h>
18 #include <exec/ports.h>
19 #include <devices/timer.h>
20 #include <proto/exec.h>
22 #include "vmwaresvga_intern.h"
/*
 * Per-subsystem debug switches. Each DXXX(x) macro expands to its argument
 * when that subsystem's debugging is enabled, or to nothing otherwise.
 *
 * Fix: the guard previously read "#if (DEBUG0)" — an identifier that is
 * never defined, so it always evaluated to 0 and the enabled variants were
 * unreachable even with DEBUG set to 1. It should test the DEBUG value.
 */
#if (DEBUG > 0)
#define DIRQ(x) x
#define DDMG(x)
#define DFIFO(x)
#define DFIFOINF(x) x
#define DFENCE(x) x
#define DREFRESH(x)
#define DCURSOR(x)
#else
#define DIRQ(x)
#define DDMG(x)
#define DFIFO(x)
#define DFIFOINF(x)
#define DFENCE(x)
#define DREFRESH(x)
#define DCURSOR(x)
#endif
42 ULONG vmwareReadReg(struct HWData *data, ULONG reg)
44 outl(reg, data->indexReg);
45 return inl(data->valueReg);
48 VOID vmwareWriteReg(struct HWData *data, ULONG reg, ULONG val)
50 outl(reg, data->indexReg);
51 outl(val, data->valueReg);
/*
 * Interrupt handler: reads the pending IRQ flag mask from the device's IRQ
 * status port and writes the same mask back, which acknowledges (clears)
 * those interrupts.
 *
 * NOTE(review): the body of the non-spurious (else) branch is not visible
 * in this extraction.
 */
VOID vmwareHandlerIRQ(struct HWData *data, void *unused)
{
    UWORD port = (UWORD)((IPTR)data->iobase + SVGA_IRQSTATUS_PORT);
    ULONG irqFlags = inl(port);
    /* writing the flags back acknowledges the pending interrupts */
    outl(irqFlags, port);

    DIRQ(bug("[VMWareSVGA] %s(%08x)\n", __func__, irqFlags);)
    if (!irqFlags)
    {
        D(bug("[VMWareSVGA] %s: Spurrious IRQ!\n", __func__);)
    }
    else
/* The debug header above was included in "no SysBase" mode; from here on
 * the functions need the real global ExecBase. */
#undef SysBase
extern struct ExecBase *SysBase;
74 ULONG getVMWareSVGAID(struct HWData *data)
76 ULONG id;
78 D(bug("[VMWareSVGA:HW] %s()\n", __func__);)
80 vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_2);
81 id = vmwareReadReg(data, SVGA_REG_ID);
82 if (id == SVGA_ID_2)
83 return id;
84 vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_1);
85 id = vmwareReadReg(data, SVGA_REG_ID);
86 if (id == SVGA_ID_1)
87 return id;
88 if (id == SVGA_ID_0)
89 return id;
90 return SVGA_ID_INVALID;
93 BOOL hasCapVMWareSVGAFIFO(struct HWData *data, ULONG fifocap)
95 volatile ULONG *fifo = data->mmiobase;
96 if ((data->capabilities & SVGA_CAP_EXTENDED_FIFO) &&
97 (fifo[SVGA_FIFO_MIN] > (fifocap << 2)))
99 return TRUE;
101 return FALSE;
/*
 * Initialise the command FIFO. On the first call, determine where the FIFO
 * command area starts (after the extended FIFO register block, when the
 * device has one) and log the advertised FIFO capabilities. Then stop FIFO
 * processing, (re)program the FIFO bounds and ring pointers, and re-enable
 * processing. Finally advertise the guest 3D HW version when that FIFO
 * register is available.
 */
VOID initVMWareSVGAFIFO(struct HWData *data)
{
    volatile ULONG *fifo = data->mmiobase;

    DFIFOINF(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    if (!data->fifomin)
    {
        if (data->capabilities & SVGA_CAP_EXTENDED_FIFO)
        {
            DFIFOINF(bug("[VMWareSVGA:HW] %s: Extended FIFO Capabilities\n", __func__);)
            /* number of FIFO registers reserved ahead of the command area */
            data->fifomin = vmwareReadReg(data, SVGA_REG_MEM_REGS);
            data->fifocapabilities = fifo[SVGA_FIFO_CAPABILITIES];
            /* debug dump of each advertised FIFO capability bit */
            DFIFOINF(
            if (data->fifocapabilities & SVGA_FIFO_CAP_FENCE)
                bug("[VMWareSVGA] %s: Fence\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_ACCELFRONT)
                bug("[VMWareSVGA] %s: Accelerate Front\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_PITCHLOCK)
                bug("[VMWareSVGA] %s: Pitchlock\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_VIDEO)
                bug("[VMWareSVGA] %s: Video\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3)
                bug("[VMWareSVGA] %s: Cursor-Bypass3\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_ESCAPE)
                bug("[VMWareSVGA] %s: Escape\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_RESERVE)
                bug("[VMWareSVGA] %s: FIFO Reserve\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_SCREEN_OBJECT)
                bug("[VMWareSVGA] %s: Screen-Object\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_GMR2)
                bug("[VMWareSVGA] %s: GMR2\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_3D_HWVERSION_REVISED)
                bug("[VMWareSVGA] %s: 3DHWVersion-Revised\n", __func__);
            if (data->fifocapabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)
                bug("[VMWareSVGA] %s: Screen-Object2\n", __func__);
            )
        }
        else
            /* no extended registers: command area starts right after the
               basic FIFO register block */
            data->fifomin = SVGA_FIFO_CAPABILITIES;
    }
    DFIFOINF(bug("[VMWareSVGA:HW] %s: FIFO Min Regs = %d\n", __func__, data->fifomin));

    data->mmiosize = vmwareReadReg(data, SVGA_REG_MEM_SIZE);

    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 0); //Stop vmware from reading the fifo

    /* command area spans [fifomin regs * 4 .. mmiosize); NEXT_CMD == STOP
       means the FIFO starts out empty */
    fifo[SVGA_FIFO_MIN] = (data->fifomin << VMWFIFO_CMD_SIZESHIFT);
    fifo[SVGA_FIFO_MAX] = data->mmiosize;
    fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
    fifo[SVGA_FIFO_STOP] = fifo[SVGA_FIFO_MIN];

    /* re-enable FIFO processing */
    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 1);

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_GUEST_3D_HWVERSION))
    {
        DFIFOINF(bug("[VMWareSVGA:HW] %s: Setting GUEST_3D_HWVERSION = %d\n", __func__, SVGA3D_HWVERSION_CURRENT));
        fifo[SVGA_FIFO_GUEST_3D_HWVERSION] = SVGA3D_HWVERSION_CURRENT;
    }
}
/*
 * Begin reserving <size> bytes of FIFO space for a command.
 *
 * NOTE(review): this function is truncated in this extraction — the code
 * after the alignment check (and the function's return path) is missing,
 * and the max/min/cmdNext snapshots are presumably used by that missing
 * part. Do not assume the visible text is the whole implementation.
 */
APTR reserveVMWareSVGAFIFO(struct HWData *data, ULONG size)
{
    volatile ULONG *fifo = data->mmiobase;
    ULONG max = fifo[SVGA_FIFO_MAX];
    ULONG min = fifo[SVGA_FIFO_MIN];
    ULONG cmdNext = fifo[SVGA_FIFO_NEXT_CMD];

    /* commands must be a whole number of 32bit words */
    if (size % VMWFIFO_CMD_SIZE) {
        DFIFOINF(bug("[VMWareSVGA:HW] %s: size of %d not 32bit-aligned!!\n", __func__, size);)
/*
 * Commit a previously reserved FIFO region of <size> bytes.
 *
 * NOTE(review): this function is truncated in this extraction — only the
 * local snapshots of the FIFO bounds are visible; the body that actually
 * advances the ring pointers is missing here.
 */
VOID commitVMWareSVGAFIFO(struct HWData *data, ULONG size)
{
    volatile ULONG *fifo = data->mmiobase;
    ULONG max = fifo[SVGA_FIFO_MAX];
    ULONG min = fifo[SVGA_FIFO_MIN];
    ULONG cmdNext = fifo[SVGA_FIFO_NEXT_CMD];
184 ULONG fenceVMWareSVGAFIFO(struct HWData *data)
186 ULONG fence = 1;
188 if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE) && (data->fifocapabilities & SVGA_FIFO_CAP_FENCE))
190 fence = data->fence++;
191 if (fence == 0) fence = 1;
193 DFENCE(bug("[VMWareSVGA:HW] %s: inserting fence #%d\n", __func__, fence);)
195 writeVMWareSVGAFIFO(data, SVGA_CMD_FENCE);
196 writeVMWareSVGAFIFO(data, fence);
197 syncVMWareSVGAFIFO(data);
199 return fence;
/*
 * Check whether a previously inserted fence has been processed by the
 * device: fifo[SVGA_FIFO_FENCE] holds the last fence the host passed, and
 * the signed difference copes with counter wraparound.
 *
 * NOTE(review): the handling of a not-yet-reached fence (after the debug
 * output below) is not visible in this extraction.
 */
VOID syncfenceVMWareSVGAFIFO(struct HWData *data, ULONG fence)
{
    volatile ULONG *fifo;

    if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_FENCE) && (data->fifocapabilities & SVGA_FIFO_CAP_FENCE))
    {
        fifo = data->mmiobase;
        /* signed comparison is wraparound-safe */
        if ((LONG)(fifo[SVGA_FIFO_FENCE] - fence) < 0)
        {
            DFENCE(bug("[VMWareSVGA:HW] %s: fence #%d hasnt been reached yet...\n", __func__, fence);)
217 VOID syncVMWareSVGAFIFO(struct HWData *data)
219 DFIFO(bug("[VMWareSVGA:HW] %s()\n", __func__);)
221 vmwareWriteReg(data, SVGA_REG_SYNC, 1);
/*
 * Append one 32bit word to the command FIFO ring. If the write would make
 * the producer (NEXT_CMD) catch up with the consumer (STOP) — either
 * directly or across the wrap point — the device is kicked first so it
 * drains the FIFO. NEXT_CMD wraps back to MIN at the end of the area.
 *
 * All FIFO register values are byte offsets into the MMIO area, hence the
 * >> VMWFIFO_CMD_SIZESHIFT when used as a ULONG index.
 */
VOID writeVMWareSVGAFIFO(struct HWData *data, ULONG val)
{
    volatile ULONG *fifo;

    DFIFO(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    fifo = data->mmiobase;
    /* FIFO full condition: writer one slot behind reader, or writer at the
       last slot while the reader still sits at MIN */
    if (
        (fifo[SVGA_FIFO_NEXT_CMD] + VMWFIFO_CMD_SIZE == fifo[SVGA_FIFO_STOP]) ||
        (
            (fifo[SVGA_FIFO_NEXT_CMD] == (fifo[SVGA_FIFO_MAX] - VMWFIFO_CMD_SIZE)) &&
            (fifo[SVGA_FIFO_STOP] == fifo[SVGA_FIFO_MIN])
        )
    )
        syncVMWareSVGAFIFO(data);

    /* store the word at the current byte offset, then advance */
    fifo[fifo[SVGA_FIFO_NEXT_CMD] >> VMWFIFO_CMD_SIZESHIFT] = val;
    fifo[SVGA_FIFO_NEXT_CMD] += VMWFIFO_CMD_SIZE;

    /* wrap the producer pointer back to the start of the command area */
    if (fifo[SVGA_FIFO_NEXT_CMD] == fifo[SVGA_FIFO_MAX])
        fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
}
/*
 * One-time hardware initialisation: negotiate the device id, create the
 * pool used for cursor mask buffers, read the device capabilities and
 * current video parameters, then start the render task.
 * Returns FALSE when no supported SVGA id could be negotiated.
 *
 * NOTE(review): several interior lines of this function are missing from
 * this extraction (marked below); brace extents were partially inferred.
 */
BOOL initVMWareSVGAHW(struct HWData *data, OOP_Object *device)
{
    ULONG id;

    D(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    id = getVMWareSVGAID(data);
    if (id == SVGA_ID_INVALID)
        return FALSE;

    /* pool used for temporary cursor masks (see defineCursorVMWareSVGA) */
    data->maskPool = CreatePool(MEMF_ANY, (32 << 3), (32 << 2));

    if (id >= SVGA_ID_1)
    {
        volatile ULONG *fifo = data->mmiobase;
        data->capabilities = vmwareReadReg(data, SVGA_REG_CAPABILITIES);
        /* NOTE(review): remaining statements of this branch are not
           visible in this extraction */
    }

    if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
    {
        /* adopt the host's pixel depth and program it into the device */
        data->bitsperpixel = vmwareReadReg(data, SVGA_REG_HOST_BITS_PER_PIXEL);
        vmwareWriteReg(data,SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);
    }
    else
        data->bitsperpixel = vmwareReadReg(data, SVGA_REG_BITS_PER_PIXEL);

    data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
    data->maxwidth = vmwareReadReg(data, SVGA_REG_MAX_WIDTH);
    data->maxheight = vmwareReadReg(data, SVGA_REG_MAX_HEIGHT);
    data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
    data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
    data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);

    /* derive bytes-per-pixel from the reported depth */
    data->bytesperpixel = 1;
    if (data->depth>16)
        data->bytesperpixel = 4;
    else if (data->depth>8)
        data->bytesperpixel = 2;

    if (data->capabilities & SVGA_CAP_MULTIMON)
    {
        data->displaycount = vmwareReadReg(data, SVGA_REG_NUM_DISPLAYS);
    }
    else
    {
        data->displaycount = 1;
    }

    data->vramsize = vmwareReadReg(data, SVGA_REG_VRAM_SIZE);
    data->vrambase = (APTR)(IPTR)vmwareReadReg(data, SVGA_REG_FB_START);
    data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);

    /* NOTE(review): these bug() calls are likely wrapped in a debug macro
       in the original; the wrapper lines are missing from this extraction */
    bug("[VMWareSVGA:HW] %s: VRAM at 0x%08x size %d\n", __func__, data->vrambase, data->vramsize);
    bug("[VMWareSVGA:HW] %s: no.displays: %d\n", __func__, data->displaycount);
    bug("[VMWareSVGA:HW] %s: depth: %d\n", __func__, data->depth);
    bug("[VMWareSVGA:HW] %s: bpp  : %d\n", __func__, data->bitsperpixel);

    VMWareSVGA_RestartRenderTask(data);

    return TRUE;
}
310 VOID getModeCfgVMWareSVGA(struct HWData *data)
312 D(bug("[VMWareSVGA:HW] %s()\n", __func__);)
314 data->fboffset = vmwareReadReg(data, SVGA_REG_FB_OFFSET);
315 data->bytesperline = vmwareReadReg(data, SVGA_REG_BYTES_PER_LINE);
316 data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
317 data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
318 data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
319 data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);
320 data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);
322 data->display_width = vmwareReadReg(data, SVGA_REG_WIDTH);
323 data->display_height = vmwareReadReg(data, SVGA_REG_HEIGHT);
324 D(bug("[VMWareSVGA:HW] %s: %dx%d\n", __func__, data->display_width, data->display_height));
327 VOID enableVMWareSVGA(struct HWData *data)
329 vmwareWriteReg(data, SVGA_REG_ENABLE, 1);
332 VOID disableVMWareSVGA(struct HWData *data)
334 vmwareWriteReg(data, SVGA_REG_ENABLE, 0);
337 VOID initDisplayVMWareSVGA(struct HWData *data)
339 enableVMWareSVGA(data);
340 getModeCfgVMWareSVGA(data);
341 initVMWareSVGAFIFO(data);
344 VOID setModeVMWareSVGA(struct HWData *data, ULONG width, ULONG height)
346 D(bug("[VMWareSVGA:HW] %s(%dx%d)\n", __func__, width, height));
348 disableVMWareSVGA(data);
350 vmwareWriteReg(data, SVGA_REG_WIDTH, width);
351 vmwareWriteReg(data, SVGA_REG_HEIGHT, height);
353 if (data->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
355 D(bug("[VMWareSVGA:HW] %s: Adjusting Display Topology\n", __func__);)
357 vmwareWriteReg(data, SVGA_REG_DISPLAY_ID, 0);
358 vmwareWriteReg(data, SVGA_REG_DISPLAY_IS_PRIMARY, TRUE);
360 vmwareWriteReg(data, SVGA_REG_DISPLAY_POSITION_X, 0);
361 vmwareWriteReg(data, SVGA_REG_DISPLAY_POSITION_Y, 0);
362 vmwareWriteReg(data, SVGA_REG_DISPLAY_WIDTH, width);
363 vmwareWriteReg(data, SVGA_REG_DISPLAY_HEIGHT, height);
366 if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
367 vmwareWriteReg(data, SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);
369 initDisplayVMWareSVGA(data);
372 VOID refreshAreaVMWareSVGA(struct HWData *data, struct Box *box)
374 DREFRESH(bug("[VMWareSVGA:HW] %s()\n", __func__);)
376 writeVMWareSVGAFIFO(data, SVGA_CMD_UPDATE);
377 writeVMWareSVGAFIFO(data, box->x1);
378 writeVMWareSVGAFIFO(data, box->y1);
379 writeVMWareSVGAFIFO(data, box->x2-box->x1+1);
380 writeVMWareSVGAFIFO(data, box->y2-box->y1+1);
383 VOID rectFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height)
385 D(bug("[VMWareSVGA:HW] %s()\n", __func__);)
387 writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_FILL);
388 writeVMWareSVGAFIFO(data, color);
389 writeVMWareSVGAFIFO(data, x);
390 writeVMWareSVGAFIFO(data, y);
391 writeVMWareSVGAFIFO(data, width);
392 writeVMWareSVGAFIFO(data, height);
393 syncVMWareSVGAFIFO(data);
396 VOID ropFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height, ULONG mode)
398 D(bug("[VMWareSVGA:HW] %s()\n", __func__);)
400 writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_FILL);
401 writeVMWareSVGAFIFO(data, color);
402 writeVMWareSVGAFIFO(data, x);
403 writeVMWareSVGAFIFO(data, y);
404 writeVMWareSVGAFIFO(data, width);
405 writeVMWareSVGAFIFO(data, height);
406 writeVMWareSVGAFIFO(data, mode);
407 syncVMWareSVGAFIFO(data);
410 VOID ropCopyVMWareSVGA(struct HWData *data, LONG sx, LONG sy, LONG dx, LONG dy, ULONG width, ULONG height, ULONG mode)
412 D(bug("[VMWareSVGA:HW] %s()\n", __func__);)
414 writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_COPY);
415 writeVMWareSVGAFIFO(data, sx);
416 writeVMWareSVGAFIFO(data, sy);
417 writeVMWareSVGAFIFO(data, dx);
418 writeVMWareSVGAFIFO(data, dy);
419 writeVMWareSVGAFIFO(data, width);
420 writeVMWareSVGAFIFO(data, height);
421 writeVMWareSVGAFIFO(data, mode);
422 syncVMWareSVGAFIFO(data);
/*
 * Upload a new mouse-pointer image to the device. The shape is copied into
 * a temporary buffer from maskPool and sent via
 * SVGA_CMD_DEFINE_ALPHA_CURSOR when the device supports alpha cursors.
 * The masked-cursor fallback is currently disabled with #if (0).
 */
VOID defineCursorVMWareSVGA(struct HWData *data, struct MouseData *mouse)
{
    ULONG size = mouse->width * mouse->height;
    ULONG *xorMask, *xorBuffer;
    ULONG *image = mouse->shape;
    int i;

    DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)

    /* size is in pixels; << 2 converts to bytes (one ULONG per pixel) */
    xorMask = (ULONG *)AllocVecPooled(data->maskPool, size << 2);
    if (xorMask)
    {
        DCURSOR(bug("[VMWareSVGA:HW] %s: xormask @ 0x%p\n", __func__, xorMask);)
        /* copy the shape into the temporary buffer */
        xorBuffer = xorMask;
        for (i = 0; i < size; i++)
        {
            ULONG pixel = *image ++;
            *xorBuffer = pixel;
            xorBuffer++;
        }

        if (data->capabilities & SVGA_CAP_ALPHA_CURSOR)
        {
            DCURSOR(bug("[VMWareSVGA:HW] %s: rendering Alpha Cursor\n", __func__);)
            writeVMWareSVGAFIFO(data, SVGA_CMD_DEFINE_ALPHA_CURSOR);
            writeVMWareSVGAFIFO(data, VMWCURSOR_ID); // id
            writeVMWareSVGAFIFO(data, 0); // hotspot x
            writeVMWareSVGAFIFO(data, 0); // hotspot y
            writeVMWareSVGAFIFO(data, mouse->width); // width
            writeVMWareSVGAFIFO(data, mouse->height); // height

            /* stream the pixel data after the command header */
            xorBuffer = xorMask;
            for (i = 0; i < size; i++)
            {
                writeVMWareSVGAFIFO(data, *xorBuffer++);
            }
            syncVMWareSVGAFIFO(data);
        }
        else
        {
#if (0)
            /* NOTE(review): disabled path; xandBuffer's declaration is not
               visible in this extraction. Also note xandBuffer is advanced
               by the first loop below and then passed to FreeVecPooled —
               that would free the wrong pointer if this is re-enabled. */
            xandBuffer = (ULONG *)AllocVecPooled(data->maskPool, size << 2);
            if (xandBuffer)
            {
                DCURSOR(bug("[VMWareSVGA:HW] %s: rendering masked Cursor\n", __func__);)
                writeVMWareSVGAFIFO(data, SVGA_CMD_DEFINE_CURSOR);
                writeVMWareSVGAFIFO(data, VMWCURSOR_ID); // id
                writeVMWareSVGAFIFO(data, 0); // hotspot x
                writeVMWareSVGAFIFO(data, 0); // hotspot y
                writeVMWareSVGAFIFO(data, mouse->width); // width
                writeVMWareSVGAFIFO(data, mouse->height); // height

                for (i = 0; i < size; i++)
                {
                    writeVMWareSVGAFIFO(data, *xandBuffer++);
                }
                xorBuffer = xorMask;
                for (i = 0; i < size; i++)
                {
                    writeVMWareSVGAFIFO(data, *xorBuffer++);
                }
                syncVMWareSVGAFIFO(data);
                FreeVecPooled(data->maskPool, xandBuffer);
            }
#endif
        }
        FreeVecPooled(data->maskPool, xorMask);
    }
}
498 VOID displayCursorVMWareSVGA(struct HWData *data, LONG mode)
500 volatile ULONG *fifo = data->mmiobase;
502 DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)
504 if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_CURSOR_COUNT) && (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3))
506 ULONG count;
507 fifo[SVGA_FIFO_CURSOR_ON] = mode;
508 count = fifo[SVGA_FIFO_CURSOR_COUNT];
509 fifo[SVGA_FIFO_CURSOR_COUNT] = ++count;
511 else
513 if (data->capabilities & SVGA_CAP_CURSOR_BYPASS_2)
515 vmwareWriteReg(data, SVGA_REG_CURSOR_ID, VMWCURSOR_ID);
516 vmwareWriteReg(data, SVGA_REG_CURSOR_ON, mode);
518 else
520 writeVMWareSVGAFIFO(data, SVGA_CMD_DISPLAY_CURSOR);
521 writeVMWareSVGAFIFO(data, VMWCURSOR_ID);
522 writeVMWareSVGAFIFO(data, mode);
523 syncVMWareSVGAFIFO(data);
528 VOID moveCursorVMWareSVGA(struct HWData *data, LONG x, LONG y)
530 volatile ULONG *fifo = data->mmiobase;
532 DCURSOR(bug("[VMWareSVGA:HW] %s()\n", __func__);)
534 if (hasCapVMWareSVGAFIFO(data, SVGA_FIFO_CURSOR_COUNT) && (data->fifocapabilities & SVGA_FIFO_CAP_CURSOR_BYPASS_3))
536 ULONG count;
537 fifo[SVGA_FIFO_CURSOR_ON] = SVGA_CURSOR_ON_SHOW;
538 fifo[SVGA_FIFO_CURSOR_X] = x;
539 fifo[SVGA_FIFO_CURSOR_Y] = y;
540 count = fifo[SVGA_FIFO_CURSOR_COUNT];
541 fifo[SVGA_FIFO_CURSOR_COUNT] = ++count;
543 else
545 if (data->capabilities & SVGA_CAP_CURSOR_BYPASS_2)
547 vmwareWriteReg(data, SVGA_REG_CURSOR_ID, VMWCURSOR_ID);
548 vmwareWriteReg(data, SVGA_REG_CURSOR_X, x);
549 vmwareWriteReg(data, SVGA_REG_CURSOR_Y, y);
550 vmwareWriteReg(data, SVGA_REG_CURSOR_ON, SVGA_CURSOR_ON_SHOW);
552 else
554 writeVMWareSVGAFIFO(data, SVGA_CMD_MOVE_CURSOR);
555 writeVMWareSVGAFIFO(data, x);
556 writeVMWareSVGAFIFO(data, y);
557 syncVMWareSVGAFIFO(data);
562 VOID VMWareSVGA_Damage_Reset(struct HWData *hwdata)
564 DDMG(bug("[VMWareSVGA:HW] %s()\n", __func__);)
566 ObtainSemaphore(&hwdata->damage_control);
568 hwdata->delta_damage.x1 = INT_MAX;
569 hwdata->delta_damage.y1 = INT_MAX;
570 hwdata->delta_damage.x2 = INT_MIN;
571 hwdata->delta_damage.y2 = INT_MIN;
573 ReleaseSemaphore(&hwdata->damage_control);
576 VOID VMWareSVGA_Damage_DeltaAdd(struct HWData *hwdata, struct Box *box)
578 ULONG tmpval;
580 DDMG(bug("[VMWareSVGA:HW] %s()\n", __func__);)
582 if (box->x1 > box->x2)
584 tmpval = box->x2;
585 box->x2 = box->x1;
586 box->x1 = tmpval;
588 if (box->y1 > box->y2)
590 tmpval = box->y2;
591 box->y2 = box->y1;
592 box->y1 = tmpval;
595 ObtainSemaphore(&hwdata->damage_control);
596 if (box->x1 < hwdata->delta_damage.x1)
598 hwdata->delta_damage.x1 = box->x1;
600 if (box->y1 < hwdata->delta_damage.y1)
602 hwdata->delta_damage.y1 = box->y1;
604 if (box->x2 > hwdata->delta_damage.x2)
606 hwdata->delta_damage.x2 = box->x2;
608 if (box->y2 > hwdata->delta_damage.y2)
610 hwdata->delta_damage.y2 = box->y2;
612 #if defined(VMWAREGFX_IMMEDIATEDRAW)
613 if (hwdata->shown)
615 refreshAreaVMWareSVGA(hwdata, &hwdata->delta_damage);
616 VMWareSVGA_Damage_Reset(hwdata);
617 syncVMWareSVGAFIFO(hwdata);
619 #endif
620 ReleaseSemaphore(&hwdata->damage_control);
#if !defined(VMWAREGFX_IMMEDIATEDRAW)
/*
 * Background render task: every 20ms, when the device is not busy, flush
 * the accumulated damage rectangle to the host via an UPDATE command and
 * reset it. Loops until 'running' is cleared (which currently never
 * happens — see the TODO below).
 */
VOID VMWareSVGA_RenderTask(struct HWData *hwdata)
{
    struct MsgPort render_thread_message_port;
    struct timerequest *timer_request;
    struct IORequest *timer_request_as_io_request;
    ULONG request_size = sizeof(struct timerequest);
    BYTE running;

    /* hand-initialised message port used for the timer.device replies */
    render_thread_message_port.mp_Flags = PA_SIGNAL;
    render_thread_message_port.mp_Node.ln_Type = NT_MSGPORT;
    render_thread_message_port.mp_MsgList.lh_TailPred = (struct Node *)&render_thread_message_port.mp_MsgList;
    render_thread_message_port.mp_MsgList.lh_Tail = 0;
    render_thread_message_port.mp_MsgList.lh_Head = (struct Node *)&render_thread_message_port.mp_MsgList.lh_Tail;

    /* NOTE(review): AllocSignal() result is not checked for -1 */
    render_thread_message_port.mp_SigBit = AllocSignal(-1);
    render_thread_message_port.mp_SigTask = FindTask(0);

    /* NOTE(review): allocation result is not checked before use */
    timer_request = AllocMem(request_size, MEMF_CLEAR | MEMF_PUBLIC);
    timer_request_as_io_request = (void *)timer_request;

    timer_request_as_io_request->io_Message.mn_Node.ln_Type = NT_MESSAGE;
    timer_request_as_io_request->io_Message.mn_ReplyPort = &render_thread_message_port;
    timer_request_as_io_request->io_Message.mn_Length = request_size;

    /* NOTE(review): OpenDevice() return value is not checked */
    OpenDevice("timer.device", UNIT_MICROHZ, timer_request_as_io_request, 0);

    /* arm the first 20ms tick */
    timer_request->tr_node.io_Command = TR_ADDREQUEST;
    timer_request->tr_time.tv_secs = 0;
    timer_request->tr_time.tv_micro = 20000;
    SendIO(timer_request_as_io_request);

    running = 1;
    while (running) // TODO: If you'll ever implement GFX Driver hot swap, you will want to unlock this condition and let the RenderTask terminate
    {
        /* only push an update when the device has drained its FIFO */
        if (!vmwareReadReg(hwdata, SVGA_REG_BUSY))
        {
            ObtainSemaphore(&hwdata->damage_control);
            struct Box *damage = &hwdata->delta_damage;

            /* NOTE(review): strict '>' skips 1-pixel-wide/tall damage
               boxes — verify this is intentional */
            if ((damage->x2 > damage->x1) && (damage->y2 > damage->y1))
            {
                refreshAreaVMWareSVGA(hwdata, &hwdata->delta_damage);
                VMWareSVGA_Damage_Reset(hwdata);
                syncVMWareSVGAFIFO(hwdata);
            }
            ReleaseSemaphore(&hwdata->damage_control);
        }

        /* wait for the tick, consume the reply, then re-arm the timer */
        WaitIO(timer_request_as_io_request);
        GetMsg(&render_thread_message_port); // TODO: Did we have to reply to this? Oh memory ...

        timer_request->tr_node.io_Command = TR_ADDREQUEST;
        timer_request->tr_time.tv_secs = 0;
        timer_request->tr_time.tv_micro = 20000; // TODO: This should be adaptive. We would need to know the CPU load and increase the delay to avoid burning all of the CPU time
        SendIO(timer_request_as_io_request);
    }

    CloseDevice(timer_request_as_io_request);
}
#endif
/*
 * (Re)start the deferred-rendering machinery: initialise the damage
 * semaphore and, unless immediate drawing is compiled in, spawn the
 * background task that periodically flushes damage to the device.
 *
 * NOTE(review): NewCreateTask()'s result is not checked, and any
 * previously running render task is not stopped first (see TODO).
 */
VOID VMWareSVGA_RestartRenderTask(struct HWData *hwdata)
{
    // TODO: CleanUp and add defenses here

    InitSemaphore(&hwdata->damage_control);
#if !defined(VMWAREGFX_IMMEDIATEDRAW)
    hwdata->render_task = NewCreateTask(TASKTAG_PC,
                                        VMWareSVGA_RenderTask,
                                        TASKTAG_NAME, "VMWare Render Task",
                                        TASKTAG_PRI, 1,
                                        TASKTAG_ARG1, hwdata,
                                        TAG_DONE);
#endif
}