Expose the gfx driver's memory attributes via the gfx hidd attributes.
[AROS.git] workbench/hidds/vmwaresvga/vmwaresvga_hardware.c
/*
    Copyright © 1995-2019, The AROS Development Team. All rights reserved.
    $Id$

    Desc: vmware svga hardware functions
    Lang: English
*/
#ifdef DEBUG
#undef DEBUG
#endif
#define DEBUG 1 /* no SysBase */
#include <aros/debug.h>

#include <asm/io.h>

#include <limits.h>
#include <exec/ports.h>
#include <devices/timer.h>
#include <proto/exec.h>

#include "vmwaresvga_intern.h"
/* Read an SVGA device register through the index/value I/O port pair */
ULONG vmwareReadReg(struct HWData *data, ULONG reg)
{
    outl(reg, data->indexReg);
    return inl(data->valueReg);
}

/* Write an SVGA device register through the index/value I/O port pair */
void vmwareWriteReg(struct HWData *data, ULONG reg, ULONG val)
{
    outl(reg, data->indexReg);
    outl(val, data->valueReg);
}
#undef SysBase
extern struct ExecBase *SysBase;
/*
 * Negotiate the device version: try the newest ID first and fall back until
 * the device acknowledges one, otherwise report SVGA_ID_INVALID.
 */
ULONG getVMWareSVGAID(struct HWData *data)
{
    ULONG id;

    vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_2);
    id = vmwareReadReg(data, SVGA_REG_ID);
    if (id == SVGA_ID_2)
        return id;
    vmwareWriteReg(data, SVGA_REG_ID, SVGA_ID_1);
    id = vmwareReadReg(data, SVGA_REG_ID);
    if (id == SVGA_ID_1)
        return id;
    if (id == SVGA_ID_0)
        return id;
    return SVGA_ID_INVALID;
}
VOID initVMWareSVGAFIFO(struct HWData *data)
{
    ULONG *fifo;
    ULONG fifomin;

    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 0); /* stop the device from reading the FIFO while we set it up */

    fifo = data->mmiobase = (APTR)(IPTR)vmwareReadReg(data, SVGA_REG_MEM_START);
    data->mmiosize = vmwareReadReg(data, SVGA_REG_MEM_SIZE) & ~3;

    if (data->capabilities & SVGA_CAP_EXTENDED_FIFO)
        fifomin = vmwareReadReg(data, SVGA_REG_MEM_REGS);
    else
        fifomin = 4;

    fifo[SVGA_FIFO_MIN] = fifomin * sizeof(ULONG);
    /* reduce the size of the FIFO queue to prevent VMware from failing */
    fifo[SVGA_FIFO_MAX] = (data->mmiosize > 65536) ? 65536 : data->mmiosize;
    fifo[SVGA_FIFO_NEXT_CMD] = fifomin * sizeof(ULONG);
    fifo[SVGA_FIFO_STOP] = fifomin * sizeof(ULONG);

    vmwareWriteReg(data, SVGA_REG_CONFIG_DONE, 1);
}
/* Ask the device to process any pending FIFO commands */
VOID syncVMWareSVGAFIFO(struct HWData *data)
{
    vmwareWriteReg(data, SVGA_REG_SYNC, 1);
}
VOID writeVMWareSVGAFIFO(struct HWData *data, ULONG val)
{
    ULONG *fifo;

    fifo = data->mmiobase;
    if (
            (fifo[SVGA_FIFO_NEXT_CMD] + 4 == fifo[SVGA_FIFO_STOP]) ||
            (
                (fifo[SVGA_FIFO_NEXT_CMD] == (fifo[SVGA_FIFO_MAX] - 4)) &&
                (fifo[SVGA_FIFO_STOP] == fifo[SVGA_FIFO_MIN])
            )
        )
        syncVMWareSVGAFIFO(data);

    fifo[fifo[SVGA_FIFO_NEXT_CMD] / 4] = val;
    fifo[SVGA_FIFO_NEXT_CMD] += 4;

    if (fifo[SVGA_FIFO_NEXT_CMD] == fifo[SVGA_FIFO_MAX])
        fifo[SVGA_FIFO_NEXT_CMD] = fifo[SVGA_FIFO_MIN];
}
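/*
 * FIFO usage note: a command is pushed one 32-bit word at a time through
 * writeVMWareSVGAFIFO() (command id first, then its arguments), and the
 * write above syncs first whenever advancing would catch up with the
 * device's read pointer (SVGA_FIFO_STOP) or would wrap onto it.
 * refreshAreaVMWareSVGA() below is the simplest complete example:
 * SVGA_CMD_UPDATE followed by x, y, width, height.
 */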
BOOL initVMWareSVGAHW(struct HWData *data, OOP_Object *device)
{
    ULONG id;

    id = getVMWareSVGAID(data);
    if ((id == SVGA_ID_0) || (id == SVGA_ID_INVALID))
    {
        return FALSE;
    }

    initVMWareSVGAFIFO(data);

    data->capabilities = vmwareReadReg(data, SVGA_REG_CAPABILITIES);

    if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
    {
        data->bitsperpixel = vmwareReadReg(data, SVGA_REG_HOST_BITS_PER_PIXEL);
        vmwareWriteReg(data, SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);
    }
    data->bitsperpixel = vmwareReadReg(data, SVGA_REG_BITS_PER_PIXEL);

    data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
    data->maxwidth = vmwareReadReg(data, SVGA_REG_MAX_WIDTH);
    data->maxheight = vmwareReadReg(data, SVGA_REG_MAX_HEIGHT);
    data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
    data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
    data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);
    data->bytesperpixel = 1;

    if (data->depth > 16)
        data->bytesperpixel = 4;
    else if (data->depth > 8)
        data->bytesperpixel = 2;

    if (data->capabilities & SVGA_CAP_MULTIMON)
    {
        data->displaycount = vmwareReadReg(data, SVGA_REG_NUM_DISPLAYS);
    }
    else
    {
        data->displaycount = 1;
    }

    data->vramsize = vmwareReadReg(data, SVGA_REG_VRAM_SIZE);
    data->vrambase = (APTR)(IPTR)vmwareReadReg(data, SVGA_REG_FB_START);
    data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);

    D(bug("[VMWareSVGA:HW] %s: VRAM at 0x%08x size %d\n", __func__, data->vrambase, data->vramsize));
    D(bug("[VMWareSVGA:HW] %s: no.displays: %d\n", __func__, data->displaycount));
    D(bug("[VMWareSVGA:HW] %s: caps : 0x%08x\n", __func__, data->capabilities));
    D(bug("[VMWareSVGA:HW] %s: depth: %d\n", __func__, data->depth));
    D(bug("[VMWareSVGA:HW] %s: bpp : %d\n", __func__, data->bitsperpixel));
    D(bug("[VMWareSVGA:HW] %s: maxw: %d\n", __func__, data->maxwidth));
    D(bug("[VMWareSVGA:HW] %s: maxh: %d\n", __func__, data->maxheight));

    VMWareSVGA_RestartRenderTask(data);

    return TRUE;
}
VOID setModeVMWareSVGA(struct HWData *data, ULONG width, ULONG height)
{
    D(bug("[VMWareSVGA:HW] %s(%dx%d)\n", __func__, width, height));

    vmwareWriteReg(data, SVGA_REG_ENABLE, 0);

    vmwareWriteReg(data, SVGA_REG_WIDTH, width);
    vmwareWriteReg(data, SVGA_REG_HEIGHT, height);

    if (data->capabilities & SVGA_CAP_8BIT_EMULATION)
        vmwareWriteReg(data, SVGA_REG_BITS_PER_PIXEL, data->bitsperpixel);

    vmwareWriteReg(data, SVGA_REG_ENABLE, 1);

    data->fboffset = vmwareReadReg(data, SVGA_REG_FB_OFFSET);
    data->bytesperline = vmwareReadReg(data, SVGA_REG_BYTES_PER_LINE);
    data->depth = vmwareReadReg(data, SVGA_REG_DEPTH);
    data->redmask = vmwareReadReg(data, SVGA_REG_RED_MASK);
    data->greenmask = vmwareReadReg(data, SVGA_REG_GREEN_MASK);
    data->bluemask = vmwareReadReg(data, SVGA_REG_BLUE_MASK);
    data->pseudocolor = vmwareReadReg(data, SVGA_REG_PSEUDOCOLOR);

    data->display_width = vmwareReadReg(data, SVGA_REG_WIDTH);
    data->display_height = vmwareReadReg(data, SVGA_REG_HEIGHT);
}
VOID refreshAreaVMWareSVGA(struct HWData *data, struct Box *box)
{
    writeVMWareSVGAFIFO(data, SVGA_CMD_UPDATE);
    writeVMWareSVGAFIFO(data, box->x1);
    writeVMWareSVGAFIFO(data, box->y1);
    writeVMWareSVGAFIFO(data, box->x2 - box->x1 + 1);
    writeVMWareSVGAFIFO(data, box->y2 - box->y1 + 1);
}

VOID rectFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height)
{
    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_FILL);
    writeVMWareSVGAFIFO(data, color);
    writeVMWareSVGAFIFO(data, x);
    writeVMWareSVGAFIFO(data, y);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    syncVMWareSVGAFIFO(data);
}

VOID ropFillVMWareSVGA(struct HWData *data, ULONG color, LONG x, LONG y, LONG width, LONG height, ULONG mode)
{
    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_FILL);
    writeVMWareSVGAFIFO(data, color);
    writeVMWareSVGAFIFO(data, x);
    writeVMWareSVGAFIFO(data, y);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    writeVMWareSVGAFIFO(data, mode);
    syncVMWareSVGAFIFO(data);
}

VOID ropCopyVMWareSVGA(struct HWData *data, LONG sx, LONG sy, LONG dx, LONG dy, ULONG width, ULONG height, ULONG mode)
{
    writeVMWareSVGAFIFO(data, SVGA_CMD_RECT_ROP_COPY);
    writeVMWareSVGAFIFO(data, sx);
    writeVMWareSVGAFIFO(data, sy);
    writeVMWareSVGAFIFO(data, dx);
    writeVMWareSVGAFIFO(data, dy);
    writeVMWareSVGAFIFO(data, width);
    writeVMWareSVGAFIFO(data, height);
    writeVMWareSVGAFIFO(data, mode);
    syncVMWareSVGAFIFO(data);
}
VOID defineCursorVMWareSVGA(struct HWData *data, struct MouseData *mouse)
{
    int i;
    ULONG size = mouse->width * mouse->height;
    // TODO: This is utterly disgusting. It should be moved to either a proper static data area, or a dynamic area handed down to the render task
    ULONG xorMask[size];
    ULONG *image = mouse->shape;
    ULONG *xorBuffer;

    writeVMWareSVGAFIFO(data, SVGA_CMD_DEFINE_ALPHA_CURSOR);
    writeVMWareSVGAFIFO(data, 0);             // id
    writeVMWareSVGAFIFO(data, 0);             // hotspot x
    writeVMWareSVGAFIFO(data, 0);             // hotspot y
    writeVMWareSVGAFIFO(data, mouse->width);  // width
    writeVMWareSVGAFIFO(data, mouse->height); // height

    /* Copy the cursor image into the temporary buffer ... */
    xorBuffer = xorMask;
    for (i = 0; i < size; i++)
    {
        ULONG pixel = *image++;
        *xorBuffer = pixel;
        xorBuffer++;
    }

    /* ... and stream it into the FIFO */
    xorBuffer = xorMask;
    for (i = 0; i < size; i++)
    {
        writeVMWareSVGAFIFO(data, *xorBuffer++);
    }

    syncVMWareSVGAFIFO(data);
}
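/*
 * A minimal sketch of the dynamic-buffer variant hinted at by the TODO above,
 * assuming AllocVec()/FreeVec() may be called at this point (not part of the
 * current code):
 *
 *     ULONG *xorMask = AllocVec(size * sizeof(ULONG), MEMF_PUBLIC);
 *     if (xorMask)
 *     {
 *         // fill and stream exactly as above, then:
 *         FreeVec(xorMask);
 *     }
 */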
VOID displayCursorVMWareSVGA(struct HWData *data, LONG mode)
{
#if 0
    writeVMWareSVGAFIFO(data, SVGA_CMD_DISPLAY_CURSOR);
    writeVMWareSVGAFIFO(data, 1);
    writeVMWareSVGAFIFO(data, mode);
    syncVMWareSVGAFIFO(data);
#else
    vmwareWriteReg(data, SVGA_REG_CURSOR_ID, 1);
    vmwareWriteReg(data, SVGA_REG_CURSOR_ON, mode);
#endif
}

VOID moveCursorVMWareSVGA(struct HWData *data, LONG x, LONG y)
{
#if 0
    writeVMWareSVGAFIFO(data, SVGA_CMD_MOVE_CURSOR);
    writeVMWareSVGAFIFO(data, x);
    writeVMWareSVGAFIFO(data, y);
    syncVMWareSVGAFIFO(data);
#else
    vmwareWriteReg(data, SVGA_REG_CURSOR_ID, 1);
    vmwareWriteReg(data, SVGA_REG_CURSOR_X, x);
    vmwareWriteReg(data, SVGA_REG_CURSOR_Y, y);
    vmwareWriteReg(data, SVGA_REG_CURSOR_ON, 1);
#endif
}
VOID VMWareSVGA_Damage_Reset(struct HWData *hwdata)
{
    ObtainSemaphore(&hwdata->damage_control);

    hwdata->delta_damage.x1 = INT_MAX;
    hwdata->delta_damage.y1 = INT_MAX;
    hwdata->delta_damage.x2 = INT_MIN;
    hwdata->delta_damage.y2 = INT_MIN;

    ReleaseSemaphore(&hwdata->damage_control);
}

VOID VMWareSVGA_Damage_DeltaAdd(struct HWData *hwdata, struct Box box)
{
    ObtainSemaphore(&hwdata->damage_control);

    if (box.x1 < hwdata->delta_damage.x1)
        hwdata->delta_damage.x1 = box.x1;
    if (box.y1 < hwdata->delta_damage.y1)
        hwdata->delta_damage.y1 = box.y1;
    if (box.x2 > hwdata->delta_damage.x2)
        hwdata->delta_damage.x2 = box.x2;
    if (box.y2 > hwdata->delta_damage.y2)
        hwdata->delta_damage.y2 = box.y2;

    ReleaseSemaphore(&hwdata->damage_control);
}
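/*
 * Usage sketch (hypothetical caller, not part of this file): a drawing
 * routine records the area it touched so the render task can flush it on
 * its next pass:
 *
 *     struct Box dirty = { x, y, x + width - 1, y + height - 1 };
 *     VMWareSVGA_Damage_DeltaAdd(hwdata, dirty);
 */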
void VMWareSVGA_RenderTask(struct HWData *hwdata)
{
    struct MsgPort render_thread_message_port;
    struct timerequest *timer_request;
    struct IORequest *timer_request_as_io_request;
    ULONG request_size = sizeof(struct timerequest);
    BYTE running;

    /* Hand-initialised message port used for the timer.device replies */
    render_thread_message_port.mp_Flags = PA_SIGNAL;
    render_thread_message_port.mp_Node.ln_Type = NT_MSGPORT;
    render_thread_message_port.mp_MsgList.lh_TailPred = (struct Node *)&render_thread_message_port.mp_MsgList;
    render_thread_message_port.mp_MsgList.lh_Tail = 0;
    render_thread_message_port.mp_MsgList.lh_Head = (struct Node *)&render_thread_message_port.mp_MsgList.lh_Tail;

    render_thread_message_port.mp_SigBit = AllocSignal(-1);
    render_thread_message_port.mp_SigTask = FindTask(0);

    timer_request = AllocMem(request_size, MEMF_CLEAR | MEMF_PUBLIC);
    timer_request_as_io_request = (void *)timer_request;

    timer_request_as_io_request->io_Message.mn_Node.ln_Type = NT_MESSAGE;
    timer_request_as_io_request->io_Message.mn_ReplyPort = &render_thread_message_port;
    timer_request_as_io_request->io_Message.mn_Length = request_size;

    OpenDevice("timer.device", UNIT_MICROHZ, timer_request_as_io_request, 0);

    timer_request->tr_node.io_Command = TR_ADDREQUEST;
    timer_request->tr_time.tv_secs = 0;
    timer_request->tr_time.tv_micro = 20000; /* 20 ms per pass */
    SendIO(timer_request_as_io_request);

    running = 1;
    while (running) // TODO: If you ever implement GFX driver hot swap, you will want to unlock this condition and let the RenderTask terminate
    {
        if (!vmwareReadReg(hwdata, SVGA_REG_BUSY))
        {
            ObtainSemaphore(&hwdata->damage_control);
            struct Box damage = hwdata->delta_damage;
            struct Box all_damage = {0, 0, hwdata->display_width, hwdata->display_height};

            if (damage.x2 > damage.x1 && damage.y2 > damage.y1)
            {
                /* Note: the whole display is refreshed, not just the accumulated damage box */
                refreshAreaVMWareSVGA(hwdata, &all_damage);
                VMWareSVGA_Damage_Reset(hwdata);
                vmwareWriteReg(hwdata, SVGA_REG_SYNC, 1);
            }
            ReleaseSemaphore(&hwdata->damage_control);
        }

        WaitIO(timer_request_as_io_request);
        GetMsg(&render_thread_message_port); // TODO: Did we have to reply to this? Oh memory ...

        timer_request->tr_node.io_Command = TR_ADDREQUEST;
        timer_request->tr_time.tv_secs = 0;
        timer_request->tr_time.tv_micro = 20000; // TODO: This should be adaptive. We would need to know the CPU load and increase the delay to avoid burning all of the CPU time
        SendIO(timer_request_as_io_request);
    }

    CloseDevice(timer_request_as_io_request);
}
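/*
 * Termination sketch for the hot-swap TODO above (hypothetical, not part of
 * the current code): the loop could test a shutdown flag instead of a
 * constant, e.g.
 *
 *     while (!hwdata->render_task_quit)   // hypothetical field in struct HWData
 *
 * with the disposer setting the flag and waiting for the task to exit before
 * freeing hwdata.
 */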
VOID VMWareSVGA_RestartRenderTask(struct HWData *hwdata)
{
    // TODO: Clean up and add defenses here

    InitSemaphore(&hwdata->damage_control);

    hwdata->render_task = NewCreateTask(TASKTAG_PC, VMWareSVGA_RenderTask,
                                        TASKTAG_NAME, "VMWare Render Task",
                                        TASKTAG_PRI, 1,
                                        TASKTAG_ARG1, hwdata,
                                        TAG_DONE);
}