vfs: check userland buffers before reading them.
[haiku.git] / src / add-ons / accelerants / intel_extreme / engine.cpp
blob231fb78d802b1b6bf239adeef2f6c27f1560758e
1 /*
2 * Copyright 2006-2007, Haiku, Inc. All Rights Reserved.
3 * Distributed under the terms of the MIT License.
5 * Authors:
6 * Axel Dörfler, axeld@pinc-software.de
7 */
10 #include <Debug.h>
12 #include "accelerant.h"
13 #include "accelerant_protos.h"
14 #include "commands.h"
17 #undef TRACE
18 //#define TRACE_ENGINE
19 #ifdef TRACE_ENGINE
20 # define TRACE(x...) _sPrintf("intel_extreme: " x)
21 #else
22 # define TRACE(x...)
23 #endif
25 #define ERROR(x...) _sPrintf("intel_extreme: " x)
26 #define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)
// The single engine token handed out by intel_acquire_engine(); the
// capability mask is left at 0 (B_2D_ACCELERATION deliberately commented out).
static engine_token sEngineToken = {1, 0 /*B_2D_ACCELERATION*/, NULL};
32 QueueCommands::QueueCommands(ring_buffer &ring)
34 fRingBuffer(ring)
36 acquire_lock(&fRingBuffer.lock);
40 QueueCommands::~QueueCommands()
42 if (fRingBuffer.position & 0x07) {
43 // make sure the command is properly aligned
44 Write(COMMAND_NOOP);
47 // We must make sure memory is written back in case the ring buffer
48 // is in write combining mode - releasing the lock does this, as the
49 // buffer is flushed on a locked memory operation (which is what this
50 // benaphore does), but it must happen before writing the new tail...
51 int32 flush;
52 atomic_add(&flush, 1);
54 write32(fRingBuffer.register_base + RING_BUFFER_TAIL, fRingBuffer.position);
56 release_lock(&fRingBuffer.lock);
60 void
61 QueueCommands::Put(struct command &command, size_t size)
63 uint32 count = size / sizeof(uint32);
64 uint32 *data = command.Data();
66 MakeSpace(count);
68 for (uint32 i = 0; i < count; i++) {
69 Write(data[i]);
74 void
75 QueueCommands::PutFlush()
77 MakeSpace(2);
79 Write(COMMAND_FLUSH);
80 Write(COMMAND_NOOP);
84 void
85 QueueCommands::PutWaitFor(uint32 event)
87 MakeSpace(2);
89 Write(COMMAND_WAIT_FOR_EVENT | event);
90 Write(COMMAND_NOOP);
94 void
95 QueueCommands::PutOverlayFlip(uint32 mode, bool updateCoefficients)
97 MakeSpace(2);
99 Write(COMMAND_OVERLAY_FLIP | mode);
101 uint32 registers;
102 // G33 does not need a physical address for the overlay registers
103 if (intel_uses_physical_overlay(*gInfo->shared_info))
104 registers = gInfo->shared_info->physical_overlay_registers;
105 else
106 registers = gInfo->shared_info->overlay_offset;
108 Write(registers | (updateCoefficients ? OVERLAY_UPDATE_COEFFICIENTS : 0));
void
QueueCommands::MakeSpace(uint32 size)
{
	// Make sure at least `size` 32-bit words fit into the ring buffer,
	// busy-waiting (bounded to ~1 second) for the hardware to consume
	// pending commands if necessary.
	ASSERT((size & 1) == 0);
		// commands must come in an even number of words — presumably to
		// keep the ring qword aligned (cf. the NOOP padding in the
		// destructor); verify against hardware docs

	size *= sizeof(uint32);
		// from here on, size is in bytes
	bigtime_t start = system_time();

	while (fRingBuffer.space_left < size) {
		// wait until more space is free
		uint32 head = read32(fRingBuffer.register_base + RING_BUFFER_HEAD)
			& INTEL_RING_BUFFER_HEAD_MASK;

		// unwrap head so that the subtraction below yields the free span
		// between our write position and the hardware's read position
		if (head <= fRingBuffer.position)
			head += fRingBuffer.size;

		fRingBuffer.space_left = head - fRingBuffer.position;

		if (fRingBuffer.space_left < size) {
			if (system_time() > start + 1000000LL) {
				// give up after one second; we fall through and write
				// anyway rather than hanging the caller forever
				ERROR("engine stalled, head %" B_PRIx32 "\n", head);
				break;
			}
			spin(10);
		}
	}

	fRingBuffer.space_left -= size;
}
143 void
144 QueueCommands::Write(uint32 data)
146 uint32 *target = (uint32 *)(fRingBuffer.base + fRingBuffer.position);
147 *target = data;
149 fRingBuffer.position = (fRingBuffer.position + sizeof(uint32))
150 & (fRingBuffer.size - 1);
154 // #pragma mark -
157 void
158 uninit_ring_buffer(ring_buffer &ringBuffer)
160 uninit_lock(&ringBuffer.lock);
161 write32(ringBuffer.register_base + RING_BUFFER_CONTROL, 0);
165 void
166 setup_ring_buffer(ring_buffer &ringBuffer, const char* name)
168 TRACE("Setup ring buffer %s, offset %lx, size %lx\n", name,
169 ringBuffer.offset, ringBuffer.size);
171 if (init_lock(&ringBuffer.lock, name) < B_OK) {
172 // disable ring buffer
173 ringBuffer.size = 0;
174 return;
177 uint32 ring = ringBuffer.register_base;
178 ringBuffer.position = 0;
179 ringBuffer.space_left = ringBuffer.size;
181 write32(ring + RING_BUFFER_TAIL, 0);
182 write32(ring + RING_BUFFER_START, ringBuffer.offset);
183 write32(ring + RING_BUFFER_CONTROL,
184 ((ringBuffer.size - B_PAGE_SIZE) & INTEL_RING_BUFFER_SIZE_MASK)
185 | INTEL_RING_BUFFER_ENABLED);
189 // #pragma mark - engine management
192 /*! Return number of hardware engines */
193 uint32
194 intel_accelerant_engine_count(void)
196 CALLED();
197 return 1;
201 status_t
202 intel_acquire_engine(uint32 capabilities, uint32 maxWait, sync_token* syncToken,
203 engine_token** _engineToken)
205 CALLED();
206 *_engineToken = &sEngineToken;
208 if (acquire_lock(&gInfo->shared_info->engine_lock) != B_OK)
209 return B_ERROR;
211 if (syncToken)
212 intel_sync_to_token(syncToken);
214 return B_OK;
218 status_t
219 intel_release_engine(engine_token* engineToken, sync_token* syncToken)
221 CALLED();
222 if (syncToken != NULL)
223 syncToken->engine_id = engineToken->engine_id;
225 release_lock(&gInfo->shared_info->engine_lock);
226 return B_OK;
void
intel_wait_engine_idle(void)
{
	CALLED();

	// Submit a flush; the QueueCommands destructor writes the new tail
	// pointer and releases the ring lock, so the scope block makes sure
	// the command has actually been handed to the hardware before we
	// start polling below.
	{
		QueueCommands queue(gInfo->shared_info->primary_ring_buffer);
		queue.PutFlush();
	}

	// TODO: this should only be a temporary solution!
	// a better way to do this would be to acquire the engine's lock and
	// sync to the latest token

	bigtime_t start = system_time();

	ring_buffer &ring = gInfo->shared_info->primary_ring_buffer;
	uint32 head, tail;
	while (true) {
		head = read32(ring.register_base + RING_BUFFER_HEAD)
			& INTEL_RING_BUFFER_HEAD_MASK;
		tail = read32(ring.register_base + RING_BUFFER_TAIL)
			& INTEL_RING_BUFFER_HEAD_MASK;

		// head caught up with tail: everything queued has been consumed
		if (head == tail)
			break;

		if (system_time() > start + 1000000LL) {
			// the engine seems to be locked up!
			ERROR("engine locked up, head %" B_PRIx32 "!\n", head);
			break;
		}

		spin(10);
	}
}
268 status_t
269 intel_get_sync_token(engine_token* engineToken, sync_token* syncToken)
271 CALLED();
272 return B_OK;
276 status_t
277 intel_sync_to_token(sync_token* syncToken)
279 CALLED();
280 intel_wait_engine_idle();
281 return B_OK;
285 // #pragma mark - engine acceleration
288 void
289 intel_screen_to_screen_blit(engine_token* token, blit_params* params,
290 uint32 count)
292 QueueCommands queue(gInfo->shared_info->primary_ring_buffer);
294 for (uint32 i = 0; i < count; i++) {
295 xy_source_blit_command blit;
296 blit.source_left = params[i].src_left;
297 blit.source_top = params[i].src_top;
298 blit.dest_left = params[i].dest_left;
299 blit.dest_top = params[i].dest_top;
300 blit.dest_right = params[i].dest_left + params[i].width + 1;
301 blit.dest_bottom = params[i].dest_top + params[i].height + 1;
303 queue.Put(blit, sizeof(blit));
308 void
309 intel_fill_rectangle(engine_token* token, uint32 color,
310 fill_rect_params* params, uint32 count)
312 QueueCommands queue(gInfo->shared_info->primary_ring_buffer);
314 for (uint32 i = 0; i < count; i++) {
315 xy_color_blit_command blit(false);
316 blit.dest_left = params[i].left;
317 blit.dest_top = params[i].top;
318 blit.dest_right = params[i].right + 1;
319 blit.dest_bottom = params[i].bottom + 1;
320 blit.color = color;
322 queue.Put(blit, sizeof(blit));
327 void
328 intel_invert_rectangle(engine_token* token, fill_rect_params* params,
329 uint32 count)
331 QueueCommands queue(gInfo->shared_info->primary_ring_buffer);
333 for (uint32 i = 0; i < count; i++) {
334 xy_color_blit_command blit(true);
335 blit.dest_left = params[i].left;
336 blit.dest_top = params[i].top;
337 blit.dest_right = params[i].right + 1;
338 blit.dest_bottom = params[i].bottom + 1;
339 blit.color = 0xffffffff;
341 queue.Put(blit, sizeof(blit));
346 void
347 intel_fill_span(engine_token* token, uint32 color, uint16* _params,
348 uint32 count)
350 struct params {
351 uint16 top;
352 uint16 left;
353 uint16 right;
354 } *params = (struct params*)_params;
356 QueueCommands queue(gInfo->shared_info->primary_ring_buffer);
358 xy_setup_mono_pattern_command setup;
359 setup.background_color = color;
360 setup.pattern = 0;
361 queue.Put(setup, sizeof(setup));
363 for (uint32 i = 0; i < count; i++) {
364 xy_scanline_blit_command blit;
365 blit.dest_left = params[i].left;
366 blit.dest_top = params[i].top;
367 blit.dest_right = params[i].right;
368 blit.dest_bottom = params[i].top;