/*
 * Copyright 2019-2020 Nikolay Sivov for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <assert.h>

#define COBJMACROS
#define NONAMELESSUNION

#include "initguid.h"
#include "rtworkq.h"
#include "wine/debug.h"
#include "wine/heap.h"
#include "wine/list.h"

WINE_DEFAULT_DEBUG_CHANNEL(mfplat);

#define FIRST_USER_QUEUE_HANDLE 5
#define MAX_USER_QUEUE_HANDLES 124

#define WAIT_ITEM_KEY_MASK (0x82000000)
#define SCHEDULED_ITEM_KEY_MASK (0x80000000)
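
/* Work item keys handed back to callers pack one of the masks above into the upper 32 bits and a
   running counter into the lower 32 bits; see get_item_key()/generate_item_key() below and the
   mask check in queue_cancel_item(). */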

static LONG next_item_key;

static RTWQWORKITEM_KEY get_item_key(DWORD mask, DWORD key)
{
    return ((RTWQWORKITEM_KEY)mask << 32) | key;
}

static RTWQWORKITEM_KEY generate_item_key(DWORD mask)
{
    return get_item_key(mask, InterlockedIncrement(&next_item_key));
}

struct queue_handle
{
    void *obj;
    LONG refcount;
    WORD generation;
};

static struct queue_handle user_queues[MAX_USER_QUEUE_HANDLES];
static struct queue_handle *next_free_user_queue;
static struct queue_handle *next_unused_user_queue = user_queues;
static WORD queue_generation;
static DWORD shared_mt_queue;

static CRITICAL_SECTION queues_section;
static CRITICAL_SECTION_DEBUG queues_critsect_debug =
{
    0, 0, &queues_section,
    { &queues_critsect_debug.ProcessLocksList, &queues_critsect_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": queues_section") }
};
static CRITICAL_SECTION queues_section = { &queues_critsect_debug, -1, 0, 0, 0, 0 };

static LONG platform_lock;
static CO_MTA_USAGE_COOKIE mta_cookie;
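
/* User queue handles encode the slot index plus FIRST_USER_QUEUE_HANDLE in the high word and a
   non-zero generation counter in the low word (see alloc_user_queue()), so a handle that outlives
   its queue stops resolving once the slot is reused with a new generation. */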

static struct queue_handle *get_queue_obj(DWORD handle)
{
    unsigned int idx = HIWORD(handle) - FIRST_USER_QUEUE_HANDLE;

    if (idx < MAX_USER_QUEUE_HANDLES && user_queues[idx].refcount)
    {
        if (LOWORD(handle) == user_queues[idx].generation)
            return &user_queues[idx];
    }

    return NULL;
}

/* Should be kept in sync with corresponding MFASYNC_CALLBACK_ constants. */
enum rtwq_callback_queue_id
{
    RTWQ_CALLBACK_QUEUE_UNDEFINED = 0x00000000,
    RTWQ_CALLBACK_QUEUE_STANDARD = 0x00000001,
    RTWQ_CALLBACK_QUEUE_RT = 0x00000002,
    RTWQ_CALLBACK_QUEUE_IO = 0x00000003,
    RTWQ_CALLBACK_QUEUE_TIMER = 0x00000004,
    RTWQ_CALLBACK_QUEUE_MULTITHREADED = 0x00000005,
    RTWQ_CALLBACK_QUEUE_LONG_FUNCTION = 0x00000007,
    RTWQ_CALLBACK_QUEUE_PRIVATE_MASK = 0xffff0000,
    RTWQ_CALLBACK_QUEUE_ALL = 0xffffffff,
};

/* Should be kept in sync with corresponding MFASYNC_ constants. */
enum rtwq_callback_flags
{
    RTWQ_FAST_IO_PROCESSING_CALLBACK = 0x00000001,
    RTWQ_SIGNAL_CALLBACK = 0x00000002,
    RTWQ_BLOCKING_CALLBACK = 0x00000004,
    RTWQ_REPLY_CALLBACK = 0x00000008,
    RTWQ_LOCALIZE_REMOTE_CALLBACK = 0x00000010,
};
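
/* The slot order below mirrors rtwq_callback_queue_id: get_system_queue() indexes system_queues[]
   with queue_id - 1, and SYS_QUEUE_DO_NOT_USE keeps the unused 0x00000006 identifier reserved. */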

enum system_queue_index
{
    SYS_QUEUE_STANDARD = 0,
    SYS_QUEUE_RT,
    SYS_QUEUE_IO,
    SYS_QUEUE_TIMER,
    SYS_QUEUE_MULTITHREADED,
    SYS_QUEUE_DO_NOT_USE,
    SYS_QUEUE_LONG_FUNCTION,
    SYS_QUEUE_COUNT,
};

struct work_item
{
    IUnknown IUnknown_iface;
    LONG refcount;
    struct list entry;
    IRtwqAsyncResult *result;
    IRtwqAsyncResult *reply_result;
    struct queue *queue;
    RTWQWORKITEM_KEY key;
    LONG priority;
    DWORD flags;
    PTP_SIMPLE_CALLBACK finalization_callback;
    union
    {
        TP_WAIT *wait_object;
        TP_TIMER *timer_object;
    } u;
};

static struct work_item *work_item_impl_from_IUnknown(IUnknown *iface)
{
    return CONTAINING_RECORD(iface, struct work_item, IUnknown_iface);
}

static const TP_CALLBACK_PRIORITY priorities[] =
{
    TP_CALLBACK_PRIORITY_HIGH,
    TP_CALLBACK_PRIORITY_NORMAL,
    TP_CALLBACK_PRIORITY_LOW,
};

struct queue;
struct queue_desc;
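
/* A queue is parametrized by a small ops vtable: pool_queue_ops executes items directly on a Win32
   thread pool, while serial_queue_ops (further below) keeps items ordered and forwards them one at
   a time to a target queue. */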

struct queue_ops
{
    HRESULT (*init)(const struct queue_desc *desc, struct queue *queue);
    BOOL (*shutdown)(struct queue *queue);
    void (*submit)(struct queue *queue, struct work_item *item);
};

struct queue_desc
{
    RTWQ_WORKQUEUE_TYPE queue_type;
    const struct queue_ops *ops;
    DWORD target_queue;
};

struct queue
{
    IRtwqAsyncCallback IRtwqAsyncCallback_iface;
    const struct queue_ops *ops;
    TP_POOL *pool;
    TP_CALLBACK_ENVIRON_V3 envs[ARRAY_SIZE(priorities)];
    CRITICAL_SECTION cs;
    struct list pending_items;
    DWORD id;
    /* Data used for serial queues only. */
    PTP_SIMPLE_CALLBACK finalization_callback;
    DWORD target_queue;
};

static void shutdown_queue(struct queue *queue);

static HRESULT lock_user_queue(DWORD queue)
{
    HRESULT hr = RTWQ_E_INVALID_WORKQUEUE;
    struct queue_handle *entry;

    if (!(queue & RTWQ_CALLBACK_QUEUE_PRIVATE_MASK))
        return S_OK;

    EnterCriticalSection(&queues_section);
    entry = get_queue_obj(queue);
    if (entry && entry->refcount)
    {
        entry->refcount++;
        hr = S_OK;
    }
    LeaveCriticalSection(&queues_section);
    return hr;
}

static HRESULT unlock_user_queue(DWORD queue)
{
    HRESULT hr = RTWQ_E_INVALID_WORKQUEUE;
    struct queue_handle *entry;

    if (!(queue & RTWQ_CALLBACK_QUEUE_PRIVATE_MASK))
        return S_OK;

    EnterCriticalSection(&queues_section);
    entry = get_queue_obj(queue);
    if (entry && entry->refcount)
    {
        if (--entry->refcount == 0)
        {
            if (shared_mt_queue == queue) shared_mt_queue = 0;
            shutdown_queue((struct queue *)entry->obj);
            heap_free(entry->obj);
            entry->obj = next_free_user_queue;
            next_free_user_queue = entry;
        }
        hr = S_OK;
    }
    LeaveCriticalSection(&queues_section);
    return hr;
}

static struct queue *queue_impl_from_IRtwqAsyncCallback(IRtwqAsyncCallback *iface)
{
    return CONTAINING_RECORD(iface, struct queue, IRtwqAsyncCallback_iface);
}

static HRESULT WINAPI queue_serial_callback_QueryInterface(IRtwqAsyncCallback *iface, REFIID riid, void **obj)
{
    if (IsEqualIID(riid, &IID_IRtwqAsyncCallback) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IRtwqAsyncCallback_AddRef(iface);
        return S_OK;
    }

    *obj = NULL;
    return E_NOINTERFACE;
}

static ULONG WINAPI queue_serial_callback_AddRef(IRtwqAsyncCallback *iface)
{
    return 2;
}

static ULONG WINAPI queue_serial_callback_Release(IRtwqAsyncCallback *iface)
{
    return 1;
}

static HRESULT WINAPI queue_serial_callback_GetParameters(IRtwqAsyncCallback *iface, DWORD *flags, DWORD *queue_id)
{
    struct queue *queue = queue_impl_from_IRtwqAsyncCallback(iface);

    *flags = 0;
    *queue_id = queue->id;

    return S_OK;
}

static HRESULT WINAPI queue_serial_callback_Invoke(IRtwqAsyncCallback *iface, IRtwqAsyncResult *result)
{
    /* The reply callback won't be invoked in the regular way; pending items and chained queues would
       make it unnecessarily complicated to reach the actual work queue that's able to execute this
       item. Instead, serial queues are cleaned up right away on submit(). */
    return S_OK;
}

static const IRtwqAsyncCallbackVtbl queue_serial_callback_vtbl =
{
    queue_serial_callback_QueryInterface,
    queue_serial_callback_AddRef,
    queue_serial_callback_Release,
    queue_serial_callback_GetParameters,
    queue_serial_callback_Invoke,
};

static struct queue system_queues[SYS_QUEUE_COUNT];

static struct queue *get_system_queue(DWORD queue_id)
{
    switch (queue_id)
    {
        case RTWQ_CALLBACK_QUEUE_STANDARD:
        case RTWQ_CALLBACK_QUEUE_RT:
        case RTWQ_CALLBACK_QUEUE_IO:
        case RTWQ_CALLBACK_QUEUE_TIMER:
        case RTWQ_CALLBACK_QUEUE_MULTITHREADED:
        case RTWQ_CALLBACK_QUEUE_LONG_FUNCTION:
            return &system_queues[queue_id - 1];
        default:
            return NULL;
    }
}

static HRESULT grab_queue(DWORD queue_id, struct queue **ret);

static void CALLBACK standard_queue_cleanup_callback(void *object_data, void *group_data)
{
}

static HRESULT pool_queue_init(const struct queue_desc *desc, struct queue *queue)
{
    TP_CALLBACK_ENVIRON_V3 env;
    unsigned int max_thread, i;

    queue->pool = CreateThreadpool(NULL);

    memset(&env, 0, sizeof(env));
    env.Version = 3;
    env.Size = sizeof(env);
    env.Pool = queue->pool;
    env.CleanupGroup = CreateThreadpoolCleanupGroup();
    env.CleanupGroupCancelCallback = standard_queue_cleanup_callback;
    env.CallbackPriority = TP_CALLBACK_PRIORITY_NORMAL;
    for (i = 0; i < ARRAY_SIZE(queue->envs); ++i)
    {
        queue->envs[i] = env;
        queue->envs[i].CallbackPriority = priorities[i];
    }
    list_init(&queue->pending_items);
    InitializeCriticalSection(&queue->cs);

    max_thread = (desc->queue_type == RTWQ_STANDARD_WORKQUEUE || desc->queue_type == RTWQ_WINDOW_WORKQUEUE) ? 1 : 4;

    SetThreadpoolThreadMinimum(queue->pool, 1);
    SetThreadpoolThreadMaximum(queue->pool, max_thread);

    if (desc->queue_type == RTWQ_WINDOW_WORKQUEUE)
        FIXME("RTWQ_WINDOW_WORKQUEUE is not supported.\n");

    return S_OK;
}

static BOOL pool_queue_shutdown(struct queue *queue)
{
    if (!queue->pool)
        return FALSE;

    CloseThreadpoolCleanupGroupMembers(queue->envs[0].CleanupGroup, TRUE, NULL);
    CloseThreadpool(queue->pool);
    queue->pool = NULL;

    return TRUE;
}

static void CALLBACK standard_queue_worker(TP_CALLBACK_INSTANCE *instance, void *context, TP_WORK *work)
{
    struct work_item *item = context;
    RTWQASYNCRESULT *result = (RTWQASYNCRESULT *)item->result;

    TRACE("result object %p.\n", result);

    /* When submitting from a serial queue in reply mode, a different result object acting as a
       receipt token is used. It's still passed to the user callback, but when invoked, the special
       serial queue callback is used to ensure the correct destination queue. */

    IRtwqAsyncCallback_Invoke(result->pCallback, item->reply_result ? item->reply_result : item->result);

    IUnknown_Release(&item->IUnknown_iface);
}

static void pool_queue_submit(struct queue *queue, struct work_item *item)
{
    TP_CALLBACK_PRIORITY callback_priority;
    TP_CALLBACK_ENVIRON_V3 env;
    TP_WORK *work_object;

    if (item->priority == 0)
        callback_priority = TP_CALLBACK_PRIORITY_NORMAL;
    else if (item->priority < 0)
        callback_priority = TP_CALLBACK_PRIORITY_LOW;
    else
        callback_priority = TP_CALLBACK_PRIORITY_HIGH;

    env = queue->envs[callback_priority];
    env.FinalizationCallback = item->finalization_callback;
    /* The worker pool callback will release one reference. Grab one more to keep the object alive
       when the finalization callback is needed. */
    if (item->finalization_callback)
        IUnknown_AddRef(&item->IUnknown_iface);

    work_object = CreateThreadpoolWork(standard_queue_worker, item, (TP_CALLBACK_ENVIRON *)&env);
    SubmitThreadpoolWork(work_object);

    TRACE("dispatched %p.\n", item->result);
}

static const struct queue_ops pool_queue_ops =
{
    pool_queue_init,
    pool_queue_shutdown,
    pool_queue_submit,
};
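
/* Serial queue implementation. Items are kept on pending_items and only the head is forwarded to
   the target pool queue. In regular mode the finalization callback advances the queue once the item
   has run; in reply mode a separate receipt result is created in serial_queue_submit(), and the
   queue advances when the user invokes that result, which re-enters serial_queue_submit() and is
   recognized by serial_queue_is_ack_token(). */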

static struct work_item * serial_queue_get_next(struct queue *queue, struct work_item *item)
{
    struct work_item *next_item = NULL;

    list_remove(&item->entry);
    if (!list_empty(&item->queue->pending_items))
        next_item = LIST_ENTRY(list_head(&item->queue->pending_items), struct work_item, entry);

    return next_item;
}

static void CALLBACK serial_queue_finalization_callback(PTP_CALLBACK_INSTANCE instance, void *user_data)
{
    struct work_item *item = (struct work_item *)user_data, *next_item;
    struct queue *target_queue, *queue = item->queue;
    HRESULT hr;

    EnterCriticalSection(&queue->cs);

    if ((next_item = serial_queue_get_next(queue, item)))
    {
        if (SUCCEEDED(hr = grab_queue(queue->target_queue, &target_queue)))
            target_queue->ops->submit(target_queue, next_item);
        else
            WARN("Failed to grab queue for id %#x, hr %#x.\n", queue->target_queue, hr);
    }

    LeaveCriticalSection(&queue->cs);

    IUnknown_Release(&item->IUnknown_iface);
}

static HRESULT serial_queue_init(const struct queue_desc *desc, struct queue *queue)
{
    queue->IRtwqAsyncCallback_iface.lpVtbl = &queue_serial_callback_vtbl;
    queue->target_queue = desc->target_queue;
    lock_user_queue(queue->target_queue);
    queue->finalization_callback = serial_queue_finalization_callback;

    return S_OK;
}

static BOOL serial_queue_shutdown(struct queue *queue)
{
    unlock_user_queue(queue->target_queue);

    return TRUE;
}

static struct work_item * serial_queue_is_ack_token(struct queue *queue, struct work_item *item)
{
    RTWQASYNCRESULT *async_result = (RTWQASYNCRESULT *)item->result;
    struct work_item *head;

    if (list_empty(&queue->pending_items))
        return NULL;

    head = LIST_ENTRY(list_head(&queue->pending_items), struct work_item, entry);
    if (head->reply_result == item->result && async_result->pCallback == &queue->IRtwqAsyncCallback_iface)
        return head;

    return NULL;
}

static void serial_queue_submit(struct queue *queue, struct work_item *item)
{
    struct work_item *head, *next_item = NULL;
    struct queue *target_queue;
    HRESULT hr;

    /* In reply mode the queue advances when 'reply_result' is invoked; in regular mode it advances
       automatically, via the finalization callback. */

    if (item->flags & RTWQ_REPLY_CALLBACK)
    {
        if (FAILED(hr = RtwqCreateAsyncResult(NULL, &queue->IRtwqAsyncCallback_iface, NULL, &item->reply_result)))
            WARN("Failed to create reply object, hr %#x.\n", hr);
    }
    else
        item->finalization_callback = queue->finalization_callback;

    /* Serial queues can be chained together, so detach from the current queue before transferring
       the item to this one. Items are not detached when submitted to pool queues, because pool
       queues won't forward them further. */
    EnterCriticalSection(&item->queue->cs);
    list_remove(&item->entry);
    LeaveCriticalSection(&item->queue->cs);

    EnterCriticalSection(&queue->cs);

    item->queue = queue;

    if ((head = serial_queue_is_ack_token(queue, item)))
    {
        /* Ack of the receipt token - pop the waiting item, advance. */
        next_item = serial_queue_get_next(queue, head);
        IUnknown_Release(&head->IUnknown_iface);
    }
    else
    {
        if (list_empty(&queue->pending_items))
            next_item = item;
        list_add_tail(&queue->pending_items, &item->entry);
        IUnknown_AddRef(&item->IUnknown_iface);
    }

    if (next_item)
    {
        if (SUCCEEDED(hr = grab_queue(queue->target_queue, &target_queue)))
            target_queue->ops->submit(target_queue, next_item);
        else
            WARN("Failed to grab queue for id %#x, hr %#x.\n", queue->target_queue, hr);
    }

    LeaveCriticalSection(&queue->cs);
}

static const struct queue_ops serial_queue_ops =
{
    serial_queue_init,
    serial_queue_shutdown,
    serial_queue_submit,
};

static HRESULT WINAPI work_item_QueryInterface(IUnknown *iface, REFIID riid, void **obj)
{
    if (IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IUnknown_AddRef(iface);
        return S_OK;
    }

    *obj = NULL;
    return E_NOINTERFACE;
}

static ULONG WINAPI work_item_AddRef(IUnknown *iface)
{
    struct work_item *item = work_item_impl_from_IUnknown(iface);
    return InterlockedIncrement(&item->refcount);
}

static ULONG WINAPI work_item_Release(IUnknown *iface)
{
    struct work_item *item = work_item_impl_from_IUnknown(iface);
    ULONG refcount = InterlockedDecrement(&item->refcount);

    if (!refcount)
    {
        if (item->reply_result)
            IRtwqAsyncResult_Release(item->reply_result);
        IRtwqAsyncResult_Release(item->result);
        heap_free(item);
    }

    return refcount;
}

static const IUnknownVtbl work_item_vtbl =
{
    work_item_QueryInterface,
    work_item_AddRef,
    work_item_Release,
};

static struct work_item * alloc_work_item(struct queue *queue, LONG priority, IRtwqAsyncResult *result)
{
    RTWQASYNCRESULT *async_result = (RTWQASYNCRESULT *)result;
    DWORD flags = 0, queue_id = 0;
    struct work_item *item;

    if (!(item = heap_alloc_zero(sizeof(*item))))
        return NULL;

    item->IUnknown_iface.lpVtbl = &work_item_vtbl;
    item->result = result;
    IRtwqAsyncResult_AddRef(item->result);
    item->refcount = 1;
    item->queue = queue;
    list_init(&item->entry);
    item->priority = priority;

    if (SUCCEEDED(IRtwqAsyncCallback_GetParameters(async_result->pCallback, &flags, &queue_id)))
        item->flags = flags;

    return item;
}

static void init_work_queue(const struct queue_desc *desc, struct queue *queue)
{
    assert(desc->ops != NULL);

    queue->ops = desc->ops;
    if (SUCCEEDED(queue->ops->init(desc, queue)))
    {
        list_init(&queue->pending_items);
        InitializeCriticalSection(&queue->cs);
    }
}
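
/* Returns the queue backing a given identifier. System queues other than the standard one are
   created lazily here; the standard queue's pool pointer doubles as the "platform is up" flag
   checked on entry. User queue identifiers are resolved through get_queue_obj(). */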

static HRESULT grab_queue(DWORD queue_id, struct queue **ret)
{
    struct queue *queue = get_system_queue(queue_id);
    RTWQ_WORKQUEUE_TYPE queue_type;
    struct queue_handle *entry;

    *ret = NULL;

    if (!system_queues[SYS_QUEUE_STANDARD].pool)
        return RTWQ_E_SHUTDOWN;

    if (queue && queue->pool)
    {
        *ret = queue;
        return S_OK;
    }
    else if (queue)
    {
        struct queue_desc desc;

        EnterCriticalSection(&queues_section);
        switch (queue_id)
        {
            case RTWQ_CALLBACK_QUEUE_IO:
            case RTWQ_CALLBACK_QUEUE_MULTITHREADED:
            case RTWQ_CALLBACK_QUEUE_LONG_FUNCTION:
                queue_type = RTWQ_MULTITHREADED_WORKQUEUE;
                break;
            default:
                queue_type = RTWQ_STANDARD_WORKQUEUE;
        }

        desc.queue_type = queue_type;
        desc.ops = &pool_queue_ops;
        desc.target_queue = 0;
        init_work_queue(&desc, queue);
        LeaveCriticalSection(&queues_section);
        *ret = queue;
        return S_OK;
    }

    /* Handles user queues. */
    if ((entry = get_queue_obj(queue_id)))
        *ret = entry->obj;

    return *ret ? S_OK : RTWQ_E_INVALID_WORKQUEUE;
}

static void shutdown_queue(struct queue *queue)
{
    struct work_item *item, *item2;

    if (!queue->ops || !queue->ops->shutdown(queue))
        return;

    EnterCriticalSection(&queue->cs);
    LIST_FOR_EACH_ENTRY_SAFE(item, item2, &queue->pending_items, struct work_item, entry)
    {
        list_remove(&item->entry);
        IUnknown_Release(&item->IUnknown_iface);
    }
    LeaveCriticalSection(&queue->cs);

    DeleteCriticalSection(&queue->cs);

    memset(queue, 0, sizeof(*queue));
}

static HRESULT queue_submit_item(struct queue *queue, LONG priority, IRtwqAsyncResult *result)
{
    struct work_item *item;

    if (!(item = alloc_work_item(queue, priority, result)))
        return E_OUTOFMEMORY;

    queue->ops->submit(queue, item);

    return S_OK;
}

static HRESULT queue_put_work_item(DWORD queue_id, LONG priority, IRtwqAsyncResult *result)
{
    struct queue *queue;
    HRESULT hr;

    if (FAILED(hr = grab_queue(queue_id, &queue)))
        return hr;

    return queue_submit_item(queue, priority, result);
}

static HRESULT invoke_async_callback(IRtwqAsyncResult *result)
{
    RTWQASYNCRESULT *result_data = (RTWQASYNCRESULT *)result;
    DWORD queue = RTWQ_CALLBACK_QUEUE_STANDARD, flags;
    HRESULT hr;

    if (FAILED(IRtwqAsyncCallback_GetParameters(result_data->pCallback, &flags, &queue)))
        queue = RTWQ_CALLBACK_QUEUE_STANDARD;

    if (FAILED(lock_user_queue(queue)))
        queue = RTWQ_CALLBACK_QUEUE_STANDARD;

    hr = queue_put_work_item(queue, 0, result);

    unlock_user_queue(queue);

    return hr;
}
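
/* Waiting and scheduled items submitted with a caller-visible key stay on the owning queue's
   pending_items list until they either fire (the *_cancelable_* callbacks below drop them first)
   or are cancelled through queue_cancel_item(). */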

static void queue_release_pending_item(struct work_item *item)
{
    EnterCriticalSection(&item->queue->cs);
    if (item->key)
    {
        list_remove(&item->entry);
        item->key = 0;
        IUnknown_Release(&item->IUnknown_iface);
    }
    LeaveCriticalSection(&item->queue->cs);
}

static void CALLBACK waiting_item_callback(TP_CALLBACK_INSTANCE *instance, void *context, TP_WAIT *wait,
        TP_WAIT_RESULT wait_result)
{
    struct work_item *item = context;

    TRACE("result object %p.\n", item->result);

    invoke_async_callback(item->result);

    IUnknown_Release(&item->IUnknown_iface);
}

static void CALLBACK waiting_item_cancelable_callback(TP_CALLBACK_INSTANCE *instance, void *context, TP_WAIT *wait,
        TP_WAIT_RESULT wait_result)
{
    struct work_item *item = context;

    TRACE("result object %p.\n", item->result);

    queue_release_pending_item(item);

    invoke_async_callback(item->result);

    IUnknown_Release(&item->IUnknown_iface);
}

static void CALLBACK scheduled_item_callback(TP_CALLBACK_INSTANCE *instance, void *context, TP_TIMER *timer)
{
    struct work_item *item = context;

    TRACE("result object %p.\n", item->result);

    invoke_async_callback(item->result);

    IUnknown_Release(&item->IUnknown_iface);
}

static void CALLBACK scheduled_item_cancelable_callback(TP_CALLBACK_INSTANCE *instance, void *context, TP_TIMER *timer)
{
    struct work_item *item = context;

    TRACE("result object %p.\n", item->result);

    queue_release_pending_item(item);

    invoke_async_callback(item->result);

    IUnknown_Release(&item->IUnknown_iface);
}

static void CALLBACK periodic_item_callback(TP_CALLBACK_INSTANCE *instance, void *context, TP_TIMER *timer)
{
    struct work_item *item = context;

    IUnknown_AddRef(&item->IUnknown_iface);

    invoke_async_callback(item->result);

    IUnknown_Release(&item->IUnknown_iface);
}

static void queue_mark_item_pending(DWORD mask, struct work_item *item, RTWQWORKITEM_KEY *key)
{
    *key = generate_item_key(mask);
    item->key = *key;

    EnterCriticalSection(&item->queue->cs);
    list_add_tail(&item->queue->pending_items, &item->entry);
    IUnknown_AddRef(&item->IUnknown_iface);
    LeaveCriticalSection(&item->queue->cs);
}

static HRESULT queue_submit_wait(struct queue *queue, HANDLE event, LONG priority, IRtwqAsyncResult *result,
        RTWQWORKITEM_KEY *key)
{
    PTP_WAIT_CALLBACK callback;
    struct work_item *item;

    if (!(item = alloc_work_item(queue, priority, result)))
        return E_OUTOFMEMORY;

    if (key)
    {
        queue_mark_item_pending(WAIT_ITEM_KEY_MASK, item, key);
        callback = waiting_item_cancelable_callback;
    }
    else
        callback = waiting_item_callback;

    item->u.wait_object = CreateThreadpoolWait(callback, item,
            (TP_CALLBACK_ENVIRON *)&queue->envs[TP_CALLBACK_PRIORITY_NORMAL]);
    SetThreadpoolWait(item->u.wait_object, event, NULL);

    TRACE("dispatched %p.\n", result);

    return S_OK;
}
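
/* Presumably 'timeout' follows the RtwqScheduleWorkItem() convention of a (typically negative)
   millisecond interval: multiplying by 10000 yields 100 ns FILETIME units, and a negative value is
   treated by SetThreadpoolTimer() as relative to the current time. */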

static HRESULT queue_submit_timer(struct queue *queue, IRtwqAsyncResult *result, INT64 timeout, DWORD period,
        RTWQWORKITEM_KEY *key)
{
    PTP_TIMER_CALLBACK callback;
    struct work_item *item;
    FILETIME filetime;
    LARGE_INTEGER t;

    if (!(item = alloc_work_item(queue, 0, result)))
        return E_OUTOFMEMORY;

    if (key)
    {
        queue_mark_item_pending(SCHEDULED_ITEM_KEY_MASK, item, key);
    }

    if (period)
        callback = periodic_item_callback;
    else
        callback = key ? scheduled_item_cancelable_callback : scheduled_item_callback;

    t.QuadPart = timeout * 1000 * 10;
    filetime.dwLowDateTime = t.u.LowPart;
    filetime.dwHighDateTime = t.u.HighPart;

    item->u.timer_object = CreateThreadpoolTimer(callback, item,
            (TP_CALLBACK_ENVIRON *)&queue->envs[TP_CALLBACK_PRIORITY_NORMAL]);
    SetThreadpoolTimer(item->u.timer_object, &filetime, period, 0);

    TRACE("dispatched %p.\n", result);

    return S_OK;
}

static HRESULT queue_cancel_item(struct queue *queue, RTWQWORKITEM_KEY key)
{
    HRESULT hr = RTWQ_E_NOT_FOUND;
    struct work_item *item;

    EnterCriticalSection(&queue->cs);
    LIST_FOR_EACH_ENTRY(item, &queue->pending_items, struct work_item, entry)
    {
        if (item->key == key)
        {
            key >>= 32;
            if ((key & WAIT_ITEM_KEY_MASK) == WAIT_ITEM_KEY_MASK)
            {
                IRtwqAsyncResult_SetStatus(item->result, RTWQ_E_OPERATION_CANCELLED);
                invoke_async_callback(item->result);
                CloseThreadpoolWait(item->u.wait_object);
            }
            else if ((key & SCHEDULED_ITEM_KEY_MASK) == SCHEDULED_ITEM_KEY_MASK)
                CloseThreadpoolTimer(item->u.timer_object);
            else
                WARN("Unknown item key mask %#x.\n", (DWORD)key);
            queue_release_pending_item(item);
            hr = S_OK;
            break;
        }
    }
    LeaveCriticalSection(&queue->cs);

    return hr;
}

static HRESULT alloc_user_queue(const struct queue_desc *desc, DWORD *queue_id)
{
    struct queue_handle *entry;
    struct queue *queue;
    unsigned int idx;

    *queue_id = RTWQ_CALLBACK_QUEUE_UNDEFINED;

    if (platform_lock <= 0)
        return RTWQ_E_SHUTDOWN;

    queue = heap_alloc_zero(sizeof(*queue));
    if (!queue)
        return E_OUTOFMEMORY;

    init_work_queue(desc, queue);

    EnterCriticalSection(&queues_section);

    entry = next_free_user_queue;
    if (entry)
        next_free_user_queue = entry->obj;
    else if (next_unused_user_queue < user_queues + MAX_USER_QUEUE_HANDLES)
        entry = next_unused_user_queue++;
    else
    {
        LeaveCriticalSection(&queues_section);
        heap_free(queue);
        WARN("Out of user queue handles.\n");
        return E_OUTOFMEMORY;
    }

    entry->refcount = 1;
    entry->obj = queue;
    if (++queue_generation == 0xffff) queue_generation = 1;
    entry->generation = queue_generation;
    idx = entry - user_queues + FIRST_USER_QUEUE_HANDLE;
    *queue_id = (idx << 16) | entry->generation;

    LeaveCriticalSection(&queues_section);

    return S_OK;
}

struct async_result
{
    RTWQASYNCRESULT result;
    LONG refcount;
    IUnknown *object;
    IUnknown *state;
};

static struct async_result *impl_from_IRtwqAsyncResult(IRtwqAsyncResult *iface)
{
    return CONTAINING_RECORD(iface, struct async_result, result.AsyncResult);
}

static HRESULT WINAPI async_result_QueryInterface(IRtwqAsyncResult *iface, REFIID riid, void **obj)
{
    TRACE("%p, %s, %p.\n", iface, debugstr_guid(riid), obj);

    if (IsEqualIID(riid, &IID_IRtwqAsyncResult) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IRtwqAsyncResult_AddRef(iface);
        return S_OK;
    }

    *obj = NULL;
    WARN("Unsupported interface %s.\n", debugstr_guid(riid));
    return E_NOINTERFACE;
}

static ULONG WINAPI async_result_AddRef(IRtwqAsyncResult *iface)
{
    struct async_result *result = impl_from_IRtwqAsyncResult(iface);
    ULONG refcount = InterlockedIncrement(&result->refcount);

    TRACE("%p, %u.\n", iface, refcount);

    return refcount;
}

static ULONG WINAPI async_result_Release(IRtwqAsyncResult *iface)
{
    struct async_result *result = impl_from_IRtwqAsyncResult(iface);
    ULONG refcount = InterlockedDecrement(&result->refcount);

    TRACE("%p, %u.\n", iface, refcount);

    if (!refcount)
    {
        if (result->result.pCallback)
            IRtwqAsyncCallback_Release(result->result.pCallback);
        if (result->object)
            IUnknown_Release(result->object);
        if (result->state)
            IUnknown_Release(result->state);
        if (result->result.hEvent)
            CloseHandle(result->result.hEvent);
        heap_free(result);

        RtwqUnlockPlatform();
    }

    return refcount;
}

static HRESULT WINAPI async_result_GetState(IRtwqAsyncResult *iface, IUnknown **state)
{
    struct async_result *result = impl_from_IRtwqAsyncResult(iface);

    TRACE("%p, %p.\n", iface, state);

    if (!result->state)
        return E_POINTER;

    *state = result->state;
    IUnknown_AddRef(*state);

    return S_OK;
}

static HRESULT WINAPI async_result_GetStatus(IRtwqAsyncResult *iface)
{
    struct async_result *result = impl_from_IRtwqAsyncResult(iface);

    TRACE("%p.\n", iface);

    return result->result.hrStatusResult;
}

static HRESULT WINAPI async_result_SetStatus(IRtwqAsyncResult *iface, HRESULT status)
{
    struct async_result *result = impl_from_IRtwqAsyncResult(iface);

    TRACE("%p, %#x.\n", iface, status);

    result->result.hrStatusResult = status;

    return S_OK;
}

static HRESULT WINAPI async_result_GetObject(IRtwqAsyncResult *iface, IUnknown **object)
{
    struct async_result *result = impl_from_IRtwqAsyncResult(iface);

    TRACE("%p, %p.\n", iface, object);

    if (!result->object)
        return E_POINTER;

    *object = result->object;
    IUnknown_AddRef(*object);

    return S_OK;
}

static IUnknown * WINAPI async_result_GetStateNoAddRef(IRtwqAsyncResult *iface)
{
    struct async_result *result = impl_from_IRtwqAsyncResult(iface);

    TRACE("%p.\n", iface);

    return result->state;
}

static const IRtwqAsyncResultVtbl async_result_vtbl =
{
    async_result_QueryInterface,
    async_result_AddRef,
    async_result_Release,
    async_result_GetState,
    async_result_GetStatus,
    async_result_SetStatus,
    async_result_GetObject,
    async_result_GetStateNoAddRef,
};

static HRESULT create_async_result(IUnknown *object, IRtwqAsyncCallback *callback, IUnknown *state, IRtwqAsyncResult **out)
{
    struct async_result *result;

    if (!out)
        return E_INVALIDARG;

    result = heap_alloc_zero(sizeof(*result));
    if (!result)
        return E_OUTOFMEMORY;

    RtwqLockPlatform();

    result->result.AsyncResult.lpVtbl = &async_result_vtbl;
    result->refcount = 1;
    result->object = object;
    if (result->object)
        IUnknown_AddRef(result->object);
    result->result.pCallback = callback;
    if (result->result.pCallback)
        IRtwqAsyncCallback_AddRef(result->result.pCallback);
    result->state = state;
    if (result->state)
        IUnknown_AddRef(result->state);

    *out = &result->result.AsyncResult;

    TRACE("Created async result object %p.\n", *out);

    return S_OK;
}

HRESULT WINAPI RtwqCreateAsyncResult(IUnknown *object, IRtwqAsyncCallback *callback, IUnknown *state,
        IRtwqAsyncResult **out)
{
    TRACE("%p, %p, %p, %p.\n", object, callback, state, out);

    return create_async_result(object, callback, state, out);
}

HRESULT WINAPI RtwqLockPlatform(void)
{
    InterlockedIncrement(&platform_lock);

    return S_OK;
}

HRESULT WINAPI RtwqUnlockPlatform(void)
{
    InterlockedDecrement(&platform_lock);

    return S_OK;
}

static void init_system_queues(void)
{
    struct queue_desc desc;
    HRESULT hr;

    /* Always initialize the standard queue, keep the rest lazy. */

    EnterCriticalSection(&queues_section);

    if (system_queues[SYS_QUEUE_STANDARD].pool)
    {
        LeaveCriticalSection(&queues_section);
        return;
    }

    if (FAILED(hr = CoIncrementMTAUsage(&mta_cookie)))
        WARN("Failed to initialize MTA, hr %#x.\n", hr);

    desc.queue_type = RTWQ_STANDARD_WORKQUEUE;
    desc.ops = &pool_queue_ops;
    desc.target_queue = 0;
    init_work_queue(&desc, &system_queues[SYS_QUEUE_STANDARD]);

    LeaveCriticalSection(&queues_section);
}

HRESULT WINAPI RtwqStartup(void)
{
    if (InterlockedIncrement(&platform_lock) == 1)
    {
        init_system_queues();
    }

    return S_OK;
}

static void shutdown_system_queues(void)
{
    unsigned int i;
    HRESULT hr;

    EnterCriticalSection(&queues_section);

    for (i = 0; i < ARRAY_SIZE(system_queues); ++i)
    {
        shutdown_queue(&system_queues[i]);
    }

    if (FAILED(hr = CoDecrementMTAUsage(mta_cookie)))
        WARN("Failed to uninitialize MTA, hr %#x.\n", hr);

    LeaveCriticalSection(&queues_section);
}

HRESULT WINAPI RtwqShutdown(void)
{
    if (platform_lock <= 0)
        return S_OK;

    if (InterlockedExchangeAdd(&platform_lock, -1) == 1)
    {
        shutdown_system_queues();
    }

    return S_OK;
}
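
/*
 * Putting the exported entry points together, a caller would typically do something like the
 * following (illustrative sketch only; 'callback' stands for any caller-provided IRtwqAsyncCallback,
 * and waiting for the item to complete is omitted):
 *
 *     IRtwqAsyncResult *result;
 *
 *     RtwqStartup();
 *     RtwqCreateAsyncResult(NULL, callback, NULL, &result);
 *     RtwqPutWorkItem(RTWQ_CALLBACK_QUEUE_STANDARD, 0, result);
 *     IRtwqAsyncResult_Release(result);
 *     RtwqShutdown();
 */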

HRESULT WINAPI RtwqPutWaitingWorkItem(HANDLE event, LONG priority, IRtwqAsyncResult *result, RTWQWORKITEM_KEY *key)
{
    struct queue *queue;
    HRESULT hr;

    TRACE("%p, %d, %p, %p.\n", event, priority, result, key);

    if (FAILED(hr = grab_queue(RTWQ_CALLBACK_QUEUE_TIMER, &queue)))
        return hr;

    hr = queue_submit_wait(queue, event, priority, result, key);

    return hr;
}

static HRESULT schedule_work_item(IRtwqAsyncResult *result, INT64 timeout, RTWQWORKITEM_KEY *key)
{
    struct queue *queue;
    HRESULT hr;

    if (FAILED(hr = grab_queue(RTWQ_CALLBACK_QUEUE_TIMER, &queue)))
        return hr;

    TRACE("%p, %s, %p.\n", result, wine_dbgstr_longlong(timeout), key);

    return queue_submit_timer(queue, result, timeout, 0, key);
}

HRESULT WINAPI RtwqScheduleWorkItem(IRtwqAsyncResult *result, INT64 timeout, RTWQWORKITEM_KEY *key)
{
    TRACE("%p, %s, %p.\n", result, wine_dbgstr_longlong(timeout), key);

    return schedule_work_item(result, timeout, key);
}
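
/* RtwqAddPeriodicCallback() wraps a plain RTWQPERIODICCALLBACK function pointer in this small
   IRtwqAsyncCallback adapter so the regular timer submission path (queue_submit_timer() with a
   10 ms period) can be reused. */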

struct periodic_callback
{
    IRtwqAsyncCallback IRtwqAsyncCallback_iface;
    LONG refcount;
    RTWQPERIODICCALLBACK callback;
};

static struct periodic_callback *impl_from_IRtwqAsyncCallback(IRtwqAsyncCallback *iface)
{
    return CONTAINING_RECORD(iface, struct periodic_callback, IRtwqAsyncCallback_iface);
}

static HRESULT WINAPI periodic_callback_QueryInterface(IRtwqAsyncCallback *iface, REFIID riid, void **obj)
{
    if (IsEqualIID(riid, &IID_IRtwqAsyncCallback) ||
            IsEqualIID(riid, &IID_IUnknown))
    {
        *obj = iface;
        IRtwqAsyncCallback_AddRef(iface);
        return S_OK;
    }

    *obj = NULL;
    return E_NOINTERFACE;
}

static ULONG WINAPI periodic_callback_AddRef(IRtwqAsyncCallback *iface)
{
    struct periodic_callback *callback = impl_from_IRtwqAsyncCallback(iface);
    ULONG refcount = InterlockedIncrement(&callback->refcount);

    TRACE("%p, %u.\n", iface, refcount);

    return refcount;
}

static ULONG WINAPI periodic_callback_Release(IRtwqAsyncCallback *iface)
{
    struct periodic_callback *callback = impl_from_IRtwqAsyncCallback(iface);
    ULONG refcount = InterlockedDecrement(&callback->refcount);

    TRACE("%p, %u.\n", iface, refcount);

    if (!refcount)
        heap_free(callback);

    return refcount;
}

static HRESULT WINAPI periodic_callback_GetParameters(IRtwqAsyncCallback *iface, DWORD *flags, DWORD *queue)
{
    return E_NOTIMPL;
}

static HRESULT WINAPI periodic_callback_Invoke(IRtwqAsyncCallback *iface, IRtwqAsyncResult *result)
{
    struct periodic_callback *callback = impl_from_IRtwqAsyncCallback(iface);
    IUnknown *context = NULL;

    if (FAILED(IRtwqAsyncResult_GetObject(result, &context)))
        WARN("Expected object to be set for result object.\n");

    callback->callback(context);

    if (context)
        IUnknown_Release(context);

    return S_OK;
}

static const IRtwqAsyncCallbackVtbl periodic_callback_vtbl =
{
    periodic_callback_QueryInterface,
    periodic_callback_AddRef,
    periodic_callback_Release,
    periodic_callback_GetParameters,
    periodic_callback_Invoke,
};

static HRESULT create_periodic_callback_obj(RTWQPERIODICCALLBACK callback, IRtwqAsyncCallback **out)
{
    struct periodic_callback *object;

    object = heap_alloc(sizeof(*object));
    if (!object)
        return E_OUTOFMEMORY;

    object->IRtwqAsyncCallback_iface.lpVtbl = &periodic_callback_vtbl;
    object->refcount = 1;
    object->callback = callback;

    *out = &object->IRtwqAsyncCallback_iface;

    return S_OK;
}

HRESULT WINAPI RtwqAddPeriodicCallback(RTWQPERIODICCALLBACK callback, IUnknown *context, DWORD *key)
{
    IRtwqAsyncCallback *periodic_callback;
    RTWQWORKITEM_KEY workitem_key;
    IRtwqAsyncResult *result;
    struct queue *queue;
    HRESULT hr;

    TRACE("%p, %p, %p.\n", callback, context, key);

    if (FAILED(hr = grab_queue(RTWQ_CALLBACK_QUEUE_TIMER, &queue)))
        return hr;

    if (FAILED(hr = create_periodic_callback_obj(callback, &periodic_callback)))
        return hr;

    hr = create_async_result(context, periodic_callback, NULL, &result);
    IRtwqAsyncCallback_Release(periodic_callback);
    if (FAILED(hr))
        return hr;

    /* Same period MFGetTimerPeriodicity() returns. */
    hr = queue_submit_timer(queue, result, 0, 10, key ? &workitem_key : NULL);

    IRtwqAsyncResult_Release(result);

    if (key)
        *key = workitem_key;

    return S_OK;
}

HRESULT WINAPI RtwqRemovePeriodicCallback(DWORD key)
{
    struct queue *queue;
    HRESULT hr;

    TRACE("%#x.\n", key);

    if (FAILED(hr = grab_queue(RTWQ_CALLBACK_QUEUE_TIMER, &queue)))
        return hr;

    return queue_cancel_item(queue, get_item_key(SCHEDULED_ITEM_KEY_MASK, key));
}

HRESULT WINAPI RtwqCancelWorkItem(RTWQWORKITEM_KEY key)
{
    struct queue *queue;
    HRESULT hr;

    TRACE("%s.\n", wine_dbgstr_longlong(key));

    if (FAILED(hr = grab_queue(RTWQ_CALLBACK_QUEUE_TIMER, &queue)))
        return hr;

    return queue_cancel_item(queue, key);
}

HRESULT WINAPI RtwqInvokeCallback(IRtwqAsyncResult *result)
{
    TRACE("%p.\n", result);

    return invoke_async_callback(result);
}

HRESULT WINAPI RtwqPutWorkItem(DWORD queue, LONG priority, IRtwqAsyncResult *result)
{
    TRACE("%#x, %d, %p.\n", queue, priority, result);

    return queue_put_work_item(queue, priority, result);
}

HRESULT WINAPI RtwqAllocateWorkQueue(RTWQ_WORKQUEUE_TYPE queue_type, DWORD *queue)
{
    struct queue_desc desc;

    TRACE("%d, %p.\n", queue_type, queue);

    desc.queue_type = queue_type;
    desc.ops = &pool_queue_ops;
    desc.target_queue = 0;
    return alloc_user_queue(&desc, queue);
}

HRESULT WINAPI RtwqLockWorkQueue(DWORD queue)
{
    TRACE("%#x.\n", queue);

    return lock_user_queue(queue);
}

HRESULT WINAPI RtwqUnlockWorkQueue(DWORD queue)
{
    TRACE("%#x.\n", queue);

    return unlock_user_queue(queue);
}

HRESULT WINAPI RtwqSetLongRunning(DWORD queue_id, BOOL enable)
{
    struct queue *queue;
    HRESULT hr;
    int i;

    TRACE("%#x, %d.\n", queue_id, enable);

    lock_user_queue(queue_id);

    if (SUCCEEDED(hr = grab_queue(queue_id, &queue)))
    {
        for (i = 0; i < ARRAY_SIZE(queue->envs); ++i)
            queue->envs[i].u.s.LongFunction = !!enable;
    }

    unlock_user_queue(queue_id);

    return hr;
}

HRESULT WINAPI RtwqLockSharedWorkQueue(const WCHAR *usageclass, LONG priority, DWORD *taskid, DWORD *queue)
{
    struct queue_desc desc;
    HRESULT hr;

    TRACE("%s, %d, %p, %p.\n", debugstr_w(usageclass), priority, taskid, queue);

    if (!usageclass)
        return E_POINTER;

    if (!*usageclass && taskid)
        return E_INVALIDARG;

    if (*usageclass)
        FIXME("Class name is ignored.\n");

    EnterCriticalSection(&queues_section);

    if (shared_mt_queue)
        hr = lock_user_queue(shared_mt_queue);
    else
    {
        desc.queue_type = RTWQ_MULTITHREADED_WORKQUEUE;
        desc.ops = &pool_queue_ops;
        desc.target_queue = 0;
        hr = alloc_user_queue(&desc, &shared_mt_queue);
    }

    *queue = shared_mt_queue;

    LeaveCriticalSection(&queues_section);

    return hr;
}

HRESULT WINAPI RtwqSetDeadline(DWORD queue_id, LONGLONG deadline, HANDLE *request)
{
    FIXME("%#x, %s, %p.\n", queue_id, wine_dbgstr_longlong(deadline), request);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqSetDeadline2(DWORD queue_id, LONGLONG deadline, LONGLONG predeadline, HANDLE *request)
{
    FIXME("%#x, %s, %s, %p.\n", queue_id, wine_dbgstr_longlong(deadline), wine_dbgstr_longlong(predeadline), request);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqCancelDeadline(HANDLE request)
{
    FIXME("%p.\n", request);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqAllocateSerialWorkQueue(DWORD target_queue, DWORD *queue)
{
    struct queue_desc desc;

    TRACE("%#x, %p.\n", target_queue, queue);

    desc.queue_type = RTWQ_STANDARD_WORKQUEUE;
    desc.ops = &serial_queue_ops;
    desc.target_queue = target_queue;
    return alloc_user_queue(&desc, queue);
}

HRESULT WINAPI RtwqJoinWorkQueue(DWORD queue, HANDLE hFile, HANDLE *cookie)
{
    FIXME("%#x, %p, %p.\n", queue, hFile, cookie);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqUnjoinWorkQueue(DWORD queue, HANDLE cookie)
{
    FIXME("%#x, %p.\n", queue, cookie);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqGetWorkQueueMMCSSClass(DWORD queue, WCHAR *class, DWORD *length)
{
    FIXME("%#x, %p, %p.\n", queue, class, length);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqGetWorkQueueMMCSSTaskId(DWORD queue, DWORD *taskid)
{
    FIXME("%#x, %p.\n", queue, taskid);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqGetWorkQueueMMCSSPriority(DWORD queue, LONG *priority)
{
    FIXME("%#x, %p.\n", queue, priority);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqRegisterPlatformWithMMCSS(const WCHAR *class, DWORD *taskid, LONG priority)
{
    FIXME("%s, %p, %d.\n", debugstr_w(class), taskid, priority);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqUnregisterPlatformFromMMCSS(void)
{
    FIXME("\n");

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqBeginRegisterWorkQueueWithMMCSS(DWORD queue, const WCHAR *class, DWORD taskid, LONG priority,
        IRtwqAsyncCallback *callback, IUnknown *state)
{
    FIXME("%#x, %s, %u, %d, %p, %p.\n", queue, debugstr_w(class), taskid, priority, callback, state);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqEndRegisterWorkQueueWithMMCSS(IRtwqAsyncResult *result, DWORD *taskid)
{
    FIXME("%p, %p.\n", result, taskid);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqBeginUnregisterWorkQueueWithMMCSS(DWORD queue, IRtwqAsyncCallback *callback, IUnknown *state)
{
    FIXME("%#x, %p, %p.\n", queue, callback, state);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqEndUnregisterWorkQueueWithMMCSS(IRtwqAsyncResult *result)
{
    FIXME("%p.\n", result);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqRegisterPlatformEvents(IRtwqPlatformEvents *events)
{
    FIXME("%p.\n", events);

    return E_NOTIMPL;
}

HRESULT WINAPI RtwqUnregisterPlatformEvents(IRtwqPlatformEvents *events)
{
    FIXME("%p.\n", events);

    return E_NOTIMPL;
}