drivers/acpi/acpica/dsmethod.c [Linux 4.19.133]
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 * Copyright (C) 2000 - 2018, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdebug.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")
/* Local prototypes */
static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op);

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_auto_serialize_method
 *
 * PARAMETERS:  node                - Namespace Node of the method
 *              obj_desc            - Method object attached to node
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse a control method AML to scan for control methods that
 *              need serialization due to the creation of named objects.
 *
 * NOTE: It is a bit of overkill to mark all such methods serialized, since
 * there is only a problem if the method actually blocks during execution.
 * A blocking operation is, for example, a Sleep() operation, or any access
 * to an operation region. However, it is probably not possible to easily
 * detect whether a method will block or not, so we simply mark all suspicious
 * methods as serialized.
 *
 * NOTE2: This code is essentially a generic routine for parsing a single
 * control method.
 *
 ******************************************************************************/

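/*
 * Illustrative ASL (hypothetical, not part of this file): a NotSerialized
 * method of the following shape is what the parse below flags, because it
 * creates a named object at run time and may block before finishing:
 *
 *     Method (DEMO, 0, NotSerialized)
 *     {
 *         Name (TVAL, 0)      // Named object created during execution
 *         Sleep (10)          // Potential blocking point
 *         Return (TVAL)
 *     }
 *
 * If a second thread entered DEMO while the first was sleeping, its Name()
 * would fail with AE_ALREADY_EXISTS; marking the method Serialized avoids
 * this.
 */
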
acpi_status
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
			      union acpi_operand_object *obj_desc)
{
	acpi_status status;
	union acpi_parse_object *op = NULL;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Method auto-serialization parse [%4.4s] %p\n",
			  acpi_ut_get_node_name(node), node));

	/* Create/Init a root op for the method parse tree */

	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		acpi_ps_free_op(op);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 0);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		acpi_ps_free_op(op);
		return_ACPI_STATUS(status);
	}

	walk_state->descending_callback = acpi_ds_detect_named_opcodes;

	/* Parse the method, scan for creation of named objects */

	status = acpi_ps_parse_aml(walk_state);

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_detect_named_opcodes
 *
 * PARAMETERS:  walk_state      - Current state of the parse tree walk
 *              out_op          - Unused, required for parser interface
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
 *              Currently used to detect methods that must be marked serialized
 *              in order to avoid problems with the creation of named objects.
 *
 ******************************************************************************/

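/*
 * Illustrative trigger (hypothetical ASL, not part of this file): the
 * callback below terminates the parse as soon as it sees any opcode with
 * the AML_NAMED, AML_CREATE or AML_FIELD flag, for example:
 *
 *     Method (CHCK, 1, NotSerialized)
 *     {
 *         CreateDWordField (Arg0, 0, DWD0)    // AML_CREATE class opcode
 *         Return (DWD0)
 *     }
 */
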
static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op)
{

	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);

	/* We are only interested in opcodes that create a new name */

	if (!
	    (walk_state->op_info->
	     flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
		return (AE_OK);
	}

	/*
	 * At this point, we know we have a Named object opcode.
	 * Mark the method as serialized. Later code will create a mutex for
	 * this method to enforce serialization.
	 *
	 * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
	 * Sync Level mechanism for this method, even though it is now serialized.
	 * Otherwise, there can be conflicts with existing ASL code that actually
	 * uses sync levels.
	 */
	walk_state->method_desc->method.sync_level = 0;
	walk_state->method_desc->method.info_flags |=
	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
			  walk_state->method_node->name.ascii,
			  walk_state->method_node, walk_state->op_info->name,
			  walk_state->opcode));

	/* Abort the parse, no need to examine this method any further */

	return (AE_CTRL_TERMINATE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invoke the global exception handler if
 *              present, dump the method data if the debugger is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

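/*
 * Sketch of a host-installed handler (illustrative, not part of this file;
 * the handler body and names are hypothetical). acpi_gbl_exception_handler
 * is normally set through acpi_install_exception_handler(), and a handler
 * may remap the status, including to AE_OK to let the method continue:
 *
 *     static acpi_status
 *     my_aml_exception_handler(acpi_status aml_status, acpi_name name,
 *                              u16 opcode, u32 aml_offset, void *context)
 *     {
 *         // Log the fault here; returning AE_OK would suppress the abort.
 *         return (aml_status);
 *     }
 *
 *     // During host/OSPM initialization (hypothetical call site):
 *     //     acpi_install_exception_handler(my_aml_exception_handler);
 */
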
acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	u32 aml_offset;
	acpi_name name = 0;

	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
						walk_state->parser_state.
						aml_start);

		if (walk_state->method_node) {
			name = walk_state->method_node->name.integer;
		} else if (walk_state->deferred_node) {
			name = walk_state->deferred_node->name.integer;
		}

		status = acpi_gbl_exception_handler(status, name,
						    walk_state->opcode,
						    aml_offset, NULL);
		acpi_ex_enter_interpreter();
	}

	acpi_ds_clear_implicit_return(walk_state);

	if (ACPI_FAILURE(status)) {
		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);

		/* Display method locals/args if debugger is present */

#ifdef ACPI_DEBUGGER
		acpi_db_dump_method_info(status, walk_state);
#endif
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  method_desc         - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/

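/*
 * Illustrative ASL for the SyncLevel check performed below (hypothetical,
 * not part of this file): a thread whose current sync level is already
 * higher than a Serialized method's declared SyncLevel is rejected with
 * AE_AML_MUTEX_ORDER:
 *
 *     Mutex (MTX8, 8)
 *     Method (LOW2, 0, Serialized, 2) { Return (Zero) }
 *
 *     Method (CALL, 0, NotSerialized)
 *     {
 *         Acquire (MTX8, 0xFFFF)  // current sync level is now 8
 *         LOW2 ()                 // 8 > 2, so the call is rejected
 *         Release (MTX8)
 *     }
 */
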
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention.
		 *
		 * If the method was auto-serialized, we just ignore the sync level
		 * mechanism, because auto-serialization of methods can interfere
		 * with ASL code that actually uses sync levels.
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (!(obj_desc->method.
		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
		    && (walk_state->thread->current_sync_level >
			obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s]"
				    ", current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;

				/*
				 * Update the current sync_level only if this is not an auto-
				 * serialized method. In the auto case, we have to ignore
				 * the sync level for the method mutex (created for the
				 * auto-serialization) because we have no idea of what the
				 * sync level should be. Therefore, just ignore it.
				 */
				if (!(obj_desc->method.info_flags &
				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
					walk_state->thread->current_sync_level =
					    obj_desc->method.sync_level;
				}
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    acpi_os_get_thread_id();
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status =
	    acpi_ds_begin_method_execution(method_node, obj_desc,
					   this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
				      thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	acpi_ds_delete_walk_state(next_walk_state);

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

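/*
 * Example of the "implicit return" case handled below (hypothetical ASL,
 * not part of this file): some legacy AML omits an explicit Return() and
 * expects the result of the last operation to reach the caller:
 *
 *     Method (GETV, 1, NotSerialized)
 *     {
 *         Add (Arg0, 5, Local0)   // No Return(); the Add result may still
 *     }                           // be used by callers of GETV
 */
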
acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

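/*
 * Failure sequence behind the SERIALIZED_PENDING handling below
 * (illustrative timeline, method/object names hypothetical):
 *
 *     Thread A: DEMO() executes Name (OBJ1, 1), then blocks (e.g. Sleep)
 *     Thread B: DEMO() starts, its Name (OBJ1, 1) fails with
 *               AE_ALREADY_EXISTS and the method is flagged
 *               ACPI_METHOD_SERIALIZED_PENDING elsewhere in the interpreter
 *     Last exit: the code below converts the pending flag into
 *               ACPI_METHOD_SERIALIZED so later invocations are serialized
 */
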
void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want to make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			(void)acpi_ex_exit_interpreter();
			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);
			(void)acpi_ex_enter_interpreter();

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				(void)acpi_ex_exit_interpreter();
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				(void)acpi_ex_enter_interpreter();
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO(("Marking method %4.4s as Serialized "
					   "because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Workaround this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;

			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		if (!
		    (method_desc->method.
		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
				  method.node, method_desc, walk_state);

	return_VOID;
}