/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include <acpi/acparser.h>
#include <acpi/amlcode.h>
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acdisasm.h>

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  Status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invoke the global exception handler if
 *              present, dump the method data if the disassembler is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}
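
	/*
	 * acpi_gbl_exception_handler is the (single) handler optionally
	 * registered by the host via acpi_install_exception_handler();
	 * it is NULL when no handler has been installed.
	 */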

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		status = acpi_gbl_exception_handler(status,
						    walk_state->method_node ?
						    walk_state->method_node->
						    name.integer : 0,
						    walk_state->opcode,
						    walk_state->aml_offset,
						    NULL);
		(void)acpi_ex_enter_interpreter();
	}
#ifdef ACPI_DISASSEMBLER
	if (ACPI_FAILURE(status)) {

		/* Display method locals/args if disassembler is present */

		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
	}
#endif

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  method_desc         - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {

		/* Delete the half-built mutex object so it is not leaked */

		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}
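
	/*
	 * Illustrative ASL (not from this file): a method declared as
	 *
	 *     Method (EXMP, 1, Serialized, 2) { ... }
	 *
	 * is Serialized with SyncLevel 2. Only one thread may execute it at a
	 * time, which is enforced with the per-method mutex handled below.
	 */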

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention
		 *
		 * Top-level method invocation has no walk state at this point
		 */
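		/*
		 * Example: a thread whose current_sync_level is 3 (because it
		 * holds a SyncLevel-3 mutex) may only enter Serialized methods
		 * whose SyncLevel is 3 or higher; entering a SyncLevel-2 method
		 * is rejected below with AE_AML_MUTEX_ORDER instead of risking
		 * a deadlock.
		 */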
		if (walk_state &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.owner_thread ||
		    (walk_state->thread !=
		     obj_desc->method.mutex->mutex.owner_thread)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.owner_thread =
				    walk_state->thread;
				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}
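
	/*
	 * Note: acquisition_depth was incremented above once per (possibly
	 * recursive) entry to the serialized method. It is decremented in
	 * acpi_ds_terminate_control_method(), which releases the mutex and
	 * restores the saved original_sync_level only when the outermost
	 * invocation exits (depth returns to zero).
	 */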

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	return_ACPI_STATUS(status);

      cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  Thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              Op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
						    NULL, obj_desc, thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {

		/* Go through cleanup so the new walk state is not leaked */

		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];
	info->parameter_type = ACPI_PARAM_ARGS;
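
	/*
	 * The info block hands the caller's resolved operands to
	 * acpi_ds_init_aml_walk(), which copies them into the new walk state
	 * so that they become the callee's method arguments (Arg0, Arg1, ...)
	 */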

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));
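
	/*
	 * From here, execution continues in next_walk_state; the calling walk
	 * state is preempted and is resumed later by
	 * acpi_ds_restart_control_method() with the callee's return value.
	 */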

	/* Invoke an internal method if necessary */

	if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
		status = obj_desc->method.implementation(next_walk_state);
	}

	return_ACPI_STATUS(status);

      cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	if (next_walk_state) {
		acpi_ds_delete_walk_state(next_walk_state);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
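		/*
		 * Illustrative ASL (not from this file): with implicit return
		 * enabled, a caller of
		 *
		 *     Method (MTH1) { Store (0x42, Local0) }
		 *
		 * may receive 0x42 even though MTH1 contains no Return
		 * statement, because the result of the last operation is saved
		 * as the implicit return value.
		 */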
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{
	struct acpi_namespace_node *method_node;
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);
	}

	/*
	 * If method is serialized, release the mutex and restore the
	 * current sync level for this thread
	 */
	if (method_desc->method.mutex) {

		/* Acquisition Depth handles recursive calls */

		method_desc->method.mutex->mutex.acquisition_depth--;
		if (!method_desc->method.mutex->mutex.acquisition_depth) {
			walk_state->thread->current_sync_level =
			    method_desc->method.mutex->mutex.
			    original_sync_level;

			acpi_os_release_mutex(method_desc->method.mutex->mutex.
					      os_mutex);
			method_desc->method.mutex->mutex.owner_thread = NULL;
		}
	}

	if (walk_state) {
		/*
		 * Delete any objects created by this method during execution.
		 * The method Node is stored in the walk state
		 */
		method_node = walk_state->method_node;

		/*
		 * Delete any namespace objects created anywhere within
		 * the namespace by the execution of this method
		 */
		acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %d threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception
		 *
		 * This code is here because we must wait until the last thread exits
		 * before creating the synchronization semaphore.
		 */
		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
		    && (!method_desc->method.mutex)) {
			status = acpi_ds_create_method_mutex(method_desc);
		}

		/* No more threads, we can free the owner_id */

		acpi_ut_release_owner_id(&method_desc->method.owner_id);
	}

	return_VOID;
}