/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2010, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
/* Local prototypes */
static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
			 u32 level, void *obj_desc, void **return_value);

static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
			  u32 level, void *info, void **return_value);

static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
							       interrupt_number);

static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_valid_gpe_event
 *
 * PARAMETERS:  gpe_event_info              - Info for this GPE
 *
 * RETURN:      TRUE if the gpe_event is valid
 *
 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	struct acpi_gpe_block_info *gpe_block;

	ACPI_FUNCTION_ENTRY();

	/* No need for spin lock since we are not changing any list elements */

	/* Walk the GPE interrupt levels */

	gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_block) {
		gpe_block = gpe_xrupt_block->gpe_block_list_head;

		/* Walk the GPE blocks on this interrupt level */

		while (gpe_block) {
			if ((&gpe_block->event_info[0] <= gpe_event_info) &&
			    (&gpe_block->event_info[((acpi_size)
						     gpe_block->
						     register_count) * 8] >
			     gpe_event_info)) {
				return (TRUE);
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_block = gpe_xrupt_block->next;
	}

	return (FALSE);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_walk_gpe_list
 *
 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 *              Context             - Value passed to callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Walk the GPE lists.
 *
 ******************************************************************************/

acpi_status
acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_info;
	acpi_status status = AE_OK;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_walk_gpe_list);

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Walk the interrupt level descriptor list */

	gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
	while (gpe_xrupt_info) {

		/* Walk all Gpe Blocks attached to this interrupt level */

		gpe_block = gpe_xrupt_info->gpe_block_list_head;
		while (gpe_block) {

			/* One callback per GPE block */

			status =
			    gpe_walk_callback(gpe_xrupt_info, gpe_block,
					      context);
			if (ACPI_FAILURE(status)) {
				if (status == AE_CTRL_END) {	/* Callback abort */
					status = AE_OK;
				}
				goto unlock_and_exit;
			}

			gpe_block = gpe_block->next;
		}

		gpe_xrupt_info = gpe_xrupt_info->next;
	}

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_handlers
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 *              gpe_block           - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 *              Used only prior to termination.
 *
 ******************************************************************************/

acpi_status
acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			    struct acpi_gpe_block_info *gpe_block,
			    void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);

	/* Examine each GPE Register within the block */

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Now look at the individual GPEs in this byte register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			gpe_event_info = &gpe_block->event_info[((acpi_size) i *
								 ACPI_GPE_REGISTER_WIDTH)
								+ j];

			if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
			    ACPI_GPE_DISPATCH_HANDLER) {
				ACPI_FREE(gpe_event_info->dispatch.handler);
				gpe_event_info->dispatch.handler = NULL;
				gpe_event_info->flags &=
				    ~ACPI_GPE_DISPATCH_MASK;
			}
		}
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_save_method_info
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              control method under the _GPE portion of the namespace.
 *              Extract the name and GPE type from the object, saving this
 *              information for quick lookup during GPE dispatch
 *
 *              The name of each GPE control method is of the form:
 *              "_Lxx" or "_Exx", where:
 *                  L      - means that the GPE is level triggered
 *                  E      - means that the GPE is edge triggered
 *                  xx     - is the GPE number [in HEX]
 *
 ******************************************************************************/

static acpi_status
acpi_ev_save_method_info(acpi_handle obj_handle,
			 u32 level, void *obj_desc, void **return_value)
{
	struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	char name[ACPI_NAME_SIZE + 1];
	u8 type;

	ACPI_FUNCTION_TRACE(ev_save_method_info);

	/*
	 * _Lxx and _Exx GPE method support
	 *
	 * 1) Extract the name from the object and convert to a string
	 */
	ACPI_MOVE_32_TO_32(name,
			   &((struct acpi_namespace_node *)obj_handle)->name.
			   integer);
	name[ACPI_NAME_SIZE] = 0;

	/*
	 * 2) Edge/Level determination is based on the 2nd character
	 *    of the method name
	 *
	 * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
	 * if a _PRW object is found that points to this GPE.
	 */
	switch (name[1]) {
	case 'L':
		type = ACPI_GPE_LEVEL_TRIGGERED;
		break;

	case 'E':
		type = ACPI_GPE_EDGE_TRIGGERED;
		break;

	default:
		/* Unknown method type, just ignore it! */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Ignoring unknown GPE method type: %s "
				  "(name not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Convert the last two characters of the name to the GPE Number */

	gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
	if (gpe_number == ACPI_UINT32_MAX) {

		/* Conversion failed; invalid method, just ignore it */

		ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
				  "Could not extract GPE number from name: %s "
				  "(name is not of form _Lxx or _Exx)", name));
		return_ACPI_STATUS(AE_OK);
	}

	/* Ensure that we have a valid GPE number for this GPE block */

	if ((gpe_number < gpe_block->block_base_number) ||
	    (gpe_number >= (gpe_block->block_base_number +
			    (gpe_block->register_count * 8)))) {
		/*
		 * Not valid for this GPE block, just ignore it. However, it may be
		 * valid for a different GPE block, since GPE0 and GPE1 methods both
		 * appear under \_GPE.
		 */
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Now we can add this information to the gpe_event_info block for use
	 * during dispatch of this GPE.
	 */
	gpe_event_info =
	    &gpe_block->event_info[gpe_number - gpe_block->block_base_number];

	gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);

	gpe_event_info->dispatch.method_node =
	    (struct acpi_namespace_node *)obj_handle;

	ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
			  "Registered GPE method %s as GPE number 0x%.2X\n",
			  name, gpe_number));
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_match_prw_and_gpe
 *
 * PARAMETERS:  Callback from walk_namespace
 *
 * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
 *              not aborted on a single _PRW failure.
 *
 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 *              Device. Run the _PRW method. If present, extract the GPE
 *              number and mark the GPE as a WAKE GPE.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
			  u32 level, void *info, void **return_value)
{
	struct acpi_gpe_walk_info *gpe_info = (void *)info;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *target_gpe_device;
	struct acpi_gpe_event_info *gpe_event_info;
	union acpi_operand_object *pkg_desc;
	union acpi_operand_object *obj_desc;
	u32 gpe_number;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);

	/* Check for a _PRW method under this device */

	status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
					 ACPI_BTYPE_PACKAGE, &pkg_desc);
	if (ACPI_FAILURE(status)) {

		/* Ignore all errors from _PRW, we don't want to abort the subsystem */

		return_ACPI_STATUS(AE_OK);
	}

	/* The returned _PRW package must have at least two elements */

	if (pkg_desc->package.count < 2) {
		goto cleanup;
	}

	/* Extract pointers from the input context */

	gpe_device = gpe_info->gpe_device;
	gpe_block = gpe_info->gpe_block;

	/*
	 * The _PRW object must return a package, we are only interested in the
	 * first element
	 */
	obj_desc = pkg_desc->package.elements[0];

	if (obj_desc->common.type == ACPI_TYPE_INTEGER) {

		/* Use FADT-defined GPE device (from definition of _PRW) */

		target_gpe_device = acpi_gbl_fadt_gpe_device;

		/* Integer is the GPE number in the FADT described GPE blocks */

		gpe_number = (u32) obj_desc->integer.value;
	} else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {

		/* Package contains a GPE reference and GPE number within a GPE block */

		if ((obj_desc->package.count < 2) ||
		    ((obj_desc->package.elements[0])->common.type !=
		     ACPI_TYPE_LOCAL_REFERENCE) ||
		    ((obj_desc->package.elements[1])->common.type !=
		     ACPI_TYPE_INTEGER)) {
			goto cleanup;
		}

		/* Get GPE block reference and decode */

		target_gpe_device =
		    obj_desc->package.elements[0]->reference.node;
		gpe_number =
		    (u32) obj_desc->package.elements[1]->integer.value;
	} else {
		/* Unknown type, just ignore it */

		goto cleanup;
	}

	/*
	 * Is this GPE within this block?
	 *
	 * TRUE if and only if these conditions are true:
	 *     1) The GPE devices match.
	 *     2) The GPE index(number) is within the range of the Gpe Block
	 *        associated with the GPE device.
	 */
	if ((gpe_device == target_gpe_device) &&
	    (gpe_number >= gpe_block->block_base_number) &&
	    (gpe_number < gpe_block->block_base_number +
	     (gpe_block->register_count * 8))) {
		gpe_event_info = &gpe_block->event_info[gpe_number -
							gpe_block->
							block_base_number];

		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
	}

cleanup:
	acpi_ut_remove_reference(pkg_desc);
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number    - Interrupt for a GPE block
 *
 * RETURN:      A GPE interrupt block
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/

static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
							       interrupt_number)
{
	struct acpi_gpe_xrupt_info *next_gpe_xrupt;
	struct acpi_gpe_xrupt_info *gpe_xrupt;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);

	/* No need for lock since we are not changing any list elements here */

	next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
	while (next_gpe_xrupt) {
		if (next_gpe_xrupt->interrupt_number == interrupt_number) {
			return_PTR(next_gpe_xrupt);
		}

		next_gpe_xrupt = next_gpe_xrupt->next;
	}

	/* Not found, must allocate a new xrupt descriptor */

	gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
	if (!gpe_xrupt) {
		return_PTR(NULL);
	}

	gpe_xrupt->interrupt_number = interrupt_number;

	/* Install new interrupt descriptor with spin lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (acpi_gbl_gpe_xrupt_list_head) {
		next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
		while (next_gpe_xrupt->next) {
			next_gpe_xrupt = next_gpe_xrupt->next;
		}

		next_gpe_xrupt->next = gpe_xrupt;
		gpe_xrupt->previous = next_gpe_xrupt;
	} else {
		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Install new interrupt handler if not SCI_INT */

	if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
		status = acpi_os_install_interrupt_handler(interrupt_number,
							   acpi_ev_gpe_xrupt_handler,
							   gpe_xrupt);
		if (ACPI_FAILURE(status)) {
			ACPI_ERROR((AE_INFO,
				    "Could not install GPE interrupt handler at level 0x%X",
				    interrupt_number));
			return_PTR(NULL);
		}
	}

	return_PTR(gpe_xrupt);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 *
 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 *              interrupt handler if not the SCI interrupt.
 *
 ******************************************************************************/

static acpi_status
acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);

	/* We never want to remove the SCI interrupt handler */

	if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
		gpe_xrupt->gpe_block_list_head = NULL;
		return_ACPI_STATUS(AE_OK);
	}

	/* Disable this interrupt */

	status =
	    acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
					     acpi_ev_gpe_xrupt_handler);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Unlink the interrupt block with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt->previous) {
		gpe_xrupt->previous->next = gpe_xrupt->next;
	} else {
		/* No previous, update list head */

		acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
	}

	if (gpe_xrupt->next) {
		gpe_xrupt->next->previous = gpe_xrupt->previous;
	}
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	/* Free the block */

	ACPI_FREE(gpe_xrupt);
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Xrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number)
{
	struct acpi_gpe_block_info *next_gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_install_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
	if (!gpe_xrupt_block) {
		status = AE_NO_MEMORY;
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	} else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable all GPEs in this block */

	status =
	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);

	if (!gpe_block->previous && !gpe_block->next) {

		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	} else {
		/* Remove the block on this interrupt with lock */

		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		} else {
			gpe_block->xrupt_block->gpe_block_list_head =
			    gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}
		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	}

	acpi_current_gpe_count -=
	    gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;

	/* Free the gpe_block */

	ACPI_FREE(gpe_block->register_info);
	ACPI_FREE(gpe_block->event_info);
	ACPI_FREE(gpe_block);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_register_info *gpe_register_info = NULL;
	struct acpi_gpe_event_info *gpe_event_info = NULL;
	struct acpi_gpe_event_info *this_event;
	struct acpi_gpe_register_info *this_register;
	u32 i;
	u32 j;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

	/* Allocate the GPE register information block */

	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
						 register_count *
						 sizeof(struct
							acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeRegisterInfo table"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register. Initialization to zeros is sufficient.
	 */
	gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
					       register_count *
					       ACPI_GPE_REGISTER_WIDTH) *
					      sizeof(struct
						     acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeEventInfo table"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures. A goal of these
	 * tables is to hide the fact that there are two separate GPE register
	 * sets in a given GPE hardware block, the status registers occupy the
	 * first half, and the enable registers occupy the second half.
	 */
	this_register = gpe_register_info;
	this_event = gpe_event_info;

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number =
		    (u8) (gpe_block->block_base_number +
			  (i * ACPI_GPE_REGISTER_WIDTH));

		this_register->status_address.address =
		    gpe_block->block_address.address + i;

		this_register->enable_address.address =
		    gpe_block->block_address.address + i +
		    gpe_block->register_count;

		this_register->status_address.space_id =
		    gpe_block->block_address.space_id;
		this_register->enable_address.space_id =
		    gpe_block->block_address.space_id;
		this_register->status_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->enable_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->status_address.bit_offset = 0;
		this_register->enable_address.bit_offset = 0;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			this_event->gpe_number =
			    (u8) (this_register->base_gpe_number + j);
			this_event->register_info = this_register;
			this_event++;
		}

		/* Disable all GPEs within this register */

		status = acpi_hw_write(0x00, &this_register->enable_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Clear any pending GPE events within this register */

		status = acpi_hw_write(0xFF, &this_register->status_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS(AE_OK);

error_exit:
	if (gpe_register_info) {
		ACPI_FREE(gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_FREE(gpe_event_info);
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              gpe_block_address   - Address and space_iD
 *              register_count      - Number of GPE register pairs in the block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number    - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 struct acpi_generic_address *gpe_block_address,
			 u32 register_count,
			 u8 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->node = gpe_device;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
		    sizeof(struct acpi_generic_address));

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Find all GPE methods (_Lxx, _Exx) for this block */

	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_save_method_info, NULL,
					gpe_block, NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
			  (u32) gpe_block->block_base_number,
			  (u32) (gpe_block->block_base_number +
				 ((gpe_block->register_count *
				   ACPI_GPE_REGISTER_WIDTH) - 1)),
			  gpe_device->name.ascii, gpe_block->register_count,
			  interrupt_number));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  gpe_device  - Handle to the parent GPE block
 *              gpe_block   - Gpe Block info
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. First find and run any
 *              _PRW methods associated with the block, then enable the
 *              appropriate GPEs.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
			     struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_walk_info gpe_info;
	u32 wake_gpe_count;
	u32 gpe_enabled_count;
	u32 gpe_number;
	u32 i;
	u32 j;

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/* Ignore a null GPE block (e.g., if no GPE block 1 exists) */

	if (!gpe_block) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Runtime option: Should wake GPEs be enabled at runtime?  The default
	 * is no, they should only be enabled just as the machine goes to sleep.
	 */
	if (acpi_gbl_leave_wake_gpes_disabled) {
		/*
		 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
		 * Each GPE that has one or more _PRWs that reference it is by
		 * definition a wake GPE and will not be enabled while the machine
		 * is running.
		 */
		gpe_info.gpe_block = gpe_block;
		gpe_info.gpe_device = gpe_device;

		status =
		    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
					   ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
					   acpi_ev_match_prw_and_gpe, NULL,
					   &gpe_info, NULL);
	}

	/*
	 * Enable all GPEs that have a corresponding method and aren't
	 * capable of generating wakeups. Any other GPEs within this block
	 * must be enabled via the acpi_enable_gpe() interface.
	 */
	wake_gpe_count = 0;
	gpe_enabled_count = 0;
	if (gpe_device == acpi_gbl_fadt_gpe_device)
		gpe_device = NULL;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			acpi_size gpe_index;

			/* Get the info block for this particular GPE */

			gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];

			if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
				wake_gpe_count++;
				if (acpi_gbl_leave_wake_gpes_disabled)
					continue;
			}

			if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD))
				continue;

			gpe_number = gpe_index + gpe_block->block_base_number;
			status = acpi_enable_gpe(gpe_device, gpe_number,
						 ACPI_GPE_TYPE_RUNTIME);
			if (ACPI_FAILURE(status))
				ACPI_ERROR((AE_INFO,
					    "Failed to enable GPE %02X\n",
					    gpe_number));
			else
				gpe_enabled_count++;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
			  wake_gpe_count, gpe_enabled_count));

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_initialize
 *
 * PARAMETERS:  None
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize the GPE data structures
 *
 ******************************************************************************/

acpi_status acpi_ev_gpe_initialize(void)
{
	u32 register_count0 = 0;
	u32 register_count1 = 0;
	u32 gpe_number_max = 0;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_initialize);

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the GPE Block(s) defined in the FADT
	 *
	 * Why the GPE register block lengths are divided by 2:  From the ACPI
	 * Spec, section "General-Purpose Event Registers", we have:
	 *
	 * "Each register block contains two registers of equal length
	 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
	 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
	 * The length of the GPE1_STS and GPE1_EN registers is equal to
	 * half the GPE1_LEN. If a generic register block is not supported
	 * then its respective block pointer and block length values in the
	 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
	 * to be the same size."
	 */

	/*
	 * Determine the maximum GPE number for this machine.
	 *
	 * Note: both GPE0 and GPE1 are optional, and either can exist without
	 * the other.
	 *
	 * If EITHER the register length OR the block address are zero, then that
	 * particular block is not supported.
	 */
	if (acpi_gbl_FADT.gpe0_block_length &&
	    acpi_gbl_FADT.xgpe0_block.address) {

		/* GPE block 0 exists (has both length and address > 0) */

		register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);

		gpe_number_max =
		    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;

		/* Install GPE Block 0 */

		status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						  &acpi_gbl_FADT.xgpe0_block,
						  register_count0, 0,
						  acpi_gbl_FADT.sci_interrupt,
						  &acpi_gbl_gpe_fadt_blocks[0]);

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Could not create GPE Block 0"));
		}
	}

	if (acpi_gbl_FADT.gpe1_block_length &&
	    acpi_gbl_FADT.xgpe1_block.address) {

		/* GPE block 1 exists (has both length and address > 0) */

		register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);

		/* Check for GPE0/GPE1 overlap (if both banks exist) */

		if ((register_count0) &&
		    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
			ACPI_ERROR((AE_INFO,
				    "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
				    "(GPE %d to %d) - Ignoring GPE1",
				    gpe_number_max, acpi_gbl_FADT.gpe1_base,
				    acpi_gbl_FADT.gpe1_base +
				    ((register_count1 *
				      ACPI_GPE_REGISTER_WIDTH) - 1)));

			/* Ignore GPE1 block by setting the register count to zero */

			register_count1 = 0;
		} else {
			/* Install GPE Block 1 */

			status =
			    acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
						     &acpi_gbl_FADT.xgpe1_block,
						     register_count1,
						     acpi_gbl_FADT.gpe1_base,
						     acpi_gbl_FADT.
						     sci_interrupt,
						     &acpi_gbl_gpe_fadt_blocks
						     [1]);

			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not create GPE Block 1"));
			}

			/*
			 * GPE0 and GPE1 do not have to be contiguous in the GPE number
			 * space. However, GPE0 always starts at GPE number zero.
			 */
			gpe_number_max = acpi_gbl_FADT.gpe1_base +
			    ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
		}
	}

	/* Exit if there are no GPE registers */

	if ((register_count0 + register_count1) == 0) {

		/* GPEs are not required by ACPI, this is OK */

		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "There are no GPE blocks defined in the FADT\n"));
		status = AE_OK;
		goto cleanup;
	}

	/* Check for Max GPE number out-of-range */

	if (gpe_number_max > ACPI_GPE_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Maximum GPE number from FADT is too large: 0x%X",
			    gpe_number_max));
		status = AE_BAD_VALUE;
		goto cleanup;
	}

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(AE_OK);
}