drivers/acpi/acpica/evgpeblk.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Xrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number)
{
	struct acpi_gpe_block_info *next_gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_install_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status =
	    acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	} else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/

acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable all GPEs in this block */

	status =
	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (!gpe_block->previous && !gpe_block->next) {

		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	} else {
		/* Remove the block on this interrupt with lock */

		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		} else {
			gpe_block->xrupt_block->gpe_block_list_head =
			    gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}

		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	}

	acpi_current_gpe_count -= gpe_block->gpe_count;

	/* Free the gpe_block */

	ACPI_FREE(gpe_block->register_info);
	ACPI_FREE(gpe_block->event_info);
	ACPI_FREE(gpe_block);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

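/*
 * Editor's note (illustrative, hedged): callers of this routine are expected
 * to look up the GPE block attached to a GPE device object and then simply
 * call:
 *
 *	status = acpi_ev_delete_gpe_block(gpe_block);
 *
 * In the ACPICA sources the public acpi_remove_gpe_block() interface performs
 * that lookup; the caller name is an assumption and is not defined in this
 * file.
 */
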
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block   - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_register_info *gpe_register_info = NULL;
	struct acpi_gpe_event_info *gpe_event_info = NULL;
	struct acpi_gpe_event_info *this_event;
	struct acpi_gpe_register_info *this_register;
	u32 i;
	u32 j;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

	/* Allocate the GPE register information block */

	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
						 register_count *
						 sizeof(struct
							acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeRegisterInfo table"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register. Initialization to zeros is sufficient.
	 */
	gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
					      sizeof(struct
						     acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeEventInfo table"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures. A goal of these
	 * tables is to hide the fact that there are two separate GPE register
	 * sets in a given GPE hardware block, the status registers occupy the
	 * first half, and the enable registers occupy the second half.
	 */
	this_register = gpe_register_info;
	this_event = gpe_event_info;

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number = (u16)
		    (gpe_block->block_base_number +
		     (i * ACPI_GPE_REGISTER_WIDTH));

		this_register->status_address.address = gpe_block->address + i;

		this_register->enable_address.address =
		    gpe_block->address + i + gpe_block->register_count;

		this_register->status_address.space_id = gpe_block->space_id;
		this_register->enable_address.space_id = gpe_block->space_id;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			this_event->gpe_number =
			    (u8) (this_register->base_gpe_number + j);
			this_event->register_info = this_register;
			this_event++;
		}

		/* Disable all GPEs within this register */

		status = acpi_hw_gpe_write(0x00, &this_register->enable_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Clear any pending GPE events within this register */

		status = acpi_hw_gpe_write(0xFF, &this_register->status_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS(AE_OK);

error_exit:
	if (gpe_register_info) {
		ACPI_FREE(gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_FREE(gpe_event_info);
	}

	return_ACPI_STATUS(status);
}

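/*
 * Editor's note (illustrative worked example, not ACPICA code): for a
 * hypothetical GPE block at address 0x1000 with register_count == 2 and
 * block_base_number 0x00, the loop above produces:
 *
 *	Register 0 (GPE 0x00-0x07): status at 0x1000, enable at 0x1002
 *	Register 1 (GPE 0x08-0x0F): status at 0x1001, enable at 0x1003
 *
 * i.e. status_address = block address + i, and
 * enable_address = block address + i + register_count, reflecting the
 * status-registers-first / enable-registers-second layout described above.
 */
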
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              address             - Address of the GPE register block
 *              space_id            - Address space of the register block
 *              register_count      - Number of GPE register pairs in the block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number    - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 u64 address,
			 u8 space_id,
			 u32 register_count,
			 u16 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Validate the space_ID */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		ACPI_ERROR((AE_INFO,
			    "Unsupported address space: 0x%X", space_id));
		return_ACPI_STATUS(AE_SUPPORT);
	}

	if (space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		status = acpi_hw_validate_io_block(address,
						   ACPI_GPE_REGISTER_WIDTH,
						   register_count);
		if (ACPI_FAILURE(status))
			return_ACPI_STATUS(status);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->address = address;
	gpe_block->space_id = space_id;
	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->initialized = FALSE;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block->register_info);
		ACPI_FREE(gpe_block->event_info);
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	acpi_gbl_all_gpes_initialized = FALSE;

	/* Find all GPE methods (_Lxx or _Exx) for this block */

	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.execute_by_owner_id = FALSE;

	(void)acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
				     ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
				     acpi_ev_match_gpe_method, NULL, &walk_info,
				     NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      " Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n",
			      (u32)gpe_block->block_base_number,
			      (u32)(gpe_block->block_base_number +
				    (gpe_block->gpe_count - 1)),
			      gpe_device->name.ascii, gpe_block->register_count,
			      interrupt_number,
			      interrupt_number ==
			      acpi_gbl_FADT.sci_interrupt ? " (SCI)" : ""));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}

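/*
 * Editor's note (illustrative, hedged): ACPICA's GPE initialization code
 * creates the FADT GPE0 block with a call of roughly this shape (see
 * acpi_ev_gpe_initialize() in evgpeinit.c; the exact arguments used there
 * may differ, so treat this as a sketch only):
 *
 *	register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
 *	status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
 *					  acpi_gbl_FADT.xgpe0_block.address,
 *					  acpi_gbl_FADT.xgpe0_block.space_id,
 *					  register_count0, 0,
 *					  acpi_gbl_FADT.sci_interrupt,
 *					  &acpi_gbl_gpe_fadt_blocks[0]);
 */
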
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  acpi_gpe_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
 *              associated methods.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/

acpi_status
acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			     struct acpi_gpe_block_info *gpe_block,
			     void *context)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_enabled_count;
	u32 gpe_index;
	u32 i;
	u32 j;
	u8 *is_polling_needed = context;
	ACPI_ERROR_ONLY(u32 gpe_number);

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/*
	 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
	 * any GPE blocks that have been initialized already.
	 */
	if (!gpe_block || gpe_block->initialized) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Enable all GPEs that have a corresponding method and have the
	 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
	 * must be enabled via the acpi_enable_gpe() interface.
	 */
	gpe_enabled_count = 0;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

			/* Get the info block for this particular GPE */

			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];
			ACPI_ERROR_ONLY(gpe_number =
					gpe_block->block_base_number +
					gpe_index);
			gpe_event_info->flags |= ACPI_GPE_INITIALIZED;

			/*
			 * Ignore GPEs that have no corresponding _Lxx/_Exx method
			 * and GPEs that are used for wakeup
			 */
			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
			     ACPI_GPE_DISPATCH_METHOD)
			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
				continue;
			}

			status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not enable GPE 0x%02X",
						gpe_number));
				continue;
			}

			gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;

			if (is_polling_needed &&
			    ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
				*is_polling_needed = TRUE;
			}

			gpe_enabled_count++;
		}
	}

	if (gpe_enabled_count) {
		ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
			   gpe_enabled_count, (u32)gpe_block->block_base_number,
			   (u32)(gpe_block->block_base_number +
				 (gpe_block->gpe_count - 1))));
	}

	gpe_block->initialized = TRUE;

	return_ACPI_STATUS(AE_OK);
}

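/*
 * Editor's note (illustrative, hedged): this routine has the
 * acpi_gpe_callback signature and is normally applied to every GPE block by
 * walking the interrupt/block lists, e.g. roughly:
 *
 *	u8 is_polling_needed = FALSE;
 *
 *	(void)acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block,
 *				    &is_polling_needed);
 *
 * The walker name is taken from the ACPICA sources (evgpeutil.c) and is an
 * assumption as far as this file is concerned.
 */
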
#endif				/* !ACPI_REDUCED_HARDWARE */