// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 * Copyright (C) 2000 - 2018, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Xrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/
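/*
 * Illustrative note (not part of the upstream ACPICA text): the list
 * manipulated by this function is a doubly-linked list of GPE blocks hanging
 * off the per-interrupt xrupt structure, roughly:
 *
 *	gpe_xrupt_block->gpe_block_list_head --> [block A] <--> [block B]
 *
 * The new block is appended at the tail while acpi_gbl_gpe_lock is held, so
 * all GPE blocks that share the same interrupt_number end up on the list of
 * a single xrupt entry.
 */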
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number)
{
	struct acpi_gpe_block_info *next_gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_install_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status =
	    acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	} else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable all GPEs in this block */

	status =
	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);

	if (!gpe_block->previous && !gpe_block->next) {

		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	} else {
		/* Remove the block on this interrupt with lock */

		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		} else {
			gpe_block->xrupt_block->gpe_block_list_head =
			    gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}

		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	}

	acpi_current_gpe_count -= gpe_block->gpe_count;

	/* Free the gpe_block */

	ACPI_FREE(gpe_block->register_info);
	ACPI_FREE(gpe_block->event_info);
	ACPI_FREE(gpe_block);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 *
 ******************************************************************************/
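/*
 * Worked example (hypothetical values, for illustration only): for a GPE
 * block with address = 0x60, register_count = 2 and block_base_number = 0,
 * the initialization loop below produces:
 *
 *	register 0: status at 0x60, enable at 0x62, GPEs 0x00-0x07
 *	register 1: status at 0x61, enable at 0x63, GPEs 0x08-0x0F
 *
 * That is, the status registers occupy the first half of the block, the
 * enable registers the second half, and gpe_count = 2 * ACPI_GPE_REGISTER_WIDTH
 * = 16 event_info entries are allocated for the block.
 */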
static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_register_info *gpe_register_info = NULL;
	struct acpi_gpe_event_info *gpe_event_info = NULL;
	struct acpi_gpe_event_info *this_event;
	struct acpi_gpe_register_info *this_register;
	u32 i;
	u32 j;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

	/* Allocate the GPE register information block */

	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
						 register_count *
						 sizeof(struct
							acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeRegisterInfo table"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register. Initialization to zeros is sufficient.
	 */
	gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
					      sizeof(struct
						     acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeEventInfo table"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures. A goal of these
	 * tables is to hide the fact that there are two separate GPE register
	 * sets in a given GPE hardware block, the status registers occupy the
	 * first half, and the enable registers occupy the second half.
	 */
	this_register = gpe_register_info;
	this_event = gpe_event_info;

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number = (u16)
		    (gpe_block->block_base_number +
		     (i * ACPI_GPE_REGISTER_WIDTH));

		this_register->status_address.address = gpe_block->address + i;

		this_register->enable_address.address =
		    gpe_block->address + i + gpe_block->register_count;

		this_register->status_address.space_id = gpe_block->space_id;
		this_register->enable_address.space_id = gpe_block->space_id;
		this_register->status_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->enable_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->status_address.bit_offset = 0;
		this_register->enable_address.bit_offset = 0;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			this_event->gpe_number =
			    (u8) (this_register->base_gpe_number + j);
			this_event->register_info = this_register;
			this_event++;
		}

		/* Disable all GPEs within this register */

		status = acpi_hw_write(0x00, &this_register->enable_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Clear any pending GPE events within this register */

		status = acpi_hw_write(0xFF, &this_register->status_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS(AE_OK);

error_exit:
	if (gpe_register_info) {
		ACPI_FREE(gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_FREE(gpe_event_info);
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 *              address             - Address of the block's GPE registers
 *              space_id            - Address space where the registers reside
 *              register_count      - Number of GPE register pairs in the block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number    - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/
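/*
 * Usage sketch (illustrative only; the FADT field and global names below are
 * taken from other parts of ACPICA and are assumptions here, not defined in
 * this file): the GPE initialization code creates the FADT GPE block 0
 * roughly like this, with one register pair per two bytes of the GPE0 block
 * length and the block wired to the SCI interrupt:
 *
 *	status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
 *					  acpi_gbl_FADT.xgpe0_block.address,
 *					  acpi_gbl_FADT.xgpe0_block.space_id,
 *					  acpi_gbl_FADT.gpe0_block_length / 2,
 *					  0, acpi_gbl_FADT.sci_interrupt,
 *					  &acpi_gbl_gpe_fadt_blocks[0]);
 *
 * On success the block is installed on the interrupt's block list, all of its
 * GPEs are disabled and cleared, and any _Lxx/_Exx methods found under
 * gpe_device are associated with their GPEs.
 */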
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 u64 address,
			 u8 space_id,
			 u32 register_count,
			 u16 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->address = address;
	gpe_block->space_id = space_id;
	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->initialized = FALSE;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block->register_info);
		ACPI_FREE(gpe_block->event_info);
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	acpi_gbl_all_gpes_initialized = FALSE;
	/* Find all GPE methods (_Lxx or _Exx) for this block */
	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.execute_by_owner_id = FALSE;

	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_match_gpe_method, NULL,
					&walk_info, NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "    Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n",
			      (u32)gpe_block->block_base_number,
			      (u32)(gpe_block->block_base_number +
				    (gpe_block->gpe_count - 1)),
			      gpe_device->name.ascii, gpe_block->register_count,
			      interrupt_number,
			      interrupt_number ==
			      acpi_gbl_FADT.sci_interrupt ? " (SCI)" : ""));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  acpi_gpe_callback
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
 *              associated methods.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/
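/*
 * Background note (from the ACPI specification, not this file): a GPE has a
 * "corresponding method" when the firmware defines a control method named
 * _Lxx (level-triggered) or _Exx (edge-triggered) in the GPE scope for the
 * block (\_GPE for the FADT blocks), where xx is the GPE number in hex.
 * For example, GPE 0x0D would be serviced by _L0D or _E0D. Only such
 * method-dispatched, non-wake GPEs are auto-enabled below; all others must
 * be enabled explicitly via acpi_enable_gpe().
 */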
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			     struct acpi_gpe_block_info *gpe_block,
			     void *context)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_enabled_count;
	u32 gpe_index;
	u32 i;
	u32 j;
	u8 *is_polling_needed = context;
	ACPI_ERROR_ONLY(u32 gpe_number);

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/*
	 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
	 * any GPE blocks that have been initialized already.
	 */
	if (!gpe_block || gpe_block->initialized) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Enable all GPEs that have a corresponding method and have the
	 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
	 * must be enabled via the acpi_enable_gpe() interface.
	 */
	gpe_enabled_count = 0;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

			/* Get the info block for this particular GPE */

			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];
			ACPI_ERROR_ONLY(gpe_number =
					gpe_block->block_base_number +
					gpe_index);
			gpe_event_info->flags |= ACPI_GPE_INITIALIZED;

			/*
			 * Ignore GPEs that have no corresponding _Lxx/_Exx method
			 * and GPEs that are used for wakeup
			 */
			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
			     ACPI_GPE_DISPATCH_METHOD)
			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
				continue;
			}

			status = acpi_ev_add_gpe_reference(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not enable GPE 0x%02X",
						gpe_number));
				continue;
			}

			gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;

			if (is_polling_needed &&
			    ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
				*is_polling_needed = TRUE;
			}

			gpe_enabled_count++;
		}
	}

	if (gpe_enabled_count) {
		ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
			   gpe_enabled_count, (u32)gpe_block->block_base_number,
			   (u32)(gpe_block->block_base_number +
				 (gpe_block->gpe_count - 1))));
	}

	gpe_block->initialized = TRUE;

	return_ACPI_STATUS(AE_OK);
}
#endif				/* !ACPI_REDUCED_HARDWARE */