/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"
#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
static void __init acpi_osi_setup_late(void);
/*
 * The story of _OSI(Linux)
 *
 * From pre-history through Linux-2.6.22, Linux responded TRUE upon a BIOS
 * OSI(Linux) query.
 *
 * Unfortunately, reference BIOS writers got wind of this and put
 * OSI(Linux) in their example code, quickly exposing this string as
 * ill-conceived and opening the door to an un-bounded number of BIOS
 * incompatibilities.
 *
 * For example, OSI(Linux) was used on resume to re-POST a video card on
 * one system, because Linux at that time could not do a speedy restore in
 * its native driver.  But then upon gaining quick native restore
 * capability, Linux had no way to tell the BIOS to skip the time-consuming
 * POST -- putting Linux at a permanent performance disadvantage.  On
 * another system, the BIOS writer used OSI(Linux) to infer native OS
 * support for IPMI!  On other systems, OSI(Linux) simply got in the way of
 * Linux claiming to be compatible with other operating systems, exposing
 * BIOS issues such as skipped device initialization.
 *
 * So "Linux" turned out to be a really poor choice of OSI string, and from
 * Linux-2.6.23 onward we respond FALSE.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.  Linux will
 * complain on the console when it sees it, and return FALSE.  To get Linux
 * to return TRUE for your system will require a kernel source update to
 * add a DMI entry, or boot with "acpi_osi=Linux".
 */
static struct osi_linux {
	unsigned int	enable:1;
	unsigned int	dmi:1;
	unsigned int	cmdline:1;
	u8		default_disabling;
} osi_linux = {0, 0, 0, 0};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
	if (!strcmp("Linux", interface)) {

		printk_once(KERN_NOTICE FW_BUG PREFIX
			    "BIOS _OSI(Linux) query %s%s\n",
			    osi_linux.enable ? "honored" : "ignored",
			    osi_linux.cmdline ? " via cmdline" :
			    osi_linux.dmi ? " via DMI" : "");
	}

	if (!strcmp("Darwin", interface)) {
		/*
		 * Apple firmware will behave poorly if it receives positive
		 * answers to "Darwin" and any other OS. Respond positively
		 * to Darwin and then disable all other vendor strings.
		 */
		acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
		supported = ACPI_UINT32_MAX;
	}

	return supported;
}
static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	printk(KERN_CONT "%s", buffer);
#endif
}
#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;

	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
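
/*
 * Usage note (editor's addition, not in the original source): "acpi_rsdp="
 * lets a kernel that cannot discover the RSDP itself (e.g. one started by
 * kexec for kdump on an EFI system) be told the physical address the first
 * kernel found.  The address below is purely illustrative:
 *
 *	acpi_rsdp=0x564e9000
 */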
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}
#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
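
/*
 * Illustrative sketch (editor's addition, not in the original source): a
 * typical caller pairs acpi_os_map_iomem() with acpi_os_unmap_iomem() of
 * the same size, so the refcounted entry on 'acpi_ioremaps' is dropped
 * again.  The register block address passed in is hypothetical.
 */
static u32 __maybe_unused acpi_osl_example_read_reg(acpi_physical_address phys)
{
	void __iomem *virt;
	u32 val;

	virt = acpi_os_map_iomem(phys, sizeof(val));	/* refcount++ or new map */
	if (!virt)
		return 0;
	val = readl(virt);
	acpi_os_unmap_iomem(virt, sizeof(val));		/* refcount--, maybe unmap */
	return val;
}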
void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}
static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);
void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif
#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}
#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

static u64 acpi_tables_addr;
static int all_tables_size;
/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));
	return sum;
}
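
/*
 * Worked example (editor's addition): a well-formed ACPI table is laid out
 * so that all of its bytes, including the 'checksum' field itself, sum to
 * zero modulo 256.  A non-zero result from acpi_table_checksum() therefore
 * means the table is corrupt, which is exactly how it is used below:
 *
 *	if (acpi_table_checksum(file.data, table->length))
 *		reject the override;
 */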
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];

#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * work fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap only can remap 256k one time. If we map all
	 * tables one time, we will hit the limit. Need to map chunks
	 * one by one during copying the same as that in relocate_initrd().
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
					       clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
		       struct acpi_table_header **new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
	*table_length = 0;
	*address = 0;
	return AE_OK;
#else
	int table_offset = 0;
	struct acpi_table_header *table;

	*table_length = 0;
	*address = 0;

	if (!acpi_tables_addr)
		return AE_OK;

	do {
		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
			WARN_ON(1);
			return AE_OK;
		}

		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);

		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_offset += table->length;

		if (memcmp(existing_table->signature, table->signature, 4)) {
			acpi_os_unmap_memory(table,
					     ACPI_HEADER_SIZE);
			continue;
		}

		/* Only override tables with matching oem id */
		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table,
					     ACPI_HEADER_SIZE);
			continue;
		}

		table_offset -= table->length;
		*table_length = table->length;
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		*address = acpi_tables_addr + table_offset;
		break;
	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

	if (*address != 0)
		acpi_table_taint(existing_table);
	return AE_OK;
#endif
}
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}
acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}
/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}
/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}
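
/*
 * Worked example (editor's addition): with 100ns granularity, one second
 * equals 10,000,000 ticks, so two readings taken one second apart differ
 * by 10,000,000.
 */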
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
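
/*
 * Illustrative sketch (editor's addition, not in the original source): a
 * hypothetical ACPICA-style caller passes a callback and its argument; the
 * DPC wrapper allocated above is freed by acpi_os_execute_deferred() after
 * the callback runs, so the caller fires and forgets:
 *
 *	static void example_callback(void *context)
 *	{
 *		// runs later in workqueue context, on CPU 0, may sleep
 *	}
 *
 *	acpi_os_execute(OSL_NOTIFY_HANDLER, example_callback, NULL);
 */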
void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};
static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}
acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}
bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}
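
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * ACPICA drives these semaphore hooks through the opaque acpi_handle,
 * roughly as follows:
 *
 *	acpi_handle sem;
 *
 *	acpi_os_create_semaphore(1, 1, &sem);
 *	acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER);
 *	// ...critical section...
 *	acpi_os_signal_semaphore(sem, 1);
 *	acpi_os_delete_semaphore(sem);
 */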
/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}
/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}
/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return AE_OK;
}
acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}
static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;	/* skip quotes */
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);
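
/*
 * Usage note (editor's addition): overriding the _OS name on the command
 * line, e.g. to humor AML that probes for a specific OS:
 *
 *	acpi_os_name="Microsoft Windows NT"
 */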
#define OSI_STRING_LENGTH_MAX 64	/* arbitrary */
#define OSI_STRING_ENTRIES_MAX 16	/* arbitrary */

struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;
};

static struct osi_setup_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};
void __init acpi_osi_setup(char *str)
{
	struct osi_setup_entry *osi;
	bool enable = true;
	int i;

	if (!acpi_gbl_create_osi_method)
		return;

	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
		return;
	}

	if (*str == '!') {
		str++;
		if (*str == '\0') {
			/* Do not override acpi_osi=!* */
			if (!osi_linux.default_disabling)
				osi_linux.default_disabling =
					ACPI_DISABLE_ALL_VENDOR_STRINGS;
			return;
		} else if (*str == '*') {
			osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
				osi = &osi_setup_entries[i];
				osi->enable = false;
			}
			return;
		}
		enable = false;
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		if (!strcmp(osi->string, str)) {
			osi->enable = enable;
			break;
		} else if (osi->string[0] == '\0') {
			osi->enable = enable;
			strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
			break;
		}
	}
}
static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable != enable)
		osi_linux.enable = enable;

	if (osi_linux.enable)
		acpi_osi_setup("Linux");
	else
		acpi_osi_setup("!Linux");

	return;
}
static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;	/* cmdline set the default and override DMI */
	osi_linux.dmi = 0;
	set_osi_linux(enable);

	return;
}
void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
	set_osi_linux(enable);

	return;
}
/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
static void __init acpi_osi_setup_late(void)
{
	struct osi_setup_entry *osi;
	char *str;
	int i;
	acpi_status status;

	if (osi_linux.default_disabling) {
		status = acpi_update_interfaces(osi_linux.default_disabling);

		if (ACPI_SUCCESS(status))
			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
				osi_linux.default_disabling ==
				ACPI_DISABLE_ALL_STRINGS ?
				" and feature groups" : "");
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		str = osi->string;

		if (*str == '\0')
			break;
		if (osi->enable) {
			status = acpi_install_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}
static int __init osi_setup(char *str)
{
	if (str && !strcmp("Linux", str))
		acpi_cmdline_osi_linux(1);
	else if (str && !strcmp("!Linux", str))
		acpi_cmdline_osi_linux(0);
	else
		acpi_osi_setup(str);

	return 1;
}

__setup("acpi_osi=", osi_setup);
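
/*
 * Usage notes (editor's addition): command line forms accepted above and
 * by acpi_osi_setup():
 *
 *	acpi_osi=			disable the _OSI method entirely
 *	acpi_osi=!			disable all built-in OS vendor strings
 *	acpi_osi=!*			disable all strings
 *	acpi_osi="Windows 2006"		answer TRUE to _OSI(Windows 2006)
 *	acpi_osi=!"Windows 2006"	answer FALSE to _OSI(Windows 2006)
 *	acpi_osi=Linux			answer TRUE to _OSI(Linux)
 */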
/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end   = start + n - 1,
		.name  = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
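
/*
 * Illustrative sketch (editor's addition, not in the original source): a
 * legacy driver would typically check for an ACPI OperationRegion conflict
 * before claiming an I/O range (the port base, length and name below are
 * hypothetical):
 */
static int __maybe_unused acpi_osl_example_probe(void)
{
	if (acpi_check_region(0x295, 8, "example-hwmon"))
		return -EBUSY;	/* strict mode: ACPI owns the range */

	/* in lax mode a conflict is only warned about; request_region() next */
	return 0;
}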
/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
bool acpi_osi_is_win8(void)
{
	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);
/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}
#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif
static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);
acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}
acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}
void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
					u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}