1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * scan.c - support for transforming the ACPI namespace into individual objects
4 */
6 #include <linux/module.h>
7 #include <linux/init.h>
8 #include <linux/slab.h>
9 #include <linux/kernel.h>
10 #include <linux/acpi.h>
11 #include <linux/acpi_iort.h>
12 #include <linux/signal.h>
13 #include <linux/kthread.h>
14 #include <linux/dmi.h>
15 #include <linux/nls.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/platform_data/x86/apple.h>
19 #include <asm/pgtable.h>
21 #include "internal.h"
23 #define _COMPONENT ACPI_BUS_COMPONENT
24 ACPI_MODULE_NAME("scan");
25 extern struct acpi_device *acpi_root;
27 #define ACPI_BUS_CLASS "system_bus"
28 #define ACPI_BUS_HID "LNXSYBUS"
29 #define ACPI_BUS_DEVICE_NAME "System Bus"
31 #define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
33 #define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page)
35 static const char *dummy_hid = "device";
37 static LIST_HEAD(acpi_dep_list);
38 static DEFINE_MUTEX(acpi_dep_list_lock);
39 LIST_HEAD(acpi_bus_id_list);
40 static DEFINE_MUTEX(acpi_scan_lock);
41 static LIST_HEAD(acpi_scan_handlers_list);
42 DEFINE_MUTEX(acpi_device_lock);
43 LIST_HEAD(acpi_wakeup_device_list);
44 static DEFINE_MUTEX(acpi_hp_context_lock);
47 * The UART device described by the SPCR table is the only object which needs
48 * special-casing. Everything else is covered by ACPI namespace paths in the STAO
49 * table.
51 static u64 spcr_uart_addr;
53 struct acpi_dep_data {
54 struct list_head node;
55 acpi_handle master;
56 acpi_handle slave;
59 void acpi_scan_lock_acquire(void)
61 mutex_lock(&acpi_scan_lock);
63 EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);
65 void acpi_scan_lock_release(void)
67 mutex_unlock(&acpi_scan_lock);
69 EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
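/*
 * Illustrative sketch, not part of this file: code that rescans the ACPI
 * namespace, such as the table-load handler near the end of this file,
 * brackets the scan with these helpers so that it is serialized against
 * other scan and hotplug activity:
 *
 *	acpi_scan_lock_acquire();
 *	acpi_bus_scan(ACPI_ROOT_OBJECT);
 *	acpi_scan_lock_release();
 */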
71 void acpi_lock_hp_context(void)
73 mutex_lock(&acpi_hp_context_lock);
76 void acpi_unlock_hp_context(void)
78 mutex_unlock(&acpi_hp_context_lock);
81 void acpi_initialize_hp_context(struct acpi_device *adev,
82 struct acpi_hotplug_context *hp,
83 int (*notify)(struct acpi_device *, u32),
84 void (*uevent)(struct acpi_device *, u32))
86 acpi_lock_hp_context();
87 hp->notify = notify;
88 hp->uevent = uevent;
89 acpi_set_hp_context(adev, hp);
90 acpi_unlock_hp_context();
92 EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
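/*
 * Illustrative sketch with hypothetical names (my_hp, my_notify and my_uevent
 * are not part of this file): a hotplug-aware driver might install its
 * callbacks through this helper as follows:
 *
 *	static struct acpi_hotplug_context my_hp;
 *
 *	static int my_notify(struct acpi_device *adev, u32 event)
 *	{
 *		return 0;	// react to bus/device check or eject requests
 *	}
 *
 *	static void my_uevent(struct acpi_device *adev, u32 event) { }
 *
 *	acpi_initialize_hp_context(adev, &my_hp, my_notify, my_uevent);
 */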
94 int acpi_scan_add_handler(struct acpi_scan_handler *handler)
96 if (!handler)
97 return -EINVAL;
99 list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
100 return 0;
103 int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
104 const char *hotplug_profile_name)
106 int error;
108 error = acpi_scan_add_handler(handler);
109 if (error)
110 return error;
112 acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
113 return 0;
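/*
 * Illustrative sketch with hypothetical names (my_ids, my_attach, my_handler
 * and "my_profile" are not part of this file): a scan handler is an ID table
 * plus callbacks registered from an early init path; generic_device_handler
 * further down is an in-file user of the plain acpi_scan_add_handler():
 *
 *	static int my_attach(struct acpi_device *adev,
 *			     const struct acpi_device_id *id);
 *
 *	static const struct acpi_device_id my_ids[] = {
 *		{"XYZ0001", },
 *		{"", },
 *	};
 *
 *	static struct acpi_scan_handler my_handler = {
 *		.ids = my_ids,
 *		.attach = my_attach,
 *	};
 *
 *	acpi_scan_add_handler_with_hotplug(&my_handler, "my_profile");
 */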
116 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
118 struct acpi_device_physical_node *pn;
119 bool offline = true;
120 char *envp[] = { "EVENT=offline", NULL };
123 * acpi_container_offline() calls this for all of the container's
124 * children under the container's physical_node_lock lock.
126 mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
128 list_for_each_entry(pn, &adev->physical_node_list, node)
129 if (device_supports_offline(pn->dev) && !pn->dev->offline) {
130 if (uevent)
131 kobject_uevent_env(&pn->dev->kobj, KOBJ_CHANGE, envp);
133 offline = false;
134 break;
137 mutex_unlock(&adev->physical_node_lock);
138 return offline;
141 static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
142 void **ret_p)
144 struct acpi_device *device = NULL;
145 struct acpi_device_physical_node *pn;
146 bool second_pass = (bool)data;
147 acpi_status status = AE_OK;
149 if (acpi_bus_get_device(handle, &device))
150 return AE_OK;
152 if (device->handler && !device->handler->hotplug.enabled) {
153 *ret_p = &device->dev;
154 return AE_SUPPORT;
157 mutex_lock(&device->physical_node_lock);
159 list_for_each_entry(pn, &device->physical_node_list, node) {
160 int ret;
162 if (second_pass) {
163 /* Skip devices offlined by the first pass. */
164 if (pn->put_online)
165 continue;
166 } else {
167 pn->put_online = false;
169 ret = device_offline(pn->dev);
170 if (ret >= 0) {
171 pn->put_online = !ret;
172 } else {
173 *ret_p = pn->dev;
174 if (second_pass) {
175 status = AE_ERROR;
176 break;
181 mutex_unlock(&device->physical_node_lock);
183 return status;
186 static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
187 void **ret_p)
189 struct acpi_device *device = NULL;
190 struct acpi_device_physical_node *pn;
192 if (acpi_bus_get_device(handle, &device))
193 return AE_OK;
195 mutex_lock(&device->physical_node_lock);
197 list_for_each_entry(pn, &device->physical_node_list, node)
198 if (pn->put_online) {
199 device_online(pn->dev);
200 pn->put_online = false;
203 mutex_unlock(&device->physical_node_lock);
205 return AE_OK;
208 static int acpi_scan_try_to_offline(struct acpi_device *device)
210 acpi_handle handle = device->handle;
211 struct device *errdev = NULL;
212 acpi_status status;
215 * Carry out two passes here and ignore errors in the first pass,
216 * because if the devices in question are memory blocks and
217 * CONFIG_MEMCG is set, one of the blocks may hold data structures
218 * that the other blocks depend on, but it is not known in advance which
219 * block holds them.
221 * If the first pass is successful, the second one isn't needed, though.
223 status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
224 NULL, acpi_bus_offline, (void *)false,
225 (void **)&errdev);
226 if (status == AE_SUPPORT) {
227 dev_warn(errdev, "Offline disabled.\n");
228 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
229 acpi_bus_online, NULL, NULL, NULL);
230 return -EPERM;
232 acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
233 if (errdev) {
234 errdev = NULL;
235 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
236 NULL, acpi_bus_offline, (void *)true,
237 (void **)&errdev);
238 if (!errdev)
239 acpi_bus_offline(handle, 0, (void *)true,
240 (void **)&errdev);
242 if (errdev) {
243 dev_warn(errdev, "Offline failed.\n");
244 acpi_bus_online(handle, 0, NULL, NULL);
245 acpi_walk_namespace(ACPI_TYPE_ANY, handle,
246 ACPI_UINT32_MAX, acpi_bus_online,
247 NULL, NULL, NULL);
248 return -EBUSY;
251 return 0;
254 static int acpi_scan_hot_remove(struct acpi_device *device)
256 acpi_handle handle = device->handle;
257 unsigned long long sta;
258 acpi_status status;
260 if (device->handler && device->handler->hotplug.demand_offline) {
261 if (!acpi_scan_is_offline(device, true))
262 return -EBUSY;
263 } else {
264 int error = acpi_scan_try_to_offline(device);
265 if (error)
266 return error;
269 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
270 "Hot-removing device %s...\n", dev_name(&device->dev)));
272 acpi_bus_trim(device);
274 acpi_evaluate_lck(handle, 0);
276 * TBD: _EJD support.
278 status = acpi_evaluate_ej0(handle);
279 if (status == AE_NOT_FOUND)
280 return -ENODEV;
281 else if (ACPI_FAILURE(status))
282 return -EIO;
285 * Verify if eject was indeed successful. If not, log an error
286 * message. No need to call _OST since _EJ0 call was made OK.
288 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
289 if (ACPI_FAILURE(status)) {
290 acpi_handle_warn(handle,
291 "Status check after eject failed (0x%x)\n", status);
292 } else if (sta & ACPI_STA_DEVICE_ENABLED) {
293 acpi_handle_warn(handle,
294 "Eject incomplete - status 0x%llx\n", sta);
297 return 0;
300 static int acpi_scan_device_not_present(struct acpi_device *adev)
302 if (!acpi_device_enumerated(adev)) {
303 dev_warn(&adev->dev, "Still not present\n");
304 return -EALREADY;
306 acpi_bus_trim(adev);
307 return 0;
310 static int acpi_scan_device_check(struct acpi_device *adev)
312 int error;
314 acpi_bus_get_status(adev);
315 if (adev->status.present || adev->status.functional) {
317 * This function is only called for device objects for which
318 * matching scan handlers exist. The only situation in which
319 * the scan handler is not attached to this device object yet
320 * is when the device has just appeared (either it wasn't
321 * present at all before or it was removed and then added
322 * again).
324 if (adev->handler) {
325 dev_warn(&adev->dev, "Already enumerated\n");
326 return -EALREADY;
328 error = acpi_bus_scan(adev->handle);
329 if (error) {
330 dev_warn(&adev->dev, "Namespace scan failure\n");
331 return error;
333 if (!adev->handler) {
334 dev_warn(&adev->dev, "Enumeration failure\n");
335 error = -ENODEV;
337 } else {
338 error = acpi_scan_device_not_present(adev);
340 return error;
343 static int acpi_scan_bus_check(struct acpi_device *adev)
345 struct acpi_scan_handler *handler = adev->handler;
346 struct acpi_device *child;
347 int error;
349 acpi_bus_get_status(adev);
350 if (!(adev->status.present || adev->status.functional)) {
351 acpi_scan_device_not_present(adev);
352 return 0;
354 if (handler && handler->hotplug.scan_dependent)
355 return handler->hotplug.scan_dependent(adev);
357 error = acpi_bus_scan(adev->handle);
358 if (error) {
359 dev_warn(&adev->dev, "Namespace scan failure\n");
360 return error;
362 list_for_each_entry(child, &adev->children, node) {
363 error = acpi_scan_bus_check(child);
364 if (error)
365 return error;
367 return 0;
370 static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
372 switch (type) {
373 case ACPI_NOTIFY_BUS_CHECK:
374 return acpi_scan_bus_check(adev);
375 case ACPI_NOTIFY_DEVICE_CHECK:
376 return acpi_scan_device_check(adev);
377 case ACPI_NOTIFY_EJECT_REQUEST:
378 case ACPI_OST_EC_OSPM_EJECT:
379 if (adev->handler && !adev->handler->hotplug.enabled) {
380 dev_info(&adev->dev, "Eject disabled\n");
381 return -EPERM;
383 acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
384 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
385 return acpi_scan_hot_remove(adev);
387 return -EINVAL;
390 void acpi_device_hotplug(struct acpi_device *adev, u32 src)
392 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
393 int error = -ENODEV;
395 lock_device_hotplug();
396 mutex_lock(&acpi_scan_lock);
399 * The device object's ACPI handle cannot become invalid as long as we
400 * are holding acpi_scan_lock, but it might have become invalid before
401 * that lock was acquired.
403 if (adev->handle == INVALID_ACPI_HANDLE)
404 goto err_out;
406 if (adev->flags.is_dock_station) {
407 error = dock_notify(adev, src);
408 } else if (adev->flags.hotplug_notify) {
409 error = acpi_generic_hotplug_event(adev, src);
410 } else {
411 int (*notify)(struct acpi_device *, u32);
413 acpi_lock_hp_context();
414 notify = adev->hp ? adev->hp->notify : NULL;
415 acpi_unlock_hp_context();
417 * There may be additional notify handlers for device objects
418 * without the .event() callback, so ignore them here.
420 if (notify)
421 error = notify(adev, src);
422 else
423 goto out;
425 switch (error) {
426 case 0:
427 ost_code = ACPI_OST_SC_SUCCESS;
428 break;
429 case -EPERM:
430 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
431 break;
432 case -EBUSY:
433 ost_code = ACPI_OST_SC_DEVICE_BUSY;
434 break;
435 default:
436 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
437 break;
440 err_out:
441 acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
443 out:
444 acpi_bus_put_acpi_device(adev);
445 mutex_unlock(&acpi_scan_lock);
446 unlock_device_hotplug();
449 static void acpi_free_power_resources_lists(struct acpi_device *device)
451 int i;
453 if (device->wakeup.flags.valid)
454 acpi_power_resources_list_free(&device->wakeup.resources);
456 if (!device->power.flags.power_resources)
457 return;
459 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
460 struct acpi_device_power_state *ps = &device->power.states[i];
461 acpi_power_resources_list_free(&ps->resources);
465 static void acpi_device_release(struct device *dev)
467 struct acpi_device *acpi_dev = to_acpi_device(dev);
469 acpi_free_properties(acpi_dev);
470 acpi_free_pnp_ids(&acpi_dev->pnp);
471 acpi_free_power_resources_lists(acpi_dev);
472 kfree(acpi_dev);
475 static void acpi_device_del(struct acpi_device *device)
477 struct acpi_device_bus_id *acpi_device_bus_id;
479 mutex_lock(&acpi_device_lock);
480 if (device->parent)
481 list_del(&device->node);
483 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
484 if (!strcmp(acpi_device_bus_id->bus_id,
485 acpi_device_hid(device))) {
486 if (acpi_device_bus_id->instance_no > 0)
487 acpi_device_bus_id->instance_no--;
488 else {
489 list_del(&acpi_device_bus_id->node);
490 kfree(acpi_device_bus_id);
492 break;
495 list_del(&device->wakeup_list);
496 mutex_unlock(&acpi_device_lock);
498 acpi_power_add_remove_device(device, false);
499 acpi_device_remove_files(device);
500 if (device->remove)
501 device->remove(device);
503 device_del(&device->dev);
506 static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain);
508 static LIST_HEAD(acpi_device_del_list);
509 static DEFINE_MUTEX(acpi_device_del_lock);
511 static void acpi_device_del_work_fn(struct work_struct *work_not_used)
513 for (;;) {
514 struct acpi_device *adev;
516 mutex_lock(&acpi_device_del_lock);
518 if (list_empty(&acpi_device_del_list)) {
519 mutex_unlock(&acpi_device_del_lock);
520 break;
522 adev = list_first_entry(&acpi_device_del_list,
523 struct acpi_device, del_list);
524 list_del(&adev->del_list);
526 mutex_unlock(&acpi_device_del_lock);
528 blocking_notifier_call_chain(&acpi_reconfig_chain,
529 ACPI_RECONFIG_DEVICE_REMOVE, adev);
531 acpi_device_del(adev);
533 * Drop references to all power resources that might have been
534 * used by the device.
536 acpi_power_transition(adev, ACPI_STATE_D3_COLD);
537 put_device(&adev->dev);
542 * acpi_scan_drop_device - Drop an ACPI device object.
543 * @handle: Handle of an ACPI namespace node, not used.
544 * @context: Address of the ACPI device object to drop.
546 * This is invoked by acpi_ns_delete_node() during the removal of the ACPI
547 * namespace node the device object pointed to by @context is attached to.
549 * The unregistration is carried out asynchronously to avoid running
550 * acpi_device_del() under ACPICA's namespace mutex and the list is used to
551 * ensure the correct ordering (the device objects must be unregistered in the
552 * same order in which the corresponding namespace nodes are deleted).
554 static void acpi_scan_drop_device(acpi_handle handle, void *context)
556 static DECLARE_WORK(work, acpi_device_del_work_fn);
557 struct acpi_device *adev = context;
559 mutex_lock(&acpi_device_del_lock);
562 * Use the ACPI hotplug workqueue which is ordered, so this work item
563 * won't run after any hotplug work items submitted subsequently. That
564 * prevents attempts to register device objects identical to those being
565 * deleted from happening concurrently (such attempts result from
566 * hotplug events handled via the ACPI hotplug workqueue). It also will
567 * run after all of the work items submitted previously, which helps
568 * those work items to ensure that they are not accessing stale device
569 * objects.
571 if (list_empty(&acpi_device_del_list))
572 acpi_queue_hotplug_work(&work);
574 list_add_tail(&adev->del_list, &acpi_device_del_list);
575 /* Make acpi_ns_validate_handle() return NULL for this handle. */
576 adev->handle = INVALID_ACPI_HANDLE;
578 mutex_unlock(&acpi_device_del_lock);
581 static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
582 void (*callback)(void *))
584 acpi_status status;
586 if (!device)
587 return -EINVAL;
589 status = acpi_get_data_full(handle, acpi_scan_drop_device,
590 (void **)device, callback);
591 if (ACPI_FAILURE(status) || !*device) {
592 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
593 handle));
594 return -ENODEV;
596 return 0;
599 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
601 return acpi_get_device_data(handle, device, NULL);
603 EXPORT_SYMBOL(acpi_bus_get_device);
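/*
 * Illustrative sketch, not part of this file: the typical lookup pattern for
 * translating an acpi_handle into its struct acpi_device, if one is attached:
 *
 *	struct acpi_device *adev = NULL;
 *
 *	if (acpi_bus_get_device(handle, &adev))
 *		return -ENODEV;		// no device object for this handle
 */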
605 static void get_acpi_device(void *dev)
607 if (dev)
608 get_device(&((struct acpi_device *)dev)->dev);
611 struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
613 struct acpi_device *adev = NULL;
615 acpi_get_device_data(handle, &adev, get_acpi_device);
616 return adev;
619 void acpi_bus_put_acpi_device(struct acpi_device *adev)
621 put_device(&adev->dev);
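/*
 * Illustrative sketch, not part of this file: unlike acpi_bus_get_device(),
 * acpi_bus_get_acpi_device() takes a reference on the device object, so the
 * two calls must be paired:
 *
 *	struct acpi_device *adev = acpi_bus_get_acpi_device(handle);
 *
 *	if (adev) {
 *		// use adev here
 *		acpi_bus_put_acpi_device(adev);
 *	}
 */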
624 int acpi_device_add(struct acpi_device *device,
625 void (*release)(struct device *))
627 int result;
628 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
629 int found = 0;
631 if (device->handle) {
632 acpi_status status;
634 status = acpi_attach_data(device->handle, acpi_scan_drop_device,
635 device);
636 if (ACPI_FAILURE(status)) {
637 acpi_handle_err(device->handle,
638 "Unable to attach device data\n");
639 return -ENODEV;
644 * Linkage
645 * -------
646 * Link this device to its parent and siblings.
648 INIT_LIST_HEAD(&device->children);
649 INIT_LIST_HEAD(&device->node);
650 INIT_LIST_HEAD(&device->wakeup_list);
651 INIT_LIST_HEAD(&device->physical_node_list);
652 INIT_LIST_HEAD(&device->del_list);
653 mutex_init(&device->physical_node_lock);
655 new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
656 if (!new_bus_id) {
657 pr_err(PREFIX "Memory allocation error\n");
658 result = -ENOMEM;
659 goto err_detach;
662 mutex_lock(&acpi_device_lock);
664 * Find suitable bus_id and instance number in acpi_bus_id_list
665 * If failed, create one and link it into acpi_bus_id_list
667 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
668 if (!strcmp(acpi_device_bus_id->bus_id,
669 acpi_device_hid(device))) {
670 acpi_device_bus_id->instance_no++;
671 found = 1;
672 kfree(new_bus_id);
673 break;
676 if (!found) {
677 acpi_device_bus_id = new_bus_id;
678 strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
679 acpi_device_bus_id->instance_no = 0;
680 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
682 dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
684 if (device->parent)
685 list_add_tail(&device->node, &device->parent->children);
687 if (device->wakeup.flags.valid)
688 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
689 mutex_unlock(&acpi_device_lock);
691 if (device->parent)
692 device->dev.parent = &device->parent->dev;
693 device->dev.bus = &acpi_bus_type;
694 device->dev.release = release;
695 result = device_add(&device->dev);
696 if (result) {
697 dev_err(&device->dev, "Error registering device\n");
698 goto err;
701 result = acpi_device_setup_files(device);
702 if (result)
703 printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
704 dev_name(&device->dev));
706 return 0;
708 err:
709 mutex_lock(&acpi_device_lock);
710 if (device->parent)
711 list_del(&device->node);
712 list_del(&device->wakeup_list);
713 mutex_unlock(&acpi_device_lock);
715 err_detach:
716 acpi_detach_data(device->handle, acpi_scan_drop_device);
717 return result;
720 /* --------------------------------------------------------------------------
721 Device Enumeration
722 -------------------------------------------------------------------------- */
723 static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
725 struct acpi_device *device = NULL;
726 acpi_status status;
729 * Fixed hardware devices do not appear in the namespace and do not
730 * have handles, but we fabricate acpi_devices for them, so we have
731 * to deal with them specially.
733 if (!handle)
734 return acpi_root;
736 do {
737 status = acpi_get_parent(handle, &handle);
738 if (ACPI_FAILURE(status))
739 return status == AE_NULL_ENTRY ? NULL : acpi_root;
740 } while (acpi_bus_get_device(handle, &device));
741 return device;
744 acpi_status
745 acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
747 acpi_status status;
748 acpi_handle tmp;
749 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
750 union acpi_object *obj;
752 status = acpi_get_handle(handle, "_EJD", &tmp);
753 if (ACPI_FAILURE(status))
754 return status;
756 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
757 if (ACPI_SUCCESS(status)) {
758 obj = buffer.pointer;
759 status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer,
760 ejd);
761 kfree(buffer.pointer);
763 return status;
765 EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
767 static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
769 acpi_handle handle = dev->handle;
770 struct acpi_device_wakeup *wakeup = &dev->wakeup;
771 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
772 union acpi_object *package = NULL;
773 union acpi_object *element = NULL;
774 acpi_status status;
775 int err = -ENODATA;
777 INIT_LIST_HEAD(&wakeup->resources);
779 /* _PRW */
780 status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
781 if (ACPI_FAILURE(status)) {
782 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
783 return err;
786 package = (union acpi_object *)buffer.pointer;
788 if (!package || package->package.count < 2)
789 goto out;
791 element = &(package->package.elements[0]);
792 if (!element)
793 goto out;
795 if (element->type == ACPI_TYPE_PACKAGE) {
796 if ((element->package.count < 2) ||
797 (element->package.elements[0].type !=
798 ACPI_TYPE_LOCAL_REFERENCE)
799 || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
800 goto out;
802 wakeup->gpe_device =
803 element->package.elements[0].reference.handle;
804 wakeup->gpe_number =
805 (u32) element->package.elements[1].integer.value;
806 } else if (element->type == ACPI_TYPE_INTEGER) {
807 wakeup->gpe_device = NULL;
808 wakeup->gpe_number = element->integer.value;
809 } else {
810 goto out;
813 element = &(package->package.elements[1]);
814 if (element->type != ACPI_TYPE_INTEGER)
815 goto out;
817 wakeup->sleep_state = element->integer.value;
819 err = acpi_extract_power_resources(package, 2, &wakeup->resources);
820 if (err)
821 goto out;
823 if (!list_empty(&wakeup->resources)) {
824 int sleep_state;
826 err = acpi_power_wakeup_list_init(&wakeup->resources,
827 &sleep_state);
828 if (err) {
829 acpi_handle_warn(handle, "Retrieving current states "
830 "of wakeup power resources failed\n");
831 acpi_power_resources_list_free(&wakeup->resources);
832 goto out;
834 if (sleep_state < wakeup->sleep_state) {
835 acpi_handle_warn(handle, "Overriding _PRW sleep state "
836 "(S%d) by S%d from power resources\n",
837 (int)wakeup->sleep_state, sleep_state);
838 wakeup->sleep_state = sleep_state;
842 out:
843 kfree(buffer.pointer);
844 return err;
847 static bool acpi_wakeup_gpe_init(struct acpi_device *device)
849 static const struct acpi_device_id button_device_ids[] = {
850 {"PNP0C0C", 0}, /* Power button */
851 {"PNP0C0D", 0}, /* Lid */
852 {"PNP0C0E", 0}, /* Sleep button */
853 {"", 0},
855 struct acpi_device_wakeup *wakeup = &device->wakeup;
856 acpi_status status;
858 wakeup->flags.notifier_present = 0;
860 /* Power button, Lid switch always enable wakeup */
861 if (!acpi_match_device_ids(device, button_device_ids)) {
862 if (!acpi_match_device_ids(device, &button_device_ids[1])) {
863 /* Do not use Lid/sleep button for S5 wakeup */
864 if (wakeup->sleep_state == ACPI_STATE_S5)
865 wakeup->sleep_state = ACPI_STATE_S4;
867 acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
868 device_set_wakeup_capable(&device->dev, true);
869 return true;
872 status = acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
873 wakeup->gpe_number);
874 return ACPI_SUCCESS(status);
877 static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
879 int err;
881 /* Presence of _PRW indicates wake capable */
882 if (!acpi_has_method(device->handle, "_PRW"))
883 return;
885 err = acpi_bus_extract_wakeup_device_power_package(device);
886 if (err) {
887 dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
888 return;
891 device->wakeup.flags.valid = acpi_wakeup_gpe_init(device);
892 device->wakeup.prepare_count = 0;
894 * Call _PSW/_DSW object to disable its ability to wake the sleeping
895 * system for the ACPI device with the _PRW object.
896 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
897 * So it is necessary to call the _DSW object first. Only when it is not
898 * present will the _PSW object be used.
900 err = acpi_device_sleep_wake(device, 0, 0, 0);
901 if (err)
902 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
903 "error in _DSW or _PSW evaluation\n"));
906 static void acpi_bus_init_power_state(struct acpi_device *device, int state)
908 struct acpi_device_power_state *ps = &device->power.states[state];
909 char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
910 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
911 acpi_status status;
913 INIT_LIST_HEAD(&ps->resources);
915 /* Evaluate "_PRx" to get referenced power resources */
916 status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
917 if (ACPI_SUCCESS(status)) {
918 union acpi_object *package = buffer.pointer;
920 if (buffer.length && package
921 && package->type == ACPI_TYPE_PACKAGE
922 && package->package.count)
923 acpi_extract_power_resources(package, 0, &ps->resources);
925 ACPI_FREE(buffer.pointer);
928 /* Evaluate "_PSx" to see if we can do explicit sets */
929 pathname[2] = 'S';
930 if (acpi_has_method(device->handle, pathname))
931 ps->flags.explicit_set = 1;
933 /* State is valid if there are means to put the device into it. */
934 if (!list_empty(&ps->resources) || ps->flags.explicit_set)
935 ps->flags.valid = 1;
937 ps->power = -1; /* Unknown - driver assigned */
938 ps->latency = -1; /* Unknown - driver assigned */
941 static void acpi_bus_get_power_flags(struct acpi_device *device)
943 u32 i;
945 /* Presence of _PS0|_PR0 indicates 'power manageable' */
946 if (!acpi_has_method(device->handle, "_PS0") &&
947 !acpi_has_method(device->handle, "_PR0"))
948 return;
950 device->flags.power_manageable = 1;
953 * Power Management Flags
955 if (acpi_has_method(device->handle, "_PSC"))
956 device->power.flags.explicit_get = 1;
958 if (acpi_has_method(device->handle, "_IRC"))
959 device->power.flags.inrush_current = 1;
961 if (acpi_has_method(device->handle, "_DSW"))
962 device->power.flags.dsw_present = 1;
965 * Enumerate supported power management states
967 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
968 acpi_bus_init_power_state(device, i);
970 INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
972 /* Set the defaults for D0 and D3hot (always supported). */
973 device->power.states[ACPI_STATE_D0].flags.valid = 1;
974 device->power.states[ACPI_STATE_D0].power = 100;
975 device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
978 * Use power resources only if the D0 list of them is populated, because
979 * some platforms may provide _PR3 only to indicate D3cold support and
980 * in those cases the power resources list returned by it may be bogus.
982 if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
983 device->power.flags.power_resources = 1;
985 * D3cold is supported if the D3hot list of power resources is
986 * not empty.
988 if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
989 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
992 if (acpi_bus_init_power(device))
993 device->flags.power_manageable = 0;
996 static void acpi_bus_get_flags(struct acpi_device *device)
998 /* Presence of _STA indicates 'dynamic_status' */
999 if (acpi_has_method(device->handle, "_STA"))
1000 device->flags.dynamic_status = 1;
1002 /* Presence of _RMV indicates 'removable' */
1003 if (acpi_has_method(device->handle, "_RMV"))
1004 device->flags.removable = 1;
1006 /* Presence of _EJD|_EJ0 indicates 'ejectable' */
1007 if (acpi_has_method(device->handle, "_EJD") ||
1008 acpi_has_method(device->handle, "_EJ0"))
1009 device->flags.ejectable = 1;
1012 static void acpi_device_get_busid(struct acpi_device *device)
1014 char bus_id[5] = { '?', 0 };
1015 struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
1016 int i = 0;
1019 * Bus ID
1020 * ------
1021 * The device's Bus ID is simply the object name.
1022 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
1024 if (ACPI_IS_ROOT_DEVICE(device)) {
1025 strcpy(device->pnp.bus_id, "ACPI");
1026 return;
1029 switch (device->device_type) {
1030 case ACPI_BUS_TYPE_POWER_BUTTON:
1031 strcpy(device->pnp.bus_id, "PWRF");
1032 break;
1033 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1034 strcpy(device->pnp.bus_id, "SLPF");
1035 break;
1036 case ACPI_BUS_TYPE_ECDT_EC:
1037 strcpy(device->pnp.bus_id, "ECDT");
1038 break;
1039 default:
1040 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
1041 /* Clean up trailing underscores (if any) */
1042 for (i = 3; i > 1; i--) {
1043 if (bus_id[i] == '_')
1044 bus_id[i] = '\0';
1045 else
1046 break;
1048 strcpy(device->pnp.bus_id, bus_id);
1049 break;
1054 * acpi_ata_match - see if an acpi object is an ATA device
1056 * If an acpi object has one of the ACPI ATA methods defined,
1057 * then we can safely call it an ATA device.
1059 bool acpi_ata_match(acpi_handle handle)
1061 return acpi_has_method(handle, "_GTF") ||
1062 acpi_has_method(handle, "_GTM") ||
1063 acpi_has_method(handle, "_STM") ||
1064 acpi_has_method(handle, "_SDD");
1068 * acpi_bay_match - see if an acpi object is an ejectable driver bay
1070 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
1071 * then we can safely call it an ejectable drive bay
1073 bool acpi_bay_match(acpi_handle handle)
1075 acpi_handle phandle;
1077 if (!acpi_has_method(handle, "_EJ0"))
1078 return false;
1079 if (acpi_ata_match(handle))
1080 return true;
1081 if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
1082 return false;
1084 return acpi_ata_match(phandle);
1087 bool acpi_device_is_battery(struct acpi_device *adev)
1089 struct acpi_hardware_id *hwid;
1091 list_for_each_entry(hwid, &adev->pnp.ids, list)
1092 if (!strcmp("PNP0C0A", hwid->id))
1093 return true;
1095 return false;
1098 static bool is_ejectable_bay(struct acpi_device *adev)
1100 acpi_handle handle = adev->handle;
1102 if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
1103 return true;
1105 return acpi_bay_match(handle);
1109 * acpi_dock_match - see if an acpi object has a _DCK method
1111 bool acpi_dock_match(acpi_handle handle)
1113 return acpi_has_method(handle, "_DCK");
1116 static acpi_status
1117 acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
1118 void **return_value)
1120 long *cap = context;
1122 if (acpi_has_method(handle, "_BCM") &&
1123 acpi_has_method(handle, "_BCL")) {
1124 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
1125 "support\n"));
1126 *cap |= ACPI_VIDEO_BACKLIGHT;
1127 /* We have backlight support, no need to scan further */
1128 return AE_CTRL_TERMINATE;
1130 return 0;
1133 /* Returns true if the ACPI object is a video device which can be
1134 * handled by video.ko.
1135 * The device will get a Linux specific CID added in scan.c to
1136 * identify the device as an ACPI graphics device.
1137 * Be aware that the graphics device may not be physically present.
1138 * Use acpi_video_get_capabilities() to detect general ACPI video
1139 * capabilities of present cards.
1141 long acpi_is_video_device(acpi_handle handle)
1143 long video_caps = 0;
1145 /* Is this device able to support video switching ? */
1146 if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS"))
1147 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
1149 /* Is this device able to retrieve a video ROM ? */
1150 if (acpi_has_method(handle, "_ROM"))
1151 video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
1153 /* Is this device able to configure which video head to be POSTed ? */
1154 if (acpi_has_method(handle, "_VPO") &&
1155 acpi_has_method(handle, "_GPD") &&
1156 acpi_has_method(handle, "_SPD"))
1157 video_caps |= ACPI_VIDEO_DEVICE_POSTING;
1159 /* Only check for backlight functionality if one of the above hit. */
1160 if (video_caps)
1161 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
1162 ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL,
1163 &video_caps, NULL);
1165 return video_caps;
1167 EXPORT_SYMBOL(acpi_is_video_device);
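/*
 * Illustrative sketch, not part of this file: the returned mask can be tested
 * against the ACPI_VIDEO_* capability flags set above, e.g.:
 *
 *	long caps = acpi_is_video_device(handle);
 *
 *	if (caps & ACPI_VIDEO_BACKLIGHT)
 *		pr_info("backlight control (_BCL/_BCM) is available\n");
 */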
1169 const char *acpi_device_hid(struct acpi_device *device)
1171 struct acpi_hardware_id *hid;
1173 if (list_empty(&device->pnp.ids))
1174 return dummy_hid;
1176 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
1177 return hid->id;
1179 EXPORT_SYMBOL(acpi_device_hid);
1181 static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
1183 struct acpi_hardware_id *id;
1185 id = kmalloc(sizeof(*id), GFP_KERNEL);
1186 if (!id)
1187 return;
1189 id->id = kstrdup_const(dev_id, GFP_KERNEL);
1190 if (!id->id) {
1191 kfree(id);
1192 return;
1195 list_add_tail(&id->list, &pnp->ids);
1196 pnp->type.hardware_id = 1;
1200 * Old IBM workstations have a DSDT bug wherein the SMBus object
1201 * lacks the SMBUS01 HID and the methods do not have the necessary "_"
1202 * prefix. Work around this.
1204 static bool acpi_ibm_smbus_match(acpi_handle handle)
1206 char node_name[ACPI_PATH_SEGMENT_LENGTH];
1207 struct acpi_buffer path = { sizeof(node_name), node_name };
1209 if (!dmi_name_in_vendors("IBM"))
1210 return false;
1212 /* Look for SMBS object */
1213 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) ||
1214 strcmp("SMBS", path.pointer))
1215 return false;
1217 /* Does it have the necessary (but misnamed) methods? */
1218 if (acpi_has_method(handle, "SBI") &&
1219 acpi_has_method(handle, "SBR") &&
1220 acpi_has_method(handle, "SBW"))
1221 return true;
1223 return false;
1226 static bool acpi_object_is_system_bus(acpi_handle handle)
1228 acpi_handle tmp;
1230 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
1231 tmp == handle)
1232 return true;
1233 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
1234 tmp == handle)
1235 return true;
1237 return false;
1240 static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
1241 int device_type)
1243 acpi_status status;
1244 struct acpi_device_info *info;
1245 struct acpi_pnp_device_id_list *cid_list;
1246 int i;
1248 switch (device_type) {
1249 case ACPI_BUS_TYPE_DEVICE:
1250 if (handle == ACPI_ROOT_OBJECT) {
1251 acpi_add_id(pnp, ACPI_SYSTEM_HID);
1252 break;
1255 status = acpi_get_object_info(handle, &info);
1256 if (ACPI_FAILURE(status)) {
1257 pr_err(PREFIX "%s: Error reading device info\n",
1258 __func__);
1259 return;
1262 if (info->valid & ACPI_VALID_HID) {
1263 acpi_add_id(pnp, info->hardware_id.string);
1264 pnp->type.platform_id = 1;
1266 if (info->valid & ACPI_VALID_CID) {
1267 cid_list = &info->compatible_id_list;
1268 for (i = 0; i < cid_list->count; i++)
1269 acpi_add_id(pnp, cid_list->ids[i].string);
1271 if (info->valid & ACPI_VALID_ADR) {
1272 pnp->bus_address = info->address;
1273 pnp->type.bus_address = 1;
1275 if (info->valid & ACPI_VALID_UID)
1276 pnp->unique_id = kstrdup(info->unique_id.string,
1277 GFP_KERNEL);
1278 if (info->valid & ACPI_VALID_CLS)
1279 acpi_add_id(pnp, info->class_code.string);
1281 kfree(info);
1284 * Some devices don't reliably have _HIDs & _CIDs, so add
1285 * synthetic HIDs to make sure drivers can find them.
1287 if (acpi_is_video_device(handle))
1288 acpi_add_id(pnp, ACPI_VIDEO_HID);
1289 else if (acpi_bay_match(handle))
1290 acpi_add_id(pnp, ACPI_BAY_HID);
1291 else if (acpi_dock_match(handle))
1292 acpi_add_id(pnp, ACPI_DOCK_HID);
1293 else if (acpi_ibm_smbus_match(handle))
1294 acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
1295 else if (list_empty(&pnp->ids) &&
1296 acpi_object_is_system_bus(handle)) {
1297 /* \_SB, \_TZ, LNXSYBUS */
1298 acpi_add_id(pnp, ACPI_BUS_HID);
1299 strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
1300 strcpy(pnp->device_class, ACPI_BUS_CLASS);
1303 break;
1304 case ACPI_BUS_TYPE_POWER:
1305 acpi_add_id(pnp, ACPI_POWER_HID);
1306 break;
1307 case ACPI_BUS_TYPE_PROCESSOR:
1308 acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
1309 break;
1310 case ACPI_BUS_TYPE_THERMAL:
1311 acpi_add_id(pnp, ACPI_THERMAL_HID);
1312 break;
1313 case ACPI_BUS_TYPE_POWER_BUTTON:
1314 acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
1315 break;
1316 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1317 acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
1318 break;
1319 case ACPI_BUS_TYPE_ECDT_EC:
1320 acpi_add_id(pnp, ACPI_ECDT_HID);
1321 break;
1325 void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
1327 struct acpi_hardware_id *id, *tmp;
1329 list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
1330 kfree_const(id->id);
1331 kfree(id);
1333 kfree(pnp->unique_id);
1337 * acpi_dma_supported - Check DMA support for the specified device.
1338 * @adev: The pointer to acpi device
1340 * Return false if DMA is not supported. Otherwise, return true
1342 bool acpi_dma_supported(struct acpi_device *adev)
1344 if (!adev)
1345 return false;
1347 if (adev->flags.cca_seen)
1348 return true;
1351 * Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent
1352 * DMA on "Intel platforms". Presumably that includes all x86 and
1353 * ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y.
1355 if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
1356 return true;
1358 return false;
1362 * acpi_get_dma_attr - Check the supported DMA attr for the specified device.
1363 * @adev: The pointer to acpi device
1365 * Return enum dev_dma_attr.
1367 enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
1369 if (!acpi_dma_supported(adev))
1370 return DEV_DMA_NOT_SUPPORTED;
1372 if (adev->flags.coherent_dma)
1373 return DEV_DMA_COHERENT;
1374 else
1375 return DEV_DMA_NON_COHERENT;
1379 * acpi_dma_get_range() - Get device DMA parameters.
1381 * @dev: device to configure
1382 * @dma_addr: pointer device DMA address result
1383 * @offset: pointer to the DMA offset result
1384 * @size: pointer to DMA range size result
1386 * Evaluate DMA regions and, on parsing success, return the DMA region start,
1387 * offset and size in @dma_addr, @offset and @size respectively; the passed-in
1388 * values are not updated on failure.
1390 * Return 0 on success, < 0 on failure.
1392 int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
1393 u64 *size)
1395 struct acpi_device *adev;
1396 LIST_HEAD(list);
1397 struct resource_entry *rentry;
1398 int ret;
1399 struct device *dma_dev = dev;
1400 u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
1403 * Walk the device tree chasing an ACPI companion with a _DMA
1404 * object while we go. Stop if we find a device with an ACPI
1405 * companion containing a _DMA method.
1407 do {
1408 adev = ACPI_COMPANION(dma_dev);
1409 if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
1410 break;
1412 dma_dev = dma_dev->parent;
1413 } while (dma_dev);
1415 if (!dma_dev)
1416 return -ENODEV;
1418 if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
1419 acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
1420 return -EINVAL;
1423 ret = acpi_dev_get_dma_resources(adev, &list);
1424 if (ret > 0) {
1425 list_for_each_entry(rentry, &list, node) {
1426 if (dma_offset && rentry->offset != dma_offset) {
1427 ret = -EINVAL;
1428 dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
1429 goto out;
1431 dma_offset = rentry->offset;
1433 /* Take lower and upper limits */
1434 if (rentry->res->start < dma_start)
1435 dma_start = rentry->res->start;
1436 if (rentry->res->end > dma_end)
1437 dma_end = rentry->res->end;
1440 if (dma_start >= dma_end) {
1441 ret = -EINVAL;
1442 dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
1443 goto out;
1446 *dma_addr = dma_start - dma_offset;
1447 len = dma_end - dma_start;
1448 *size = max(len, len + 1);
1449 *offset = dma_offset;
1451 out:
1452 acpi_dev_free_resource_list(&list);
1454 return ret >= 0 ? 0 : ret;
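/*
 * Illustrative sketch, not part of this file (dev is a hypothetical struct
 * device pointer): because the out-parameters are left untouched on failure,
 * callers only consume them when the function returns 0:
 *
 *	u64 dma_addr, offset, size;
 *
 *	if (!acpi_dma_get_range(dev, &dma_addr, &offset, &size)) {
 *		// dma_addr/size describe the usable DMA window and offset
 *		// the translation between device and CPU addresses
 *	}
 */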
1458 * acpi_dma_configure - Set-up DMA configuration for the device.
1459 * @dev: The pointer to the device
1460 * @attr: device dma attributes
1462 int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
1464 const struct iommu_ops *iommu;
1465 u64 dma_addr = 0, size = 0;
1467 if (attr == DEV_DMA_NOT_SUPPORTED) {
1468 set_dma_ops(dev, &dma_dummy_ops);
1469 return 0;
1472 iort_dma_setup(dev, &dma_addr, &size);
1474 iommu = iort_iommu_configure(dev);
1475 if (PTR_ERR(iommu) == -EPROBE_DEFER)
1476 return -EPROBE_DEFER;
1478 arch_setup_dma_ops(dev, dma_addr, size,
1479 iommu, attr == DEV_DMA_COHERENT);
1481 return 0;
1483 EXPORT_SYMBOL_GPL(acpi_dma_configure);
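/*
 * Illustrative sketch, not part of this file (dev is a hypothetical struct
 * device pointer): bus code enumerating ACPI devices commonly derives the DMA
 * attribute from the companion object and then configures DMA in one call:
 *
 *	struct acpi_device *adev = ACPI_COMPANION(dev);
 *
 *	return acpi_dma_configure(dev, acpi_get_dma_attr(adev));
 */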
1485 static void acpi_init_coherency(struct acpi_device *adev)
1487 unsigned long long cca = 0;
1488 acpi_status status;
1489 struct acpi_device *parent = adev->parent;
1491 if (parent && parent->flags.cca_seen) {
1493 * From ACPI spec, OSPM will ignore _CCA if an ancestor
1494 * already saw one.
1496 adev->flags.cca_seen = 1;
1497 cca = parent->flags.coherent_dma;
1498 } else {
1499 status = acpi_evaluate_integer(adev->handle, "_CCA",
1500 NULL, &cca);
1501 if (ACPI_SUCCESS(status))
1502 adev->flags.cca_seen = 1;
1503 else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
1505 * If architecture does not specify that _CCA is
1506 * required for DMA-able devices (e.g. x86),
1507 * we default to _CCA=1.
1509 cca = 1;
1510 else
1511 acpi_handle_debug(adev->handle,
1512 "ACPI device is missing _CCA.\n");
1515 adev->flags.coherent_dma = cca;
1518 static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
1520 bool *is_serial_bus_slave_p = data;
1522 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
1523 return 1;
1525 *is_serial_bus_slave_p = true;
1527 /* no need to do more checking */
1528 return -1;
1531 static bool acpi_is_indirect_io_slave(struct acpi_device *device)
1533 struct acpi_device *parent = device->parent;
1534 static const struct acpi_device_id indirect_io_hosts[] = {
1535 {"HISI0191", 0},
1539 return parent && !acpi_match_device_ids(parent, indirect_io_hosts);
1542 static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
1544 struct list_head resource_list;
1545 bool is_serial_bus_slave = false;
1547 * These devices have multiple I2cSerialBus resources and an i2c-client
1548 * must be instantiated for each, each with its own i2c_device_id.
1549 * Normally we only instantiate an i2c-client for the first resource,
1550 * using the ACPI HID as id. These special cases are handled by the
1551 * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
1552 * which i2c_device_id to use for each resource.
1554 static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
1555 {"BSG1160", },
1556 {"BSG2150", },
1557 {"INT33FE", },
1558 {"INT3515", },
1562 if (acpi_is_indirect_io_slave(device))
1563 return true;
1565 /* Macs use device properties in lieu of _CRS resources */
1566 if (x86_apple_machine &&
1567 (fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
1568 fwnode_property_present(&device->fwnode, "i2cAddress") ||
1569 fwnode_property_present(&device->fwnode, "baud")))
1570 return true;
1572 /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
1573 if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
1574 return false;
1576 INIT_LIST_HEAD(&resource_list);
1577 acpi_dev_get_resources(device, &resource_list,
1578 acpi_check_serial_bus_slave,
1579 &is_serial_bus_slave);
1580 acpi_dev_free_resource_list(&resource_list);
1582 return is_serial_bus_slave;
1585 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1586 int type, unsigned long long sta)
1588 INIT_LIST_HEAD(&device->pnp.ids);
1589 device->device_type = type;
1590 device->handle = handle;
1591 device->parent = acpi_bus_get_parent(handle);
1592 device->fwnode.ops = &acpi_device_fwnode_ops;
1593 acpi_set_device_status(device, sta);
1594 acpi_device_get_busid(device);
1595 acpi_set_pnp_ids(handle, &device->pnp, type);
1596 acpi_init_properties(device);
1597 acpi_bus_get_flags(device);
1598 device->flags.match_driver = false;
1599 device->flags.initialized = true;
1600 device->flags.enumeration_by_parent =
1601 acpi_device_enumeration_by_parent(device);
1602 acpi_device_clear_enumerated(device);
1603 device_initialize(&device->dev);
1604 dev_set_uevent_suppress(&device->dev, true);
1605 acpi_init_coherency(device);
1606 /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
1607 device->dep_unmet = 1;
1610 void acpi_device_add_finalize(struct acpi_device *device)
1612 dev_set_uevent_suppress(&device->dev, false);
1613 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1616 static int acpi_add_single_object(struct acpi_device **child,
1617 acpi_handle handle, int type,
1618 unsigned long long sta)
1620 int result;
1621 struct acpi_device *device;
1622 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1624 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
1625 if (!device) {
1626 printk(KERN_ERR PREFIX "Memory allocation error\n");
1627 return -ENOMEM;
1630 acpi_init_device_object(device, handle, type, sta);
1632 * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
1633 * that we can call acpi_bus_get_status() and use its quirk handling.
1634 * Note this must be done before the get power-/wakeup_dev-flags calls.
1636 if (type == ACPI_BUS_TYPE_DEVICE)
1637 if (acpi_bus_get_status(device) < 0)
1638 acpi_set_device_status(device, 0);
1640 acpi_bus_get_power_flags(device);
1641 acpi_bus_get_wakeup_device_flags(device);
1643 result = acpi_device_add(device, acpi_device_release);
1644 if (result) {
1645 acpi_device_release(&device->dev);
1646 return result;
1649 acpi_power_add_remove_device(device, true);
1650 acpi_device_add_finalize(device);
1651 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1652 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
1653 dev_name(&device->dev), (char *) buffer.pointer,
1654 device->parent ? dev_name(&device->parent->dev) : "(null)"));
1655 kfree(buffer.pointer);
1656 *child = device;
1657 return 0;
1660 static acpi_status acpi_get_resource_memory(struct acpi_resource *ares,
1661 void *context)
1663 struct resource *res = context;
1665 if (acpi_dev_resource_memory(ares, res))
1666 return AE_CTRL_TERMINATE;
1668 return AE_OK;
1671 static bool acpi_device_should_be_hidden(acpi_handle handle)
1673 acpi_status status;
1674 struct resource res;
1676 /* Check if it should ignore the UART device */
1677 if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS)))
1678 return false;
1681 * The UART device described in SPCR table is assumed to have only one
1682 * memory resource present. So we only look for the first one here.
1684 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
1685 acpi_get_resource_memory, &res);
1686 if (ACPI_FAILURE(status) || res.start != spcr_uart_addr)
1687 return false;
1689 acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n",
1690 &res.start);
1692 return true;
1695 static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1696 unsigned long long *sta)
1698 acpi_status status;
1699 acpi_object_type acpi_type;
1701 status = acpi_get_type(handle, &acpi_type);
1702 if (ACPI_FAILURE(status))
1703 return -ENODEV;
1705 switch (acpi_type) {
1706 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
1707 case ACPI_TYPE_DEVICE:
1708 if (acpi_device_should_be_hidden(handle))
1709 return -ENODEV;
1711 *type = ACPI_BUS_TYPE_DEVICE;
1713 * acpi_add_single_object() updates this once we have an acpi_device
1714 * so that acpi_bus_get_status' quirk handling can be used.
1716 *sta = ACPI_STA_DEFAULT;
1717 break;
1718 case ACPI_TYPE_PROCESSOR:
1719 *type = ACPI_BUS_TYPE_PROCESSOR;
1720 status = acpi_bus_get_status_handle(handle, sta);
1721 if (ACPI_FAILURE(status))
1722 return -ENODEV;
1723 break;
1724 case ACPI_TYPE_THERMAL:
1725 *type = ACPI_BUS_TYPE_THERMAL;
1726 *sta = ACPI_STA_DEFAULT;
1727 break;
1728 case ACPI_TYPE_POWER:
1729 *type = ACPI_BUS_TYPE_POWER;
1730 *sta = ACPI_STA_DEFAULT;
1731 break;
1732 default:
1733 return -ENODEV;
1736 return 0;
1739 bool acpi_device_is_present(const struct acpi_device *adev)
1741 return adev->status.present || adev->status.functional;
1744 static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
1745 const char *idstr,
1746 const struct acpi_device_id **matchid)
1748 const struct acpi_device_id *devid;
1750 if (handler->match)
1751 return handler->match(idstr, matchid);
1753 for (devid = handler->ids; devid->id[0]; devid++)
1754 if (!strcmp((char *)devid->id, idstr)) {
1755 if (matchid)
1756 *matchid = devid;
1758 return true;
1761 return false;
1764 static struct acpi_scan_handler *acpi_scan_match_handler(const char *idstr,
1765 const struct acpi_device_id **matchid)
1767 struct acpi_scan_handler *handler;
1769 list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
1770 if (acpi_scan_handler_matching(handler, idstr, matchid))
1771 return handler;
1773 return NULL;
1776 void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
1778 if (!!hotplug->enabled == !!val)
1779 return;
1781 mutex_lock(&acpi_scan_lock);
1783 hotplug->enabled = val;
1785 mutex_unlock(&acpi_scan_lock);
1788 static void acpi_scan_init_hotplug(struct acpi_device *adev)
1790 struct acpi_hardware_id *hwid;
1792 if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
1793 acpi_dock_add(adev);
1794 return;
1796 list_for_each_entry(hwid, &adev->pnp.ids, list) {
1797 struct acpi_scan_handler *handler;
1799 handler = acpi_scan_match_handler(hwid->id, NULL);
1800 if (handler) {
1801 adev->flags.hotplug_notify = true;
1802 break;
1807 static void acpi_device_dep_initialize(struct acpi_device *adev)
1809 struct acpi_dep_data *dep;
1810 struct acpi_handle_list dep_devices;
1811 acpi_status status;
1812 int i;
1814 adev->dep_unmet = 0;
1816 if (!acpi_has_method(adev->handle, "_DEP"))
1817 return;
1819 status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
1820 &dep_devices);
1821 if (ACPI_FAILURE(status)) {
1822 dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
1823 return;
1826 for (i = 0; i < dep_devices.count; i++) {
1827 struct acpi_device_info *info;
1828 int skip;
1830 status = acpi_get_object_info(dep_devices.handles[i], &info);
1831 if (ACPI_FAILURE(status)) {
1832 dev_dbg(&adev->dev, "Error reading _DEP device info\n");
1833 continue;
1837 * Skip the dependency of Windows System Power
1838 * Management Controller
1840 skip = info->valid & ACPI_VALID_HID &&
1841 !strcmp(info->hardware_id.string, "INT3396");
1843 kfree(info);
1845 if (skip)
1846 continue;
1848 dep = kzalloc(sizeof(struct acpi_dep_data), GFP_KERNEL);
1849 if (!dep)
1850 return;
1852 dep->master = dep_devices.handles[i];
1853 dep->slave = adev->handle;
1854 adev->dep_unmet++;
1856 mutex_lock(&acpi_dep_list_lock);
1857 list_add_tail(&dep->node , &acpi_dep_list);
1858 mutex_unlock(&acpi_dep_list_lock);
1862 static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1863 void *not_used, void **return_value)
1865 struct acpi_device *device = NULL;
1866 int type;
1867 unsigned long long sta;
1868 int result;
1870 acpi_bus_get_device(handle, &device);
1871 if (device)
1872 goto out;
1874 result = acpi_bus_type_and_status(handle, &type, &sta);
1875 if (result)
1876 return AE_OK;
1878 if (type == ACPI_BUS_TYPE_POWER) {
1879 acpi_add_power_resource(handle);
1880 return AE_OK;
1883 acpi_add_single_object(&device, handle, type, sta);
1884 if (!device)
1885 return AE_CTRL_DEPTH;
1887 acpi_scan_init_hotplug(device);
1888 acpi_device_dep_initialize(device);
1890 out:
1891 if (!*return_value)
1892 *return_value = device;
1894 return AE_OK;
1897 static void acpi_default_enumeration(struct acpi_device *device)
1900 * Do not enumerate devices with enumeration_by_parent flag set as
1901 * they will be enumerated by their respective parents.
1903 if (!device->flags.enumeration_by_parent) {
1904 acpi_create_platform_device(device, NULL);
1905 acpi_device_set_enumerated(device);
1906 } else {
1907 blocking_notifier_call_chain(&acpi_reconfig_chain,
1908 ACPI_RECONFIG_DEVICE_ADD, device);
1912 static const struct acpi_device_id generic_device_ids[] = {
1913 {ACPI_DT_NAMESPACE_HID, },
1914 {"", },
1917 static int acpi_generic_device_attach(struct acpi_device *adev,
1918 const struct acpi_device_id *not_used)
1921 * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test
1922 * below can be unconditional.
1924 if (adev->data.of_compatible)
1925 acpi_default_enumeration(adev);
1927 return 1;
1930 static struct acpi_scan_handler generic_device_handler = {
1931 .ids = generic_device_ids,
1932 .attach = acpi_generic_device_attach,
1935 static int acpi_scan_attach_handler(struct acpi_device *device)
1937 struct acpi_hardware_id *hwid;
1938 int ret = 0;
1940 list_for_each_entry(hwid, &device->pnp.ids, list) {
1941 const struct acpi_device_id *devid;
1942 struct acpi_scan_handler *handler;
1944 handler = acpi_scan_match_handler(hwid->id, &devid);
1945 if (handler) {
1946 if (!handler->attach) {
1947 device->pnp.type.platform_id = 0;
1948 continue;
1950 device->handler = handler;
1951 ret = handler->attach(device, devid);
1952 if (ret > 0)
1953 break;
1955 device->handler = NULL;
1956 if (ret < 0)
1957 break;
1961 return ret;
1964 static void acpi_bus_attach(struct acpi_device *device)
1966 struct acpi_device *child;
1967 acpi_handle ejd;
1968 int ret;
1970 if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
1971 register_dock_dependent_device(device, ejd);
1973 acpi_bus_get_status(device);
1974 /* Skip devices that are not present. */
1975 if (!acpi_device_is_present(device)) {
1976 device->flags.initialized = false;
1977 acpi_device_clear_enumerated(device);
1978 device->flags.power_manageable = 0;
1979 return;
1981 if (device->handler)
1982 goto ok;
1984 if (!device->flags.initialized) {
1985 device->flags.power_manageable =
1986 device->power.states[ACPI_STATE_D0].flags.valid;
1987 if (acpi_bus_init_power(device))
1988 device->flags.power_manageable = 0;
1990 device->flags.initialized = true;
1991 } else if (device->flags.visited) {
1992 goto ok;
1995 ret = acpi_scan_attach_handler(device);
1996 if (ret < 0)
1997 return;
1999 device->flags.match_driver = true;
2000 if (ret > 0 && !device->flags.enumeration_by_parent) {
2001 acpi_device_set_enumerated(device);
2002 goto ok;
2005 ret = device_attach(&device->dev);
2006 if (ret < 0)
2007 return;
2009 if (device->pnp.type.platform_id || device->flags.enumeration_by_parent)
2010 acpi_default_enumeration(device);
2011 else
2012 acpi_device_set_enumerated(device);
2015 list_for_each_entry(child, &device->children, node)
2016 acpi_bus_attach(child);
2018 if (device->handler && device->handler->hotplug.notify_online)
2019 device->handler->hotplug.notify_online(device);
2022 void acpi_walk_dep_device_list(acpi_handle handle)
2024 struct acpi_dep_data *dep, *tmp;
2025 struct acpi_device *adev;
2027 mutex_lock(&acpi_dep_list_lock);
2028 list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
2029 if (dep->master == handle) {
2030 acpi_bus_get_device(dep->slave, &adev);
2031 if (!adev)
2032 continue;
2034 adev->dep_unmet--;
2035 if (!adev->dep_unmet)
2036 acpi_bus_attach(adev);
2037 list_del(&dep->node);
2038 kfree(dep);
2041 mutex_unlock(&acpi_dep_list_lock);
2043 EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
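/*
 * Illustrative sketch, not part of this file: a driver for a device that
 * other objects name in their _DEP lists (an operation region provider such
 * as the EC, for instance) calls this once it is functional, so that the
 * devices whose enumeration was deferred by dep_unmet get attached:
 *
 *	// handle is the provider's own ACPI handle
 *	acpi_walk_dep_device_list(handle);
 */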
2046 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
2047 * @handle: Root of the namespace scope to scan.
2049 * Scan a given ACPI tree (probably recently hot-plugged) and create and add
2050 * found devices.
2052 * If no devices were found, -ENODEV is returned, but it does not mean that
2053 * there has been a real error. There simply were no suitable ACPI objects
2054 * in the scanned namespace scope from which the kernel could create a device
2055 * and add an appropriate driver.
2057 * Must be called under acpi_scan_lock.
2059 int acpi_bus_scan(acpi_handle handle)
2061 void *device = NULL;
2063 if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
2064 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
2065 acpi_bus_check_add, NULL, NULL, &device);
2067 if (device) {
2068 acpi_bus_attach(device);
2069 return 0;
2071 return -ENODEV;
2073 EXPORT_SYMBOL(acpi_bus_scan);
2076 * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
2077 * @adev: Root of the ACPI namespace scope to walk.
2079 * Must be called under acpi_scan_lock.
2081 void acpi_bus_trim(struct acpi_device *adev)
2083 struct acpi_scan_handler *handler = adev->handler;
2084 struct acpi_device *child;
2086 list_for_each_entry_reverse(child, &adev->children, node)
2087 acpi_bus_trim(child);
2089 adev->flags.match_driver = false;
2090 if (handler) {
2091 if (handler->detach)
2092 handler->detach(adev);
2094 adev->handler = NULL;
2095 } else {
2096 device_release_driver(&adev->dev);
2099 * Most likely, the device is going away, so put it into D3cold before
2100 * that.
2102 acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
2103 adev->flags.initialized = false;
2104 acpi_device_clear_enumerated(adev);
2106 EXPORT_SYMBOL_GPL(acpi_bus_trim);
2108 int acpi_bus_register_early_device(int type)
2110 struct acpi_device *device = NULL;
2111 int result;
2113 result = acpi_add_single_object(&device, NULL,
2114 type, ACPI_STA_DEFAULT);
2115 if (result)
2116 return result;
2118 device->flags.match_driver = true;
2119 return device_attach(&device->dev);
2121 EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
2123 static int acpi_bus_scan_fixed(void)
2125 int result = 0;
2128 * Enumerate all fixed-feature devices.
2130 if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
2131 struct acpi_device *device = NULL;
2133 result = acpi_add_single_object(&device, NULL,
2134 ACPI_BUS_TYPE_POWER_BUTTON,
2135 ACPI_STA_DEFAULT);
2136 if (result)
2137 return result;
2139 device->flags.match_driver = true;
2140 result = device_attach(&device->dev);
2141 if (result < 0)
2142 return result;
2144 device_init_wakeup(&device->dev, true);
2147 if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
2148 struct acpi_device *device = NULL;
2150 result = acpi_add_single_object(&device, NULL,
2151 ACPI_BUS_TYPE_SLEEP_BUTTON,
2152 ACPI_STA_DEFAULT);
2153 if (result)
2154 return result;
2156 device->flags.match_driver = true;
2157 result = device_attach(&device->dev);
2160 return result < 0 ? result : 0;
2163 static void __init acpi_get_spcr_uart_addr(void)
2165 acpi_status status;
2166 struct acpi_table_spcr *spcr_ptr;
2168 status = acpi_get_table(ACPI_SIG_SPCR, 0,
2169 (struct acpi_table_header **)&spcr_ptr);
2170 if (ACPI_SUCCESS(status))
2171 spcr_uart_addr = spcr_ptr->serial_port.address;
2172 else
2173 printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n");
2176 static bool acpi_scan_initialized;
2178 int __init acpi_scan_init(void)
2180 int result;
2181 acpi_status status;
2182 struct acpi_table_stao *stao_ptr;
2184 acpi_pci_root_init();
2185 acpi_pci_link_init();
2186 acpi_processor_init();
2187 acpi_platform_init();
2188 acpi_lpss_init();
2189 acpi_apd_init();
2190 acpi_cmos_rtc_init();
2191 acpi_container_init();
2192 acpi_memory_hotplug_init();
2193 acpi_watchdog_init();
2194 acpi_pnp_init();
2195 acpi_int340x_thermal_init();
2196 acpi_amba_init();
2197 acpi_init_lpit();
2199 acpi_scan_add_handler(&generic_device_handler);
2202 * If there is STAO table, check whether it needs to ignore the UART
2203 * device in SPCR table.
2205 status = acpi_get_table(ACPI_SIG_STAO, 0,
2206 (struct acpi_table_header **)&stao_ptr);
2207 if (ACPI_SUCCESS(status)) {
2208 if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
2209 printk(KERN_INFO PREFIX "STAO Name List not yet supported.");
2211 if (stao_ptr->ignore_uart)
2212 acpi_get_spcr_uart_addr();
2215 acpi_gpe_apply_masked_gpes();
2216 acpi_update_all_gpes();
2219 * Although we call __add_memory() that is documented to require the
2220 * device_hotplug_lock, it is not necessary here because this is an
2221 * early code when userspace or any other code path cannot trigger
2222 * hotplug/hotunplug operations.
2224 mutex_lock(&acpi_scan_lock);
2226 * Enumerate devices in the ACPI namespace.
2228 result = acpi_bus_scan(ACPI_ROOT_OBJECT);
2229 if (result)
2230 goto out;
2232 result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
2233 if (result)
2234 goto out;
2236 /* Fixed feature devices do not exist on HW-reduced platform */
2237 if (!acpi_gbl_reduced_hardware) {
2238 result = acpi_bus_scan_fixed();
2239 if (result) {
2240 acpi_detach_data(acpi_root->handle,
2241 acpi_scan_drop_device);
2242 acpi_device_del(acpi_root);
2243 put_device(&acpi_root->dev);
2244 goto out;
2248 acpi_scan_initialized = true;
2250 out:
2251 mutex_unlock(&acpi_scan_lock);
2252 return result;
2255 static struct acpi_probe_entry *ape;
2256 static int acpi_probe_count;
2257 static DEFINE_MUTEX(acpi_probe_mutex);
2259 static int __init acpi_match_madt(union acpi_subtable_headers *header,
2260 const unsigned long end)
2262 if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape))
2263 if (!ape->probe_subtbl(header, end))
2264 acpi_probe_count++;
2266 return 0;
2269 int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
2271 int count = 0;
2273 if (acpi_disabled)
2274 return 0;
2276 mutex_lock(&acpi_probe_mutex);
2277 for (ape = ap_head; nr; ape++, nr--) {
2278 if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
2279 acpi_probe_count = 0;
2280 acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
2281 count += acpi_probe_count;
2282 } else {
2283 int res;
2284 res = acpi_table_parse(ape->id, ape->probe_table);
2285 if (!res)
2286 count++;
2289 mutex_unlock(&acpi_probe_mutex);
2291 return count;
2294 struct acpi_table_events_work {
2295 struct work_struct work;
2296 void *table;
2297 u32 event;
2300 static void acpi_table_events_fn(struct work_struct *work)
2302 struct acpi_table_events_work *tew;
2304 tew = container_of(work, struct acpi_table_events_work, work);
2306 if (tew->event == ACPI_TABLE_EVENT_LOAD) {
2307 acpi_scan_lock_acquire();
2308 acpi_bus_scan(ACPI_ROOT_OBJECT);
2309 acpi_scan_lock_release();
2312 kfree(tew);
2315 void acpi_scan_table_handler(u32 event, void *table, void *context)
2317 struct acpi_table_events_work *tew;
2319 if (!acpi_scan_initialized)
2320 return;
2322 if (event != ACPI_TABLE_EVENT_LOAD)
2323 return;
2325 tew = kmalloc(sizeof(*tew), GFP_KERNEL);
2326 if (!tew)
2327 return;
2329 INIT_WORK(&tew->work, acpi_table_events_fn);
2330 tew->table = table;
2331 tew->event = event;
2333 schedule_work(&tew->work);
2336 int acpi_reconfig_notifier_register(struct notifier_block *nb)
2338 return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
2340 EXPORT_SYMBOL(acpi_reconfig_notifier_register);
2342 int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
2344 return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
2346 EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);
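/*
 * Illustrative sketch with hypothetical names (my_reconfig_cb and my_nb are
 * not part of this file): subscribers receive ACPI_RECONFIG_DEVICE_ADD and
 * ACPI_RECONFIG_DEVICE_REMOVE events with the struct acpi_device as data:
 *
 *	static int my_reconfig_cb(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct acpi_device *adev = data;	// added/removed device
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_reconfig_cb,
 *	};
 *
 *	acpi_reconfig_notifier_register(&my_nb);
 */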