// SPDX-License-Identifier: GPL-2.0
/*
 * firmware_class.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/firmware_class/ for more information.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>

#include <generated/utsrelease.h>

#include "base.h"
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
enum fw_status {
	FW_STATUS_UNKNOWN,
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORTED,
};

/*
 * Concurrent request_firmware() calls for the same firmware need to be
 * serialized. struct fw_state is a simple state machine which holds the
 * state of the firmware loading.
 */
struct fw_state {
	struct completion completion;
	enum fw_status status;
};
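/*
 * Illustrative sketch (not part of the original file): the state helpers
 * defined further below drive these transitions:
 *
 *	fw_state_init()    -> FW_STATUS_UNKNOWN
 *	fw_state_start()   -> FW_STATUS_LOADING
 *	fw_state_done()    -> FW_STATUS_DONE    (wakes all waiters)
 *	fw_state_aborted() -> FW_STATUS_ABORTED (wakes all waiters)
 *
 * Concurrent requesters of the same image block in fw_state_wait() on the
 * embedded completion until DONE or ABORTED is reached.
 */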
/* firmware behavior options */
#define FW_OPT_UEVENT		(1U << 0)
#define FW_OPT_NOWAIT		(1U << 1)
#define FW_OPT_USERHELPER	(1U << 2)
#define FW_OPT_NO_WARN		(1U << 3)
#define FW_OPT_NOCACHE		(1U << 4)
#define FW_OPT_NOFALLBACK	(1U << 5)
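/*
 * Illustrative note (not part of the original file): the request entry
 * points below combine these bits, e.g. request_firmware_nowait() passes
 * FW_OPT_NOWAIT plus either FW_OPT_UEVENT or FW_OPT_USERHELPER, and
 * request_firmware_into_buf() adds FW_OPT_NOCACHE so the preallocated
 * buffer is never kept in the firmware cache.
 */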
struct firmware_cache {
	/* firmware_buf instances will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that the device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};
struct fw_priv {
	struct kref ref;
	struct list_head list;
	struct firmware_cache *fwc;
	struct fw_state fw_st;
	void *data;
	size_t size;
	size_t allocated_size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;
	bool need_uevent;
	struct page **pages;
	int nr_pages;
	int page_array_size;
	struct list_head pending_list;
#endif
	const char *fw_name;
};
struct fw_cache_entry {
	struct list_head list;
	const char *name;
};
struct fw_name_devm {
	unsigned long magic;
	const char *name;
};
static inline struct fw_priv *to_fw_priv(struct kref *ref)
{
	return container_of(ref, struct fw_priv, ref);
}
#define FW_LOADER_NO_CACHE	0
#define FW_LOADER_START_CACHE	1

/* fw_lock could be moved to 'struct fw_sysfs' but since it is just
 * guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

static struct firmware_cache fw_cache;
/* Builtin firmware support */

#ifdef CONFIG_FW_LOADER

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];
static void fw_copy_to_prealloc_buf(struct firmware *fw,
				    void *buf, size_t size)
{
	if (!buf || size < fw->size)
		return;
	memcpy(buf, fw->data, fw->size);
}
static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
				    void *buf, size_t size)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;
			fw_copy_to_prealloc_buf(fw, buf, size);

			return true;
		}
	}

	return false;
}
static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}
#else /* Module case - no builtin firmware support */

static inline bool fw_get_builtin_firmware(struct firmware *fw,
					   const char *name, void *buf,
					   size_t size)
{
	return false;
}

static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif
static int loading_timeout = 60;	/* In seconds */

static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
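/*
 * Illustrative worked example (not part of the original file): with the
 * default loading_timeout of 60 the helpers below wait 60 * HZ jiffies
 * for the user-mode fallback; a value of 0 or less turns the wait into
 * MAX_JIFFY_OFFSET, i.e. effectively "wait forever".
 */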
static void fw_state_init(struct fw_priv *fw_priv)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	init_completion(&fw_st->completion);
	fw_st->status = FW_STATUS_UNKNOWN;
}
static int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
{
	struct fw_state *fw_st = &fw_priv->fw_st;
	long ret;

	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
		return -ENOENT;
	if (!ret)
		return -ETIMEDOUT;

	return ret < 0 ? ret : 0;
}
static void __fw_state_set(struct fw_priv *fw_priv,
			   enum fw_status status)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	WRITE_ONCE(fw_st->status, status);

	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
		complete_all(&fw_st->completion);
}
static inline void fw_state_start(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_LOADING);
}

static inline void fw_state_done(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_DONE);
}

static inline void fw_state_aborted(struct fw_priv *fw_priv)
{
	__fw_state_set(fw_priv, FW_STATUS_ABORTED);
}

static inline int fw_state_wait(struct fw_priv *fw_priv)
{
	return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
static bool __fw_state_check(struct fw_priv *fw_priv,
			     enum fw_status status)
{
	struct fw_state *fw_st = &fw_priv->fw_st;

	return fw_st->status == status;
}

static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
}
#ifdef CONFIG_FW_LOADER_USER_HELPER

static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_DONE);
}

static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}

static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
	return __fw_state_wait_common(fw_priv, timeout);
}

#endif /* CONFIG_FW_LOADER_USER_HELPER */
static int fw_cache_piggyback_on_request(const char *name);
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
					  struct firmware_cache *fwc,
					  void *dbuf, size_t size)
{
	struct fw_priv *fw_priv;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
	if (!fw_priv)
		return NULL;

	fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!fw_priv->fw_name) {
		kfree(fw_priv);
		return NULL;
	}

	kref_init(&fw_priv->ref);
	fw_priv->fwc = fwc;
	fw_priv->data = dbuf;
	fw_priv->allocated_size = size;
	fw_state_init(fw_priv);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&fw_priv->pending_list);
#endif

	pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);

	return fw_priv;
}
static struct fw_priv *__lookup_fw_priv(const char *fw_name)
{
	struct fw_priv *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_name, fw_name))
			return tmp;
	return NULL;
}
/* Returns 1 for batching firmware requests with the same name */
static int alloc_lookup_fw_priv(const char *fw_name,
				struct firmware_cache *fwc,
				struct fw_priv **fw_priv, void *dbuf,
				size_t size)
{
	struct fw_priv *tmp;

	spin_lock(&fwc->lock);
	tmp = __lookup_fw_priv(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*fw_priv = tmp;
		pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
		return 1;
	}
	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

	return tmp ? 0 : -ENOMEM;
}
static void __free_fw_priv(struct kref *ref)
	__releases(&fwc->lock)
{
	struct fw_priv *fw_priv = to_fw_priv(ref);
	struct firmware_cache *fwc = fw_priv->fwc;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);

	list_del(&fw_priv->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (fw_priv->is_paged_buf) {
		int i;

		vunmap(fw_priv->data);
		for (i = 0; i < fw_priv->nr_pages; i++)
			__free_page(fw_priv->pages[i]);
		vfree(fw_priv->pages);
	} else
#endif
	if (!fw_priv->allocated_size)
		vfree(fw_priv->data);
	kfree_const(fw_priv->fw_name);
	kfree(fw_priv);
}
static void free_fw_priv(struct fw_priv *fw_priv)
{
	struct firmware_cache *fwc = fw_priv->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&fw_priv->ref, __free_fw_priv))
		spin_unlock(&fwc->lock);
}
/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
 * kernel command line, because firmware_class is generally built into the
 * kernel rather than built as a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
static int
fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv)
{
	loff_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	enum kernel_read_file_id id = READING_FIRMWARE;
	size_t msize = INT_MAX;

	/* Already populated data member means we're loading into a buffer */
	if (fw_priv->data) {
		id = READING_FIRMWARE_PREALLOC_BUFFER;
		msize = fw_priv->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s",
			       fw_path[i], fw_priv->fw_name);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		fw_priv->size = 0;
		rc = kernel_read_file_from_path(path, &fw_priv->data, &size,
						msize, id);
		if (rc) {
			if (rc == -ENOENT)
				dev_dbg(device, "loading %s failed with error %d\n",
					path, rc);
			else
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			continue;
		}
		dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name);
		fw_priv->size = size;
		fw_state_done(fw_priv);
		break;
	}
	__putname(path);

	return rc;
}
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	free_fw_priv(fw->priv);
}
/* store the pages buffer info into the firmware struct from the buf */
static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
{
	fw->priv = fw_priv;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = fw_priv->pages;
#endif
	fw->size = fw_priv->size;
	fw->data = fw_priv->data;

	pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
		 __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
		 (unsigned int)fw_priv->size);
}
#ifdef CONFIG_PM_SLEEP
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
	kfree_const(fwn->name);
}
static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}
static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}
/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);
	return 1;
}
#else
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif
static int assign_fw(struct firmware *fw, struct device *device,
		     unsigned int opt_flags)
{
	struct fw_priv *fw_priv = fw->priv;

	mutex_lock(&fw_lock);
	if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * Add the firmware name into the devres list so that we can auto
	 * cache and uncache firmware for the device.
	 *
	 * The device may have been deleted already, but the problem
	 * should be fixed in devres or the driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (opt_flags & FW_OPT_UEVENT) &&
	    !(opt_flags & FW_OPT_NOCACHE))
		fw_add_devm_name(device, fw_priv->fw_name);

	/*
	 * After caching firmware images has been started, let this request
	 * piggyback on that caching.
	 */
	if (!(opt_flags & FW_OPT_NOCACHE) &&
	    fw_priv->fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(fw_priv->fw_name))
			kref_get(&fw_priv->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(fw_priv, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
/*
 * user-mode helper code
 */
#ifdef CONFIG_FW_LOADER_USER_HELPER
struct fw_sysfs {
	bool nowait;
	struct device dev;
	struct fw_priv *fw_priv;
	struct firmware *fw;
};

static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
	return container_of(dev, struct fw_sysfs, dev);
}
static void __fw_load_abort(struct fw_priv *fw_priv)
{
	/*
	 * There is a small window in which the user can write to 'loading'
	 * between loading done and the disappearance of 'loading'.
	 */
	if (fw_sysfs_done(fw_priv))
		return;

	list_del_init(&fw_priv->pending_list);
	fw_state_aborted(fw_priv);
}

static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	__fw_load_abort(fw_priv);
}

static LIST_HEAD(pending_fw_head);
static void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
	struct fw_priv *fw_priv;
	struct fw_priv *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
				 pending_list) {
		if (!fw_priv->need_uevent || !only_kill_custom)
			__fw_load_abort(fw_priv);
	}
	mutex_unlock(&fw_lock);
}
static ssize_t
timeout_show(struct class *class, struct class_attribute *attr,
	     char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}
/**
 * firmware_timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware.  Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t
timeout_store(struct class *class, struct class_attribute *attr,
	      const char *buf, size_t count)
{
	loading_timeout = simple_strtol(buf, NULL, 10);
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}
static CLASS_ATTR_RW(timeout);

static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(firmware_class);
static void fw_dev_release(struct device *dev)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);

	kfree(fw_sysfs);
}
static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
		return -ENOMEM;

	return 0;
}
static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		err = do_firmware_uevent(fw_sysfs, env);
	mutex_unlock(&fw_lock);
	return err;
}
static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

static inline int register_sysfs_loader(void)
{
	return class_register(&firmware_class);
}

static inline void unregister_sysfs_loader(void)
{
	class_unregister(&firmware_class);
}
static ssize_t
firmware_loading_show(struct device *dev,
		      struct device_attribute *attr, char *buf)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		loading = fw_sysfs_loading(fw_sysfs->fw_priv);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
/* one pages buffer should be mapped/unmapped only once */
static int map_fw_priv_pages(struct fw_priv *fw_priv)
{
	if (!fw_priv->is_paged_buf)
		return 0;

	vunmap(fw_priv->data);
	fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
			     PAGE_KERNEL_RO);
	if (!fw_priv->data)
		return -ENOMEM;
	return 0;
}
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 **/
static ssize_t
firmware_loading_store(struct device *dev,
		       struct device_attribute *attr,
		       const char *buf, size_t count)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (fw_state_is_aborted(fw_priv))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_sysfs_done(fw_priv)) {
			for (i = 0; i < fw_priv->nr_pages; i++)
				__free_page(fw_priv->pages[i]);
			vfree(fw_priv->pages);
			fw_priv->pages = NULL;
			fw_priv->page_array_size = 0;
			fw_priv->nr_pages = 0;
			fw_state_start(fw_priv);
		}
		break;
	case 0:
		if (fw_sysfs_loading(fw_priv)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = map_fw_priv_pages(fw_priv);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_post_read_file(NULL,
						fw_priv->data, fw_priv->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_priv->pending_list);
			if (rc) {
				fw_state_aborted(fw_priv);
				written = rc;
			} else {
				fw_state_done(fw_priv);
			}
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_sysfs);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
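/*
 * Illustrative user-space sequence (not part of the original file) for
 * feeding an image through this sysfs fallback interface, where $DEVPATH
 * and $FIRMWARE come from the firmware uevent:
 *
 *	echo 1 > /sys/$DEVPATH/loading
 *	cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *	echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 to 'loading' instead aborts the load and returns an error
 * to the in-kernel requester.
 */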
static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
			     loff_t offset, size_t count, bool read)
{
	if (read)
		memcpy(buffer, fw_priv->data + offset, count);
	else
		memcpy(fw_priv->data + offset, buffer, count);
}
static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(fw_priv->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(fw_priv->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}
static ssize_t
firmware_data_read(struct file *filp, struct kobject *kobj,
		   struct bin_attribute *bin_attr,
		   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > fw_priv->size) {
		ret_count = 0;
		goto out;
	}
	if (count > fw_priv->size - offset)
		count = fw_priv->size - offset;

	ret_count = count;

	if (fw_priv->data)
		firmware_rw_data(fw_priv, buffer, offset, count, true);
	else
		firmware_rw(fw_priv, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;
	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (fw_priv->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 fw_priv->page_array_size * 2);
		struct page **new_pages;

		new_pages = vmalloc(new_array_size * sizeof(void *));
		if (!new_pages) {
			fw_load_abort(fw_sysfs);
			return -ENOMEM;
		}
		memcpy(new_pages, fw_priv->pages,
		       fw_priv->page_array_size * sizeof(void *));
		memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
		       (new_array_size - fw_priv->page_array_size));
		vfree(fw_priv->pages);
		fw_priv->pages = new_pages;
		fw_priv->page_array_size = new_array_size;
	}

	while (fw_priv->nr_pages < pages_needed) {
		fw_priv->pages[fw_priv->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!fw_priv->pages[fw_priv->nr_pages]) {
			fw_load_abort(fw_sysfs);
			return -ENOMEM;
		}
		fw_priv->nr_pages++;
	}
	return 0;
}
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t
firmware_data_write(struct file *filp, struct kobject *kobj,
		    struct bin_attribute *bin_attr,
		    char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};
static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, unsigned int opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	struct device *f_dev;

	fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
	if (!fw_sysfs) {
		fw_sysfs = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_sysfs->fw = firmware;
	f_dev = &fw_sysfs->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_sysfs;
}
/* load a firmware via user helper */
static int _request_firmware_load(struct fw_sysfs *fw_sysfs,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_sysfs->dev;
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	/* fall back on userspace loading */
	if (!fw_priv->data)
		fw_priv->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&fw_priv->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		fw_priv->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
	if (retval < 0) {
		mutex_lock(&fw_lock);
		fw_load_abort(fw_sysfs);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(fw_priv)) {
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
		else
			retval = -EAGAIN;
	} else if (fw_priv->is_paged_buf && !fw_priv->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_sysfs)) {
		ret = PTR_ERR(fw_sysfs);
		goto out_unlock;
	}

	fw_sysfs->fw_priv = firmware->priv;
	ret = _request_firmware_load(fw_sysfs, opt_flags, timeout);

	if (!ret)
		ret = assign_fw(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}
#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
static bool fw_force_sysfs_fallback(unsigned int opt_flags)
{
	return true;
}
#else
static bool fw_force_sysfs_fallback(unsigned int opt_flags)
{
	if (!(opt_flags & FW_OPT_USERHELPER))
		return false;
	return true;
}
#endif

static bool fw_run_sysfs_fallback(unsigned int opt_flags)
{
	if ((opt_flags & FW_OPT_NOFALLBACK))
		return false;

	return fw_force_sysfs_fallback(opt_flags);
}

static int fw_sysfs_fallback(struct firmware *fw, const char *name,
			     struct device *device,
			     unsigned int opt_flags,
			     int ret)
{
	if (!fw_run_sysfs_fallback(opt_flags))
		return ret;

	dev_warn(device, "Falling back to user helper\n");
	return fw_load_from_user_helper(fw, name, device, opt_flags);
}
#else /* CONFIG_FW_LOADER_USER_HELPER */
static int fw_sysfs_fallback(struct firmware *fw, const char *name,
			     struct device *device,
			     unsigned int opt_flags,
			     int ret)
{
	/* Keep carrying over the same error */
	return ret;
}

static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }

static inline int register_sysfs_loader(void)
{
	return 0;
}

static inline void unregister_sysfs_loader(void)
{
}
#endif /* CONFIG_FW_LOADER_USER_HELPER */
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size)
{
	struct firmware *firmware;
	struct fw_priv *fw_priv;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);

	/*
	 * bind with 'priv' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = fw_priv;

	if (ret > 0) {
		ret = fw_state_wait(fw_priv);
		if (!ret) {
			fw_set_page_data(fw_priv, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
/*
 * Batched requests need only one wake, we need to do this step last due to the
 * fallback mechanism. The buf is protected with kref_get(), and it won't be
 * released until the last user calls release_firmware().
 *
 * Failed batched requests are possible as well, in such cases we just share
 * the struct fw_priv and won't release it until all requests are woken
 * and have gone through this same path.
 */
static void fw_abort_batch_reqs(struct firmware *fw)
{
	struct fw_priv *fw_priv;

	/* Loaded directly? */
	if (!fw || !fw->priv)
		return;

	fw_priv = fw->priv;
	if (!fw_state_is_aborted(fw_priv))
		fw_state_aborted(fw_priv);
}
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  unsigned int opt_flags)
{
	struct firmware *fw = NULL;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size);
	if (ret <= 0) /* error or already assigned */
		goto out;

	ret = fw_get_filesystem_firmware(device, fw->priv);
	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		ret = fw_sysfs_fallback(fw, name, device, opt_flags, ret);
	} else
		ret = assign_fw(fw, device, opt_flags);

out:
	if (ret < 0) {
		fw_abort_batch_reqs(fw);
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 *      @firmware_p will be used to return a firmware image by the name
 *      of @name for device @device.
 *
 *      Should be called from user context where sleeping is allowed.
 *
 *      @name will be used as $FIRMWARE in the uevent environment and
 *      should be distinctive enough not to be confused with any other
 *      firmware image for this or any other device.
 *
 *	Caller must hold the reference count of @device.
 *
 *	The function can be called safely inside device's suspend and
 *	resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
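/*
 * Illustrative driver-side sketch (not part of the original file); the
 * "example.bin" name, &pdev->dev and example_load_to_hw() are made up:
 *
 *	const struct firmware *fw;
 *	int err;
 *
 *	err = request_firmware(&fw, "example.bin", &pdev->dev);
 *	if (err)
 *		return err;
 *	err = example_load_to_hw(pdev, fw->data, fw->size);
 *	release_firmware(fw);
 *	return err;
 */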
/**
 * request_firmware_direct: - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but this doesn't
 * fall back to usermode helper even if the firmware couldn't be loaded
 * directly from fs.  Hence it's useful for loading optional firmwares, which
 * aren't always present, without extra long timeouts of udev.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN |
				FW_OPT_NOFALLBACK);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
 * request_firmware_into_buf - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data. Instead, the firmware
 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 */
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
			  struct device *device, void *buf, size_t size)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size,
				FW_OPT_UEVENT | FW_OPT_NOCACHE);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
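/*
 * Illustrative sketch (not part of the original file); "example.bin" and
 * the priv->fw_buf / priv->fw_buf_size fields are made-up driver state:
 *
 *	const struct firmware *fw;
 *	int err;
 *
 *	err = request_firmware_into_buf(&fw, "example.bin", dev,
 *					priv->fw_buf, priv->fw_buf_size);
 *	if (!err)
 *		release_firmware(fw);
 *
 * On success fw->data points at priv->fw_buf and fw->size is the image
 * size; the image is neither copied elsewhere nor cached.
 */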
/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		if (!fw_is_builtin_firmware(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}
EXPORT_SYMBOL(release_firmware);
struct firmware_work {
	struct work_struct work;
	struct module *module;
	const char *name;
	struct device *device;
	void *context;
	void (*cont)(const struct firmware *fw, void *context);
	unsigned int opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
			  fw_work->opt_flags);
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}
/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 *	Caller must hold the reference count of @device.
 *
 *	Asynchronous variant of request_firmware() for user contexts:
 *		- sleep for as small periods as possible since it may
 *		  increase kernel boot time of built-in device drivers
 *		  requesting firmware in their ->probe() methods, if
 *		  @gfp is GFP_KERNEL.
 *
 *		- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = kstrdup_const(name, gfp);
	if (!fw_work->name) {
		kfree(fw_work);
		return -ENOMEM;
	}
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->opt_flags = FW_OPT_NOWAIT |
		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);

	if (!try_module_get(module)) {
		kfree_const(fw_work->name);
		kfree(fw_work);
		return -EFAULT;
	}

	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);
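/*
 * Illustrative asynchronous sketch (not part of the original file); the
 * callback, "example.bin" and struct example_priv are made up:
 *
 *	static void example_fw_ready(const struct firmware *fw, void *context)
 *	{
 *		struct example_priv *priv = context;
 *
 *		if (!fw)
 *			return;		(request failed, fw is NULL)
 *		example_load_to_hw(priv, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 *
 *	err = request_firmware_nowait(THIS_MODULE, true, "example.bin",
 *				      dev, GFP_KERNEL, priv,
 *				      example_fw_ready);
 */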
#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when the
 * system isn't ready for them to request firmware images from userspace.
 * Once it returns successfully, drivers can use request_firmware or its
 * nowait version to get the cached firmware without any interaction
 * with userspace.
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
static struct fw_priv *lookup_fw_priv(const char *fw_name)
{
	struct fw_priv *tmp;
	struct firmware_cache *fwc = &fw_cache;

	spin_lock(&fwc->lock);
	tmp = __lookup_fw_priv(fw_name);
	spin_unlock(&fwc->lock);

	return tmp;
}
/**
 * uncache_firmware - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
static int uncache_firmware(const char *fw_name)
{
	struct fw_priv *fw_priv;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
		return 0;

	fw_priv = lookup_fw_priv(fw_name);
	if (fw_priv) {
		free_fw_priv(fw_priv);
		return 0;
	}

	return -EINVAL;
}
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
	if (!fce)
		goto exit;

	fce->name = kstrdup_const(name, GFP_ATOMIC);
	if (!fce->name) {
		kfree(fce);
		fce = NULL;
	}
exit:
	return fce;
}
static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}

static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}
/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}
static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;
	return (fwn->magic == (unsigned long)match_data);
}
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If a device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache this firmware for the device, and the
 * device driver can then load its firmware easily at a time when the
 * system is not ready to complete firmware loading from user space.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * Use a small loading timeout for caching devices' firmware,
	 * because all these firmware images have been loaded
	 * successfully at least once, and the system is now ready for
	 * completing firmware loading. The maximum size of firmware in
	 * current distributions is about 2M bytes, so 10 secs should be
	 * enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * Uncache all firmware which has been cached successfully
 * by device_cache_fw_images earlier.
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}
/**
 * device_uncache_fw_images_delay - uncache devices' firmware
 * @delay: number of milliseconds to delay uncaching device firmware
 *
 * Uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		/*
		 * kill pending fallback requests with a custom fallback
		 * to avoid stalling suspend.
		 */
		kill_pending_fw_fallback_reqs(true);
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case system sleep failed and syscore_suspend was
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
static int __init register_fw_pm_ops(void)
{
	int ret;

	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	ret = register_pm_notifier(&fw_cache.pm_notify);
	if (ret)
		return ret;

	register_syscore_ops(&fw_syscore_ops);

	return ret;
}
static inline void unregister_fw_pm_ops(void)
{
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
static inline int register_fw_pm_ops(void)
{
	return 0;
}
static inline void unregister_fw_pm_ops(void)
{
}
#endif
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;
}
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	/*
	 * Kill all pending fallback requests to avoid both stalling
	 * shutdown and a deadlock with the usermode_lock.
	 */
	kill_pending_fw_fallback_reqs(false);

	return NOTIFY_DONE;
}

static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
static int __init firmware_class_init(void)
{
	int ret;

	/* No need to unfold these on exit */
	fw_cache_init();

	ret = register_fw_pm_ops();
	if (ret)
		return ret;

	ret = register_reboot_notifier(&fw_shutdown_nb);
	if (ret)
		goto out;

	return register_sysfs_loader();

out:
	unregister_fw_pm_ops();
	return ret;
}

static void __exit firmware_class_exit(void)
{
	unregister_fw_pm_ops();
	unregister_reboot_notifier(&fw_shutdown_nb);
	unregister_sysfs_loader();
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);