/*
 * firmware_class.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/firmware_class/ for more information.
 */
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>

#include <generated/utsrelease.h>

#include "base.h"
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");

/* Builtin firmware support */

#ifdef CONFIG_FW_LOADER
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
				    void *buf, size_t size)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;

			if (buf && fw->size <= size)
				memcpy(buf, fw->data, fw->size);
			return true;
		}
	}

	return false;
}
static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}
#else /* Module case - no builtin firmware support */

static inline bool fw_get_builtin_firmware(struct firmware *fw,
					   const char *name, void *buf,
					   size_t size)
{
	return false;
}

static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif

enum {
	FW_STATUS_LOADING,
	FW_STATUS_DONE,
	FW_STATUS_ABORT,
};

static int loading_timeout = 60;	/* In seconds */

static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
/* firmware behavior options */
#define FW_OPT_UEVENT		(1U << 0)
#define FW_OPT_NOWAIT		(1U << 1)
#ifdef CONFIG_FW_LOADER_USER_HELPER
#define FW_OPT_USERHELPER	(1U << 2)
#else
#define FW_OPT_USERHELPER	0
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER_FALLBACK
#define FW_OPT_FALLBACK		FW_OPT_USERHELPER
#else
#define FW_OPT_FALLBACK		0
#endif
#define FW_OPT_NO_WARN		(1U << 3)
#define FW_OPT_NOCACHE		(1U << 4)
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that the device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

struct firmware_buf {
	struct kref ref;
	struct list_head list;
	struct completion completion;
	struct firmware_cache *fwc;
	unsigned long status;
	void *data;
	size_t size;
	size_t allocated_size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;
	bool need_uevent;
	struct page **pages;
	int nr_pages;
	int page_array_size;
	struct list_head pending_list;
#endif
	const char *fw_id;
};

struct fw_cache_entry {
	struct list_head list;
	const char *name;
};

struct fw_name_devm {
	unsigned long magic;
	const char *name;
};

#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

static int fw_cache_piggyback_on_request(const char *name);

/* fw_lock could be moved to 'struct firmware_priv' but since it is just
 * guarding corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

static struct firmware_cache fw_cache;
static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
					      struct firmware_cache *fwc,
					      void *dbuf, size_t size)
{
	struct firmware_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
	if (!buf)
		return NULL;

	buf->fw_id = kstrdup_const(fw_name, GFP_ATOMIC);
	if (!buf->fw_id) {
		kfree(buf);
		return NULL;
	}

	kref_init(&buf->ref);
	buf->fwc = fwc;
	buf->data = dbuf;
	buf->allocated_size = size;
	init_completion(&buf->completion);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&buf->pending_list);
#endif

	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);

	return buf;
}
static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_id, fw_name))
			return tmp;
	return NULL;
}
static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf, void *dbuf,
				      size_t size)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*buf = tmp;
		return 1;
	}
	tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ? 0 : -ENOMEM;
}
static void __fw_free_buf(struct kref *ref)
	__releases(&fwc->lock)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;

		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		vfree(buf->pages);
	} else
#endif
	if (!buf->allocated_size)
		kfree(buf->data);
	kfree_const(buf->fw_id);
	kfree(buf);
}
static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;
	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}
/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};
/*
 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
 * kernel command line, because firmware_class is usually built into the
 * kernel rather than built as a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
static void fw_finish_direct_load(struct device *device,
				  struct firmware_buf *buf)
{
	mutex_lock(&fw_lock);
	set_bit(FW_STATUS_DONE, &buf->status);
	complete_all(&buf->completion);
	mutex_unlock(&fw_lock);
}
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
	loff_t size;
	int i, len;
	int rc = -ENOENT;
	char *path;
	enum kernel_read_file_id id = READING_FIRMWARE;
	size_t msize = INT_MAX;

	/* Already populated data member means we're loading into a buffer */
	if (buf->data) {
		id = READING_FIRMWARE_PREALLOC_BUFFER;
		msize = buf->allocated_size;
	}

	path = __getname();
	if (!path)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		len = snprintf(path, PATH_MAX, "%s/%s",
			       fw_path[i], buf->fw_id);
		if (len >= PATH_MAX) {
			rc = -ENAMETOOLONG;
			break;
		}

		buf->size = 0;
		rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
						id);
		if (rc) {
			if (rc == -ENOENT)
				dev_dbg(device, "loading %s failed with error %d\n",
					path, rc);
			else
				dev_warn(device, "loading %s failed with error %d\n",
					 path, rc);
			continue;
		}
		dev_dbg(device, "direct-loading %s\n", buf->fw_id);
		buf->size = size;
		fw_finish_direct_load(device, buf);
		break;
	}
	__putname(path);

	return rc;
}
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	fw_free_buf(fw->priv);
}
/* store the pages buffer info into the firmware struct from buf */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
	fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = buf->pages;
#endif
	fw->size = buf->size;
	fw->data = buf->data;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);
}
#ifdef CONFIG_PM_SLEEP
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
	kfree_const(fwn->name);
}
static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}

static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}
/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
			   GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;
	fwn->name = kstrdup_const(name, GFP_KERNEL);
	if (!fwn->name) {
		devres_free(fwn);
		return -ENOMEM;
	}

	fwn->magic = (unsigned long)&fw_cache;
	devres_add(dev, fwn);

	return 0;
}
#else
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif
/*
 * user-mode helper code
 */
#ifdef CONFIG_FW_LOADER_USER_HELPER
struct firmware_priv {
	bool nowait;
	struct device dev;
	struct firmware_buf *buf;
	struct firmware *fw;
};

static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}
static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (test_bit(FW_STATUS_DONE, &buf->status))
		return;

	list_del_init(&buf->pending_list);
	set_bit(FW_STATUS_ABORT, &buf->status);
	complete_all(&buf->completion);
}

static void fw_load_abort(struct firmware_priv *fw_priv)
{
	struct firmware_buf *buf = fw_priv->buf;

	__fw_load_abort(buf);

	/* avoid user action after loading abort */
	fw_priv->buf = NULL;
}
#define is_fw_load_aborted(buf) \
	test_bit(FW_STATUS_ABORT, &(buf)->status)

static LIST_HEAD(pending_fw_head);
/* reboot notifier to avoid deadlock with usermode_lock */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	mutex_lock(&fw_lock);
	while (!list_empty(&pending_fw_head))
		__fw_load_abort(list_first_entry(&pending_fw_head,
						 struct firmware_buf,
						 pending_list));
	mutex_unlock(&fw_lock);
	return NOTIFY_DONE;
}

static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}
/**
 * firmware_timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware.  Once
 * this expires an error will be returned to the driver and no
 * firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	loading_timeout = simple_strtol(buf, NULL, 10);
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}
static struct class_attribute firmware_class_attrs[] = {
	__ATTR_RW(timeout),
	__ATTR_NULL
};

static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);
}
static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
		return -ENOMEM;

	return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		err = do_firmware_uevent(fw_priv, env);
	mutex_unlock(&fw_lock);

	return err;
}
static struct class firmware_class = {
	.name		= "firmware",
	.class_attrs	= firmware_class_attrs,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};
static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif

/* one pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	if (!buf->is_paged_buf)
		return 0;

	vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_buf = fw_priv->buf;
	if (!fw_buf)
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			vfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			set_bit(FW_STATUS_LOADING, &fw_buf->status);
		}
		break;
	case 0:
		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
			int rc;

			set_bit(FW_STATUS_DONE, &fw_buf->status);
			clear_bit(FW_STATUS_LOADING, &fw_buf->status);

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = fw_map_pages_buf(fw_buf);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_post_read_file(NULL,
						fw_buf->data, fw_buf->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_buf->pending_list);
			if (rc) {
				set_bit(FW_STATUS_ABORT, &fw_buf->status);
				written = rc;
			}
			complete_all(&fw_buf->completion);
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
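/*
 * Illustrative user-space fallback sequence driving the 'loading' and
 * 'data' attributes above (sketch; $DEVPATH comes from the firmware
 * uevent and depends on the request):
 *
 *	echo 1 > /sys/$DEVPATH/loading
 *	cat /path/to/firmware.bin > /sys/$DEVPATH/data
 *	echo 0 > /sys/$DEVPATH/loading
 */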
static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
			    loff_t offset, size_t count, bool read)
{
	if (read)
		memcpy(buffer, buf->data + offset, count);
	else
		memcpy(buf->data + offset, buffer, count);
}
static void firmware_rw(struct firmware_buf *buf, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	if (buf->data)
		firmware_rw_buf(buf, buffer, offset, count, true);
	else
		firmware_rw(buf, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = PAGE_ALIGN(min_size) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (buf->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = vmalloc(new_array_size * sizeof(void *));
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		vfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}

	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	if (buf->data) {
		if (offset + count > buf->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_buf(buf, buffer, offset, count, false);
		retval = count;
	} else {
		retval = fw_realloc_buffer(fw_priv, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(buf, buffer, offset, count, false);
	}

	buf->size = max_t(size_t, offset + count, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, unsigned int opt_flags)
{
	struct firmware_priv *fw_priv;
	struct device *f_dev;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
	if (!fw_priv) {
		fw_priv = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_priv->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_priv->fw = firmware;
	f_dev = &fw_priv->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_priv;
}
/* load a firmware via user helper */
static int _request_firmware_load(struct firmware_priv *fw_priv,
				  unsigned int opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;

	/* fall back on userspace loading */
	if (!buf->data)
		buf->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&buf->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		buf->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	timeout = wait_for_completion_interruptible_timeout(&buf->completion,
							    timeout);
	if (timeout == -ERESTARTSYS || !timeout) {
		retval = timeout;
		mutex_lock(&fw_lock);
		fw_load_abort(fw_priv);
		mutex_unlock(&fw_lock);
	} else if (timeout > 0) {
		retval = 0;
	}

	if (is_fw_load_aborted(buf))
		retval = -EAGAIN;
	else if (buf->is_paged_buf && !buf->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    unsigned int opt_flags, long timeout)
{
	struct firmware_priv *fw_priv;

	fw_priv = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_priv))
		return PTR_ERR(fw_priv);

	fw_priv->buf = firmware->priv;
	return _request_firmware_load(fw_priv, opt_flags, timeout);
}
#ifdef CONFIG_PM_SLEEP
/* kill pending requests without uevent to avoid blocking suspend */
static void kill_requests_without_uevent(void)
{
	struct firmware_buf *buf;
	struct firmware_buf *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
		if (!buf->need_uevent)
			__fw_load_abort(buf);
	}
	mutex_unlock(&fw_lock);
}
#endif

#else /* CONFIG_FW_LOADER_USER_HELPER */

static int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
			 struct device *device, unsigned int opt_flags,
			 long timeout)
{
	return -ENOENT;
}

/* No abort during direct loading */
#define is_fw_load_aborted(buf) false

#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif

#endif /* CONFIG_FW_LOADER_USER_HELPER */
/* wait until the shared firmware_buf becomes ready (or error) */
static int sync_cached_firmware_buf(struct firmware_buf *buf)
{
	int ret = 0;

	mutex_lock(&fw_lock);
	while (!test_bit(FW_STATUS_DONE, &buf->status)) {
		if (is_fw_load_aborted(buf)) {
			ret = -ENOENT;
			break;
		}
		mutex_unlock(&fw_lock);
		ret = wait_for_completion_interruptible(&buf->completion);
		mutex_lock(&fw_lock);
	}
	mutex_unlock(&fw_lock);
	return ret;
}
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device, void *dbuf, size_t size)
{
	struct firmware *firmware;
	struct firmware_buf *buf;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
		dev_dbg(device, "using built-in %s\n", name);
		return 0; /* assigned */
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);

	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = buf;

	if (ret > 0) {
		ret = sync_cached_firmware_buf(buf);
		if (!ret) {
			fw_set_page_data(buf, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
static int assign_firmware_buf(struct firmware *fw, struct device *device,
			       unsigned int opt_flags)
{
	struct firmware_buf *buf = fw->priv;

	mutex_lock(&fw_lock);
	if (!buf->size || is_fw_load_aborted(buf)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * the device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	/* don't cache firmware handled without uevent */
	if (device && (opt_flags & FW_OPT_UEVENT) &&
	    !(opt_flags & FW_OPT_NOCACHE))
		fw_add_devm_name(device, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (!(opt_flags & FW_OPT_NOCACHE) &&
	    buf->fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, void *buf, size_t size,
		  unsigned int opt_flags)
{
	struct firmware *fw = NULL;
	long timeout;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	if (!name || name[0] == '\0') {
		ret = -EINVAL;
		goto out;
	}

	ret = _request_firmware_prepare(&fw, name, device, buf, size);
	if (ret <= 0) /* error or already assigned */
		goto out;

	ret = 0;
	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			ret = -EBUSY;
			goto out;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			goto out;
		}
	}

	ret = fw_get_filesystem_firmware(device, fw->priv);
	if (ret) {
		if (!(opt_flags & FW_OPT_NO_WARN))
			dev_warn(device,
				 "Direct firmware load for %s failed with error %d\n",
				 name, ret);
		if (opt_flags & FW_OPT_USERHELPER) {
			dev_warn(device, "Falling back to user helper\n");
			ret = fw_load_from_user_helper(fw, name, device,
						       opt_flags, timeout);
		}
	}

	if (!ret)
		ret = assign_firmware_buf(fw, device, opt_flags);

	usermodehelper_read_unlock();

 out:
	if (ret < 0) {
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 *      @firmware_p will be used to return a firmware image by the name
 *      of @name for device @device.
 *
 *      Should be called from user context where sleeping is allowed.
 *
 *      @name will be used as $FIRMWARE in the uevent environment and
 *      should be distinctive enough not to be confused with any other
 *      firmware image for this or any other device.
 *
 *	Caller must hold the reference count of @device.
 *
 *	The function can be called safely inside device's suspend and
 *	resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_FALLBACK);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
/**
 * request_firmware_direct: - load firmware directly without usermode helper
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 * This function works pretty much like request_firmware(), but this doesn't
 * fall back to usermode helper even if the firmware couldn't be loaded
 * directly from fs.  Hence it's useful for loading optional firmwares, which
 * aren't always present, without extra long timeouts of udev.
 **/
int request_firmware_direct(const struct firmware **firmware_p,
			    const char *name, struct device *device)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, NULL, 0,
				FW_OPT_UEVENT | FW_OPT_NO_WARN);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL_GPL(request_firmware_direct);
/**
 * request_firmware_into_buf - load firmware into a previously allocated buffer
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded and DMA region allocated
 * @buf: address of buffer to load firmware into
 * @size: size of buffer
 *
 * This function works pretty much like request_firmware(), but it doesn't
 * allocate a buffer to hold the firmware data. Instead, the firmware
 * is loaded directly into the buffer pointed to by @buf and the @firmware_p
 * data member is pointed at @buf.
 *
 * This function doesn't cache firmware either.
 */
int
request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
			  struct device *device, void *buf, size_t size)
{
	int ret;

	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, buf, size,
				FW_OPT_UEVENT | FW_OPT_FALLBACK |
				FW_OPT_NOCACHE);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware_into_buf);
/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		if (!fw_is_builtin_firmware(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}
EXPORT_SYMBOL(release_firmware);
struct firmware_work {
	struct work_struct work;
	struct module *module;
	const char *name;
	struct device *device;
	void *context;
	void (*cont)(const struct firmware *fw, void *context);
	unsigned int opt_flags;
};
static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
			  fw_work->opt_flags);
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree_const(fw_work->name);
	kfree(fw_work);
}
/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 *	Caller must hold the reference count of @device.
 *
 *	Asynchronous variant of request_firmware() for user contexts:
 *		- sleep for as small periods as possible since it may
 *		  increase kernel boot time of built-in device drivers
 *		  requesting firmware in their ->probe() methods, if
 *		  @gfp is GFP_KERNEL.
 *
 *		- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = kstrdup_const(name, gfp);
	if (!fw_work->name) {
		kfree(fw_work);
		return -ENOMEM;
	}
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->opt_flags = FW_OPT_NOWAIT | FW_OPT_FALLBACK |
		(uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);

	if (!try_module_get(module)) {
		kfree_const(fw_work->name);
		kfree(fw_work);
		return -EFAULT;
	}

	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);
#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when the
 * system isn't ready for them to request the firmware image from userspace.
 * Once it returns successfully, a driver can use request_firmware or its
 * nowait version to get the cached firmware without any interaction
 * with userspace.
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 */
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
static struct firmware_buf *fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	spin_unlock(&fwc->lock);

	return tmp;
}
/**
 * uncache_firmware - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 */
static int uncache_firmware(const char *fw_name)
{
	struct firmware_buf *buf;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
		return 0;

	buf = fw_lookup_buf(fw_name);
	if (buf) {
		fw_free_buf(buf);
		return 0;
	}

	return -EINVAL;
}
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
	struct fw_cache_entry *fce;

	fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
	if (!fce)
		goto exit;

	fce->name = kstrdup_const(name, GFP_ATOMIC);
	if (!fce->name) {
		kfree(fce);
		fce = NULL;
	}
exit:
	return fce;
}
static int __fw_entry_found(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	list_for_each_entry(fce, &fwc->fw_names, list) {
		if (!strcmp(fce->name, name))
			return 1;
	}
	return 0;
}
static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}
static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree_const(fce->name);
	kfree(fce);
}
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}
/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}

static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;
	return (fwn->magic == (unsigned long)match_data);
}
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If a device has called request_firmware or its nowait version
 * successfully before, the firmware names are recorded in the
 * device's devres list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmware images for the device,
 * and the device driver can then load its firmware easily at a
 * time when the system is not ready to complete loading from
 * userspace.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * use a small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, and the system is ready for
	 * completing firmware loading now. The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmware images which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}

/**
 * device_uncache_fw_images_delay - uncache devices' firmware after a delay
 * @delay: number of milliseconds to wait before uncaching device firmware
 *
 * uncache all firmware images which have been cached successfully
 * by device_cache_fw_images, after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
			   msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		kill_requests_without_uevent();
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case system sleep failed and syscore_suspend was
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
#else
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
#endif
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}
static int __init firmware_class_init(void)
{
	fw_cache_init();
#ifdef CONFIG_FW_LOADER_USER_HELPER
	register_reboot_notifier(&fw_shutdown_nb);
	return class_register(&firmware_class);
#else
	return 0;
#endif
}

static void __exit firmware_class_exit(void)
{
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
	unregister_reboot_notifier(&fw_shutdown_nb);
	class_unregister(&firmware_class);
#endif
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);