/*
 * firmware_class.c - Multi purpose firmware loading support
 *
 * Copyright (c) 2003 Manuel Estrada Sainz
 *
 * Please see Documentation/firmware_class/ for more information.
 *
 */

#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/highmem.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/reboot.h>

#include <generated/utsrelease.h>

MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
MODULE_LICENSE("GPL");
/* Builtin firmware support */

#ifdef CONFIG_FW_LOADER

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;
			return true;
		}
	}

	return false;
}

static bool fw_is_builtin_firmware(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}

#else /* Module case - no builtin firmware support */

static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
{
	return false;
}

static inline bool fw_is_builtin_firmware(const struct firmware *fw)
{
	return false;
}
#endif
static int loading_timeout = 60;	/* In seconds */

static inline long firmware_loading_timeout(void)
{
	return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}
struct firmware_cache {
	/* firmware_buf instance will be added into the below list */
	spinlock_t lock;
	struct list_head head;
	int state;

#ifdef CONFIG_PM_SLEEP
	/*
	 * Names of firmware images which have been cached successfully
	 * will be added into the below list so that device uncache
	 * helper can trace which firmware images have been cached
	 * before.
	 */
	spinlock_t name_lock;
	struct list_head fw_names;

	struct delayed_work work;

	struct notifier_block pm_notify;
#endif
};

struct firmware_buf {
	struct kref ref;
	struct list_head list;
	struct completion completion;
	struct firmware_cache *fwc;
	unsigned long status;
	void *data;
	size_t size;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	bool is_paged_buf;
	bool need_uevent;
	struct page **pages;
	int nr_pages;
	int page_array_size;
	struct list_head pending_list;
#endif
	char fw_id[];
};

struct fw_cache_entry {
	struct list_head list;
	char name[];
};

struct fw_name_devm {
	unsigned long magic;
	char name[];
};

#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
#define	FW_LOADER_NO_CACHE	0
#define	FW_LOADER_START_CACHE	1

static int fw_cache_piggyback_on_request(const char *name);

/* fw_lock could be moved to 'struct firmware_priv' but since it is just
 * guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);

static struct firmware_cache fw_cache;
static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
					      struct firmware_cache *fwc)
{
	struct firmware_buf *buf;

	buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1, GFP_ATOMIC);
	if (!buf)
		return buf;

	kref_init(&buf->ref);
	strcpy(buf->fw_id, fw_name);
	buf->fwc = fwc;
	init_completion(&buf->completion);
#ifdef CONFIG_FW_LOADER_USER_HELPER
	INIT_LIST_HEAD(&buf->pending_list);
#endif

	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);

	return buf;
}

static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	list_for_each_entry(tmp, &fwc->head, list)
		if (!strcmp(tmp->fw_id, fw_name))
			return tmp;
	return NULL;
}

static int fw_lookup_and_allocate_buf(const char *fw_name,
				      struct firmware_cache *fwc,
				      struct firmware_buf **buf)
{
	struct firmware_buf *tmp;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	if (tmp) {
		kref_get(&tmp->ref);
		spin_unlock(&fwc->lock);
		*buf = tmp;
		return 1;
	}
	tmp = __allocate_fw_buf(fw_name, fwc);
	if (tmp)
		list_add(&tmp->list, &fwc->head);
	spin_unlock(&fwc->lock);

	*buf = tmp;

	return tmp ? 0 : -ENOMEM;
}
static void __fw_free_buf(struct kref *ref)
{
	struct firmware_buf *buf = to_fwbuf(ref);
	struct firmware_cache *fwc = buf->fwc;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);

	list_del(&buf->list);
	spin_unlock(&fwc->lock);

#ifdef CONFIG_FW_LOADER_USER_HELPER
	if (buf->is_paged_buf) {
		int i;

		vunmap(buf->data);
		for (i = 0; i < buf->nr_pages; i++)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
	} else
#endif
		vfree(buf->data);
	kfree(buf);
}

static void fw_free_buf(struct firmware_buf *buf)
{
	struct firmware_cache *fwc = buf->fwc;

	spin_lock(&fwc->lock);
	if (!kref_put(&buf->ref, __fw_free_buf))
		spin_unlock(&fwc->lock);
}
/* direct firmware loading support */
static char fw_path_para[256];
static const char * const fw_path[] = {
	fw_path_para,
	"/lib/firmware/updates/" UTS_RELEASE,
	"/lib/firmware/updates",
	"/lib/firmware/" UTS_RELEASE,
	"/lib/firmware"
};

/*
 * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
 * kernel command line, because firmware_class is usually built into the
 * kernel rather than being a module.
 */
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
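/*
 * Illustrative usage (a sketch, not part of the original file): booting with
 *
 *	firmware_class.path=/mnt/flash/firmware
 *
 * makes that directory be searched before the default paths above; with the
 * 0644 permission the value can also be read and updated at runtime through
 * /sys/module/firmware_class/parameters/path.
 */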
/* Don't inline this: 'struct kstat' is biggish */
static noinline_for_stack long fw_file_size(struct file *file)
{
	struct kstat st;

	if (vfs_getattr(&file->f_path, &st))
		return -1;
	if (!S_ISREG(st.mode))
		return -1;
	if (st.size != (long)st.size)
		return -1;
	return st.size;
}

static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
{
	long size;
	char *buf;

	size = fw_file_size(file);
	if (size <= 0)
		return false;
	buf = vmalloc(size);
	if (!buf)
		return false;
	if (kernel_read(file, 0, buf, size) != size) {
		vfree(buf);
		return false;
	}
	fw_buf->data = buf;
	fw_buf->size = size;
	return true;
}
static bool fw_get_filesystem_firmware(struct device *device,
				       struct firmware_buf *buf)
{
	int i;
	bool success = false;
	char *path = __getname();

	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
		struct file *file;

		/* skip the unset customized path */
		if (!fw_path[i][0])
			continue;

		snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);

		file = filp_open(path, O_RDONLY, 0);
		if (IS_ERR(file))
			continue;
		success = fw_read_file_contents(file, buf);
		fput(file);
		if (success)
			break;
	}
	__putname(path);

	if (success) {
		dev_dbg(device, "firmware: direct-loading firmware %s\n",
			buf->fw_id);
		mutex_lock(&fw_lock);
		set_bit(FW_STATUS_DONE, &buf->status);
		complete_all(&buf->completion);
		mutex_unlock(&fw_lock);
	}

	return success;
}
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
	/* Loaded directly? */
	if (!fw->priv) {
		vfree(fw->data);
		return;
	}
	fw_free_buf(fw->priv);
}

/* store the page buffer info from buf into the firmware struct */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
	fw->priv = buf;
#ifdef CONFIG_FW_LOADER_USER_HELPER
	fw->pages = buf->pages;
#endif
	fw->size = buf->size;
	fw->data = buf->data;

	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
		 __func__, buf->fw_id, buf, buf->data,
		 (unsigned int)buf->size);
}
#ifdef CONFIG_PM_SLEEP
static void fw_name_devm_release(struct device *dev, void *res)
{
	struct fw_name_devm *fwn = res;

	if (fwn->magic == (unsigned long)&fw_cache)
		pr_debug("%s: fw_name-%s devm-%p released\n",
			 __func__, fwn->name, res);
}
static int fw_devm_match(struct device *dev, void *res,
			 void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)&fw_cache) &&
		!strcmp(fwn->name, match_data);
}

static struct fw_name_devm *fw_find_devm_name(struct device *dev,
					      const char *name)
{
	struct fw_name_devm *fwn;

	fwn = devres_find(dev, fw_name_devm_release,
			  fw_devm_match, (void *)name);
	return fwn;
}

/* add firmware name into devres list */
static int fw_add_devm_name(struct device *dev, const char *name)
{
	struct fw_name_devm *fwn;

	fwn = fw_find_devm_name(dev, name);
	if (fwn)
		return 1;

	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
			   strlen(name) + 1, GFP_KERNEL);
	if (!fwn)
		return -ENOMEM;

	fwn->magic = (unsigned long)&fw_cache;
	strcpy(fwn->name, name);
	devres_add(dev, fwn);

	return 0;
}
#else
static int fw_add_devm_name(struct device *dev, const char *name)
{
	return 0;
}
#endif
/*
 * user-mode helper code
 */
#ifdef CONFIG_FW_LOADER_USER_HELPER
struct firmware_priv {
	struct delayed_work timeout_work;
	bool nowait;
	struct device dev;
	struct firmware_buf *buf;
	struct firmware *fw;
};

static struct firmware_priv *to_firmware_priv(struct device *dev)
{
	return container_of(dev, struct firmware_priv, dev);
}
static void __fw_load_abort(struct firmware_buf *buf)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (test_bit(FW_STATUS_DONE, &buf->status))
		return;

	list_del_init(&buf->pending_list);
	set_bit(FW_STATUS_ABORT, &buf->status);
	complete_all(&buf->completion);
}

static void fw_load_abort(struct firmware_priv *fw_priv)
{
	struct firmware_buf *buf = fw_priv->buf;

	__fw_load_abort(buf);

	/* avoid user action after loading abort */
	fw_priv->buf = NULL;
}

#define is_fw_load_aborted(buf)	\
	test_bit(FW_STATUS_ABORT, &(buf)->status)
static LIST_HEAD(pending_fw_head);

/* reboot notifier for avoiding deadlock with usermodehelper_lock */
static int fw_shutdown_notify(struct notifier_block *unused1,
			      unsigned long unused2, void *unused3)
{
	mutex_lock(&fw_lock);
	while (!list_empty(&pending_fw_head))
		__fw_load_abort(list_first_entry(&pending_fw_head,
						 struct firmware_buf,
						 pending_list));
	mutex_unlock(&fw_lock);

	return NOTIFY_DONE;
}

static struct notifier_block fw_shutdown_nb = {
	.notifier_call = fw_shutdown_notify,
};
static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", loading_timeout);
}

/**
 * firmware_timeout_store - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 *	Sets the number of seconds to wait for the firmware.  Once
 *	this expires an error will be returned to the driver and no
 *	firmware will be provided.
 *
 *	Note: zero means 'wait forever'.
 **/
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	loading_timeout = simple_strtol(buf, NULL, 10);
	if (loading_timeout < 0)
		loading_timeout = 0;

	return count;
}
static struct class_attribute firmware_class_attrs[] = {
	__ATTR_RW(timeout),
	__ATTR_NULL
};
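/*
 * Illustrative usage (a sketch, not part of the original file): the timeout
 * above is a class attribute, so user space can shorten the wait with e.g.
 *
 *	echo 10 > /sys/class/firmware/timeout
 */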
static void fw_dev_release(struct device *dev)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	kfree(fw_priv);

	module_put(THIS_MODULE);
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);

	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_priv->nowait))
		return -ENOMEM;

	return 0;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_attrs	= firmware_class_attrs,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};
static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_priv->buf)
		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
/* a pages buffer should be mapped/unmapped only once */
static int fw_map_pages_buf(struct firmware_buf *buf)
{
	if (!buf->is_paged_buf)
		return 0;

	if (buf->data)
		vunmap(buf->data);
	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
	if (!buf->data)
		return -ENOMEM;
	return 0;
}
/**
 * firmware_loading_store - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 *	The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 *
 *	(An illustrative user-space sequence is sketched after the
 *	'loading' attribute definition below.)
 **/
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *fw_buf;
	int loading = simple_strtol(buf, NULL, 10);
	int i;

	mutex_lock(&fw_lock);
	fw_buf = fw_priv->buf;
	if (!fw_buf)
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
			for (i = 0; i < fw_buf->nr_pages; i++)
				__free_page(fw_buf->pages[i]);
			kfree(fw_buf->pages);
			fw_buf->pages = NULL;
			fw_buf->page_array_size = 0;
			fw_buf->nr_pages = 0;
			set_bit(FW_STATUS_LOADING, &fw_buf->status);
		}
		break;
	case 0:
		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
			set_bit(FW_STATUS_DONE, &fw_buf->status);
			clear_bit(FW_STATUS_LOADING, &fw_buf->status);

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			fw_map_pages_buf(fw_buf);
			list_del_init(&fw_buf->pending_list);
			complete_all(&fw_buf->completion);
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_priv);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return count;
}
static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
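/*
 * Illustrative user-space sequence (a sketch, not part of the original
 * file): the helper that answers the uevent, typically udev, writes to the
 * 'loading' and 'data' attributes of the firmware device created below,
 * where $DEVPATH and $FIRMWARE come from the uevent environment:
 *
 *	echo 1 > /sys/$DEVPATH/loading
 *	cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *	echo 0 > /sys/$DEVPATH/loading
 */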
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > buf->size) {
		ret_count = 0;
		goto out;
	}
	if (count > buf->size - offset)
		count = buf->size - offset;

	ret_count = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE-1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(buffer, page_data + page_ofs, page_cnt);

		kunmap(buf->pages[page_nr]);

		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
out:
	mutex_unlock(&fw_lock);
	return ret_count;
}
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
	struct firmware_buf *buf = fw_priv->buf;
	int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;

	/* If the array of pages is too small, grow it... */
	if (buf->page_array_size < pages_needed) {
		int new_array_size = max(pages_needed,
					 buf->page_array_size * 2);
		struct page **new_pages;

		new_pages = kmalloc(new_array_size * sizeof(void *),
				    GFP_KERNEL);
		if (!new_pages) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		memcpy(new_pages, buf->pages,
		       buf->page_array_size * sizeof(void *));
		memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
		       (new_array_size - buf->page_array_size));
		kfree(buf->pages);
		buf->pages = new_pages;
		buf->page_array_size = new_array_size;
	}

	while (buf->nr_pages < pages_needed) {
		buf->pages[buf->nr_pages] =
			alloc_page(GFP_KERNEL | __GFP_HIGHMEM);

		if (!buf->pages[buf->nr_pages]) {
			fw_load_abort(fw_priv);
			return -ENOMEM;
		}
		buf->nr_pages++;
	}
	return 0;
}
/**
 * firmware_data_write - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 *	Data written to the 'data' attribute will be later handed to
 *	the driver as a firmware image.
 **/
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct firmware_priv *fw_priv = to_firmware_priv(dev);
	struct firmware_buf *buf;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	buf = fw_priv->buf;
	if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
		retval = -ENODEV;
		goto out;
	}

	retval = fw_realloc_buffer(fw_priv, offset + count);
	if (retval)
		goto out;

	retval = count;

	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(buf->pages[page_nr]);

		memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(buf->pages[page_nr]);

		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}

	buf->size = max_t(size_t, offset, buf->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}
static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};
static void firmware_class_timeout_work(struct work_struct *work)
{
	struct firmware_priv *fw_priv = container_of(work,
			struct firmware_priv, timeout_work.work);

	mutex_lock(&fw_lock);
	fw_load_abort(fw_priv);
	mutex_unlock(&fw_lock);
}
static struct firmware_priv *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, bool uevent, bool nowait)
{
	struct firmware_priv *fw_priv;
	struct device *f_dev;

	fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
	if (!fw_priv) {
		dev_err(device, "%s: kmalloc failed\n", __func__);
		fw_priv = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_priv->nowait = nowait;
	fw_priv->fw = firmware;
	INIT_DELAYED_WORK(&fw_priv->timeout_work,
		firmware_class_timeout_work);

	f_dev = &fw_priv->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
exit:
	return fw_priv;
}
/* load a firmware via user helper */
static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
				  long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_priv->dev;
	struct firmware_buf *buf = fw_priv->buf;

	/* fall back on userspace loading */
	buf->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	retval = device_create_bin_file(f_dev, &firmware_attr_data);
	if (retval) {
		dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
		goto err_del_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&buf->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	retval = device_create_file(f_dev, &dev_attr_loading);
	if (retval) {
		mutex_lock(&fw_lock);
		list_del_init(&buf->pending_list);
		mutex_unlock(&fw_lock);
		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
		goto err_del_bin_attr;
	}

	if (uevent) {
		buf->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
		if (timeout != MAX_SCHEDULE_TIMEOUT)
			schedule_delayed_work(&fw_priv->timeout_work, timeout);

		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
	}

	wait_for_completion(&buf->completion);

	cancel_delayed_work_sync(&fw_priv->timeout_work);

	device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
	device_remove_bin_file(f_dev, &firmware_attr_data);
err_del_dev:
	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    bool uevent, bool nowait, long timeout)
{
	struct firmware_priv *fw_priv;

	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
	if (IS_ERR(fw_priv))
		return PTR_ERR(fw_priv);

	fw_priv->buf = firmware->priv;
	return _request_firmware_load(fw_priv, uevent, timeout);
}
#ifdef CONFIG_PM_SLEEP
/* kill pending requests without uevent to avoid blocking suspend */
static void kill_requests_without_uevent(void)
{
	struct firmware_buf *buf;
	struct firmware_buf *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(buf, next, &pending_fw_head, pending_list) {
		if (!buf->need_uevent)
			__fw_load_abort(buf);
	}
	mutex_unlock(&fw_lock);
}
#endif
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int
fw_load_from_user_helper(struct firmware *firmware, const char *name,
			 struct device *device, bool uevent, bool nowait,
			 long timeout)
{
	return -ENOENT;
}

/* No abort during direct loading */
#define is_fw_load_aborted(buf) false

#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif

#endif /* CONFIG_FW_LOADER_USER_HELPER */
/* wait until the shared firmware_buf becomes ready (or error) */
static int sync_cached_firmware_buf(struct firmware_buf *buf)
{
	int ret = 0;

	mutex_lock(&fw_lock);
	while (!test_bit(FW_STATUS_DONE, &buf->status)) {
		if (is_fw_load_aborted(buf)) {
			ret = -ENOENT;
			break;
		}
		mutex_unlock(&fw_lock);
		wait_for_completion(&buf->completion);
		mutex_lock(&fw_lock);
	}
	mutex_unlock(&fw_lock);
	return ret;
}
/* prepare firmware and firmware_buf structs;
 * return 0 if a firmware is already assigned, 1 if need to load one,
 * or a negative error code
 */
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
			  struct device *device)
{
	struct firmware *firmware;
	struct firmware_buf *buf;
	int ret;

	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
	if (!firmware) {
		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
			__func__);
		return -ENOMEM;
	}

	if (fw_get_builtin_firmware(firmware, name)) {
		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
		return 0; /* assigned */
	}

	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);

	/*
	 * bind with 'buf' now to avoid warning in failure path
	 * of requesting firmware.
	 */
	firmware->priv = buf;

	if (ret > 0) {
		ret = sync_cached_firmware_buf(buf);
		if (!ret) {
			fw_set_page_data(buf, firmware);
			return 0; /* assigned */
		}
	}

	if (ret < 0)
		return ret;
	return 1; /* need to load */
}
static int assign_firmware_buf(struct firmware *fw, struct device *device,
			       bool skip_cache)
{
	struct firmware_buf *buf = fw->priv;

	mutex_lock(&fw_lock);
	if (!buf->size || is_fw_load_aborted(buf)) {
		mutex_unlock(&fw_lock);
		return -ENOENT;
	}

	/*
	 * add firmware name into devres list so that we can auto cache
	 * and uncache firmware for device.
	 *
	 * device may have been deleted already, but the problem
	 * should be fixed in devres or driver core.
	 */
	if (device && !skip_cache)
		fw_add_devm_name(device, buf->fw_id);

	/*
	 * After caching firmware image is started, let it piggyback
	 * on request firmware.
	 */
	if (buf->fwc->state == FW_LOADER_START_CACHE) {
		if (fw_cache_piggyback_on_request(buf->fw_id))
			kref_get(&buf->ref);
	}

	/* pass the pages buffer to driver at the last minute */
	fw_set_page_data(buf, fw);
	mutex_unlock(&fw_lock);
	return 0;
}
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
		  struct device *device, bool uevent, bool nowait)
{
	struct firmware *fw;
	long timeout;
	int ret;

	if (!firmware_p)
		return -EINVAL;

	ret = _request_firmware_prepare(&fw, name, device);
	if (ret <= 0) /* error or already assigned */
		goto out;

	ret = 0;
	timeout = firmware_loading_timeout();
	if (nowait) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			ret = -EBUSY;
			goto out;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			goto out;
		}
	}

	if (!fw_get_filesystem_firmware(device, fw->priv))
		ret = fw_load_from_user_helper(fw, name, device,
					       uevent, nowait, timeout);

	/* don't cache firmware handled without uevent */
	if (!ret)
		ret = assign_firmware_buf(fw, device, !uevent);

	usermodehelper_read_unlock();

 out:
	if (ret < 0) {
		release_firmware(fw);
		fw = NULL;
	}

	*firmware_p = fw;
	return ret;
}
/**
 * request_firmware: - send firmware request and wait for it
 * @firmware_p: pointer to firmware image
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 *
 *      @firmware_p will be used to return a firmware image by the name
 *      of @name for device @device.
 *
 *      Should be called from user context where sleeping is allowed.
 *
 *      @name will be used as $FIRMWARE in the uevent environment and
 *      should be distinctive enough not to be confused with any other
 *      firmware image for this or any other device.
 *
 *	Caller must hold the reference count of @device.
 *
 *	The function can be called safely inside device's suspend and
 *	resume callback.
 **/
int
request_firmware(const struct firmware **firmware_p, const char *name,
		 struct device *device)
{
	int ret;

	/* Need to pin this module until return */
	__module_get(THIS_MODULE);
	ret = _request_firmware(firmware_p, name, device, true, false);
	module_put(THIS_MODULE);
	return ret;
}
EXPORT_SYMBOL(request_firmware);
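/*
 * Illustrative driver-side usage (a sketch, not part of the original file;
 * the device, image name and upload helper are hypothetical):
 *
 *	const struct firmware *fw;
 *	int err;
 *
 *	err = request_firmware(&fw, "example/board.bin", &pdev->dev);
 *	if (err)
 *		return err;
 *	err = example_hw_upload(priv, fw->data, fw->size);
 *	release_firmware(fw);
 *	return err;
 */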
/**
 * release_firmware: - release the resource associated with a firmware image
 * @fw: firmware resource to release
 **/
void release_firmware(const struct firmware *fw)
{
	if (fw) {
		if (!fw_is_builtin_firmware(fw))
			firmware_free_data(fw);
		kfree(fw);
	}
}
EXPORT_SYMBOL(release_firmware);
struct firmware_work {
	struct work_struct work;
	struct module *module;
	const char *name;
	struct device *device;
	void *context;
	void (*cont)(const struct firmware *fw, void *context);
	bool uevent;
};

static void request_firmware_work_func(struct work_struct *work)
{
	struct firmware_work *fw_work;
	const struct firmware *fw;

	fw_work = container_of(work, struct firmware_work, work);

	_request_firmware(&fw, fw_work->name, fw_work->device,
			  fw_work->uevent, true);
	fw_work->cont(fw, fw_work->context);
	put_device(fw_work->device); /* taken in request_firmware_nowait() */

	module_put(fw_work->module);
	kfree(fw_work);
}
/**
 * request_firmware_nowait - asynchronous version of request_firmware
 * @module: module requesting the firmware
 * @uevent: sends uevent to copy the firmware image if this flag
 *	is non-zero else the firmware copy must be done manually.
 * @name: name of firmware file
 * @device: device for which firmware is being loaded
 * @gfp: allocation flags
 * @context: will be passed over to @cont, and
 *	@fw may be %NULL if firmware request fails.
 * @cont: function will be called asynchronously when the firmware
 *	request is over.
 *
 *	Caller must hold the reference count of @device.
 *
 *	Asynchronous variant of request_firmware() for user contexts:
 *		- sleep for as small periods as possible since it may
 *		increase kernel boot time of built-in device drivers
 *		requesting firmware in their ->probe() methods, if
 *		@gfp is GFP_KERNEL.
 *
 *		- can't sleep at all if @gfp is GFP_ATOMIC.
 **/
int
request_firmware_nowait(
	struct module *module, bool uevent,
	const char *name, struct device *device, gfp_t gfp, void *context,
	void (*cont)(const struct firmware *fw, void *context))
{
	struct firmware_work *fw_work;

	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
	if (!fw_work)
		return -ENOMEM;

	fw_work->module = module;
	fw_work->name = name;
	fw_work->device = device;
	fw_work->context = context;
	fw_work->cont = cont;
	fw_work->uevent = uevent;

	if (!try_module_get(module)) {
		kfree(fw_work);
		return -EFAULT;
	}

	get_device(fw_work->device);
	INIT_WORK(&fw_work->work, request_firmware_work_func);
	schedule_work(&fw_work->work);
	return 0;
}
EXPORT_SYMBOL(request_firmware_nowait);
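/*
 * Illustrative asynchronous usage (a sketch, not part of the original file;
 * the callback, private struct and image name are hypothetical):
 *
 *	static void example_fw_ready(const struct firmware *fw, void *context)
 *	{
 *		struct example_priv *priv = context;
 *
 *		if (!fw)
 *			return;	(request failed or timed out)
 *		example_hw_upload(priv, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 *
 *	request_firmware_nowait(THIS_MODULE, true, "example/board.bin",
 *				&pdev->dev, GFP_KERNEL, priv,
 *				example_fw_ready);
 */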
#ifdef CONFIG_PM_SLEEP
static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
/**
 * cache_firmware - cache one firmware image in kernel memory space
 * @fw_name: the firmware image name
 *
 * Cache firmware in kernel memory so that drivers can use it when
 * system isn't ready for them to request firmware image from userspace.
 * Once it returns successfully, driver can use request_firmware or its
 * nowait version to get the cached firmware without any interacting
 * with userspace.
 *
 * Return 0 if the firmware image has been cached successfully
 * Return !0 otherwise
 *
 **/
static int cache_firmware(const char *fw_name)
{
	int ret;
	const struct firmware *fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	ret = request_firmware(&fw, fw_name, NULL);
	if (!ret)
		kfree(fw);

	pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);

	return ret;
}
static struct firmware_buf *fw_lookup_buf(const char *fw_name)
{
	struct firmware_buf *tmp;
	struct firmware_cache *fwc = &fw_cache;

	spin_lock(&fwc->lock);
	tmp = __fw_lookup_buf(fw_name);
	spin_unlock(&fwc->lock);

	return tmp;
}
/**
 * uncache_firmware - remove one cached firmware image
 * @fw_name: the firmware image name
 *
 * Uncache one firmware image which has been cached successfully
 * before.
 *
 * Return 0 if the firmware cache has been removed successfully
 * Return !0 otherwise
 *
 **/
static int uncache_firmware(const char *fw_name)
{
	struct firmware_buf *buf;
	struct firmware fw;

	pr_debug("%s: %s\n", __func__, fw_name);

	if (fw_get_builtin_firmware(&fw, fw_name))
		return 0;

	buf = fw_lookup_buf(fw_name);
	if (buf) {
		fw_free_buf(buf);
		return 0;
	}

	return -EINVAL;
}
*alloc_fw_cache_entry(const char *name
)
1315 struct fw_cache_entry
*fce
;
1317 fce
= kzalloc(sizeof(*fce
) + strlen(name
) + 1, GFP_ATOMIC
);
1321 strcpy(fce
->name
, name
);
1326 static int __fw_entry_found(const char *name
)
1328 struct firmware_cache
*fwc
= &fw_cache
;
1329 struct fw_cache_entry
*fce
;
1331 list_for_each_entry(fce
, &fwc
->fw_names
, list
) {
1332 if (!strcmp(fce
->name
, name
))
static int fw_cache_piggyback_on_request(const char *name)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;
	int ret = 0;

	spin_lock(&fwc->name_lock);
	if (__fw_entry_found(name))
		goto found;

	fce = alloc_fw_cache_entry(name);
	if (fce) {
		ret = 1;
		list_add(&fce->list, &fwc->fw_names);
		pr_debug("%s: fw: %s\n", __func__, name);
	}
found:
	spin_unlock(&fwc->name_lock);
	return ret;
}

static void free_fw_cache_entry(struct fw_cache_entry *fce)
{
	kfree(fce);
}
static void __async_dev_cache_fw_image(void *fw_entry,
				       async_cookie_t cookie)
{
	struct fw_cache_entry *fce = fw_entry;
	struct firmware_cache *fwc = &fw_cache;
	int ret;

	ret = cache_firmware(fce->name);
	if (ret) {
		spin_lock(&fwc->name_lock);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		free_fw_cache_entry(fce);
	}
}
/* called with dev->devres_lock held */
static void dev_create_fw_entry(struct device *dev, void *res,
				void *data)
{
	struct fw_name_devm *fwn = res;
	const char *fw_name = fwn->name;
	struct list_head *head = data;
	struct fw_cache_entry *fce;

	fce = alloc_fw_cache_entry(fw_name);
	if (fce)
		list_add(&fce->list, head);
}

static int devm_name_match(struct device *dev, void *res,
			   void *match_data)
{
	struct fw_name_devm *fwn = res;

	return (fwn->magic == (unsigned long)match_data);
}
static void dev_cache_fw_image(struct device *dev, void *data)
{
	LIST_HEAD(todo);
	struct fw_cache_entry *fce;
	struct fw_cache_entry *fce_next;
	struct firmware_cache *fwc = &fw_cache;

	devres_for_each_res(dev, fw_name_devm_release,
			    devm_name_match, &fw_cache,
			    dev_create_fw_entry, &todo);

	list_for_each_entry_safe(fce, fce_next, &todo, list) {
		list_del(&fce->list);

		spin_lock(&fwc->name_lock);
		/* only one cache entry for one firmware */
		if (!__fw_entry_found(fce->name)) {
			list_add(&fce->list, &fwc->fw_names);
		} else {
			free_fw_cache_entry(fce);
			fce = NULL;
		}
		spin_unlock(&fwc->name_lock);

		if (fce)
			async_schedule_domain(__async_dev_cache_fw_image,
					      (void *)fce,
					      &fw_cache_domain);
	}
}
static void __device_uncache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	struct fw_cache_entry *fce;

	spin_lock(&fwc->name_lock);
	while (!list_empty(&fwc->fw_names)) {
		fce = list_entry(fwc->fw_names.next,
				 struct fw_cache_entry, list);
		list_del(&fce->list);
		spin_unlock(&fwc->name_lock);

		uncache_firmware(fce->name);
		free_fw_cache_entry(fce);

		spin_lock(&fwc->name_lock);
	}
	spin_unlock(&fwc->name_lock);
}
/**
 * device_cache_fw_images - cache devices' firmware
 *
 * If one device called request_firmware or its nowait version
 * successfully before, the firmware names are recorded into the
 * device's devres link list, so device_cache_fw_images can call
 * cache_firmware() to cache these firmwares for the device,
 * then the device driver can load its firmwares easily at
 * time when system is not ready to complete loading firmware.
 */
static void device_cache_fw_images(void)
{
	struct firmware_cache *fwc = &fw_cache;
	int old_timeout;

	pr_debug("%s\n", __func__);

	/* cancel uncache work */
	cancel_delayed_work_sync(&fwc->work);

	/*
	 * use small loading timeout for caching devices' firmware
	 * because all these firmware images have been loaded
	 * successfully at least once, also system is ready for
	 * completing firmware loading now. The maximum size of
	 * firmware in current distributions is about 2M bytes,
	 * so 10 secs should be enough.
	 */
	old_timeout = loading_timeout;
	loading_timeout = 10;

	mutex_lock(&fw_lock);
	fwc->state = FW_LOADER_START_CACHE;
	dpm_for_each_dev(NULL, dev_cache_fw_image);
	mutex_unlock(&fw_lock);

	/* wait for completion of caching firmware for all devices */
	async_synchronize_full_domain(&fw_cache_domain);

	loading_timeout = old_timeout;
}
/**
 * device_uncache_fw_images - uncache devices' firmware
 *
 * uncache all firmwares which have been cached successfully
 * by device_cache_fw_images earlier
 */
static void device_uncache_fw_images(void)
{
	pr_debug("%s\n", __func__);
	__device_uncache_fw_images();
}

static void device_uncache_fw_images_work(struct work_struct *work)
{
	device_uncache_fw_images();
}

/**
 * device_uncache_fw_images_delay - uncache devices' firmware
 * @delay: number of milliseconds to delay uncaching device firmware
 *
 * uncache all devices' firmware which has been cached successfully
 * by device_cache_fw_images after @delay milliseconds.
 */
static void device_uncache_fw_images_delay(unsigned long delay)
{
	schedule_delayed_work(&fw_cache.work,
			      msecs_to_jiffies(delay));
}
static int fw_pm_notify(struct notifier_block *notify_block,
			unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		kill_requests_without_uevent();
		device_cache_fw_images();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		/*
		 * In case system sleep failed and syscore_suspend was
		 * not called.
		 */
		mutex_lock(&fw_lock);
		fw_cache.state = FW_LOADER_NO_CACHE;
		mutex_unlock(&fw_lock);

		device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
		break;
	}

	return 0;
}
/* stop caching firmware once syscore_suspend is reached */
static int fw_suspend(void)
{
	fw_cache.state = FW_LOADER_NO_CACHE;
	return 0;
}

static struct syscore_ops fw_syscore_ops = {
	.suspend = fw_suspend,
};
#else
static int fw_cache_piggyback_on_request(const char *name)
{
	return 0;
}
#endif
static void __init fw_cache_init(void)
{
	spin_lock_init(&fw_cache.lock);
	INIT_LIST_HEAD(&fw_cache.head);
	fw_cache.state = FW_LOADER_NO_CACHE;

#ifdef CONFIG_PM_SLEEP
	spin_lock_init(&fw_cache.name_lock);
	INIT_LIST_HEAD(&fw_cache.fw_names);

	INIT_DELAYED_WORK(&fw_cache.work,
			  device_uncache_fw_images_work);

	fw_cache.pm_notify.notifier_call = fw_pm_notify;
	register_pm_notifier(&fw_cache.pm_notify);

	register_syscore_ops(&fw_syscore_ops);
#endif
}
static int __init firmware_class_init(void)
{
	fw_cache_init();
#ifdef CONFIG_FW_LOADER_USER_HELPER
	register_reboot_notifier(&fw_shutdown_nb);
	return class_register(&firmware_class);
#else
	return 0;
#endif
}

static void __exit firmware_class_exit(void)
{
#ifdef CONFIG_PM_SLEEP
	unregister_syscore_ops(&fw_syscore_ops);
	unregister_pm_notifier(&fw_cache.pm_notify);
#endif
#ifdef CONFIG_FW_LOADER_USER_HELPER
	unregister_reboot_notifier(&fw_shutdown_nb);
	class_unregister(&firmware_class);
#endif
}

fs_initcall(firmware_class_init);
module_exit(firmware_class_exit);