// SPDX-License-Identifier: GPL-2.0

#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include "fallback.h"
#include "firmware.h"

/*
 * firmware fallback mechanism
 */

MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);

extern struct firmware_fallback_config fw_fallback_config;

/* These getters are vetted to use int properly */
static inline int __firmware_loading_timeout(void)
{
	return fw_fallback_config.loading_timeout;
}

/* These setters are vetted to use int properly */
static void __fw_fallback_set_timeout(int timeout)
{
	fw_fallback_config.loading_timeout = timeout;
}

/*
 * use small loading timeout for caching devices' firmware because all these
 * firmware images have been loaded successfully at least once, also system is
 * ready for completing firmware loading now. The maximum size of firmware in
 * current distributions is about 2M bytes, so 10 secs should be enough.
 */
void fw_fallback_set_cache_timeout(void)
{
	fw_fallback_config.old_timeout = __firmware_loading_timeout();
	__fw_fallback_set_timeout(10);
}

/* Restores the timeout to the value last configured during normal operation */
void fw_fallback_set_default_timeout(void)
{
	__fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}

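/*
 * Convert the configured timeout from seconds to jiffies for the waits
 * below; a value of zero means 'wait forever' and maps to MAX_JIFFY_OFFSET.
 */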
static long firmware_loading_timeout(void)
{
	return __firmware_loading_timeout() > 0 ?
		__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}

static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_DONE);
}

static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}

static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
	return __fw_state_wait_common(fw_priv, timeout);
}

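/* Per-request state for the sysfs fallback: the transient device plus the firmware being loaded. */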
struct fw_sysfs {
	bool nowait;
	struct device dev;
	struct fw_priv *fw_priv;
	struct firmware *fw;
};

static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
	return container_of(dev, struct fw_sysfs, dev);
}

static void __fw_load_abort(struct fw_priv *fw_priv)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'
	 */
	if (fw_sysfs_done(fw_priv))
		return;

	list_del_init(&fw_priv->pending_list);
	fw_state_aborted(fw_priv);
}

static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	__fw_load_abort(fw_priv);
}

static LIST_HEAD(pending_fw_head);

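/*
 * Abort every fallback request still waiting on userspace. When
 * @only_kill_custom is true, requests that already generated a uevent (and
 * may still be serviced by udev) are left alone.
 */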
void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
	struct fw_priv *fw_priv;
	struct fw_priv *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
				 pending_list) {
		if (!fw_priv->need_uevent || !only_kill_custom)
			__fw_load_abort(fw_priv);
	}
	mutex_unlock(&fw_lock);
}

static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
}

/**
 * timeout_store() - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware. Once this expires an
 * error will be returned to the driver and no firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 */
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	int tmp_loading_timeout = simple_strtol(buf, NULL, 10);

	if (tmp_loading_timeout < 0)
		tmp_loading_timeout = 0;

	__fw_fallback_set_timeout(tmp_loading_timeout);

	return count;
}
static CLASS_ATTR_RW(timeout);

static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(firmware_class);

static void fw_dev_release(struct device *dev)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);

	kfree(fw_sysfs);
}

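/* Export the FIRMWARE, TIMEOUT and ASYNC variables to the uevent environment consumed by the userspace helper. */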
static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
		return -ENOMEM;

	return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		err = do_firmware_uevent(fw_sysfs, env);
	mutex_unlock(&fw_lock);
	return err;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

int register_sysfs_loader(void)
{
	return class_register(&firmware_class);
}

void unregister_sysfs_loader(void)
{
	class_unregister(&firmware_class);
}

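/* Reading 'loading' returns 1 while userspace is still writing the image, 0 otherwise. */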
static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		loading = fw_sysfs_loading(fw_sysfs->fw_priv);
	mutex_unlock(&fw_lock);

	return sysfs_emit(buf, "%d\n", loading);
}

/**
 * firmware_loading_store() - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *	 1: Start a load, discarding any previous partial load.
 *	 0: Conclude the load and hand the data to the driver code.
 *	-1: Conclude the load with an error and discard any written data.
 */
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (fw_state_is_aborted(fw_priv))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_sysfs_done(fw_priv)) {
			fw_free_paged_buf(fw_priv);
			fw_state_start(fw_priv);
		}
		break;
	case 0:
		if (fw_sysfs_loading(fw_priv)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = fw_map_paged_buf(fw_priv);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_post_load_data(fw_priv->data,
						fw_priv->size,
						LOADING_FIRMWARE, "blob");

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_priv->pending_list);
			if (rc) {
				fw_state_aborted(fw_priv);
				written = rc;
			} else {
				fw_state_done(fw_priv);
			}
			break;
		}
		fallthrough;
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		fallthrough;
	case -1:
		fw_load_abort(fw_sysfs);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);

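/*
 * A minimal userspace sequence for this interface looks roughly like the
 * following, with $DEVPATH and $FIRMWARE taken from the uevent:
 *
 *	echo 1 > /sys/$DEVPATH/loading
 *	cat /lib/firmware/$FIRMWARE > /sys/$DEVPATH/data
 *	echo 0 > /sys/$DEVPATH/loading
 *
 * Writing -1 to 'loading' instead aborts the request.
 */
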
static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
			     loff_t offset, size_t count, bool read)
{
	if (read)
		memcpy(buffer, fw_priv->data + offset, count);
	else
		memcpy(fw_priv->data + offset, buffer, count);
}

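/* Copy to or from the paged buffer one page at a time, mapping each page with kmap(). */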
static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(fw_priv->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(fw_priv->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}

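/* Read method for the 'data' attribute: hand back the image bytes buffered so far. */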
static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > fw_priv->size) {
		ret_count = 0;
		goto out;
	}
	if (count > fw_priv->size - offset)
		count = fw_priv->size - offset;

	ret_count = count;

	if (fw_priv->data)
		firmware_rw_data(fw_priv, buffer, offset, count, true);
	else
		firmware_rw(fw_priv, buffer, offset, count, true);
out:
	mutex_unlock(&fw_lock);
	return ret_count;
}

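/* Grow the paged buffer so it can hold at least @min_size bytes; abort the load on failure. */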
static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
	int err;

	err = fw_grow_paged_buf(fw_sysfs->fw_priv,
				PAGE_ALIGN(min_size) >> PAGE_SHIFT);
	if (err)
		fw_load_abort(fw_sysfs);
	return err;
}

/**
 * firmware_data_write() - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 */
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}

static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};

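/*
 * Allocate and initialize the transient device exposed under
 * /sys/class/firmware/ for one fallback request; the caller is responsible
 * for device_add()/device_del() and the final put_device().
 */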
static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, u32 opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	struct device *f_dev;

	fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
	if (!fw_sysfs) {
		fw_sysfs = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_sysfs->fw = firmware;
	f_dev = &fw_sysfs->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_sysfs;
}

/**
 * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
 * @fw_sysfs: firmware sysfs information for the firmware to load
 * @timeout: timeout to wait for the load
 *
 * In charge of constructing a sysfs fallback interface for firmware loading.
 */
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_sysfs->dev;
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	/* fall back on userspace loading */
	if (!fw_priv->data)
		fw_priv->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&fw_priv->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (fw_priv->opt_flags & FW_OPT_UEVENT) {
		fw_priv->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
	if (retval < 0 && retval != -ENOENT) {
		mutex_lock(&fw_lock);
		fw_load_abort(fw_sysfs);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(fw_priv)) {
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
		else
			retval = -EAGAIN;
	} else if (fw_priv->is_paged_buf && !fw_priv->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}

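/*
 * Hold the usermodehelper read lock for the duration of the load, expose the
 * sysfs device, and hand the resulting image to the driver on success.
 */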
static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    u32 opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_sysfs)) {
		ret = PTR_ERR(fw_sysfs);
		goto out_unlock;
	}

	fw_sysfs->fw_priv = firmware->priv;
	ret = fw_load_sysfs_fallback(fw_sysfs, timeout);

	if (!ret)
		ret = assign_fw(firmware, device);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}

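/*
 * Use the sysfs fallback even when the caller did not ask for it if the
 * force_sysfs_fallback sysctl knob is set (the behaviour that
 * CONFIG_FW_LOADER_USER_HELPER_FALLBACK enables by default); otherwise
 * require FW_OPT_USERHELPER.
 */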
static bool fw_force_sysfs_fallback(u32 opt_flags)
{
	if (fw_fallback_config.force_sysfs_fallback)
		return true;
	if (!(opt_flags & FW_OPT_USERHELPER))
		return false;
	return true;
}

static bool fw_run_sysfs_fallback(u32 opt_flags)
{
	int ret;

	if (fw_fallback_config.ignore_sysfs_fallback) {
		pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
		return false;
	}

	if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
		return false;

	/* Also permit LSMs and IMA to fail firmware sysfs fallback */
	ret = security_kernel_load_data(LOADING_FIRMWARE, true);
	if (ret < 0)
		return false;

	return fw_force_sysfs_fallback(opt_flags);
}

/**
 * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
 * @fw: pointer to firmware image
 * @name: name of firmware file to look for
 * @device: device for which firmware is being loaded
 * @opt_flags: options to control firmware loading behaviour, as defined by
 *	       &enum fw_opt
 * @ret: return value from direct lookup which triggered the fallback mechanism
 *
 * This function is called if direct lookup for the firmware failed, it enables
 * a fallback mechanism through userspace by exposing a sysfs loading
 * interface. Userspace is in charge of loading the firmware through the sysfs
 * loading interface. This sysfs fallback mechanism may be disabled completely
 * on a system by setting the proc sysctl value ignore_sysfs_fallback to true.
 * If this is false we check if the internal API caller set the
 * @FW_OPT_NOFALLBACK_SYSFS flag, if so it would also disable the fallback
 * mechanism. A system may want to enforce the sysfs fallback mechanism at all
 * times, it can do this by setting ignore_sysfs_fallback to false and
 * force_sysfs_fallback to true.
 * Enabling force_sysfs_fallback is functionally equivalent to building a
 * kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
 */
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
			    struct device *device,
			    u32 opt_flags,
			    int ret)
{
	if (!fw_run_sysfs_fallback(opt_flags))
		return ret;

	if (!(opt_flags & FW_OPT_NO_WARN))
		dev_warn(device, "Falling back to sysfs fallback for: %s\n",
			 name);
	else
		dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
			name);
	return fw_load_from_user_helper(fw, name, device, opt_flags);
}
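
/*
 * The ignore_sysfs_fallback and force_sysfs_fallback knobs referenced above
 * are exposed through the firmware_config sysctl table, typically as
 * /proc/sys/kernel/firmware_config/{ignore,force}_sysfs_fallback, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/firmware_config/force_sysfs_fallback
 */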