// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sysfs.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_mount.h"

struct xfs_sysfs_attr {
        struct attribute        attr;
        ssize_t                 (*show)(struct kobject *kobject, char *buf);
        ssize_t                 (*store)(struct kobject *kobject, const char *buf,
                                         size_t count);
};

static inline struct xfs_sysfs_attr *
to_attr(struct attribute *attr)
{
        return container_of(attr, struct xfs_sysfs_attr, attr);
}

#define XFS_SYSFS_ATTR_RW(name) \
        static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
        static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
        static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr

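/*
 * __ATTR_RW(name) hooks the attribute up to name##_show() and name##_store()
 * (__ATTR_RO()/__ATTR_WO() use just one of them), so each knob below only has
 * to provide those functions and then invoke the matching macro.
 * ATTR_LIST(name) yields the &xfs_sysfs_attr_##name.attr pointer used to
 * populate the attribute arrays further down.
 */
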
STATIC ssize_t
xfs_sysfs_object_show(
        struct kobject          *kobject,
        struct attribute        *attr,
        char                    *buf)
{
        struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

        return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
}

STATIC ssize_t
xfs_sysfs_object_store(
        struct kobject          *kobject,
        struct attribute        *attr,
        const char              *buf,
        size_t                  count)
{
        struct xfs_sysfs_attr *xfs_attr = to_attr(attr);

        return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
}

static const struct sysfs_ops xfs_sysfs_ops = {
        .show = xfs_sysfs_object_show,
        .store = xfs_sysfs_object_store,
};

static struct attribute *xfs_mp_attrs[] = {
        NULL,
};
ATTRIBUTE_GROUPS(xfs_mp);

const struct kobj_type xfs_mp_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
        .default_groups = xfs_mp_groups,
};

/* debug */

STATIC ssize_t
bug_on_assert_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val == 1)
                xfs_globals.bug_on_assert = true;
        else if (val == 0)
                xfs_globals.bug_on_assert = false;
        else
                return -EINVAL;

        return count;
}

STATIC ssize_t
bug_on_assert_show(
        struct kobject  *kobject,
        char            *buf)
{
        return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
}
XFS_SYSFS_ATTR_RW(bug_on_assert);

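/*
 * Example usage (paths are illustrative; these debug knobs normally appear
 * under /sys/fs/xfs/debug/ when they are built in):
 *
 *      echo 1 > /sys/fs/xfs/debug/bug_on_assert        # BUG() on failed ASSERTs
 *      echo 0 > /sys/fs/xfs/debug/bug_on_assert        # warn instead of crashing
 */
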
STATIC ssize_t
log_recovery_delay_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val < 0 || val > 60)
                return -EINVAL;

        xfs_globals.log_recovery_delay = val;

        return count;
}

STATIC ssize_t
log_recovery_delay_show(
        struct kobject  *kobject,
        char            *buf)
{
        return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
}
XFS_SYSFS_ATTR_RW(log_recovery_delay);

STATIC ssize_t
mount_delay_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val < 0 || val > 60)
                return -EINVAL;

        xfs_globals.mount_delay = val;

        return count;
}

STATIC ssize_t
mount_delay_show(
        struct kobject  *kobject,
        char            *buf)
{
        return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
}
XFS_SYSFS_ATTR_RW(mount_delay);

static ssize_t
always_cow_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        ssize_t         ret;

        ret = kstrtobool(buf, &xfs_globals.always_cow);
        if (ret < 0)
                return ret;
        return count;
}

static ssize_t
always_cow_show(
        struct kobject  *kobject,
        char            *buf)
{
        return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
}
XFS_SYSFS_ATTR_RW(always_cow);

/*
 * Override how many threads the parallel work queue is allowed to create.
 * This has to be a debug-only global (instead of an errortag) because one of
 * the main users of parallel workqueues is mount time quotacheck.
 */
STATIC ssize_t
pwork_threads_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val < -1 || val > num_possible_cpus())
                return -EINVAL;

        xfs_globals.pwork_threads = val;

        return count;
}

STATIC ssize_t
pwork_threads_show(
        struct kobject  *kobject,
        char            *buf)
{
        return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
}
XFS_SYSFS_ATTR_RW(pwork_threads);

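/*
 * Example (illustrative): force quotacheck to run with a single worker:
 *
 *      echo 1 > /sys/fs/xfs/debug/pwork_threads
 *
 * Writing -1 restores the default automatic sizing of the workqueue.
 */
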
/*
 * The "LARP" (Logged extended Attribute Recovery Persistence) debugging knob
 * sets the XFS_DA_OP_LOGGED flag on all xfs_attr_set operations performed on
 * V5 filesystems.  As a result, the intermediate progress of all setxattr and
 * removexattr operations is tracked via the log and can be restarted during
 * recovery.  This is useful for testing xattr recovery prior to merging of the
 * parent pointer feature, which requires it to maintain consistency, and may be
 * enabled for userspace xattrs in the future.
 */
static ssize_t
larp_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        ssize_t         ret;

        ret = kstrtobool(buf, &xfs_globals.larp);
        if (ret < 0)
                return ret;
        return count;
}

STATIC ssize_t
larp_show(
        struct kobject  *kobject,
        char            *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.larp);
}
XFS_SYSFS_ATTR_RW(larp);

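/*
 * Example (illustrative): log the intermediate state of subsequent xattr
 * updates so their recovery can be exercised:
 *
 *      echo 1 > /sys/fs/xfs/debug/larp
 */
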
STATIC ssize_t
bload_leaf_slack_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        xfs_globals.bload_leaf_slack = val;
        return count;
}

STATIC ssize_t
bload_leaf_slack_show(
        struct kobject  *kobject,
        char            *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_leaf_slack);
}
XFS_SYSFS_ATTR_RW(bload_leaf_slack);

STATIC ssize_t
bload_node_slack_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        xfs_globals.bload_node_slack = val;
        return count;
}

STATIC ssize_t
bload_node_slack_show(
        struct kobject  *kobject,
        char            *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_node_slack);
}
XFS_SYSFS_ATTR_RW(bload_node_slack);

static struct attribute *xfs_dbg_attrs[] = {
        ATTR_LIST(bug_on_assert),
        ATTR_LIST(log_recovery_delay),
        ATTR_LIST(mount_delay),
        ATTR_LIST(always_cow),
        ATTR_LIST(pwork_threads),
        ATTR_LIST(larp),
        ATTR_LIST(bload_leaf_slack),
        ATTR_LIST(bload_node_slack),
        NULL,
};
ATTRIBUTE_GROUPS(xfs_dbg);

const struct kobj_type xfs_dbg_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
        .default_groups = xfs_dbg_groups,
};

/* stats */

static inline struct xstats *
to_xstats(struct kobject *kobject)
{
        struct xfs_kobj *kobj = to_kobj(kobject);

        return container_of(kobj, struct xstats, xs_kobj);
}

STATIC ssize_t
stats_show(
        struct kobject  *kobject,
        char            *buf)
{
        struct xstats   *stats = to_xstats(kobject);

        return xfs_stats_format(stats->xs_stats, buf);
}
XFS_SYSFS_ATTR_RO(stats);

STATIC ssize_t
stats_clear_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        int             ret;
        int             val;
        struct xstats   *stats = to_xstats(kobject);

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val != 1)
                return -EINVAL;

        xfs_stats_clearall(stats->xs_stats);
        return count;
}
XFS_SYSFS_ATTR_WO(stats_clear);

static struct attribute *xfs_stats_attrs[] = {
        ATTR_LIST(stats),
        ATTR_LIST(stats_clear),
        NULL,
};
ATTRIBUTE_GROUPS(xfs_stats);

const struct kobj_type xfs_stats_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
        .default_groups = xfs_stats_groups,
};

/* xlog */

static inline struct xlog *
to_xlog(struct kobject *kobject)
{
        struct xfs_kobj *kobj = to_kobj(kobject);

        return container_of(kobj, struct xlog, l_kobj);
}

STATIC ssize_t
log_head_lsn_show(
        struct kobject  *kobject,
        char            *buf)
{
        int cycle;
        int block;
        struct xlog *log = to_xlog(kobject);

        spin_lock(&log->l_icloglock);
        cycle = log->l_curr_cycle;
        block = log->l_curr_block;
        spin_unlock(&log->l_icloglock);

        return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_head_lsn);

STATIC ssize_t
log_tail_lsn_show(
        struct kobject  *kobject,
        char            *buf)
{
        int cycle;
        int block;
        struct xlog *log = to_xlog(kobject);

        xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
        return sysfs_emit(buf, "%d:%d\n", cycle, block);
}
XFS_SYSFS_ATTR_RO(log_tail_lsn);

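/*
 * Both LSN files report "cycle:block"; for example, "1:2" means cycle 1,
 * basic block 2 of the on-disk log.
 */
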
STATIC ssize_t
reserve_grant_head_bytes_show(
        struct kobject  *kobject,
        char            *buf)
{
        return sysfs_emit(buf, "%lld\n",
                        atomic64_read(&to_xlog(kobject)->l_reserve_head.grant));
}
XFS_SYSFS_ATTR_RO(reserve_grant_head_bytes);

STATIC ssize_t
write_grant_head_bytes_show(
        struct kobject  *kobject,
        char            *buf)
{
        return sysfs_emit(buf, "%lld\n",
                        atomic64_read(&to_xlog(kobject)->l_write_head.grant));
}
XFS_SYSFS_ATTR_RO(write_grant_head_bytes);

static struct attribute *xfs_log_attrs[] = {
        ATTR_LIST(log_head_lsn),
        ATTR_LIST(log_tail_lsn),
        ATTR_LIST(reserve_grant_head_bytes),
        ATTR_LIST(write_grant_head_bytes),
        NULL,
};
ATTRIBUTE_GROUPS(xfs_log);

const struct kobj_type xfs_log_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
        .default_groups = xfs_log_groups,
};

/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *      ...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */

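/*
 * For example (illustrative device name), a filesystem on sdb1 ends up with
 * knobs such as:
 *
 *      /sys/fs/xfs/sdb1/error/fail_at_unmount
 *      /sys/fs/xfs/sdb1/error/metadata/EIO/max_retries
 *      /sys/fs/xfs/sdb1/error/metadata/ENOSPC/retry_timeout_seconds
 */
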
static inline struct xfs_error_cfg *
to_error_cfg(struct kobject *kobject)
{
        struct xfs_kobj *kobj = to_kobj(kobject);
        return container_of(kobj, struct xfs_error_cfg, kobj);
}

static inline struct xfs_mount *
err_to_mp(struct kobject *kobject)
{
        struct xfs_kobj *kobj = to_kobj(kobject);
        return container_of(kobj, struct xfs_mount, m_error_kobj);
}

static ssize_t
max_retries_show(
        struct kobject  *kobject,
        char            *buf)
{
        int             retries;
        struct xfs_error_cfg *cfg = to_error_cfg(kobject);

        if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
                retries = -1;
        else
                retries = cfg->max_retries;

        return sysfs_emit(buf, "%d\n", retries);
}

static ssize_t
max_retries_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        struct xfs_error_cfg *cfg = to_error_cfg(kobject);
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val < -1)
                return -EINVAL;

        if (val == -1)
                cfg->max_retries = XFS_ERR_RETRY_FOREVER;
        else
                cfg->max_retries = val;
        return count;
}
XFS_SYSFS_ATTR_RW(max_retries);

static ssize_t
retry_timeout_seconds_show(
        struct kobject  *kobject,
        char            *buf)
{
        int             timeout;
        struct xfs_error_cfg *cfg = to_error_cfg(kobject);

        if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
                timeout = -1;
        else
                timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;

        return sysfs_emit(buf, "%d\n", timeout);
}

static ssize_t
retry_timeout_seconds_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        struct xfs_error_cfg *cfg = to_error_cfg(kobject);
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        /* 1 day timeout maximum, -1 means infinite */
        if (val < -1 || val > 86400)
                return -EINVAL;

        if (val == -1)
                cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
        else {
                cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
                ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
        }
        return count;
}
XFS_SYSFS_ATTR_RW(retry_timeout_seconds);

static ssize_t
fail_at_unmount_show(
        struct kobject  *kobject,
        char            *buf)
{
        struct xfs_mount        *mp = err_to_mp(kobject);

        return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
}

static ssize_t
fail_at_unmount_store(
        struct kobject  *kobject,
        const char      *buf,
        size_t          count)
{
        struct xfs_mount        *mp = err_to_mp(kobject);
        int             ret;
        int             val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val < 0 || val > 1)
                return -EINVAL;

        mp->m_fail_unmount = val;
        return count;
}
XFS_SYSFS_ATTR_RW(fail_at_unmount);

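/*
 * Example (illustrative path): stop retrying failed metadata writeback once
 * an unmount is in progress, so the unmount cannot hang forever:
 *
 *      echo 1 > /sys/fs/xfs/<dev>/error/fail_at_unmount
 */
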
static struct attribute *xfs_error_attrs[] = {
        ATTR_LIST(max_retries),
        ATTR_LIST(retry_timeout_seconds),
        NULL,
};
ATTRIBUTE_GROUPS(xfs_error);

static const struct kobj_type xfs_error_cfg_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
        .default_groups = xfs_error_groups,
};

static const struct kobj_type xfs_error_ktype = {
        .release = xfs_sysfs_release,
        .sysfs_ops = &xfs_sysfs_ops,
};

/*
 * Error initialization tables. These need to be ordered in the same
 * order as the enums used to index the array. All class init tables need to
 * define a "default" behaviour as the first entry; all other entries can be
 * empty.
 */
struct xfs_error_init {
        char            *name;
        int             max_retries;
        int             retry_timeout;  /* in seconds */
};

static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
        { .name = "default",
          .max_retries = XFS_ERR_RETRY_FOREVER,
          .retry_timeout = XFS_ERR_RETRY_FOREVER,
        },
        { .name = "EIO",
          .max_retries = XFS_ERR_RETRY_FOREVER,
          .retry_timeout = XFS_ERR_RETRY_FOREVER,
        },
        { .name = "ENOSPC",
          .max_retries = XFS_ERR_RETRY_FOREVER,
          .retry_timeout = XFS_ERR_RETRY_FOREVER,
        },
        { .name = "ENODEV",
          .max_retries = 0,     /* We can't recover from devices disappearing */
          .retry_timeout = 0,
        },
};

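/*
 * The entries above must line up with the XFS_ERR_DEFAULT, XFS_ERR_EIO,
 * XFS_ERR_ENOSPC and XFS_ERR_ENODEV indices used for m_error_cfg[][];
 * see xfs_error_get_cfg() below.
 */
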
static int
xfs_error_sysfs_init_class(
        struct xfs_mount        *mp,
        int                     class,
        const char              *parent_name,
        struct xfs_kobj         *parent_kobj,
        const struct xfs_error_init init[])
{
        struct xfs_error_cfg    *cfg;
        int                     error;
        int                     i;

        ASSERT(class < XFS_ERR_CLASS_MAX);

        error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
                                &mp->m_error_kobj, parent_name);
        if (error)
                return error;

        for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
                cfg = &mp->m_error_cfg[class][i];
                error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
                                        parent_kobj, init[i].name);
                if (error)
                        goto out_error;

                cfg->max_retries = init[i].max_retries;
                if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
                        cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
                else
                        cfg->retry_timeout = msecs_to_jiffies(
                                        init[i].retry_timeout * MSEC_PER_SEC);
        }
        return 0;

out_error:
        /* unwind the entries that succeeded */
        for (i--; i >= 0; i--) {
                cfg = &mp->m_error_cfg[class][i];
                xfs_sysfs_del(&cfg->kobj);
        }
        xfs_sysfs_del(parent_kobj);
        return error;
}

int
xfs_error_sysfs_init(
        struct xfs_mount        *mp)
{
        int                     error;

        /* .../xfs/<dev>/error/ */
        error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
                                &mp->m_kobj, "error");
        if (error)
                return error;

        error = sysfs_create_file(&mp->m_error_kobj.kobject,
                                  ATTR_LIST(fail_at_unmount));
        if (error)
                goto out_error;

        /* .../xfs/<dev>/error/metadata/ */
        error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
                                "metadata", &mp->m_error_meta_kobj,
                                xfs_error_meta_init);
        if (error)
                goto out_error;

        return 0;

out_error:
        xfs_sysfs_del(&mp->m_error_kobj);
        return error;
}

void
xfs_error_sysfs_del(
        struct xfs_mount        *mp)
{
        struct xfs_error_cfg    *cfg;
        int                     i, j;

        for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
                for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
                        cfg = &mp->m_error_cfg[i][j];

                        xfs_sysfs_del(&cfg->kobj);
                }
        }
        xfs_sysfs_del(&mp->m_error_meta_kobj);
        xfs_sysfs_del(&mp->m_error_kobj);
}

struct xfs_error_cfg *
xfs_error_get_cfg(
        struct xfs_mount        *mp,
        int                     error_class,
        int                     error)
{
        struct xfs_error_cfg    *cfg;

        if (error < 0)
                error = -error;

        switch (error) {
        case EIO:
                cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
                break;
        case ENOSPC:
                cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
                break;
        case ENODEV:
                cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
                break;
        default:
                cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
                break;
        }

        return cfg;
}