fs/xfs/xfs_sysfs.c — XFS sysfs interface (source: drm/drm-misc.git mirror, blob 60cb5318fdae3cc246236fd988b4749df57f8bfc). Note: the original page title referenced an unrelated drm/ast commit; this file is the XFS sysfs implementation.
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2014 Red Hat, Inc.
4 * All Rights Reserved.
5 */
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sysfs.h"
13 #include "xfs_log.h"
14 #include "xfs_log_priv.h"
15 #include "xfs_mount.h"
17 struct xfs_sysfs_attr {
18 struct attribute attr;
19 ssize_t (*show)(struct kobject *kobject, char *buf);
20 ssize_t (*store)(struct kobject *kobject, const char *buf,
21 size_t count);
24 static inline struct xfs_sysfs_attr *
25 to_attr(struct attribute *attr)
27 return container_of(attr, struct xfs_sysfs_attr, attr);
30 #define XFS_SYSFS_ATTR_RW(name) \
31 static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
32 #define XFS_SYSFS_ATTR_RO(name) \
33 static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
34 #define XFS_SYSFS_ATTR_WO(name) \
35 static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)
37 #define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
39 STATIC ssize_t
40 xfs_sysfs_object_show(
41 struct kobject *kobject,
42 struct attribute *attr,
43 char *buf)
45 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
47 return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
50 STATIC ssize_t
51 xfs_sysfs_object_store(
52 struct kobject *kobject,
53 struct attribute *attr,
54 const char *buf,
55 size_t count)
57 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
59 return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
62 static const struct sysfs_ops xfs_sysfs_ops = {
63 .show = xfs_sysfs_object_show,
64 .store = xfs_sysfs_object_store,
67 static struct attribute *xfs_mp_attrs[] = {
68 NULL,
70 ATTRIBUTE_GROUPS(xfs_mp);
72 const struct kobj_type xfs_mp_ktype = {
73 .release = xfs_sysfs_release,
74 .sysfs_ops = &xfs_sysfs_ops,
75 .default_groups = xfs_mp_groups,
78 #ifdef DEBUG
79 /* debug */
81 STATIC ssize_t
82 bug_on_assert_store(
83 struct kobject *kobject,
84 const char *buf,
85 size_t count)
87 int ret;
88 int val;
90 ret = kstrtoint(buf, 0, &val);
91 if (ret)
92 return ret;
94 if (val == 1)
95 xfs_globals.bug_on_assert = true;
96 else if (val == 0)
97 xfs_globals.bug_on_assert = false;
98 else
99 return -EINVAL;
101 return count;
104 STATIC ssize_t
105 bug_on_assert_show(
106 struct kobject *kobject,
107 char *buf)
109 return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
111 XFS_SYSFS_ATTR_RW(bug_on_assert);
113 STATIC ssize_t
114 log_recovery_delay_store(
115 struct kobject *kobject,
116 const char *buf,
117 size_t count)
119 int ret;
120 int val;
122 ret = kstrtoint(buf, 0, &val);
123 if (ret)
124 return ret;
126 if (val < 0 || val > 60)
127 return -EINVAL;
129 xfs_globals.log_recovery_delay = val;
131 return count;
134 STATIC ssize_t
135 log_recovery_delay_show(
136 struct kobject *kobject,
137 char *buf)
139 return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
141 XFS_SYSFS_ATTR_RW(log_recovery_delay);
143 STATIC ssize_t
144 mount_delay_store(
145 struct kobject *kobject,
146 const char *buf,
147 size_t count)
149 int ret;
150 int val;
152 ret = kstrtoint(buf, 0, &val);
153 if (ret)
154 return ret;
156 if (val < 0 || val > 60)
157 return -EINVAL;
159 xfs_globals.mount_delay = val;
161 return count;
164 STATIC ssize_t
165 mount_delay_show(
166 struct kobject *kobject,
167 char *buf)
169 return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
171 XFS_SYSFS_ATTR_RW(mount_delay);
173 static ssize_t
174 always_cow_store(
175 struct kobject *kobject,
176 const char *buf,
177 size_t count)
179 ssize_t ret;
181 ret = kstrtobool(buf, &xfs_globals.always_cow);
182 if (ret < 0)
183 return ret;
184 return count;
187 static ssize_t
188 always_cow_show(
189 struct kobject *kobject,
190 char *buf)
192 return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
194 XFS_SYSFS_ATTR_RW(always_cow);
197 * Override how many threads the parallel work queue is allowed to create.
198 * This has to be a debug-only global (instead of an errortag) because one of
199 * the main users of parallel workqueues is mount time quotacheck.
201 STATIC ssize_t
202 pwork_threads_store(
203 struct kobject *kobject,
204 const char *buf,
205 size_t count)
207 int ret;
208 int val;
210 ret = kstrtoint(buf, 0, &val);
211 if (ret)
212 return ret;
214 if (val < -1 || val > num_possible_cpus())
215 return -EINVAL;
217 xfs_globals.pwork_threads = val;
219 return count;
222 STATIC ssize_t
223 pwork_threads_show(
224 struct kobject *kobject,
225 char *buf)
227 return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
229 XFS_SYSFS_ATTR_RW(pwork_threads);
232 * The "LARP" (Logged extended Attribute Recovery Persistence) debugging knob
233 * sets the XFS_DA_OP_LOGGED flag on all xfs_attr_set operations performed on
234 * V5 filesystems. As a result, the intermediate progress of all setxattr and
235 * removexattr operations are tracked via the log and can be restarted during
236 * recovery. This is useful for testing xattr recovery prior to merging of the
237 * parent pointer feature which requires it to maintain consistency, and may be
238 * enabled for userspace xattrs in the future.
240 static ssize_t
241 larp_store(
242 struct kobject *kobject,
243 const char *buf,
244 size_t count)
246 ssize_t ret;
248 ret = kstrtobool(buf, &xfs_globals.larp);
249 if (ret < 0)
250 return ret;
251 return count;
254 STATIC ssize_t
255 larp_show(
256 struct kobject *kobject,
257 char *buf)
259 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.larp);
261 XFS_SYSFS_ATTR_RW(larp);
263 STATIC ssize_t
264 bload_leaf_slack_store(
265 struct kobject *kobject,
266 const char *buf,
267 size_t count)
269 int ret;
270 int val;
272 ret = kstrtoint(buf, 0, &val);
273 if (ret)
274 return ret;
276 xfs_globals.bload_leaf_slack = val;
277 return count;
280 STATIC ssize_t
281 bload_leaf_slack_show(
282 struct kobject *kobject,
283 char *buf)
285 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_leaf_slack);
287 XFS_SYSFS_ATTR_RW(bload_leaf_slack);
289 STATIC ssize_t
290 bload_node_slack_store(
291 struct kobject *kobject,
292 const char *buf,
293 size_t count)
295 int ret;
296 int val;
298 ret = kstrtoint(buf, 0, &val);
299 if (ret)
300 return ret;
302 xfs_globals.bload_node_slack = val;
303 return count;
306 STATIC ssize_t
307 bload_node_slack_show(
308 struct kobject *kobject,
309 char *buf)
311 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_node_slack);
313 XFS_SYSFS_ATTR_RW(bload_node_slack);
315 static struct attribute *xfs_dbg_attrs[] = {
316 ATTR_LIST(bug_on_assert),
317 ATTR_LIST(log_recovery_delay),
318 ATTR_LIST(mount_delay),
319 ATTR_LIST(always_cow),
320 ATTR_LIST(pwork_threads),
321 ATTR_LIST(larp),
322 ATTR_LIST(bload_leaf_slack),
323 ATTR_LIST(bload_node_slack),
324 NULL,
326 ATTRIBUTE_GROUPS(xfs_dbg);
328 const struct kobj_type xfs_dbg_ktype = {
329 .release = xfs_sysfs_release,
330 .sysfs_ops = &xfs_sysfs_ops,
331 .default_groups = xfs_dbg_groups,
334 #endif /* DEBUG */
336 /* stats */
338 static inline struct xstats *
339 to_xstats(struct kobject *kobject)
341 struct xfs_kobj *kobj = to_kobj(kobject);
343 return container_of(kobj, struct xstats, xs_kobj);
346 STATIC ssize_t
347 stats_show(
348 struct kobject *kobject,
349 char *buf)
351 struct xstats *stats = to_xstats(kobject);
353 return xfs_stats_format(stats->xs_stats, buf);
355 XFS_SYSFS_ATTR_RO(stats);
357 STATIC ssize_t
358 stats_clear_store(
359 struct kobject *kobject,
360 const char *buf,
361 size_t count)
363 int ret;
364 int val;
365 struct xstats *stats = to_xstats(kobject);
367 ret = kstrtoint(buf, 0, &val);
368 if (ret)
369 return ret;
371 if (val != 1)
372 return -EINVAL;
374 xfs_stats_clearall(stats->xs_stats);
375 return count;
377 XFS_SYSFS_ATTR_WO(stats_clear);
379 static struct attribute *xfs_stats_attrs[] = {
380 ATTR_LIST(stats),
381 ATTR_LIST(stats_clear),
382 NULL,
384 ATTRIBUTE_GROUPS(xfs_stats);
386 const struct kobj_type xfs_stats_ktype = {
387 .release = xfs_sysfs_release,
388 .sysfs_ops = &xfs_sysfs_ops,
389 .default_groups = xfs_stats_groups,
392 /* xlog */
394 static inline struct xlog *
395 to_xlog(struct kobject *kobject)
397 struct xfs_kobj *kobj = to_kobj(kobject);
399 return container_of(kobj, struct xlog, l_kobj);
402 STATIC ssize_t
403 log_head_lsn_show(
404 struct kobject *kobject,
405 char *buf)
407 int cycle;
408 int block;
409 struct xlog *log = to_xlog(kobject);
411 spin_lock(&log->l_icloglock);
412 cycle = log->l_curr_cycle;
413 block = log->l_curr_block;
414 spin_unlock(&log->l_icloglock);
416 return sysfs_emit(buf, "%d:%d\n", cycle, block);
418 XFS_SYSFS_ATTR_RO(log_head_lsn);
420 STATIC ssize_t
421 log_tail_lsn_show(
422 struct kobject *kobject,
423 char *buf)
425 int cycle;
426 int block;
427 struct xlog *log = to_xlog(kobject);
429 xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
430 return sysfs_emit(buf, "%d:%d\n", cycle, block);
432 XFS_SYSFS_ATTR_RO(log_tail_lsn);
434 STATIC ssize_t
435 reserve_grant_head_bytes_show(
436 struct kobject *kobject,
437 char *buf)
439 return sysfs_emit(buf, "%lld\n",
440 atomic64_read(&to_xlog(kobject)->l_reserve_head.grant));
442 XFS_SYSFS_ATTR_RO(reserve_grant_head_bytes);
444 STATIC ssize_t
445 write_grant_head_bytes_show(
446 struct kobject *kobject,
447 char *buf)
449 return sysfs_emit(buf, "%lld\n",
450 atomic64_read(&to_xlog(kobject)->l_write_head.grant));
452 XFS_SYSFS_ATTR_RO(write_grant_head_bytes);
454 static struct attribute *xfs_log_attrs[] = {
455 ATTR_LIST(log_head_lsn),
456 ATTR_LIST(log_tail_lsn),
457 ATTR_LIST(reserve_grant_head_bytes),
458 ATTR_LIST(write_grant_head_bytes),
459 NULL,
461 ATTRIBUTE_GROUPS(xfs_log);
463 const struct kobj_type xfs_log_ktype = {
464 .release = xfs_sysfs_release,
465 .sysfs_ops = &xfs_sysfs_ops,
466 .default_groups = xfs_log_groups,
470 * Metadata IO error configuration
472 * The sysfs structure here is:
473 * ...xfs/<dev>/error/<class>/<errno>/<error_attrs>
475 * where <class> allows us to discriminate between data IO and metadata IO,
476 * and any other future type of IO (e.g. special inode or directory error
477 * handling) we care to support.
479 static inline struct xfs_error_cfg *
480 to_error_cfg(struct kobject *kobject)
482 struct xfs_kobj *kobj = to_kobj(kobject);
483 return container_of(kobj, struct xfs_error_cfg, kobj);
486 static inline struct xfs_mount *
487 err_to_mp(struct kobject *kobject)
489 struct xfs_kobj *kobj = to_kobj(kobject);
490 return container_of(kobj, struct xfs_mount, m_error_kobj);
493 static ssize_t
494 max_retries_show(
495 struct kobject *kobject,
496 char *buf)
498 int retries;
499 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
501 if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
502 retries = -1;
503 else
504 retries = cfg->max_retries;
506 return sysfs_emit(buf, "%d\n", retries);
509 static ssize_t
510 max_retries_store(
511 struct kobject *kobject,
512 const char *buf,
513 size_t count)
515 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
516 int ret;
517 int val;
519 ret = kstrtoint(buf, 0, &val);
520 if (ret)
521 return ret;
523 if (val < -1)
524 return -EINVAL;
526 if (val == -1)
527 cfg->max_retries = XFS_ERR_RETRY_FOREVER;
528 else
529 cfg->max_retries = val;
530 return count;
532 XFS_SYSFS_ATTR_RW(max_retries);
534 static ssize_t
535 retry_timeout_seconds_show(
536 struct kobject *kobject,
537 char *buf)
539 int timeout;
540 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
542 if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
543 timeout = -1;
544 else
545 timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;
547 return sysfs_emit(buf, "%d\n", timeout);
550 static ssize_t
551 retry_timeout_seconds_store(
552 struct kobject *kobject,
553 const char *buf,
554 size_t count)
556 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
557 int ret;
558 int val;
560 ret = kstrtoint(buf, 0, &val);
561 if (ret)
562 return ret;
564 /* 1 day timeout maximum, -1 means infinite */
565 if (val < -1 || val > 86400)
566 return -EINVAL;
568 if (val == -1)
569 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
570 else {
571 cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
572 ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
574 return count;
576 XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
578 static ssize_t
579 fail_at_unmount_show(
580 struct kobject *kobject,
581 char *buf)
583 struct xfs_mount *mp = err_to_mp(kobject);
585 return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
588 static ssize_t
589 fail_at_unmount_store(
590 struct kobject *kobject,
591 const char *buf,
592 size_t count)
594 struct xfs_mount *mp = err_to_mp(kobject);
595 int ret;
596 int val;
598 ret = kstrtoint(buf, 0, &val);
599 if (ret)
600 return ret;
602 if (val < 0 || val > 1)
603 return -EINVAL;
605 mp->m_fail_unmount = val;
606 return count;
608 XFS_SYSFS_ATTR_RW(fail_at_unmount);
610 static struct attribute *xfs_error_attrs[] = {
611 ATTR_LIST(max_retries),
612 ATTR_LIST(retry_timeout_seconds),
613 NULL,
615 ATTRIBUTE_GROUPS(xfs_error);
617 static const struct kobj_type xfs_error_cfg_ktype = {
618 .release = xfs_sysfs_release,
619 .sysfs_ops = &xfs_sysfs_ops,
620 .default_groups = xfs_error_groups,
623 static const struct kobj_type xfs_error_ktype = {
624 .release = xfs_sysfs_release,
625 .sysfs_ops = &xfs_sysfs_ops,
629 * Error initialization tables. These need to be ordered in the same
630 * order as the enums used to index the array. All class init tables need to
631 * define a "default" behaviour as the first entry, all other entries can be
632 * empty.
634 struct xfs_error_init {
635 char *name;
636 int max_retries;
637 int retry_timeout; /* in seconds */
640 static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
641 { .name = "default",
642 .max_retries = XFS_ERR_RETRY_FOREVER,
643 .retry_timeout = XFS_ERR_RETRY_FOREVER,
645 { .name = "EIO",
646 .max_retries = XFS_ERR_RETRY_FOREVER,
647 .retry_timeout = XFS_ERR_RETRY_FOREVER,
649 { .name = "ENOSPC",
650 .max_retries = XFS_ERR_RETRY_FOREVER,
651 .retry_timeout = XFS_ERR_RETRY_FOREVER,
653 { .name = "ENODEV",
654 .max_retries = 0, /* We can't recover from devices disappearing */
655 .retry_timeout = 0,
659 static int
660 xfs_error_sysfs_init_class(
661 struct xfs_mount *mp,
662 int class,
663 const char *parent_name,
664 struct xfs_kobj *parent_kobj,
665 const struct xfs_error_init init[])
667 struct xfs_error_cfg *cfg;
668 int error;
669 int i;
671 ASSERT(class < XFS_ERR_CLASS_MAX);
673 error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
674 &mp->m_error_kobj, parent_name);
675 if (error)
676 return error;
678 for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
679 cfg = &mp->m_error_cfg[class][i];
680 error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
681 parent_kobj, init[i].name);
682 if (error)
683 goto out_error;
685 cfg->max_retries = init[i].max_retries;
686 if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
687 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
688 else
689 cfg->retry_timeout = msecs_to_jiffies(
690 init[i].retry_timeout * MSEC_PER_SEC);
692 return 0;
694 out_error:
695 /* unwind the entries that succeeded */
696 for (i--; i >= 0; i--) {
697 cfg = &mp->m_error_cfg[class][i];
698 xfs_sysfs_del(&cfg->kobj);
700 xfs_sysfs_del(parent_kobj);
701 return error;
705 xfs_error_sysfs_init(
706 struct xfs_mount *mp)
708 int error;
710 /* .../xfs/<dev>/error/ */
711 error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
712 &mp->m_kobj, "error");
713 if (error)
714 return error;
716 error = sysfs_create_file(&mp->m_error_kobj.kobject,
717 ATTR_LIST(fail_at_unmount));
719 if (error)
720 goto out_error;
722 /* .../xfs/<dev>/error/metadata/ */
723 error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
724 "metadata", &mp->m_error_meta_kobj,
725 xfs_error_meta_init);
726 if (error)
727 goto out_error;
729 return 0;
731 out_error:
732 xfs_sysfs_del(&mp->m_error_kobj);
733 return error;
736 void
737 xfs_error_sysfs_del(
738 struct xfs_mount *mp)
740 struct xfs_error_cfg *cfg;
741 int i, j;
743 for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
744 for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
745 cfg = &mp->m_error_cfg[i][j];
747 xfs_sysfs_del(&cfg->kobj);
750 xfs_sysfs_del(&mp->m_error_meta_kobj);
751 xfs_sysfs_del(&mp->m_error_kobj);
754 struct xfs_error_cfg *
755 xfs_error_get_cfg(
756 struct xfs_mount *mp,
757 int error_class,
758 int error)
760 struct xfs_error_cfg *cfg;
762 if (error < 0)
763 error = -error;
765 switch (error) {
766 case EIO:
767 cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
768 break;
769 case ENOSPC:
770 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
771 break;
772 case ENODEV:
773 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
774 break;
775 default:
776 cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
777 break;
780 return cfg;