// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_CHARDEV

#include "bcachefs_ioctl.h"
#include "disk_accounting.h"
#include "recovery_passes.h"
#include "thread_with_file.h"

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/ioctl.h>
#include <linux/major.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
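
/*
 * Helper used by most per-device ioctls below: the ioctl argument's "dev"
 * field is interpreted either as a member index (when BCH_BY_INDEX is set in
 * flags) or as a userspace pointer to a device path string.
 */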
/* returns with ref on ca->ref */
static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
					  unsigned flags)
{
	struct bch_dev *ca;

	if (flags & BCH_BY_INDEX) {
		if (dev >= c->sb.nr_devices)
			return ERR_PTR(-EINVAL);

		ca = bch2_dev_tryget_noerror(c, dev);
		if (!ca)
			return ERR_PTR(-EINVAL);
	} else {
		char *path;

		path = strndup_user((const char __user *)
				    (unsigned long) dev, PATH_MAX);
		if (IS_ERR(path))
			return ERR_CAST(path);

		ca = bch2_dev_lookup(c, path);
		kfree(path);
	}

	return ca;
}
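
/*
 * BCH_IOCTL_ASSEMBLE: open a multi-device filesystem from an array of
 * device path pointers passed in from userspace.
 */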
static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
{
	struct bch_ioctl_assemble arg;
	u64 *user_devs = NULL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	user_devs = kmalloc_array(arg.nr_devs, sizeof(u64), GFP_KERNEL);

	devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);

	if (copy_from_user(user_devs, user_arg->devs,
			   sizeof(u64) * arg.nr_devs))
		goto err;

	for (i = 0; i < arg.nr_devs; i++) {
		devs[i] = strndup_user((const char __user *)(unsigned long)
				       user_devs[i], PATH_MAX);
		ret = PTR_ERR_OR_ZERO(devs[i]);
	}

	c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
	ret = PTR_ERR_OR_ZERO(c);

	for (i = 0; i < arg.nr_devs; i++)
		kfree(devs[i]);
static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
{
	struct bch_ioctl_incremental arg;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);

	err = bch2_fs_open_incremental(path);
	if (err)
		pr_err("Could not register bcachefs devices: %s", err);
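
/*
 * Context shared by the offline and online fsck ioctls: a kernel thread with
 * stdio redirected back to the calling process, plus the mount options the
 * fsck run was requested with.
 */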
struct fsck_thread {
	struct thread_with_stdio thr;
	struct bch_fs		*c;
	struct bch_opts		opts;
};

static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
{
	struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);

	kfree(thr);
}

static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	int ret = PTR_ERR_OR_ZERO(c);

	ret = bch2_fs_start(thr->c);

	if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
	}
	if (test_bit(BCH_FS_error, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
	}
}

static const struct thread_with_stdio_ops bch2_offline_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_offline_thread_fn,
};
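
/*
 * BCH_IOCTL_FSCK_OFFLINE: open an unmounted filesystem read-only (nostart)
 * and run fsck in a kernel thread via __bch2_run_thread_with_stdio(), so the
 * userspace fsck tool can stream messages and answer fix-it prompts.
 */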
static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
{
	struct bch_ioctl_fsck_offline arg;
	struct fsck_thread *thr = NULL;
	darray_str(devs) = {};
	long ret = 0;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (size_t i = 0; i < arg.nr_devs; i++) {
		u64 dev_u64;

		ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));

		char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
		ret = PTR_ERR_OR_ZERO(dev_str);

		ret = darray_push(&devs, dev_str);
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);

	thr->opts = bch2_opts_empty();

	char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
	ret =	PTR_ERR_OR_ZERO(optstr) ?:
		bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr);
	if (ret)
		goto err;

	opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);
	opt_set(thr->opts, read_only, 1);
	opt_set(thr->opts, ratelimit_errors, 0);

	/* We need request_key() to be called before we punt to kthread: */
	opt_set(thr->opts, nostart, true);

	bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);

	thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);

	if (!IS_ERR(thr->c) &&
	    thr->c->opts.errors == BCH_ON_ERROR_panic)
		thr->c->opts.errors = BCH_ON_ERROR_ro;

	ret = __bch2_run_thread_with_stdio(&thr->thr);
out:
	darray_for_each(devs, i)
		kfree(*i);
	darray_exit(&devs);
	return ret;
err:
	if (thr)
		bch2_fsck_thread_exit(&thr->thr);
	pr_err("ret %s", bch2_err_str(ret));
	goto out;
}
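
/*
 * Global ioctls are issued on the control device (/dev/bcachefs-ctl,
 * minor U8_MAX) rather than on a specific filesystem's chardev, so they take
 * no struct bch_fs argument.
 */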
static long bch2_global_ioctl(unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
	case BCH_IOCTL_ASSEMBLE:
		return bch2_ioctl_assemble(arg);
	case BCH_IOCTL_INCREMENTAL:
		return bch2_ioctl_incremental(arg);
	case BCH_IOCTL_FSCK_OFFLINE: {
		ret = bch2_ioctl_fsck_offline(arg);
		break;
	}
	}

	ret = bch2_err_class(ret);
	return ret;
}
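
/*
 * Per-filesystem ioctls follow.  As an illustrative (hypothetical) userspace
 * sketch, querying the filesystem UUID through the per-fs node created below
 * ("bcachefs%u-ctl") might look like:
 *
 *	struct bch_ioctl_query_uuid u;
 *	int fd = open("/dev/bcachefs0-ctl", O_RDONLY);
 *	int ret = ioctl(fd, BCH_IOCTL_QUERY_UUID, &u);
 *
 * On success, u.uuid holds the superblock's user_uuid.
 */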
static long bch2_ioctl_query_uuid(struct bch_fs *c,
				  struct bch_ioctl_query_uuid __user *user_arg)
{
	return copy_to_user_errcode(&user_arg->uuid, &c->sb.user_uuid,
				    sizeof(c->sb.user_uuid));
}

static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	return bch2_fs_start(c);
}

static long bch2_ioctl_stop(struct bch_fs *c)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	bch2_fs_stop(c);
	return 0;
}

static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);

	ret = bch2_dev_add(c, path);
	kfree(path);
	return ret;
}

static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);

	return bch2_dev_remove(c, ca, arg.flags);
}

static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);

	ret = bch2_dev_online(c, path);
	kfree(path);
	return ret;
}

static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);

	ret = bch2_dev_offline(c, ca, arg.flags);

static long bch2_ioctl_disk_set_state(struct bch_fs *c,
				      struct bch_ioctl_disk_set_state arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad[0] || arg.pad[1] || arg.pad[2] ||
	    arg.new_state >= BCH_MEMBER_STATE_NR)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);

	ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
	if (ret)
		bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
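
/*
 * BCH_IOCTL_DATA: kick off a background data job (rereplicate, migrate, etc.)
 * in a kernel thread.  bch2_run_thread_with_file() hands back a file
 * descriptor; reading it yields struct bch_ioctl_data_event progress records,
 * and closing it tears the job context down.
 */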
struct bch_data_ctx {
	struct thread_with_file	thr;

	struct bch_fs		*c;
	struct bch_ioctl_data	arg;
	struct bch_move_stats	stats;
};

static int bch2_data_thread(void *arg)
{
	struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr);

	ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
	ctx->stats.data_type = U8_MAX;
	return 0;
}

static int bch2_data_job_release(struct inode *inode, struct file *file)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);

	bch2_thread_with_file_exit(&ctx->thr);
	kfree(ctx);
	return 0;
}

static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
	struct bch_fs *c = ctx->c;
	struct bch_ioctl_data_event e = {
		.type			= BCH_DATA_EVENT_PROGRESS,
		.p.data_type		= ctx->stats.data_type,
		.p.btree_id		= ctx->stats.pos.btree,
		.p.pos			= ctx->stats.pos.pos,
		.p.sectors_done		= atomic64_read(&ctx->stats.sectors_seen),
		.p.sectors_total	= bch2_fs_usage_read_short(c).used,
	};

	return copy_to_user_errcode(buf, &e, sizeof(e)) ?: sizeof(e);
}

static const struct file_operations bcachefs_data_ops = {
	.release	= bch2_data_job_release,
	.read		= bch2_data_job_read,
};

static long bch2_ioctl_data(struct bch_fs *c,
			    struct bch_ioctl_data arg)
{
	struct bch_data_ctx *ctx;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.op >= BCH_DATA_OP_NR || arg.flags)
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	ret = bch2_run_thread_with_file(&ctx->thr,
					&bcachefs_data_ops,
					bch2_data_thread);
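
/*
 * BCH_IOCTL_FS_USAGE: filesystem-wide space accounting.  The caller supplies
 * a buffer for the replicas usage entries (replica_entries_bytes); if it is
 * too small the ioctl fails with -ERANGE.
 */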
static long bch2_ioctl_fs_usage(struct bch_fs *c,
				struct bch_ioctl_fs_usage __user *user_arg)
{
	struct bch_ioctl_fs_usage arg = {};
	darray_char replicas = {};
	u32 replica_entries_bytes;
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
		return -EFAULT;

	ret =	bch2_fs_replicas_usage_read(c, &replicas) ?:
		(replica_entries_bytes < replicas.nr ? -ERANGE : 0) ?:
		copy_to_user_errcode(&user_arg->replicas, replicas.data, replicas.nr);
	if (ret)
		goto err;

	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);
	arg.capacity		= c->capacity;
	arg.used		= u.used;
	arg.online_reserved	= percpu_u64_get(c->online_reserved);
	arg.replica_entries_bytes = replicas.nr;

	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) {
		struct disk_accounting_pos k = {
			.type = BCH_DISK_ACCOUNTING_persistent_reserved,
			.persistent_reserved.nr_replicas = i,
		};

		bch2_accounting_mem_read(c,
					 disk_accounting_pos_to_bpos(&k),
					 &arg.persistent_reserved[i], 1);
	}

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
err:
	darray_exit(&replicas);
	return ret;
}
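
/*
 * BCH_IOCTL_QUERY_ACCOUNTING: newer interface returning raw disk accounting
 * entries filtered by accounting_types_mask, sized by the caller via
 * accounting_u64s.
 */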
static long bch2_ioctl_query_accounting(struct bch_fs *c,
			struct bch_ioctl_query_accounting __user *user_arg)
{
	struct bch_ioctl_query_accounting arg;
	darray_char accounting = {};
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	ret =	copy_from_user_errcode(&arg, user_arg, sizeof(arg)) ?:
		bch2_fs_accounting_read(c, &accounting, arg.accounting_types_mask) ?:
		(arg.accounting_u64s * sizeof(u64) < accounting.nr ? -ERANGE : 0) ?:
		copy_to_user_errcode(&user_arg->accounting, accounting.data, accounting.nr);
	if (ret)
		goto err;

	arg.capacity		= c->capacity;
	arg.used		= bch2_fs_usage_read_short(c).used;
	arg.online_reserved	= percpu_u64_get(c->online_reserved);
	arg.accounting_u64s	= accounting.nr / sizeof(u64);

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
err:
	darray_exit(&accounting);
	return ret;
}
/* obsolete, didn't allow for new data types: */
static long bch2_ioctl_dev_usage(struct bch_fs *c,
				 struct bch_ioctl_dev_usage __user *user_arg)
{
	struct bch_ioctl_dev_usage arg;
	struct bch_dev_usage src;
	unsigned i;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] || arg.pad[1] || arg.pad[2])
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);

	src = bch2_dev_usage_read(ca);

	arg.state		= ca->mi.state;
	arg.bucket_size		= ca->mi.bucket_size;
	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;

	for (i = 0; i < ARRAY_SIZE(arg.d); i++) {
		arg.d[i].buckets	= src.d[i].buckets;
		arg.d[i].sectors	= src.d[i].sectors;
		arg.d[i].fragmented	= src.d[i].fragmented;
	}

	return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
}
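
/*
 * Newer variant: the caller passes nr_data_types and receives one
 * bch_ioctl_dev_usage_type entry per data type, so new data types can be
 * reported without another ioctl revision.
 */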
static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
				    struct bch_ioctl_dev_usage_v2 __user *user_arg)
{
	struct bch_ioctl_dev_usage_v2 arg;
	struct bch_dev_usage src;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] || arg.pad[1] || arg.pad[2])
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);

	src = bch2_dev_usage_read(ca);

	arg.state		= ca->mi.state;
	arg.bucket_size		= ca->mi.bucket_size;
	arg.nr_data_types	= min(arg.nr_data_types, BCH_DATA_NR);
	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));

	for (unsigned i = 0; i < arg.nr_data_types; i++) {
		struct bch_ioctl_dev_usage_type t = {
			.buckets	= src.d[i].buckets,
			.sectors	= src.d[i].sectors,
			.fragmented	= src.d[i].fragmented,
		};

		ret = copy_to_user_errcode(&user_arg->d[i], &t, sizeof(t));
	}

static long bch2_ioctl_read_super(struct bch_fs *c,
				  struct bch_ioctl_read_super arg)
{
	struct bch_dev *ca = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
	    arg.pad)
		return -EINVAL;

	mutex_lock(&c->sb_lock);

	if (arg.flags & BCH_READ_DEV) {
		ca = bch2_device_lookup(c, arg.dev, arg.flags);
		ret = PTR_ERR_OR_ZERO(ca);
	}

	if (vstruct_bytes(sb) > arg.size) {
		ret = -ERANGE;
		goto err;
	}

	ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
				   vstruct_bytes(sb));
err:
	mutex_unlock(&c->sb_lock);
	return ret;
}
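
/*
 * BCH_IOCTL_DISK_GET_IDX: translate a block device number (dev_t) into this
 * filesystem's member device index.
 */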
static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
				    struct bch_ioctl_disk_get_idx arg)
{
	dev_t dev = huge_decode_dev(arg.dev);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for_each_online_member(c, ca)
		if (ca->dev == dev) {
			percpu_ref_put(&ca->io_ref);
			return ca->dev_idx;
		}

	return -BCH_ERR_ENOENT_dev_idx_not_found;
}

static long bch2_ioctl_disk_resize(struct bch_fs *c,
				   struct bch_ioctl_disk_resize arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);

	ret = bch2_dev_resize(c, ca, arg.nbuckets);

static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
					   struct bch_ioctl_disk_resize_journal arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad)
		return -EINVAL;

	if (arg.nbuckets > U32_MAX)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);

	ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);
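
/*
 * BCH_IOCTL_FSCK_ONLINE: run fsck on a mounted filesystem by replaying the
 * online recovery passes in a kernel thread while the filesystem stays in
 * use; only one online fsck may run at a time (online_fsck_mutex).
 */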
static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	c->stdio_filter = current;
	c->stdio = &thr->thr.stdio;

	/*
	 * XXX: can we figure out a way to do this without mucking with c->opts?
	 */
	unsigned old_fix_errors = c->opts.fix_errors;
	if (opt_defined(thr->opts, fix_errors))
		c->opts.fix_errors = thr->opts.fix_errors;
	else
		c->opts.fix_errors = FSCK_FIX_ask;

	set_bit(BCH_FS_fsck_running, &c->flags);

	c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
	int ret = bch2_run_online_recovery_passes(c);

	clear_bit(BCH_FS_fsck_running, &c->flags);

	c->stdio_filter = NULL;
	c->opts.fix_errors = old_fix_errors;

	up(&c->online_fsck_mutex);
	bch2_ro_ref_put(c);
	return ret;
}

static const struct thread_with_stdio_ops bch2_online_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_online_thread_fn,
};

static long bch2_ioctl_fsck_online(struct bch_fs *c,
				   struct bch_ioctl_fsck_online arg)
{
	struct fsck_thread *thr = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!bch2_ro_ref_tryget(c))
		return -EROFS;

	if (down_trylock(&c->online_fsck_mutex)) {
		bch2_ro_ref_put(c);
		return -EAGAIN;
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);

	thr->opts = bch2_opts_empty();

	char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);

	ret =	PTR_ERR_OR_ZERO(optstr) ?:
		bch2_parse_mount_opts(c, &thr->opts, NULL, optstr);

	ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops);

	if (ret < 0) {
		if (thr)
			bch2_fsck_thread_exit(&thr->thr);
		up(&c->online_fsck_mutex);
	}
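
/*
 * BCH_IOCTL() wraps the per-command handlers below: it copies the ioctl
 * argument struct in from userspace, calls bch2_ioctl_<name>(), and hands
 * the result back to the dispatch switch.
 */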
#define BCH_IOCTL(_name, _argtype)					\
do {									\
	_argtype i;							\
									\
	if (copy_from_user(&i, arg, sizeof(i)))				\
		return -EFAULT;						\
	ret = bch2_ioctl_##_name(c, i);					\
	goto out;							\
} while (0)
long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
	case BCH_IOCTL_QUERY_UUID:
		return bch2_ioctl_query_uuid(c, arg);
	case BCH_IOCTL_FS_USAGE:
		return bch2_ioctl_fs_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE:
		return bch2_ioctl_dev_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE_V2:
		return bch2_ioctl_dev_usage_v2(c, arg);
	case BCH_IOCTL_START:
		BCH_IOCTL(start, struct bch_ioctl_start);
	case BCH_IOCTL_STOP:
		return bch2_ioctl_stop(c);
	case BCH_IOCTL_READ_SUPER:
		BCH_IOCTL(read_super, struct bch_ioctl_read_super);
	case BCH_IOCTL_DISK_GET_IDX:
		BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
	}

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	switch (cmd) {
	case BCH_IOCTL_DISK_ADD:
		BCH_IOCTL(disk_add, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_REMOVE:
		BCH_IOCTL(disk_remove, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_ONLINE:
		BCH_IOCTL(disk_online, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_OFFLINE:
		BCH_IOCTL(disk_offline, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_SET_STATE:
		BCH_IOCTL(disk_set_state, struct bch_ioctl_disk_set_state);
	case BCH_IOCTL_DATA:
		BCH_IOCTL(data, struct bch_ioctl_data);
	case BCH_IOCTL_DISK_RESIZE:
		BCH_IOCTL(disk_resize, struct bch_ioctl_disk_resize);
	case BCH_IOCTL_DISK_RESIZE_JOURNAL:
		BCH_IOCTL(disk_resize_journal, struct bch_ioctl_disk_resize_journal);
	case BCH_IOCTL_FSCK_ONLINE:
		BCH_IOCTL(fsck_online, struct bch_ioctl_fsck_online);
	case BCH_IOCTL_QUERY_ACCOUNTING:
		return bch2_ioctl_query_accounting(c, arg);
	}
out:
	ret = bch2_err_class(ret);
	return ret;
}
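
/*
 * Char device plumbing: each filesystem gets a minor from this IDR and a
 * "bcachefs%u-ctl" node; minor U8_MAX is reserved for the global
 * /dev/bcachefs-ctl device handled by bch2_global_ioctl().
 */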
static DEFINE_IDR(bch_chardev_minor);

static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
{
	unsigned minor = iminor(file_inode(filp));
	struct bch_fs *c = minor < U8_MAX ? idr_find(&bch_chardev_minor, minor) : NULL;
	void __user *arg = (void __user *) v;

	return c
		? bch2_fs_ioctl(c, cmd, arg)
		: bch2_global_ioctl(cmd, arg);
}

static const struct file_operations bch_chardev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = bch2_chardev_ioctl,
	.open		= nonseekable_open,
};

static int bch_chardev_major;
static const struct class bch_chardev_class = {
	.name		= "bcachefs",
};
static struct device *bch_chardev;

void bch2_fs_chardev_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->chardev))
		device_unregister(c->chardev);
	if (c->minor >= 0)
		idr_remove(&bch_chardev_minor, c->minor);
}

int bch2_fs_chardev_init(struct bch_fs *c)
{
	c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
	if (c->minor < 0)
		return c->minor;

	c->chardev = device_create(&bch_chardev_class, NULL,
				   MKDEV(bch_chardev_major, c->minor), c,
				   "bcachefs%u-ctl", c->minor);
	if (IS_ERR(c->chardev))
		return PTR_ERR(c->chardev);

	return 0;
}
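
/*
 * Module-level registration: a dynamic char major is allocated for
 * "bcachefs-ctl", the device class is registered, and the global control
 * node is created at minor U8_MAX.
 */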
void bch2_chardev_exit(void)
{
	device_destroy(&bch_chardev_class, MKDEV(bch_chardev_major, U8_MAX));
	class_unregister(&bch_chardev_class);
	if (bch_chardev_major > 0)
		unregister_chrdev(bch_chardev_major, "bcachefs");
}

int __init bch2_chardev_init(void)
{
	int ret;

	bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
	if (bch_chardev_major < 0)
		return bch_chardev_major;

	ret = class_register(&bch_chardev_class);
	if (ret)
		goto major_err;

	bch_chardev = device_create(&bch_chardev_class, NULL,
				    MKDEV(bch_chardev_major, U8_MAX),
				    NULL, "bcachefs-ctl");
	if (IS_ERR(bch_chardev)) {
		ret = PTR_ERR(bch_chardev);
		goto class_err;
	}

	return 0;

class_err:
	class_unregister(&bch_chardev_class);
major_err:
	unregister_chrdev(bch_chardev_major, "bcachefs-ctl");
	return ret;
}

#endif /* NO_BCACHEFS_CHARDEV */