1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
5 #include "nocow_locking.h"
8 #include <linux/closure.h>
10 bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table
*t
, struct bpos bucket
)
12 u64 dev_bucket
= bucket_to_u64(bucket
);
13 struct nocow_lock_bucket
*l
= bucket_nocow_lock(t
, dev_bucket
);
16 for (i
= 0; i
< ARRAY_SIZE(l
->b
); i
++)
17 if (l
->b
[i
] == dev_bucket
&& atomic_read(&l
->l
[i
]))
/*
 * sign(v) - three-way sign of an integer expression: -1, 0 or +1.
 *
 * The argument and the full expansion are parenthesized so that compound
 * expressions (e.g. sign(v + lock_val), as used below) expand safely
 * regardless of operator precedence at the call site.  @v is still
 * evaluated more than once — do not pass expressions with side effects.
 */
#define sign(v)		(((v) < 0) ? -1 : ((v) > 0) ? 1 : 0)
/*
 * bch2_bucket_nocow_unlock - drop one nocow lock reference on @bucket.
 *
 * @flags selects the direction being released: nonzero releases a +1
 * (write/update side) reference, zero releases a -1 (read/copy side)
 * reference — mirroring the lock_val convention in
 * __bch2_bucket_nocow_trylock().
 *
 * The lock word l->l[i] is a signed refcount: positive while held for
 * update, negative while held for copy.  Releasing subtracts lock_val,
 * which moves the count toward zero.
 */
void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t, struct bpos bucket, int flags)
{
	u64 dev_bucket = bucket_to_u64(bucket);
	struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
	int lock_val = flags ? 1 : -1;
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket) {
			int v = atomic_sub_return(lock_val, &l->l[i]);

			/*
			 * If the count is still nonzero it must still have
			 * the sign we were holding; anything else means an
			 * unlock without a matching lock (or mixed-direction
			 * misuse).
			 */
			BUG_ON(v && sign(v) != lock_val);
			if (!v)
				/* Last reference gone: wake blocked lockers. */
				closure_wake_up(&l->wait);
			return;
		}

	/* No table entry for this bucket: caller never held the lock. */
	BUG();
}
/*
 * __bch2_bucket_nocow_trylock - attempt to take a nocow lock on @dev_bucket.
 *
 * @flags nonzero requests the update (+1) direction, zero the copy (-1)
 * direction.  Each l->l[i] is a signed refcount shared by all holders of
 * that entry: multiple holders of the same direction stack, opposite
 * directions exclude each other.
 *
 * Returns true if the reference was taken, false if the lock is held in
 * the opposing direction, the table entry array is full, or the refcount
 * would overflow (change sign).  Serialized against other lockers by
 * l->lock; the atomic reads below are therefore stable while we hold it.
 */
bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *l,
				 u64 dev_bucket, int flags)
{
	int v, lock_val = flags ? 1 : -1;
	unsigned i;

	spin_lock(&l->lock);

	/* Existing entry for this bucket? */
	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (l->b[i] == dev_bucket)
			goto got_entry;

	/* No entry yet: claim the first slot with a zero refcount. */
	for (i = 0; i < ARRAY_SIZE(l->b); i++)
		if (!atomic_read(&l->l[i])) {
			l->b[i] = dev_bucket;
			goto take_lock;
		}
fail:
	spin_unlock(&l->lock);
	return false;
got_entry:
	v = atomic_read(&l->l[i]);

	/* Held in the opposite direction: cannot stack on it. */
	if (lock_val > 0 ? v < 0 : v > 0)
		goto fail;
take_lock:
	v = atomic_read(&l->l[i]);

	/* Overflow? adding lock_val must not flip the count's sign. */
	if (v && sign(v + lock_val) != sign(v))
		goto fail;

	atomic_add(lock_val, &l->l[i]);
	spin_unlock(&l->lock);
	return true;
}
79 void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table
*t
,
80 struct nocow_lock_bucket
*l
,
81 u64 dev_bucket
, int flags
)
83 if (!__bch2_bucket_nocow_trylock(l
, dev_bucket
, flags
)) {
84 struct bch_fs
*c
= container_of(t
, struct bch_fs
, nocow_locks
);
85 u64 start_time
= local_clock();
87 __closure_wait_event(&l
->wait
, __bch2_bucket_nocow_trylock(l
, dev_bucket
, flags
));
88 bch2_time_stats_update(&c
->times
[BCH_TIME_nocow_lock_contended
], start_time
);
/*
 * bch2_nocow_locks_to_text - dump the nocow lock table into @out for
 * debugfs/sysfs inspection.
 *
 * Consecutive hash buckets with no held locks are run-length summarized
 * as "(%u empty entries)" instead of being printed individually.  Held
 * entries print the bucket position plus direction ("copy" for negative
 * counts, "update" for positive) and the reference count magnitude.
 *
 * NOTE(review): reads the atomics without l->lock, so output is a racy
 * snapshot — fine for a debug dump.
 */
void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_table *t)
{
	unsigned i, nr_zero = 0;
	struct nocow_lock_bucket *l;

	for (l = t->l; l < t->l + ARRAY_SIZE(t->l); l++) {
		unsigned v = 0;

		/* OR together all counts: nonzero iff anything is held. */
		for (i = 0; i < ARRAY_SIZE(l->l); i++)
			v |= atomic_read(&l->l[i]);

		if (!v) {
			/* Empty bucket: extend the current run. */
			nr_zero++;
			continue;
		}

		/* Flush the pending run of empty buckets before this one. */
		if (nr_zero)
			prt_printf(out, "(%u empty entries)\n", nr_zero);
		nr_zero = 0;

		for (i = 0; i < ARRAY_SIZE(l->l); i++) {
			int v = atomic_read(&l->l[i]);

			if (v) {
				bch2_bpos_to_text(out, u64_to_bucket(l->b[i]));
				prt_printf(out, ": %s %u ", v < 0 ? "copy" : "update", abs(v));
			}
		}

		prt_newline(out);
	}

	/* Trailing run of empty buckets at the end of the table. */
	if (nr_zero)
		prt_printf(out, "(%u empty entries)\n", nr_zero);
}
127 void bch2_fs_nocow_locking_exit(struct bch_fs
*c
)
129 struct bucket_nocow_lock_table
*t
= &c
->nocow_locks
;
131 for (struct nocow_lock_bucket
*l
= t
->l
; l
< t
->l
+ ARRAY_SIZE(t
->l
); l
++)
132 for (unsigned j
= 0; j
< ARRAY_SIZE(l
->l
); j
++)
133 BUG_ON(atomic_read(&l
->l
[j
]));
136 int bch2_fs_nocow_locking_init(struct bch_fs
*c
)
138 struct bucket_nocow_lock_table
*t
= &c
->nocow_locks
;
140 for (struct nocow_lock_bucket
*l
= t
->l
; l
< t
->l
+ ARRAY_SIZE(t
->l
); l
++)
141 spin_lock_init(&l
->lock
);