// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device concurrent positioning ranges.
 *
 *  Copyright (C) 2021 Western Digital Corporation or its Affiliates.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>

#include "blk.h"
static ssize_t
blk_ia_range_sector_show(struct blk_independent_access_range *iar,
			 char *buf)
{
	return sprintf(buf, "%llu\n", iar->sector);
}

static ssize_t
blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar,
			     char *buf)
{
	return sprintf(buf, "%llu\n", iar->nr_sectors);
}
struct blk_ia_range_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_independent_access_range *iar, char *buf);
};
static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = {
	.attr = { .name = "sector", .mode = 0444 },
	.show = blk_ia_range_sector_show,
};

static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = {
	.attr = { .name = "nr_sectors", .mode = 0444 },
	.show = blk_ia_range_nr_sectors_show,
};
static struct attribute *blk_ia_range_attrs[] = {
	&blk_ia_range_sector_entry.attr,
	&blk_ia_range_nr_sectors_entry.attr,
	NULL,
};
ATTRIBUTE_GROUPS(blk_ia_range);
static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct blk_ia_range_sysfs_entry *entry =
		container_of(attr, struct blk_ia_range_sysfs_entry, attr);
	struct blk_independent_access_range *iar =
		container_of(kobj, struct blk_independent_access_range, kobj);

	return entry->show(iar, buf);
}

static const struct sysfs_ops blk_ia_range_sysfs_ops = {
	.show	= blk_ia_range_sysfs_show,
};
/*
 * Independent access range entries are not freed individually, but all
 * together with struct blk_independent_access_ranges and its array of ranges.
 * Since kobject_add() takes a reference on the parent kobject contained in
 * struct blk_independent_access_ranges, the array of independent access range
 * entries cannot be freed until kobject_del() is called for all entries.
 * So we do not need to do anything here, but still need this no-op release
 * operation to avoid complaints from the kobject code.
 */
static void blk_ia_range_sysfs_nop_release(struct kobject *kobj)
{
}

static const struct kobj_type blk_ia_range_ktype = {
	.sysfs_ops	= &blk_ia_range_sysfs_ops,
	.default_groups	= blk_ia_range_groups,
	.release	= blk_ia_range_sysfs_nop_release,
};
/*
 * This will be executed only after all independent access range entries are
 * removed with kobject_del(), at which point, it is safe to free everything,
 * including the array of ranges.
 */
static void blk_ia_ranges_sysfs_release(struct kobject *kobj)
{
	struct blk_independent_access_ranges *iars =
		container_of(kobj, struct blk_independent_access_ranges, kobj);

	kfree(iars);
}

static const struct kobj_type blk_ia_ranges_ktype = {
	.release	= blk_ia_ranges_sysfs_release,
};
/**
 * disk_register_independent_access_ranges - register with sysfs a set of
 *		independent access ranges
 * @disk:	Target disk
 *
 * Register with sysfs a set of independent access ranges for @disk.
 */
int disk_register_independent_access_ranges(struct gendisk *disk)
{
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	struct request_queue *q = disk->queue;
	int i, ret;

	lockdep_assert_held(&q->sysfs_dir_lock);
	lockdep_assert_held(&q->sysfs_lock);

	if (!iars)
		return 0;

	/*
	 * At this point, iars is the new set of sector access ranges that
	 * needs to be registered with sysfs.
	 */
	WARN_ON(iars->sysfs_registered);
	ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
				   &disk->queue_kobj, "%s",
				   "independent_access_ranges");
	if (ret) {
		disk->ia_ranges = NULL;
		kobject_put(&iars->kobj);
		return ret;
	}

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		ret = kobject_init_and_add(&iars->ia_range[i].kobj,
					   &blk_ia_range_ktype, &iars->kobj,
					   "%d", i);
		if (ret) {
			while (--i >= 0)
				kobject_del(&iars->ia_range[i].kobj);
			kobject_del(&iars->kobj);
			kobject_put(&iars->kobj);
			return ret;
		}
	}

	iars->sysfs_registered = true;

	return 0;
}
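/*
 * Note: the paths below are illustrative only. With the parent kobject added
 * under disk->queue_kobj and each range kobject named after its index, a
 * successful registration is expected to show up in sysfs as:
 *
 *	/sys/block/<disk>/queue/independent_access_ranges/0/sector
 *	/sys/block/<disk>/queue/independent_access_ranges/0/nr_sectors
 *	/sys/block/<disk>/queue/independent_access_ranges/1/sector
 *	...
 */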
void disk_unregister_independent_access_ranges(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);
	lockdep_assert_held(&q->sysfs_lock);

	if (!iars)
		return;

	if (iars->sysfs_registered) {
		for (i = 0; i < iars->nr_ia_ranges; i++)
			kobject_del(&iars->ia_range[i].kobj);
		kobject_del(&iars->kobj);
		kobject_put(&iars->kobj);
	} else {
		kfree(iars);
	}

	disk->ia_ranges = NULL;
}
static struct blk_independent_access_range *
disk_find_ia_range(struct blk_independent_access_ranges *iars,
		   sector_t sector)
{
	struct blk_independent_access_range *iar;
	int i;

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		iar = &iars->ia_range[i];
		if (sector >= iar->sector &&
		    sector < iar->sector + iar->nr_sectors)
			return iar;
	}

	return NULL;
}
static bool disk_check_ia_ranges(struct gendisk *disk,
				 struct blk_independent_access_ranges *iars)
{
	struct blk_independent_access_range *iar, *tmp;
	sector_t capacity = get_capacity(disk);
	sector_t sector = 0;
	int i;

	if (WARN_ON_ONCE(!iars->nr_ia_ranges))
		return false;

	/*
	 * While sorting the ranges in increasing LBA order, check that the
	 * ranges do not overlap, that there are no sector holes and that all
	 * sectors belong to one range. See the example layout after this
	 * function.
	 */
	for (i = 0; i < iars->nr_ia_ranges; i++) {
		tmp = disk_find_ia_range(iars, sector);
		if (!tmp || tmp->sector != sector) {
			pr_warn("Invalid non-contiguous independent access ranges\n");
			return false;
		}

		iar = &iars->ia_range[i];
		if (tmp != iar) {
			swap(iar->sector, tmp->sector);
			swap(iar->nr_sectors, tmp->nr_sectors);
		}

		sector += iar->nr_sectors;
	}

	if (sector != capacity) {
		pr_warn("Independent access ranges do not match disk capacity\n");
		return false;
	}

	return true;
}
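/*
 * Illustration only (hypothetical values): for a 1000 sector disk, the two
 * ranges { .sector = 500, .nr_sectors = 500 } and
 * { .sector = 0, .nr_sectors = 500 } pass the check above and are swapped
 * into increasing LBA order, while { .sector = 0, .nr_sectors = 400 } and
 * { .sector = 500, .nr_sectors = 500 } are rejected because sectors 400 to
 * 499 do not belong to any range.
 */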
static bool disk_ia_ranges_changed(struct gendisk *disk,
				   struct blk_independent_access_ranges *new)
{
	struct blk_independent_access_ranges *old = disk->ia_ranges;
	int i;

	if (!old)
		return true;

	if (old->nr_ia_ranges != new->nr_ia_ranges)
		return true;

	for (i = 0; i < old->nr_ia_ranges; i++) {
		if (new->ia_range[i].sector != old->ia_range[i].sector ||
		    new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)
			return true;
	}

	return false;
}
/**
 * disk_alloc_independent_access_ranges - Allocate an independent access ranges
 *                                        data structure
 * @disk:		target disk
 * @nr_ia_ranges:	Number of independent access ranges
 *
 * Allocate a struct blk_independent_access_ranges structure with @nr_ia_ranges
 * access range descriptors.
 */
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges)
{
	struct blk_independent_access_ranges *iars;

	iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges),
			    GFP_KERNEL, disk->queue->node);
	if (iars)
		iars->nr_ia_ranges = nr_ia_ranges;
	return iars;
}
EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges);
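/*
 * A minimal usage sketch (illustrative only, not taken from a real driver):
 * a driver reporting two equally sized ranges could allocate and fill the
 * descriptors as follows before handing them over with
 * disk_set_independent_access_ranges():
 *
 *	struct blk_independent_access_ranges *iars;
 *	sector_t half = get_capacity(disk) >> 1;
 *
 *	iars = disk_alloc_independent_access_ranges(disk, 2);
 *	if (iars) {
 *		iars->ia_range[0].sector = 0;
 *		iars->ia_range[0].nr_sectors = half;
 *		iars->ia_range[1].sector = half;
 *		iars->ia_range[1].nr_sectors = get_capacity(disk) - half;
 *	}
 */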
/**
 * disk_set_independent_access_ranges - Set a disk independent access ranges
 * @disk:	target disk
 * @iars:	independent access ranges structure
 *
 * Set the independent access ranges information of the request queue
 * of @disk to @iars. If @iars is NULL, the independent access ranges
 * structure already set is cleared. If there are no differences between
 * @iars and the independent access ranges structure already set, @iars
 * is freed.
 */
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->sysfs_dir_lock);
	mutex_lock(&q->sysfs_lock);
	if (iars && !disk_check_ia_ranges(disk, iars)) {
		kfree(iars);
		iars = NULL;
	}
	if (iars && !disk_ia_ranges_changed(disk, iars)) {
		kfree(iars);
		goto unlock;
	}

	/*
	 * This may be called for a registered queue. E.g. during a device
	 * revalidation. If that is the case, we need to unregister the old
	 * set of independent access ranges and register the new set. If the
	 * queue is not registered, registration of the device request queue
	 * will register the independent access ranges.
	 */
	disk_unregister_independent_access_ranges(disk);
	disk->ia_ranges = iars;
	if (blk_queue_registered(q))
		disk_register_independent_access_ranges(disk);
unlock:
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
}
EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges);
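/*
 * Usage sketch (illustrative only): a driver would typically install the
 * ranges allocated above during device scan or revalidation:
 *
 *	disk_set_independent_access_ranges(disk, iars);
 *
 * and may later pass NULL to drop any previously set ranges, e.g. when a
 * revalidation finds that the feature is no longer reported by the device:
 *
 *	disk_set_independent_access_ranges(disk, NULL);
 */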