blk-ia-ranges.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Block device concurrent positioning ranges.
 *
 * Copyright (C) 2021 Western Digital Corporation or its Affiliates.
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>

#include "blk.h"

static ssize_t
blk_ia_range_sector_show(struct blk_independent_access_range *iar,
                         char *buf)
{
        return sprintf(buf, "%llu\n", iar->sector);
}

static ssize_t
blk_ia_range_nr_sectors_show(struct blk_independent_access_range *iar,
                             char *buf)
{
        return sprintf(buf, "%llu\n", iar->nr_sectors);
}

struct blk_ia_range_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_independent_access_range *iar, char *buf);
};

static struct blk_ia_range_sysfs_entry blk_ia_range_sector_entry = {
        .attr = { .name = "sector", .mode = 0444 },
        .show = blk_ia_range_sector_show,
};

static struct blk_ia_range_sysfs_entry blk_ia_range_nr_sectors_entry = {
        .attr = { .name = "nr_sectors", .mode = 0444 },
        .show = blk_ia_range_nr_sectors_show,
};

static struct attribute *blk_ia_range_attrs[] = {
        &blk_ia_range_sector_entry.attr,
        &blk_ia_range_nr_sectors_entry.attr,
        NULL,
};
ATTRIBUTE_GROUPS(blk_ia_range);

static ssize_t blk_ia_range_sysfs_show(struct kobject *kobj,
                                       struct attribute *attr, char *buf)
{
        struct blk_ia_range_sysfs_entry *entry =
                container_of(attr, struct blk_ia_range_sysfs_entry, attr);
        struct blk_independent_access_range *iar =
                container_of(kobj, struct blk_independent_access_range, kobj);

        return entry->show(iar, buf);
}

static const struct sysfs_ops blk_ia_range_sysfs_ops = {
        .show = blk_ia_range_sysfs_show,
};

/*
 * Independent access range entries are not freed individually but together
 * with struct blk_independent_access_ranges and its array of ranges. Since
 * kobject_add() takes a reference on the parent kobject contained in
 * struct blk_independent_access_ranges, the array of independent access range
 * entries cannot be freed until kobject_del() is called for all entries.
 * So we do not need to do anything here, but still need this no-op release
 * operation to avoid complaints from the kobject code.
 */
static void blk_ia_range_sysfs_nop_release(struct kobject *kobj)
{
}

static struct kobj_type blk_ia_range_ktype = {
        .sysfs_ops = &blk_ia_range_sysfs_ops,
        .default_groups = blk_ia_range_groups,
        .release = blk_ia_range_sysfs_nop_release,
};

/*
 * This will be executed only after all independent access range entries are
 * removed with kobject_del(), at which point, it is safe to free everything,
 * including the array of ranges.
 */
static void blk_ia_ranges_sysfs_release(struct kobject *kobj)
{
        struct blk_independent_access_ranges *iars =
                container_of(kobj, struct blk_independent_access_ranges, kobj);

        kfree(iars);
}

static struct kobj_type blk_ia_ranges_ktype = {
        .release = blk_ia_ranges_sysfs_release,
};
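/*
 * disk_register_independent_access_ranges() below adds these kobjects under
 * the request queue kobject. For example, a disk exposing two independent
 * access ranges would typically end up with the following sysfs entries
 * (assuming a disk named "sda"; the name is illustrative only):
 *
 *	/sys/block/sda/queue/independent_access_ranges/0/sector
 *	/sys/block/sda/queue/independent_access_ranges/0/nr_sectors
 *	/sys/block/sda/queue/independent_access_ranges/1/sector
 *	/sys/block/sda/queue/independent_access_ranges/1/nr_sectors
 */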
/**
 * disk_register_independent_access_ranges - register with sysfs a set of
 *		independent access ranges
 * @disk:	Target disk
 * @new_iars:	New set of independent access ranges
 *
 * Register with sysfs a set of independent access ranges for @disk.
 * If @new_iars is not NULL, this set of ranges is registered and the old set
 * specified by q->ia_ranges is unregistered. Otherwise, q->ia_ranges is
 * registered if it is not already.
 */
int disk_register_independent_access_ranges(struct gendisk *disk,
                                struct blk_independent_access_ranges *new_iars)
{
        struct request_queue *q = disk->queue;
        struct blk_independent_access_ranges *iars;
        int i, ret;

        lockdep_assert_held(&q->sysfs_dir_lock);
        lockdep_assert_held(&q->sysfs_lock);

        /* If a new range set is specified, unregister the old one */
        if (new_iars) {
                if (q->ia_ranges)
                        disk_unregister_independent_access_ranges(disk);
                q->ia_ranges = new_iars;
        }

        iars = q->ia_ranges;
        if (!iars)
                return 0;

        /*
         * At this point, iars is the new set of sector access ranges that
         * needs to be registered with sysfs.
         */
        WARN_ON(iars->sysfs_registered);
        ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
                                   &q->kobj, "%s", "independent_access_ranges");
        if (ret) {
                q->ia_ranges = NULL;
                kobject_put(&iars->kobj);
                return ret;
        }

        for (i = 0; i < iars->nr_ia_ranges; i++) {
                ret = kobject_init_and_add(&iars->ia_range[i].kobj,
                                           &blk_ia_range_ktype, &iars->kobj,
                                           "%d", i);
                if (ret) {
                        while (--i >= 0)
                                kobject_del(&iars->ia_range[i].kobj);
                        kobject_del(&iars->kobj);
                        kobject_put(&iars->kobj);
                        return ret;
                }
        }

        iars->sysfs_registered = true;

        return 0;
}

void disk_unregister_independent_access_ranges(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_independent_access_ranges *iars = q->ia_ranges;
        int i;

        lockdep_assert_held(&q->sysfs_dir_lock);
        lockdep_assert_held(&q->sysfs_lock);

        if (!iars)
                return;

        if (iars->sysfs_registered) {
                for (i = 0; i < iars->nr_ia_ranges; i++)
                        kobject_del(&iars->ia_range[i].kobj);
                kobject_del(&iars->kobj);
                kobject_put(&iars->kobj);
        } else {
                kfree(iars);
        }

        q->ia_ranges = NULL;
}

static struct blk_independent_access_range *
disk_find_ia_range(struct blk_independent_access_ranges *iars,
                   sector_t sector)
{
        struct blk_independent_access_range *iar;
        int i;

        for (i = 0; i < iars->nr_ia_ranges; i++) {
                iar = &iars->ia_range[i];
                if (sector >= iar->sector &&
                    sector < iar->sector + iar->nr_sectors)
                        return iar;
        }

        return NULL;
}
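/*
 * Example of the validation done by disk_check_ia_ranges() below (the sector
 * counts are illustrative only): on a 2000-sector disk, ranges reported as
 * {sector = 1000, nr_sectors = 1000} and {sector = 0, nr_sectors = 1000} are
 * swapped into increasing LBA order and accepted, whereas ranges such as
 * {0, 500} and {1000, 1000} are rejected because sectors 500 to 999 do not
 * belong to any range.
 */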
static bool disk_check_ia_ranges(struct gendisk *disk,
                                 struct blk_independent_access_ranges *iars)
{
        struct blk_independent_access_range *iar, *tmp;
        sector_t capacity = get_capacity(disk);
        sector_t sector = 0;
        int i;

        /*
         * While sorting the ranges in increasing LBA order, check that the
         * ranges do not overlap, that there are no sector holes and that all
         * sectors belong to one range.
         */
        for (i = 0; i < iars->nr_ia_ranges; i++) {
                tmp = disk_find_ia_range(iars, sector);
                if (!tmp || tmp->sector != sector) {
                        pr_warn("Invalid non-contiguous independent access ranges\n");
                        return false;
                }

                iar = &iars->ia_range[i];
                if (tmp != iar) {
                        swap(iar->sector, tmp->sector);
                        swap(iar->nr_sectors, tmp->nr_sectors);
                }

                sector += iar->nr_sectors;
        }

        if (sector != capacity) {
                pr_warn("Independent access ranges do not match disk capacity\n");
                return false;
        }

        return true;
}

static bool disk_ia_ranges_changed(struct gendisk *disk,
                                   struct blk_independent_access_ranges *new)
{
        struct blk_independent_access_ranges *old = disk->queue->ia_ranges;
        int i;

        if (!old)
                return true;

        if (old->nr_ia_ranges != new->nr_ia_ranges)
                return true;

        for (i = 0; i < old->nr_ia_ranges; i++) {
                if (new->ia_range[i].sector != old->ia_range[i].sector ||
                    new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)
                        return true;
        }

        return false;
}

/**
 * disk_alloc_independent_access_ranges - Allocate an independent access
 *					   ranges data structure
 * @disk:		target disk
 * @nr_ia_ranges:	Number of independent access ranges
 *
 * Allocate a struct blk_independent_access_ranges structure with
 * @nr_ia_ranges access range descriptors.
 */
struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges)
{
        struct blk_independent_access_ranges *iars;

        iars = kzalloc_node(struct_size(iars, ia_range, nr_ia_ranges),
                            GFP_KERNEL, disk->queue->node);
        if (iars)
                iars->nr_ia_ranges = nr_ia_ranges;
        return iars;
}
EXPORT_SYMBOL_GPL(disk_alloc_independent_access_ranges);

/**
 * disk_set_independent_access_ranges - Set a disk independent access ranges
 * @disk:	target disk
 * @iars:	independent access ranges structure
 *
 * Set the independent access ranges information of the request queue
 * of @disk to @iars. If @iars is NULL, the independent access ranges
 * structure already set is cleared. If there are no differences between
 * @iars and the independent access ranges structure already set, @iars
 * is freed.
 */
void disk_set_independent_access_ranges(struct gendisk *disk,
                                struct blk_independent_access_ranges *iars)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON_ONCE(iars && !iars->nr_ia_ranges)) {
                kfree(iars);
                iars = NULL;
        }

        mutex_lock(&q->sysfs_dir_lock);
        mutex_lock(&q->sysfs_lock);

        if (iars) {
                if (!disk_check_ia_ranges(disk, iars)) {
                        kfree(iars);
                        iars = NULL;
                        goto reg;
                }

                if (!disk_ia_ranges_changed(disk, iars)) {
                        kfree(iars);
                        goto unlock;
                }
        }

        /*
         * This may be called for a registered queue, e.g. during device
         * revalidation. If that is the case, we need to unregister the old
         * set of independent access ranges and register the new set. If the
         * queue is not registered, registration of the device request queue
         * will register the independent access ranges, so only swap in the
         * new set and free the old one.
         */
reg:
        if (blk_queue_registered(q)) {
                disk_register_independent_access_ranges(disk, iars);
        } else {
                swap(q->ia_ranges, iars);
                kfree(iars);
        }

unlock:
        mutex_unlock(&q->sysfs_lock);
        mutex_unlock(&q->sysfs_dir_lock);
}
EXPORT_SYMBOL_GPL(disk_set_independent_access_ranges);
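/*
 * Usage sketch (illustrative only, not part of this file): a driver that has
 * detected two actuators, each serving half of the disk capacity, could
 * describe them with the two exported helpers above roughly as follows.
 * The helper name example_setup_iars() and the two-range split are
 * assumptions made for the example. Note that ownership of the allocated
 * structure passes to the block layer in disk_set_independent_access_ranges(),
 * which validates, registers or frees it as needed.
 *
 *	static int example_setup_iars(struct gendisk *disk)
 *	{
 *		struct blk_independent_access_ranges *iars;
 *		sector_t half = get_capacity(disk) / 2;
 *
 *		iars = disk_alloc_independent_access_ranges(disk, 2);
 *		if (!iars)
 *			return -ENOMEM;
 *
 *		iars->ia_range[0].sector = 0;
 *		iars->ia_range[0].nr_sectors = half;
 *		iars->ia_range[1].sector = half;
 *		iars->ia_range[1].nr_sectors = get_capacity(disk) - half;
 *
 *		disk_set_independent_access_ranges(disk, iars);
 *		return 0;
 *	}
 */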