// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
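/*
 * Illustrative sketch (not part of the upstream file): a new read-only
 * hctx attribute plugs into the wrappers above by supplying a ->show
 * callback and an entry; the "active" name below is hypothetical.
 *
 *	static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx,
 *						   char *page)
 *	{
 *		return sprintf(page, "%d\n", atomic_read(&hctx->nr_active));
 *	}
 *
 *	static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
 *		.attr = {.name = "active", .mode = 0444 },
 *		.show = blk_mq_hw_sysfs_active_show,
 *	};
 *
 * The entry would then be listed in default_hw_ctx_attrs[] below, after
 * which blk_mq_hw_sysfs_show() dispatches to it with q->sysfs_lock held.
 */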
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	const size_t size = PAGE_SIZE - 1;
	unsigned int i, first = 1;
	int ret = 0, pos = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret = snprintf(pos + page, size - pos, "%u", i);
		else
			ret = snprintf(pos + page, size - pos, ", %u", i);

		if (ret >= size - pos)
			break;

		first = 0;
		pos += ret;
	}

	ret = snprintf(pos + page, size + 1 - pos, "\n");
	return pos + ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};

static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, j, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			goto out;
	}

	return 0;
out:
	hctx_for_each_ctx(hctx, ctx, j) {
		if (j < i)
			kobject_del(&ctx->kobj);
	}
	kobject_del(&hctx->kobj);
	return ret;
}
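/*
 * Registration above yields a hierarchy of roughly this shape under the
 * disk's device directory ("sda" and the queue indexes are illustrative):
 *
 *	/sys/block/sda/mq/0/nr_tags
 *	/sys/block/sda/mq/0/nr_reserved_tags
 *	/sys/block/sda/mq/0/cpu_list
 *	/sys/block/sda/mq/0/cpu0/
 *	/sys/block/sda/mq/1/...
 *
 * e.g. "cat /sys/block/sda/mq/0/cpu_list" prints the CPUs mapped to
 * hardware queue 0 via blk_mq_hw_sysfs_cpus_show().
 */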
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_dir_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_dir_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_dir_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_dir_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	return ret;
}
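/*
 * Reference-count sketch of the init/teardown pairing above (illustrative,
 * assuming q->mq_kobj points at the blk_mq_ctxs kobject as set up by the
 * queue allocation path in blk-mq.c):
 *
 *	blk_mq_sysfs_init(q);	// takes one mq_kobj ref per possible CPU
 *	...
 *	blk_mq_sysfs_deinit(q);	// final puts; releases fire last
 *
 * Each ctx kobject's release, blk_mq_ctx_sysfs_release(), drops one of
 * those per-CPU references, so blk_mq_sysfs_release() frees the
 * blk_mq_ctxs container (and its percpu queue_ctx) only after the last
 * ctx kobject is gone.
 */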