// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	/* ->mem and ->iomem share a union, so this check covers both cases */
	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
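/*
 * Illustrative sketch (not part of this file): a bus provider that wants
 * several devices under one nvdimm_bus to share a mapping of the same
 * physical window could call devm_nvdimm_memremap() from each probe path.
 * The 'res' resource and the error code below are hypothetical:
 *
 *	void *base = devm_nvdimm_memremap(dev, res->start,
 *			resource_size(res), MEMREMAP_WB);
 *	if (!base)
 *		return -ENOMEM;
 *
 * Repeat callers for the same offset share one refcounted mapping; the
 * mapping is torn down when the last devres owner releases it.
 */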
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects nd_device_lock() to be held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
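/*
 * Accepted 'uuid' input, for reference (example value, not from this file):
 * nd_uuid_parse() consumes 32 hex digits and tolerates an optional '-',
 * ':', or newline after each byte, so "e39bbed9-dca6-4b39-9401-a422dd9e887e"
 * and the same digits with no separators parse to the same 16-byte uuid.
 * A stray non-hex, non-separator character makes the next isxdigit()
 * check fail and the write returns -EINVAL.
 */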
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_size == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_size = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	nd_device_lock(dev);
	nd_device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
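/*
 * Bus-level firmware activation. When the bus provider supplies fw_ops,
 * two attributes appear under a 'firmware/' group on the bus device.
 * Illustrative view (device name and values are an example, assuming a
 * platform bus registered as ndbus0):
 *
 *	# cat /sys/bus/nd/devices/ndbus0/firmware/capability
 *	quiesce
 *	# cat /sys/bus/nd/devices/ndbus0/firmware/activate
 *	armed
 */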
static ssize_t capability_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);

	switch (cap) {
	case NVDIMM_FWA_CAP_QUIESCE:
		return sprintf(buf, "quiesce\n");
	case NVDIMM_FWA_CAP_LIVE:
		return sprintf(buf, "live\n");
	default:
		return -EOPNOTSUPP;
	}
}

static DEVICE_ATTR_RO(capability);

static ssize_t activate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;
	enum nvdimm_fwa_state state;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	cap = nd_desc->fw_ops->capability(nd_desc);
	state = nd_desc->fw_ops->activate_state(nd_desc);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return -EOPNOTSUPP;

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	case NVDIMM_FWA_ARM_OVERFLOW:
		return sprintf(buf, "overflow\n");
	default:
		return -ENXIO;
	}
}

static int exec_firmware_activate(void *data)
{
	struct nvdimm_bus_descriptor *nd_desc = data;

	return nd_desc->fw_ops->activate(nd_desc);
}

static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	state = nd_desc->fw_ops->activate_state(nd_desc);

	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		rc = -ENXIO;
	}

	if (rc == 0)
		rc = len;
	return rc;
}

static DEVICE_ATTR_ADMIN_RW(activate);

static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_capability cap;

	/*
	 * Both 'activate' and 'capability' disappear when no fw_ops are
	 * registered, or when the reported capability is below
	 * NVDIMM_FWA_CAP_QUIESCE.
	 */
	if (!nd_desc->fw_ops)
		return 0;

	cap = nd_desc->fw_ops->capability(nd_desc);
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}
static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};

const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};

int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
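/*
 * Illustrative sketch of how a bus provider might use the helper above
 * (the notification step lives outside this file): after learning of a
 * new poison range from a platform event, record it on the bus and then
 * ask the affected region to revalidate its badblocks:
 *
 *	nvdimm_bus_add_badrange(nvdimm_bus, addr, len);
 *	nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
 */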
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;

	nd_label_init();

	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);