Lines Matching refs:head (source lines that reference the identifier head)
102 spin_lock_irqsave(&ns->head->requeue_lock, flags);
104 bio_set_dev(bio, ns->head->disk->part0);
118 blk_steal_bios(&ns->head->requeue_list, req);
119 spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
122 kblockd_schedule_work(&ns->head->requeue_work);
128 struct gendisk *disk = ns->head->disk;
145 bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
156 if (!ns->head->disk)
158 kblockd_schedule_work(&ns->head->requeue_work);
160 disk_uevent(ns->head->disk, KOBJ_CHANGE);
176 struct nvme_ns_head *head = ns->head;
180 if (!head)
184 if (ns == rcu_access_pointer(head->current_path[node])) {
185 rcu_assign_pointer(head->current_path[node], NULL);
200 kblockd_schedule_work(&ns->head->requeue_work);
207 struct nvme_ns_head *head = ns->head;
208 sector_t capacity = get_capacity(head->disk);
212 srcu_idx = srcu_read_lock(&head->srcu);
213 list_for_each_entry_rcu(ns, &head->list, siblings) {
217 srcu_read_unlock(&head->srcu, srcu_idx);
220 rcu_assign_pointer(head->current_path[node], NULL);
221 kblockd_schedule_work(&head->requeue_work);
240 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
245 list_for_each_entry_rcu(ns, &head->list, siblings) {
249 if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
275 rcu_assign_pointer(head->current_path[node], found);
279 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
282 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
286 return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
289 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
294 if (list_is_singular(&head->list)) {
300 for (ns = nvme_next_ns(head, old);
302 ns = nvme_next_ns(head, ns)) {
328 rcu_assign_pointer(head->current_path[node], found);
338 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
343 ns = srcu_dereference(head->current_path[node], &head->srcu);
345 return __nvme_find_path(head, node);
347 if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
348 return nvme_round_robin_path(head, node, ns);
350 return __nvme_find_path(head, node);
354 static bool nvme_available_path(struct nvme_ns_head *head)
358 list_for_each_entry_rcu(ns, &head->list, siblings) {
376 struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
377 struct device *dev = disk_to_dev(head->disk);
390 srcu_idx = srcu_read_lock(&head->srcu);
391 ns = nvme_find_path(head);
395 trace_block_bio_remap(bio, disk_devt(ns->head->disk),
398 } else if (nvme_available_path(head)) {
401 spin_lock_irq(&head->requeue_lock);
402 bio_list_add(&head->requeue_list, bio);
403 spin_unlock_irq(&head->requeue_lock);
410 srcu_read_unlock(&head->srcu, srcu_idx);
429 struct nvme_ns_head *head = disk->private_data;
433 srcu_idx = srcu_read_lock(&head->srcu);
434 ns = nvme_find_path(head);
437 srcu_read_unlock(&head->srcu, srcu_idx);
484 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
488 head->cdev_device.parent = &head->subsys->dev;
489 ret = dev_set_name(&head->cdev_device, "ng%dn%d",
490 head->subsys->instance, head->instance);
493 ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
500 struct nvme_ns_head *head =
504 spin_lock_irq(&head->requeue_lock);
505 next = bio_list_get(&head->requeue_list);
506 spin_unlock_irq(&head->requeue_lock);
516 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
520 mutex_init(&head->lock);
521 bio_list_init(&head->requeue_list);
522 spin_lock_init(&head->requeue_lock);
523 INIT_WORK(&head->requeue_work, nvme_requeue_work);
531 !nvme_is_unique_nsid(ctrl, head) || !multipath)
534 head->disk = blk_alloc_disk(ctrl->numa_node);
535 if (!head->disk)
537 head->disk->fops = &nvme_ns_head_ops;
538 head->disk->private_data = head;
539 sprintf(head->disk->disk_name, "nvme%dn%d",
540 ctrl->subsys->instance, head->instance);
542 blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
543 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
544 blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue);
553 blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
556 blk_queue_logical_block_size(head->disk->queue, 512);
557 blk_set_stacking_limits(&head->disk->queue->limits);
558 blk_queue_dma_alignment(head->disk->queue, 3);
563 blk_queue_write_cache(head->disk->queue, vwc, vwc);
569 struct nvme_ns_head *head = ns->head;
572 if (!head->disk)
578 * head.
580 if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
581 rc = device_add_disk(&head->subsys->dev, head->disk,
587 nvme_add_ns_head_cdev(head);
590 mutex_lock(&head->lock);
594 srcu_idx = srcu_read_lock(&head->srcu);
596 __nvme_find_path(head, node);
597 srcu_read_unlock(&head->srcu, srcu_idx);
599 mutex_unlock(&head->lock);
601 synchronize_srcu(&head->srcu);
602 kblockd_schedule_work(&head->requeue_work);
696 if (ns->head->ns_id < nsid)
698 if (ns->head->ns_id == nsid)
702 if (ns->head->ns_id > nsid)
871 if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
873 ns->head->disk->queue);
875 if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
876 ns->head->disk->nr_zones = ns->disk->nr_zones;
880 void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
882 if (!head->disk)
884 kblockd_schedule_work(&head->requeue_work);
885 if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
886 nvme_cdev_del(&head->cdev, &head->cdev_device);
887 del_gendisk(head->disk);
891 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
893 if (!head->disk)
896 kblockd_schedule_work(&head->requeue_work);
897 flush_work(&head->requeue_work);
898 put_disk(head->disk);
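
The fragments above around nvme_failover_req, nvme_ns_head_submit_bio and nvme_requeue_work all touch the same trio of fields: head->requeue_lock, head->requeue_list and head->requeue_work. The pattern they suggest is that when no usable path exists, bios are parked on a locked list and a work item later drains and resubmits them. Below is a minimal standalone userspace sketch of that control flow only; every name in it (demo_bio, demo_submit, demo_requeue_work, path_available) is invented for illustration, and it deliberately omits the IRQ-safe spinlocks, bio_list helpers and kblockd work scheduling the listed lines show the real driver using.

/*
 * Userspace sketch of the requeue pattern visible in the listing
 * (requeue_lock / requeue_list / requeue_work): when no path is usable,
 * requests are parked on a locked list and a worker resubmits them later.
 * All names here are invented; this is not the kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct demo_bio {
	int id;
	struct demo_bio *next;
};

static pthread_mutex_t requeue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_bio *requeue_list;	/* singly linked, LIFO for brevity */
static bool path_available;		/* flipped when a path "comes back" */

/* Park a request for later, like bio_list_add() under head->requeue_lock. */
static void demo_requeue(struct demo_bio *bio)
{
	pthread_mutex_lock(&requeue_lock);
	bio->next = requeue_list;
	requeue_list = bio;
	pthread_mutex_unlock(&requeue_lock);
}

/* Grab the whole parked list and resubmit it, like nvme_requeue_work(). */
static void demo_requeue_work(void)
{
	pthread_mutex_lock(&requeue_lock);
	struct demo_bio *next = requeue_list;
	requeue_list = NULL;
	pthread_mutex_unlock(&requeue_lock);

	while (next) {
		struct demo_bio *bio = next;
		next = bio->next;
		printf("resubmitting bio %d\n", bio->id);
		free(bio);
	}
}

static void demo_submit(struct demo_bio *bio)
{
	if (path_available) {
		printf("submitting bio %d\n", bio->id);
		free(bio);
	} else {
		printf("no usable path, parking bio %d\n", bio->id);
		demo_requeue(bio);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct demo_bio *bio = malloc(sizeof(*bio));
		bio->id = i;
		demo_submit(bio);
	}

	path_available = true;	/* a path "came back" */
	demo_requeue_work();	/* stands in for kblockd_schedule_work() */
	return 0;
}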
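
The nvme_next_ns, nvme_round_robin_path and nvme_find_path fragments hint at the path-selection side: start after the previously used namespace, wrap around the sibling list, skip entries that cannot take I/O, and cache the result in head->current_path. Below is a rough, single-threaded userspace sketch of that round-robin walk; demo_path, demo_head and the usable flag are invented stand-ins, and none of the SRCU protection, per-NUMA-node caching or ANA-state checks of the actual code is modelled.

/*
 * Minimal userspace sketch of the round-robin "next usable path" pattern
 * suggested by nvme_next_ns()/nvme_round_robin_path() in the listing above.
 * All names (demo_path, demo_head, demo_round_robin) are invented for
 * illustration; this is not the kernel code and does no locking at all.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct demo_path {
	const char *name;
	bool usable;			/* stand-in for "this path can take I/O" */
	struct demo_path *next;		/* NULL-terminated sibling list */
};

struct demo_head {
	struct demo_path *first;	/* analogue of head->list */
	struct demo_path *current;	/* analogue of head->current_path[node] */
};

/* Wrap to the first sibling when we fall off the end, like nvme_next_ns(). */
static struct demo_path *demo_next_path(struct demo_head *head,
					struct demo_path *p)
{
	if (p && p->next)
		return p->next;
	return head->first;
}

/*
 * Start after the previously used path, take the first usable one, and
 * remember it; give up once the walk has come back to where it started.
 */
static struct demo_path *demo_round_robin(struct demo_head *head)
{
	struct demo_path *old = head->current ? head->current : head->first;
	struct demo_path *p = demo_next_path(head, old);

	do {
		if (p->usable) {
			head->current = p;
			return p;
		}
		p = demo_next_path(head, p);
	} while (p != demo_next_path(head, old));

	return NULL;			/* no usable path at all */
}

int main(void)
{
	struct demo_path c = { "path-c", true,  NULL };
	struct demo_path b = { "path-b", false, &c };
	struct demo_path a = { "path-a", true,  &b };
	struct demo_head head = { .first = &a, .current = NULL };

	for (int i = 0; i < 4; i++) {
		struct demo_path *p = demo_round_robin(&head);
		printf("I/O %d -> %s\n", i, p ? p->name : "requeue");
	}
	return 0;	/* prints c, a, c, a: the unusable path-b is skipped */
}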