Lines Matching refs:vblk

59 	 * virtblk_remove() sets vblk->vdev to NULL.
62 * shut down before vblk->vdev is set to NULL and therefore do not need
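
The two comment fragments above (59/62) state the driver's lifetime rule: virtblk_remove() clears vblk->vdev, so any sleepable path that touches the device outside request processing must take vdev_mutex and re-check the pointer. A minimal sketch of that pattern, reconstructed from the getgeo and report_zones hunks later in this listing; the helper name is hypothetical:

static int virtblk_read_geometry_sketch(struct virtio_blk *vblk,
					struct hd_geometry *geo)
{
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);
	if (!vblk->vdev) {
		/* virtblk_remove() already ran; the device is gone. */
		ret = -ENXIO;
		goto out;
	}
	virtio_cread(vblk->vdev, struct virtio_blk_config,
		     geometry.cylinders, &geo->cylinders);
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}
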
134 struct virtio_blk *vblk = hctx->queue->queuedata;
135 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
339 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
345 req->__sector = virtio64_to_cpu(vblk->vdev,
353 struct virtio_blk *vblk = vq->vdev->priv;
360 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
363 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
376 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
377 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
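
The 353-377 fragments are the interrupt-side completion path (virtblk_done() in the driver): drain the virtqueue under the per-queue lock, complete each request, and restart any hardware queue that was stopped on a full ring. A hedged reconstruction; per-request status decoding is elided:

static void virtblk_done_sketch(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
	} while (!virtqueue_enable_cb(vq));

	/* In case the queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
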
382 struct virtio_blk *vblk = hctx->queue->queuedata;
383 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
408 struct virtio_blk *vblk,
415 status = virtblk_setup_cmd(vblk->vdev, req, vbr);
432 struct virtio_blk *vblk = hctx->queue->queuedata;
441 status = virtblk_prep_rq(hctx, vblk, req, vbr);
445 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
446 err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
448 virtqueue_kick(vblk->vqs[qid].vq);
454 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
459 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
461 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
464 virtqueue_notify(vblk->vqs[qid].vq);
470 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
475 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
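
Fragments 432-475 are the submission side. A hedged sketch of ->queue_rq(): prep outside the lock, add to the ring under the per-vq lock, stop the hardware queue when the ring is full, and notify the device only after dropping the lock (virtblk_prep_rq()/virtblk_add_req() are the driver helpers named above; data unmapping on failure is elided):

static blk_status_t virtio_queue_rq_sketch(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	int qid = hctx->queue_num;
	bool notify = false;
	unsigned long flags;
	blk_status_t status;
	int err;

	status = virtblk_prep_rq(hctx, vblk, req, vbr);
	if (status != BLK_STS_OK)
		return status;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Ring full: stop until a completion frees descriptors. */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		return err == -ENOSPC ? BLK_STS_DEV_RESOURCE : BLK_STS_IOERR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}
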
537 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk,
541 struct request_queue *q = vblk->disk->queue;
546 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors));
566 static int virtblk_submit_zone_report(struct virtio_blk *vblk,
570 struct request_queue *q = vblk->disk->queue;
581 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
582 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);
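
The 566-582 fragments build a zone-report command. A hedged sketch: the report is issued as a driver-private (REQ_OP_DRV_IN) request with the report buffer mapped behind it; decoding of the virtio status byte from the pdu is elided, and only the blk-mq level status is checked here:

static int virtblk_submit_zone_report_sketch(struct virtio_blk *vblk,
					     char *report_buf,
					     size_t report_len,
					     sector_t sector)
{
	struct request_queue *q = vblk->disk->queue;
	struct virtblk_req *vbr;
	struct request *req;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	vbr = blk_mq_rq_to_pdu(req);
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT);
	vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector);

	err = blk_rq_map_kern(q, req, report_buf, report_len, GFP_KERNEL);
	if (err)
		goto out;

	err = blk_status_to_errno(blk_execute_rq(req, false));
out:
	blk_mq_free_request(req);
	return err;
}
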
595 static int virtblk_parse_zone(struct virtio_blk *vblk,
601 zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
602 if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
603 zone.len = vblk->zone_sectors;
605 zone.len = get_capacity(vblk->disk) - zone.start;
606 zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
607 zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
620 dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
654 dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n",
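
Fragments 595-654 are the per-zone translation. A hedged sketch of virtblk_parse_zone(): convert one little-endian descriptor into a struct blk_zone, clamping the last zone to the disk capacity; the type/condition decoding (with the dev_err() rejects shown above) is elided. The z_start/z_cap/z_wp field names follow the fragments:

static int virtblk_parse_zone_sketch(struct virtio_blk *vblk,
				     struct virtio_blk_zone_descriptor *entry,
				     unsigned int idx,
				     report_zones_cb cb, void *data)
{
	struct blk_zone zone = { };

	zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
	if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
		zone.len = vblk->zone_sectors;
	else
		zone.len = get_capacity(vblk->disk) - zone.start;
	zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
	zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);

	/* zone.type / zone.cond decoding elided; invalid values are
	 * rejected with the dev_err() calls at 620/654. */

	return cb(&zone, idx, data);
}
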
670 struct virtio_blk *vblk = disk->private_data;
677 if (WARN_ON_ONCE(!vblk->zone_sectors))
680 report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
684 mutex_lock(&vblk->vdev_mutex);
686 if (!vblk->vdev) {
691 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
694 ret = virtblk_submit_zone_report(vblk, (char *)report,
699 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
705 ret = virtblk_parse_zone(vblk, &report->zones[i],
710 sector = virtio64_to_cpu(vblk->vdev,
712 vblk->zone_sectors;
722 mutex_unlock(&vblk->vdev_mutex);
727 static void virtblk_revalidate_zones(struct virtio_blk *vblk)
731 virtio_cread(vblk->vdev, struct virtio_blk_config,
735 dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
739 disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
742 WARN_ON_ONCE(!vblk->zone_sectors);
743 if (!blk_revalidate_disk_zones(vblk->disk, NULL))
744 set_capacity_and_notify(vblk->disk, 0);
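
Fragments 727-744 handle zone-model changes at config-change time. A hedged sketch: anything but a host-managed model drops the zoned mode, and a failed revalidation zeroes the capacity so stale zone metadata is never used (the VIRTIO_BLK_Z_* constants are the uapi zone models, not shown in this listing):

static void virtblk_revalidate_zones_sketch(struct virtio_blk *vblk)
{
	u8 model;

	virtio_cread(vblk->vdev, struct virtio_blk_config,
		     zoned.model, &model);
	switch (model) {
	default:
		dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
		fallthrough;
	case VIRTIO_BLK_Z_NONE:
	case VIRTIO_BLK_Z_HA:
		disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
		return;
	case VIRTIO_BLK_Z_HM:
		WARN_ON_ONCE(!vblk->zone_sectors);
		if (!blk_revalidate_disk_zones(vblk->disk, NULL))
			set_capacity_and_notify(vblk->disk, 0);
	}
}
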
749 struct virtio_blk *vblk,
772 disk_set_zoned(vblk->disk, BLK_ZONED_HM);
777 disk_set_max_open_zones(vblk->disk, v);
782 disk_set_max_active_zones(vblk->disk, v);
801 &vblk->zone_sectors);
802 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
805 vblk->zone_sectors);
808 blk_queue_chunk_sectors(q, vblk->zone_sectors);
809 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);
812 dev_warn(&vblk->vdev->dev,
832 return blk_revalidate_disk_zones(vblk->disk, NULL);
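
Fragments 749-832 probe the zoned geometry. A hedged sketch of the core of virtblk_probe_zoned_device(): mark the disk host-managed, surface the device's open/active zone limits, require a power-of-two zone size, and hand it to the block layer as the chunk size before building the zone bitmaps. Several limits (max append sectors, write granularity) are elided:

static int virtblk_probe_zoned_sketch(struct virtio_device *vdev,
				      struct virtio_blk *vblk,
				      struct request_queue *q)
{
	u32 v;

	disk_set_zoned(vblk->disk, BLK_ZONED_HM);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_open_zones, &v);
	disk_set_max_open_zones(vblk->disk, v);

	virtio_cread(vdev, struct virtio_blk_config,
		     zoned.max_active_zones, &v);
	disk_set_max_active_zones(vblk->disk, v);

	virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
		     &vblk->zone_sectors);
	if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
		dev_err(&vdev->dev,
			"zoned device with non power-of-two zone size %u\n",
			vblk->zone_sectors);
		return -ENODEV;
	}
	blk_queue_chunk_sectors(q, vblk->zone_sectors);
	dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

	return blk_revalidate_disk_zones(vblk->disk, NULL);
}
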
844 static inline void virtblk_revalidate_zones(struct virtio_blk *vblk)
849 struct virtio_blk *vblk, struct request_queue *q)
868 struct virtio_blk *vblk = disk->private_data;
869 struct request_queue *q = vblk->disk->queue;
880 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
897 struct virtio_blk *vblk = bd->bd_disk->private_data;
900 mutex_lock(&vblk->vdev_mutex);
902 if (!vblk->vdev) {
908 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
909 virtio_cread(vblk->vdev, struct virtio_blk_config,
911 virtio_cread(vblk->vdev, struct virtio_blk_config,
913 virtio_cread(vblk->vdev, struct virtio_blk_config,
922 mutex_unlock(&vblk->vdev_mutex);
928 struct virtio_blk *vblk = disk->private_data;
930 ida_free(&vd_index_ida, vblk->index);
931 mutex_destroy(&vblk->vdev_mutex);
932 kfree(vblk);
975 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
977 struct virtio_device *vdev = vblk->vdev;
978 struct request_queue *q = vblk->disk->queue;
995 vblk->disk->disk_name,
1002 set_capacity_and_notify(vblk->disk, capacity);
1007 struct virtio_blk *vblk =
1010 virtblk_revalidate_zones(vblk);
1011 virtblk_update_capacity(vblk, true);
1016 struct virtio_blk *vblk = vdev->priv;
1018 queue_work(virtblk_wq, &vblk->config_work);
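
Fragments 1007-1018 are the config-change plumbing: the virtio config interrupt must not sleep, so it only queues work, and the worker then revalidates zones and re-reads the capacity. A sketch matching the fragments (virtblk_wq is the driver's workqueue named above):

static void virtblk_config_changed_work_sketch(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_revalidate_zones(vblk);
	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed_sketch(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}
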
1021 static int init_vq(struct virtio_blk *vblk)
1030 struct virtio_device *vdev = vblk->vdev;
1050 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
1051 vblk->io_queues[HCTX_TYPE_READ] = 0;
1052 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
1055 vblk->io_queues[HCTX_TYPE_DEFAULT],
1056 vblk->io_queues[HCTX_TYPE_READ],
1057 vblk->io_queues[HCTX_TYPE_POLL]);
1059 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
1060 if (!vblk->vqs)
1073 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
1074 names[i] = vblk->vqs[i].name;
1079 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
1080 names[i] = vblk->vqs[i].name;
1089 spin_lock_init(&vblk->vqs[i].lock);
1090 vblk->vqs[i].vq = vqs[i];
1092 vblk->num_vqs = num_vqs;
1099 kfree(vblk->vqs);
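
Fragments 1021-1099 set up the virtqueues. A hedged sketch of init_vq(): default queues are named "req.N" and get virtblk_done as their callback, poll queues are named "req_poll.N" and get none, and each struct virtio_blk_vq is paired with its virtqueue under a fresh spinlock. The num_vqs/num_poll_vqs split (1050-1057) and most error handling are trimmed:

static int init_vq_sketch(struct virtio_blk *vblk, unsigned int num_vqs,
			  unsigned int num_poll_vqs)
{
	struct virtio_device *vdev = vblk->vdev;
	vq_callback_t **callbacks;
	struct virtqueue **vqs;
	const char **names;
	unsigned int i;
	int err;

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	err = -ENOMEM;
	if (!vblk->vqs || !vqs || !callbacks || !names)
		goto out;

	for (i = 0; i < num_vqs - num_poll_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i);
		names[i] = vblk->vqs[i].name;
	}
	for (; i < num_vqs; i++) {
		callbacks[i] = NULL;	/* poll queues take no interrupt */
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i);
		names[i] = vblk->vqs[i].name;
	}

	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, NULL);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;
out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		kfree(vblk->vqs);
	return err;
}
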
1153 struct virtio_blk *vblk = vdev->priv;
1155 blk_queue_write_cache(vblk->disk->queue, writeback, false);
1167 struct virtio_blk *vblk = disk->private_data;
1168 struct virtio_device *vdev = vblk->vdev;
1171 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
1185 struct virtio_blk *vblk = disk->private_data;
1186 u8 writeback = virtblk_get_cache_mode(vblk->vdev);
1205 struct virtio_blk *vblk = disk->private_data;
1206 struct virtio_device *vdev = vblk->vdev;
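
Fragments 1153-1206 keep the block layer's cache mode in sync with the device. A sketch matching the 1153-1155 hunk (virtblk_get_cache_mode() is the driver helper named above; its feature-bit/config read is elided):

static void virtblk_update_cache_mode_sketch(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	u8 writeback = virtblk_get_cache_mode(vdev);

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}
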
1227 struct virtio_blk *vblk = set->driver_data;
1233 map->nr_queues = vblk->io_queues[i];
1248 blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
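
Fragments 1227-1248 distribute hardware queues. A hedged sketch of the ->map_queues() logic: each hctx type takes its slice of vblk->io_queues[], poll queues fall back to the generic CPU spread, and everything else is mapped through the device's interrupt affinity:

static void virtio_map_queues_sketch(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;
	int i, qoff;

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = vblk->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(map);
		else
			blk_mq_virtio_map_queues(map, vblk->vdev, 0);
	}
}
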
1265 struct virtio_blk *vblk = hctx->queue->queuedata;
1285 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
1306 struct virtio_blk *vblk;
1342 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
1343 if (!vblk) {
1348 mutex_init(&vblk->vdev_mutex);
1350 vblk->vdev = vdev;
1352 INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
1354 err = init_vq(vblk);
1360 queue_depth = vblk->vqs[0].vq->num_free;
1368 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
1369 vblk->tag_set.ops = &virtio_mq_ops;
1370 vblk->tag_set.queue_depth = queue_depth;
1371 vblk->tag_set.numa_node = NUMA_NO_NODE;
1372 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1373 vblk->tag_set.cmd_size =
1376 vblk->tag_set.driver_data = vblk;
1377 vblk->tag_set.nr_hw_queues = vblk->num_vqs;
1378 vblk->tag_set.nr_maps = 1;
1379 if (vblk->io_queues[HCTX_TYPE_POLL])
1380 vblk->tag_set.nr_maps = 3;
1382 err = blk_mq_alloc_tag_set(&vblk->tag_set);
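
The cmd_size assignment at 1373 continues on a line that does not reference vblk, so its value is cut off in this listing; its purpose is to size the per-request driver pdu. An illustrative (not the driver's exact) layout of what that pdu carries:

/* blk-mq allocates cmd_size bytes behind every struct request, so the
 * hot path reaches its virtio header and inline scatterlist through
 * blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() with no per-I/O allocation. */
struct virtblk_req_sketch {
	struct virtio_blk_outhdr out_hdr;	/* request header, to device */
	u8 status;				/* completion status, from device */
	struct sg_table sg_table;		/* maps the bio data */
	struct scatterlist sg[];		/* inline sg entries */
};
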
1386 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
1387 if (IS_ERR(vblk->disk)) {
1388 err = PTR_ERR(vblk->disk);
1391 q = vblk->disk->queue;
1393 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
1395 vblk->disk->major = major;
1396 vblk->disk->first_minor = index_to_minor(index);
1397 vblk->disk->minors = 1 << PART_BITS;
1398 vblk->disk->private_data = vblk;
1399 vblk->disk->fops = &virtblk_fops;
1400 vblk->index = index;
1407 set_disk_ro(vblk->disk, 1);
1567 virtblk_update_capacity(vblk, false);
1575 err = virtblk_probe_zoned_device(vdev, vblk, q);
1580 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
1587 put_disk(vblk->disk);
1589 blk_mq_free_tag_set(&vblk->tag_set);
1592 kfree(vblk->vqs);
1594 kfree(vblk);
1603 struct virtio_blk *vblk = vdev->priv;
1606 flush_work(&vblk->config_work);
1608 del_gendisk(vblk->disk);
1609 blk_mq_free_tag_set(&vblk->tag_set);
1611 mutex_lock(&vblk->vdev_mutex);
1616 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
1617 vblk->vdev = NULL;
1620 kfree(vblk->vqs);
1622 mutex_unlock(&vblk->vdev_mutex);
1624 put_disk(vblk->disk);
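
Fragments 1603-1624 are the teardown that the vdev_mutex comments at 59/62 refer back to. A hedged sketch of virtblk_remove(): stop the config worker first so it cannot run against a dead device, remove the disk, then reset the device and clear vblk->vdev under vdev_mutex so the sleepable paths above observe NULL; the final put_disk() drops the reference that keeps vblk alive until virtblk_free_disk():

static void virtblk_remove_sketch(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	put_disk(vblk->disk);
}
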
1630 struct virtio_blk *vblk = vdev->priv;
1633 blk_mq_freeze_queue(vblk->disk->queue);
1639 flush_work(&vblk->config_work);
1642 kfree(vblk->vqs);
1649 struct virtio_blk *vblk = vdev->priv;
1658 blk_mq_unfreeze_queue(vblk->disk->queue);
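
Fragments 1630-1658 are the suspend/resume hooks. A hedged sketch: freeze drains blk-mq before resetting the device so nothing is in flight across suspend, and restore rebuilds the virtqueues via init_vq() before unfreezing (virtio_device_ready() is the standard virtio resume call, assumed here):

static int virtblk_freeze_sketch(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure no requests are left in the virtqueues. */
	blk_mq_freeze_queue(vblk->disk->queue);

	/* Ensure we don't receive any more interrupts. */
	virtio_reset_device(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	return 0;
}

static int virtblk_restore_sketch(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vblk);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unfreeze_queue(vblk->disk->queue);
	return 0;
}
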