Lines Matching refs:vblk (drivers/block/virtio_blk.c)

38 	 * virtblk_remove() sets vblk->vdev to NULL.
41 * shut down before vblk->vdev is set to NULL and therefore do not need
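The two comment fragments above (lines 38 and 41) describe the lifetime rule for vblk->vdev: once virtblk_remove() clears the pointer, any path outside the request path must re-check it under vblk->vdev_mutex. A minimal reconstruction of that guard as it appears in the open path matched at 346-356 below (simplified; the exact prototype and -ENXIO return are assumptions based on the usual block_device_operations open signature):

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (vblk->vdev)
		/* Pin vblk so it outlives a concurrent virtblk_remove(). */
		virtblk_get(vblk);
	else
		ret = -ENXIO;	/* device already removed */

	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}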
176 struct virtio_blk *vblk = vq->vdev->priv;
183 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
186 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
199 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
200 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
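The matches at 176-200 are the completion path, virtblk_done(): under the per-queue lock it drains finished requests from the virtqueue and restarts any hardware queues that were stopped while the ring was full. A condensed sketch of that loop, assuming the usual virtqueue_disable_cb()/virtqueue_enable_cb() re-check pattern (not the verbatim source):

static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		/* Drain every buffer the device has completed so far. */
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			blk_mq_complete_request(blk_mq_rq_from_pdu(vbr));
			req_done = true;
		}
	} while (!virtqueue_enable_cb(vq));

	/* Submission stops hw queues when the ring fills; restart them. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}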
205 struct virtio_blk *vblk = hctx->queue->queuedata;
206 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
220 struct virtio_blk *vblk = hctx->queue->queuedata;
231 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
256 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
258 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
259 vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
272 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
274 vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
277 spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
278 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
280 virtqueue_kick(vblk->vqs[qid].vq);
286 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
297 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
299 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
302 virtqueue_notify(vblk->vqs[qid].vq);
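The matches at 205-302 are the submission side, virtio_queue_rq(): the request header is byte-swapped with cpu_to_virtio32/64 against vblk->vdev, the scatterlist is added under the per-queue lock, and the doorbell is rung outside the lock via virtqueue_kick_prepare()/virtqueue_notify(). A condensed sketch of that tail, with the error handling simplified to the ring-full case (the real function also distinguishes -ENOMEM):

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		/* Ring full: kick what is queued, stop this hw queue and
		 * let virtblk_done() restart it once buffers complete. */
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		return BLK_STS_DEV_RESOURCE;
	}

	/* Prepare the kick under the lock, notify after dropping it. */
	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;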
310 struct virtio_blk *vblk = disk->private_data;
311 struct request_queue *q = vblk->disk->queue;
323 blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
330 static void virtblk_get(struct virtio_blk *vblk)
332 refcount_inc(&vblk->refs);
335 static void virtblk_put(struct virtio_blk *vblk)
337 if (refcount_dec_and_test(&vblk->refs)) {
338 ida_simple_remove(&vd_index_ida, vblk->index);
339 mutex_destroy(&vblk->vdev_mutex);
340 kfree(vblk);
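The matches at 330-340 are the reference-counting pair that keeps struct virtio_blk alive while the disk is open, even after device removal. The two helpers are short enough to reconstruct almost verbatim from the fragments above:

static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		/* Last reference: release the vd index and the structure. */
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}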
346 struct virtio_blk *vblk = bd->bd_disk->private_data;
349 mutex_lock(&vblk->vdev_mutex);
351 if (vblk->vdev)
352 virtblk_get(vblk);
356 mutex_unlock(&vblk->vdev_mutex);
362 struct virtio_blk *vblk = disk->private_data;
364 virtblk_put(vblk);
370 struct virtio_blk *vblk = bd->bd_disk->private_data;
373 mutex_lock(&vblk->vdev_mutex);
375 if (!vblk->vdev) {
381 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
382 virtio_cread(vblk->vdev, struct virtio_blk_config,
384 virtio_cread(vblk->vdev, struct virtio_blk_config,
386 virtio_cread(vblk->vdev, struct virtio_blk_config,
395 mutex_unlock(&vblk->vdev_mutex);
439 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
441 struct virtio_device *vdev = vblk->vdev;
442 struct request_queue *q = vblk->disk->queue;
466 vblk->disk->disk_name,
473 set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
478 struct virtio_blk *vblk =
481 virtblk_update_capacity(vblk, true);
486 struct virtio_blk *vblk = vdev->priv;
488 queue_work(virtblk_wq, &vblk->config_work);
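The matches at 478-488 show how configuration-space changes are handled: the config interrupt only queues vblk->config_work on virtblk_wq, and the work item re-reads the capacity. Reconstructed from those fragments (essentially complete as matched):

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}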
491 static int init_vq(struct virtio_blk *vblk)
499 struct virtio_device *vdev = vblk->vdev;
510 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
511 if (!vblk->vqs)
524 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
525 names[i] = vblk->vqs[i].name;
534 spin_lock_init(&vblk->vqs[i].lock);
535 vblk->vqs[i].vq = vqs[i];
537 vblk->num_vqs = num_vqs;
544 kfree(vblk->vqs);
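The matches at 491-544 are from init_vq(): one struct virtio_blk_vq (lock, virtqueue pointer, name) is allocated per hardware queue, each virtqueue is named "req.%d", and the transport is asked for all of them in a single virtio_find_vqs() call. A trimmed sketch of that setup (allocation of the vqs/callbacks/names scratch arrays and the unwind path are omitted):

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* One call to the transport discovers every request virtqueue. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;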
598 struct virtio_blk *vblk = vdev->priv;
600 blk_queue_write_cache(vblk->disk->queue, writeback, false);
601 revalidate_disk_size(vblk->disk, true);
613 struct virtio_blk *vblk = disk->private_data;
614 struct virtio_device *vdev = vblk->vdev;
617 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
631 struct virtio_blk *vblk = disk->private_data;
632 u8 writeback = virtblk_get_cache_mode(vblk->vdev);
651 struct virtio_blk *vblk = disk->private_data;
652 struct virtio_device *vdev = vblk->vdev;
674 struct virtio_blk *vblk = set->driver_data;
677 sg_init_table(vbr->sg, vblk->sg_elems);
683 struct virtio_blk *vblk = set->driver_data;
686 vblk->vdev, 0);
702 struct virtio_blk *vblk;
733 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
734 if (!vblk) {
740 refcount_set(&vblk->refs, 1);
741 mutex_init(&vblk->vdev_mutex);
743 vblk->vdev = vdev;
744 vblk->sg_elems = sg_elems;
746 INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
748 err = init_vq(vblk);
753 vblk->disk = alloc_disk(1 << PART_BITS);
754 if (!vblk->disk) {
761 virtblk_queue_depth = vblk->vqs[0].vq->num_free;
767 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
768 vblk->tag_set.ops = &virtio_mq_ops;
769 vblk->tag_set.queue_depth = virtblk_queue_depth;
770 vblk->tag_set.numa_node = NUMA_NO_NODE;
771 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
772 vblk->tag_set.cmd_size =
775 vblk->tag_set.driver_data = vblk;
776 vblk->tag_set.nr_hw_queues = vblk->num_vqs;
778 err = blk_mq_alloc_tag_set(&vblk->tag_set);
782 q = blk_mq_init_queue(&vblk->tag_set);
787 vblk->disk->queue = q;
789 q->queuedata = vblk;
791 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
793 vblk->disk->major = major;
794 vblk->disk->first_minor = index_to_minor(index);
795 vblk->disk->private_data = vblk;
796 vblk->disk->fops = &virtblk_fops;
797 vblk->disk->flags |= GENHD_FL_EXT_DEVT;
798 vblk->index = index;
805 set_disk_ro(vblk->disk, 1);
808 blk_queue_max_segments(q, vblk->sg_elems-2);
900 virtblk_update_capacity(vblk, false);
903 device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
907 blk_mq_free_tag_set(&vblk->tag_set);
909 put_disk(vblk->disk);
912 kfree(vblk->vqs);
914 kfree(vblk);
923 struct virtio_blk *vblk = vdev->priv;
926 flush_work(&vblk->config_work);
928 del_gendisk(vblk->disk);
929 blk_cleanup_queue(vblk->disk->queue);
931 blk_mq_free_tag_set(&vblk->tag_set);
933 mutex_lock(&vblk->vdev_mutex);
938 /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
939 vblk->vdev = NULL;
941 put_disk(vblk->disk);
943 kfree(vblk->vqs);
945 mutex_unlock(&vblk->vdev_mutex);
947 virtblk_put(vblk);
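The matches at 923-947 are virtblk_remove(), whose ordering enforces the comment at line 38: the disk and tag set are torn down first, then vblk->vdev is cleared under vdev_mutex once the virtqueues are stopped, and the final virtblk_put() frees vblk only if no opener still holds a reference. A condensed reconstruction (the config->reset()/del_vqs() calls between the matched lines are assumed from the standard virtio teardown sequence):

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no more capacity work runs after this point. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	/* Drops the probe-time reference; frees vblk if nothing is open. */
	virtblk_put(vblk);
}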
953 struct virtio_blk *vblk = vdev->priv;
959 flush_work(&vblk->config_work);
961 blk_mq_quiesce_queue(vblk->disk->queue);
964 kfree(vblk->vqs);
971 struct virtio_blk *vblk = vdev->priv;
980 blk_mq_unquiesce_queue(vblk->disk->queue);
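The last matches (953-980) come from the suspend/resume pair: freeze flushes the config work, quiesces the block queue and frees the virtqueues, while restore rebuilds them with init_vq() and unquiesces the queue. A sketch of the pair, assuming the usual CONFIG_PM_SLEEP guard and the config->reset()/del_vqs() steps between the matched lines:

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts. */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif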