Lines matching defs:blkif

Each match below carries its line number in the Xen block backend's xenbus source, followed by the matching line quoted verbatim.

2 /*  Xenbus code for blkif backend
23 struct xen_blkif *blkif;
35 static void xen_blkif_free(struct xen_blkif *blkif);
49 struct xen_blkif *blkif;
51 blkif = container_of(work, struct xen_blkif, free_work);
52 xen_blkif_free(blkif);
55 static int blkback_name(struct xen_blkif *blkif, char *buf)
58 struct xenbus_device *dev = blkif->be->dev;
70 snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
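
The blkback_name() matches above show the backend's worker-thread name being formatted as "<domid>.<devname>" into a TASK_COMM_LEN-sized buffer. A minimal user-space sketch of just that formatting step; TASK_COMM_LEN is redefined locally to mirror the kernel constant, and format_blkback_name() is a hypothetical stand-in, not a driver function:

    #include <stdio.h>

    #define TASK_COMM_LEN 16    /* local stand-in for the kernel's limit */

    /* Hypothetical stand-in for blkback_name(): "<domid>.<devname>". */
    static void format_blkback_name(char *buf, int domid, const char *devname)
    {
        snprintf(buf, TASK_COMM_LEN, "%d.%s", domid, devname);
    }

    int main(void)
    {
        char name[TASK_COMM_LEN];

        format_blkback_name(name, 5, "xvda");
        printf("%s\n", name);   /* prints "5.xvda" */
        return 0;
    }
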
76 static void xen_update_blkif_status(struct xen_blkif *blkif)
84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
88 if (blkif->be->dev->state == XenbusStateConnected)
92 connect(blkif->be);
93 if (blkif->be->dev->state != XenbusStateConnected)
96 err = blkback_name(blkif, name);
98 xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
102 err = sync_blockdev(blkif->vbd.bdev);
104 xenbus_dev_error(blkif->be->dev, err, "block flush");
107 invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
109 for (i = 0; i < blkif->nr_rings; i++) {
110 ring = &blkif->rings[i];
115 xenbus_dev_fatal(blkif->be->dev, err,
124 ring = &blkif->rings[i];
130 static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
136 if (!blkif->rings)
139 for (r = 0; r < blkif->nr_rings; r++) {
140 struct xen_blkif_ring *ring = &blkif->rings[r];
152 ring->blkif = blkif;
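
xen_blkif_alloc_rings() (matches at source lines 130-152) allocates one array entry per queue and gives each ring a back-pointer to its owning interface. A hedged user-space sketch of that pattern, with calloc() standing in for kcalloc() and only the fields visible above:

    #include <stdlib.h>

    /* Simplified stand-ins for the kernel structures; field names follow
     * the matches above, everything else is illustrative. */
    struct blkif_ring {
        struct blkif *blkif;    /* back-pointer set in the alloc loop */
    };

    struct blkif {
        unsigned int nr_rings;
        struct blkif_ring *rings;
    };

    /* One contiguous array of per-queue ring state, each entry pointing
     * back at its owner. */
    static int alloc_rings(struct blkif *blkif)
    {
        unsigned int r;

        blkif->rings = calloc(blkif->nr_rings, sizeof(*blkif->rings));
        if (!blkif->rings)
            return -1;

        for (r = 0; r < blkif->nr_rings; r++)
            blkif->rings[r].blkif = blkif;

        return 0;
    }
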
167 struct xen_blkif *blkif;
171 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
172 if (!blkif)
175 blkif->domid = domid;
176 atomic_set(&blkif->refcnt, 1);
177 init_completion(&blkif->drain_complete);
187 INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
189 return blkif;
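
xen_blkif_alloc() starts the interface with a reference count of one, and the matches at source lines 49-52 and 187 show the final release being deferred to a work item (free_work, xen_blkif_deferred_free, xen_blkif_free). A simplified sketch of that get/put lifetime, with C11 atomics standing in for the kernel's refcounting and the deferred work replaced by a direct call:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct blkif {
        atomic_int refcnt;
        /* drain_complete, free_work, ... elided */
    };

    static void blkif_free(struct blkif *blkif)
    {
        /* In the driver this is xen_blkif_free(), run from free_work. */
        free(blkif);
    }

    static struct blkif *blkif_alloc(void)
    {
        struct blkif *blkif = calloc(1, sizeof(*blkif));

        if (!blkif)
            return NULL;
        atomic_init(&blkif->refcnt, 1);     /* creator holds one ref */
        return blkif;
    }

    static void blkif_get(struct blkif *blkif)
    {
        atomic_fetch_add(&blkif->refcnt, 1);
    }

    static void blkif_put(struct blkif *blkif)
    {
        /* Last put frees; the driver schedules free_work here instead. */
        if (atomic_fetch_sub(&blkif->refcnt, 1) == 1)
            blkif_free(blkif);
    }
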
196 struct xen_blkif *blkif = ring->blkif;
205 err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
214 switch (blkif->blk_protocol) {
253 err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
254 evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
262 xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
267 static int xen_blkif_disconnect(struct xen_blkif *blkif)
273 for (r = 0; r < blkif->nr_rings; r++) {
274 struct xen_blkif_ring *ring = &blkif->rings[r];
301 xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
327 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
333 blkif->nr_ring_pages = 0;
335 * blkif->rings was allocated in connect_ring, so we should free it in
338 kfree(blkif->rings);
339 blkif->rings = NULL;
340 blkif->nr_rings = 0;
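
xen_blkif_disconnect() is the counterpart of the allocation above: each ring's mapping is torn down, and the comment at source line 335 notes that the rings array allocated in connect_ring is freed here as well. A small sketch of that final cleanup, using free() for kfree() and only the fields shown in the matches:

    #include <stdlib.h>

    struct blkif_ring {
        int unused;             /* per-queue state elided */
    };

    struct blkif {
        unsigned int nr_rings;
        unsigned int nr_ring_pages;
        struct blkif_ring *rings;
    };

    /* Tail of the disconnect path: drop the sizing state and release the
     * rings array so a later reconnect starts from scratch. */
    static void release_rings(struct blkif *blkif)
    {
        blkif->nr_ring_pages = 0;
        free(blkif->rings);
        blkif->rings = NULL;
        blkif->nr_rings = 0;
    }
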
345 static void xen_blkif_free(struct xen_blkif *blkif)
347 WARN_ON(xen_blkif_disconnect(blkif));
348 xen_vbd_free(&blkif->vbd);
349 kfree(blkif->be->mode);
350 kfree(blkif->be);
353 kmem_cache_free(xen_blkif_cachep, blkif);
385 struct xen_blkif *blkif = be->blkif; \
389 if (!blkif->rings) \
392 for (i = 0; i < blkif->nr_rings; i++) { \
393 struct xen_blkif_ring *ring = &blkif->rings[i]; \
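
The backslash-continued matches around source line 385 come from a macro that generates sysfs show routines: each routine bails out if the rings were never allocated and otherwise walks every ring to aggregate a per-ring counter. A hedged sketch of that loop; st_rd_req is used here as an example statistics field and is not taken from the matches:

    struct blkif_ring {
        unsigned long long st_rd_req;   /* example per-ring counter */
    };

    struct blkif {
        unsigned int nr_rings;
        struct blkif_ring *rings;
    };

    /* Sketch of the generated show routine's body: no rings means nothing
     * to report, otherwise sum the counter across all queues. */
    static unsigned long long sum_rd_req(const struct blkif *blkif)
    {
        unsigned long long sum = 0;
        unsigned int i;

        if (!blkif->rings)
            return 0;

        for (i = 0; i < blkif->nr_rings; i++)
            sum += blkif->rings[i].st_rd_req;

        return sum;
    }
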
480 static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
487 vbd = &blkif->vbd;
523 handle, blkif->domid);
544 if (be->blkif) {
545 xen_blkif_disconnect(be->blkif);
548 xen_blkif_put(be->blkif);
569 struct xen_blkif *blkif = be->blkif;
572 struct block_device *bdev = be->blkif->vbd.bdev;
596 blkif->vbd.discard_secure);
645 be->blkif = xen_blkif_alloc(dev->otherend_id);
646 if (IS_ERR(be->blkif)) {
647 err = PTR_ERR(be->blkif);
648 be->blkif = NULL;
668 be->blkif->be = be;
760 err = xen_vbd_create(be->blkif, handle, major, minor,
768 xen_vbd_free(&be->blkif->vbd);
780 xen_update_blkif_status(be->blkif);
817 err = xen_blkif_disconnect(be->blkif);
829 xen_blkif_disconnect(be->blkif);
832 xen_update_blkif_status(be->blkif);
840 xen_blkif_disconnect(be->blkif);
874 be->blkif->buffer_squeeze_end = jiffies +
901 xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
905 xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
908 be->blkif->vbd.feature_gnt_persistent_parm);
916 (unsigned long long)vbd_sz(&be->blkif->vbd));
925 be->blkif->vbd.type |
926 (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
934 bdev_logical_block_size(be->blkif->vbd.bdev));
941 bdev_physical_block_size(be->blkif->vbd.bdev));
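
The connect() matches (source lines 901-941) show the backend advertising its capabilities through xenstore: flush/barrier support, persistent grants, the size in sectors, an "info" field, and the logical and physical block sizes. The "info" value visible at source lines 925-926 combines the device-type flags with a read-only bit; a small sketch of just that expression, with the VDISK_READONLY value taken from Xen's public blkif.h and treated as an assumption here:

    /* Flag value as in Xen's public blkif.h; an assumption in this sketch. */
    #define VDISK_READONLY 0x4

    struct vbd {
        unsigned int type;      /* device-type flags */
        int readonly;
    };

    /* The "info" value written to xenstore: type flags OR'd with the
     * read-only bit for read-only exports. */
    static unsigned int vbd_info(const struct vbd *vbd)
    {
        return vbd->type | (vbd->readonly ? VDISK_READONLY : 0);
    }
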
970 struct xen_blkif *blkif = ring->blkif;
971 struct xenbus_device *dev = blkif->be->dev;
982 nr_grefs = blkif->nr_ring_pages;
992 if (blkif->multi_ref)
1059 struct xen_blkif *blkif = be->blkif;
1070 blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
1076 blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
1078 blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
1080 blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
1086 blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
1087 blkif->vbd.feature_gnt_persistent =
1088 blkif->vbd.feature_gnt_persistent_parm &&
1091 blkif->vbd.overflow_max_grants = 0;
1107 blkif->nr_rings = requested_num_queues;
1108 if (xen_blkif_alloc_rings(blkif))
1112 blkif->nr_rings, blkif->blk_protocol, protocol,
1113 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
1118 blkif->nr_ring_pages = 1;
1119 blkif->multi_ref = false;
1121 blkif->nr_ring_pages = 1 << ring_page_order;
1122 blkif->multi_ref = true;
1132 if (blkif->nr_rings == 1)
1133 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
1142 for (i = 0; i < blkif->nr_rings; i++) {
1145 err = read_per_ring_refs(&blkif->rings[i], xspath);
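
connect_ring() (matches from source line 1059 onward) negotiates the ring ABI and sizing with the frontend: the advertised protocol string selects one of the three blk_protocol values, and a ring-page-order key turns into 1 << order ring pages with multi-ref grant handling, falling back to a single page otherwise. A hedged user-space sketch of those two decisions; the ABI strings mirror Xen's io/protocols.h, a 64-bit backend is assumed for the native string, and parse_protocol()/ring_pages() are illustrative helpers, not driver functions:

    #include <string.h>

    enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE,
        BLKIF_PROTOCOL_X86_32,
        BLKIF_PROTOCOL_X86_64,
    };

    /* XEN_IO_PROTO_ABI_NATIVE expands to the backend's own ABI string in
     * the real headers; a 64-bit backend is assumed for this sketch. */
    #define NATIVE_ABI "x86_64-abi"

    /* Map the frontend's protocol string onto the enum; the driver rejects
     * anything it does not recognise. The native check comes first, so on
     * a matching architecture it takes precedence. */
    static int parse_protocol(const char *protocol, enum blkif_protocol *out)
    {
        if (strcmp(protocol, NATIVE_ABI) == 0)
            *out = BLKIF_PROTOCOL_NATIVE;
        else if (strcmp(protocol, "x86_32-abi") == 0)
            *out = BLKIF_PROTOCOL_X86_32;
        else if (strcmp(protocol, "x86_64-abi") == 0)
            *out = BLKIF_PROTOCOL_X86_64;
        else
            return -1;
        return 0;
    }

    /* Ring sizing: no ring-page-order key means one page referenced the
     * legacy single-ref way; otherwise 1 << order pages with multi_ref. */
    static unsigned int ring_pages(int have_order, unsigned int order,
                                   int *multi_ref)
    {
        if (!have_order) {
            *multi_ref = 0;
            return 1;
        }
        *multi_ref = 1;
        return 1u << order;
    }
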