Lines Matching defs:blkif (drivers/block/xen-blkback/xenbus.c)
2 /* Xenbus code for blkif backend
23 struct xen_blkif *blkif;
35 static void xen_blkif_free(struct xen_blkif *blkif);
49 struct xen_blkif *blkif;
51 blkif = container_of(work, struct xen_blkif, free_work);
52 xen_blkif_free(blkif);
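
Note: the free_work handler at lines 49-52 only makes sense alongside the matching reference drop. A minimal sketch of the likely put path, reconstructed from the refcnt and free_work fields at lines 176 and 187 (not copied from the file; xen_blkif_put itself is referenced at line 551):

/* Sketch: drop a reference and, on the last put, defer the actual
 * teardown to a workqueue so it runs in process context. */
static void xen_blkif_put(struct xen_blkif *blkif)
{
	if (atomic_dec_and_test(&blkif->refcnt))
		schedule_work(&blkif->free_work);
}
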
55 static int blkback_name(struct xen_blkif *blkif, char *buf)
58 struct xenbus_device *dev = blkif->be->dev;
70 snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
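
Note: blkback_name() (line 55) builds the string later used to name per-ring kthreads. A hedged sketch of how devname at line 70 is plausibly obtained, assuming it comes from the backend's "dev" xenstore node with any "/dev/" prefix stripped:

/* Sketch of the body between lines 58 and 70 (hypothetical
 * reconstruction): read the "dev" node, strip a "/dev/" prefix. */
char *devpath, *devname;

devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
if (IS_ERR(devpath))
	return PTR_ERR(devpath);

devname = strstr(devpath, "/dev/");
if (devname != NULL)
	devname += strlen("/dev/");
else
	devname = devpath;

snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
kfree(devpath);
return 0;
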
76 static void xen_update_blkif_status(struct xen_blkif *blkif)
84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
88 if (blkif->be->dev->state == XenbusStateConnected)
92 connect(blkif->be);
93 if (blkif->be->dev->state != XenbusStateConnected)
96 err = blkback_name(blkif, name);
98 xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
102 err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
104 xenbus_dev_error(blkif->be->dev, err, "block flush");
107 invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
109 for (i = 0; i < blkif->nr_rings; i++) {
110 ring = &blkif->rings[i];
115 xenbus_dev_fatal(blkif->be->dev, err,
124 ring = &blkif->rings[i];
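
Note: lines 109-124 are two loops over blkif->rings: the first starts one service thread per ring, the second unwinds already-started threads when a start fails. A sketch of the start step, assuming a scheduler entry point named xen_blkif_schedule (an assumption here, though conventional for this driver):

/* Sketch of the first loop: spawn one kthread per ring, named after
 * the string built by blkback_name() plus the ring index. */
for (i = 0; i < blkif->nr_rings; i++) {
	ring = &blkif->rings[i];

	ring->xenblkd = kthread_run(xen_blkif_schedule, ring,
				    "%s-%d", name, i);
	if (IS_ERR(ring->xenblkd)) {
		err = PTR_ERR(ring->xenblkd);
		ring->xenblkd = NULL;
		xenbus_dev_fatal(blkif->be->dev, err,
				 "start %s-%d xenblkd", name, i);
		goto out;	/* second loop stops threads started so far */
	}
}
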
130 static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
136 if (!blkif->rings)
139 for (r = 0; r < blkif->nr_rings; r++) {
140 struct xen_blkif_ring *ring = &blkif->rings[r];
152 ring->blkif = blkif;
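
Note: besides the kcalloc at line 134 and the back-pointer at line 152, the per-ring loop presumably initializes each ring's locks, lists, and wait queues before any I/O can arrive. A sketch of typical initializers (the field names are assumptions based on the xen-blkback ring structure):

for (r = 0; r < blkif->nr_rings; r++) {
	struct xen_blkif_ring *ring = &blkif->rings[r];

	/* Sketch: per-ring state that must exist before any I/O. */
	spin_lock_init(&ring->blk_ring_lock);
	init_waitqueue_head(&ring->wq);
	INIT_LIST_HEAD(&ring->pending_free);
	spin_lock_init(&ring->pending_free_lock);
	init_waitqueue_head(&ring->pending_free_wq);
	init_waitqueue_head(&ring->shutdown_wq);

	ring->blkif = blkif;	/* back-pointer, line 152 */
}
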
167 struct xen_blkif *blkif;
171 blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
172 if (!blkif)
175 blkif->domid = domid;
176 atomic_set(&blkif->refcnt, 1);
177 init_completion(&blkif->drain_complete);
187 INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
189 return blkif;
196 struct xen_blkif *blkif = ring->blkif;
205 err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
214 switch (blkif->blk_protocol) {
253 err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
254 evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
262 xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
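
Note: between the grant mapping at line 205 and the event-channel bind at line 253, the switch at line 214 wraps the mapped pages in a back-ring descriptor for whichever ABI the frontend negotiated. A sketch of one arm, using the standard Xen BACK_RING_INIT macro:

switch (blkif->blk_protocol) {
case BLKIF_PROTOCOL_NATIVE:
{
	struct blkif_sring *sring = (struct blkif_sring *)ring->blk_ring;

	/* Initialize the back ring over the freshly mapped pages. */
	BACK_RING_INIT(&ring->blk_rings.native, sring,
		       XEN_PAGE_SIZE * nr_grefs);
	break;
}
/* BLKIF_PROTOCOL_X86_32 and _X86_64 arms follow the same pattern
 * with their respective sring types. */
default:
	BUG();
}
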
267 static int xen_blkif_disconnect(struct xen_blkif *blkif)
273 for (r = 0; r < blkif->nr_rings; r++) {
274 struct xen_blkif_ring *ring = &blkif->rings[r];
301 xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
327 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
333 blkif->nr_ring_pages = 0;
335 * blkif->rings was allocated in connect_ring, so we should free it in
338 kfree(blkif->rings);
339 blkif->rings = NULL;
340 blkif->nr_rings = 0;
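
Note: xen_blkif_disconnect() (line 267) undoes per-ring setup in reverse order: stop the service thread, unbind the event channel, then unmap the shared ring (line 301). A sketch of that per-ring body (the irq, xenblkd, and blk_rings field names are partly assumptions):

/* Sketch of the teardown inside the loop at line 273: */
if (ring->xenblkd) {
	kthread_stop(ring->xenblkd);
	ring->xenblkd = NULL;
}

if (ring->irq) {
	unbind_from_irqhandler(ring->irq, ring);
	ring->irq = 0;
}

if (ring->blk_rings.common.sring) {
	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
	ring->blk_ring = NULL;
}
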
345 static void xen_blkif_free(struct xen_blkif *blkif)
347 WARN_ON(xen_blkif_disconnect(blkif));
348 xen_vbd_free(&blkif->vbd);
349 kfree(blkif->be->mode);
350 kfree(blkif->be);
353 kmem_cache_free(xen_blkif_cachep, blkif);
385 struct xen_blkif *blkif = be->blkif; \
389 if (!blkif->rings) \
392 for (i = 0; i < blkif->nr_rings; i++) { \
393 struct xen_blkif_ring *ring = &blkif->rings[i]; \
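
Note: the backslash-continued lines at 385-393 come from a statistics macro that stamps out one sysfs show function per counter and sums that counter across all rings. A plausible expansion-level sketch along the lines of a VBD_SHOW_ALLRING macro (the macro name, st_ field prefix, and attribute mode are assumptions):

#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned long long result = 0;				\
		int i;							\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)
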
480 static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
488 vbd = &blkif->vbd;
526 handle, blkif->domid);
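
Note: xen_vbd_create() (line 480) binds the virtual device handle to a physical block device identified by major/minor. A sketch of the open step; the exact block-layer API varies across kernel versions, and the readonly flag parameter is an assumption (this matches the bd_inode-era API seen at line 102):

/* Sketch: record the handle and open the backing device. */
vbd->handle = handle;
vbd->readonly = readonly;
vbd->pdevice = MKDEV(major, minor);

bdev = blkdev_get_by_dev(vbd->pdevice,
			 vbd->readonly ? FMODE_READ : FMODE_WRITE, NULL);
if (IS_ERR(bdev)) {
	pr_warn("xen_vbd_create: device %08x could not be opened\n",
		vbd->pdevice);
	return -ENOENT;
}
vbd->bdev = bdev;
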
547 if (be->blkif) {
548 xen_blkif_disconnect(be->blkif);
551 xen_blkif_put(be->blkif);
574 struct xen_blkif *blkif = be->blkif;
577 struct block_device *bdev = be->blkif->vbd.bdev;
602 blkif->vbd.discard_secure);
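
Note: lines 574-602 are from the discard negotiation; line 602 suggests the backend advertises secure-discard support through xenstore when the backing queue supports it. A sketch of the keys likely written (the key names follow the blkif protocol, but treat the exact set, and the blk_queue_discard() test, as assumptions tied to this kernel era):

struct request_queue *q = bdev_get_queue(bdev);

if (blk_queue_discard(q)) {
	/* Sketch: advertise discard geometry and capability. */
	err = xenbus_printf(xbt, dev->nodename, "discard-granularity",
			    "%u", q->limits.discard_granularity);
	if (!err)
		err = xenbus_printf(xbt, dev->nodename, "discard-secure",
				    "%d", blkif->vbd.discard_secure);
}
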
651 be->blkif = xen_blkif_alloc(dev->otherend_id);
652 if (IS_ERR(be->blkif)) {
653 err = PTR_ERR(be->blkif);
654 be->blkif = NULL;
674 be->blkif->be = be;
766 err = xen_vbd_create(be->blkif, handle, major, minor,
774 xen_vbd_free(&be->blkif->vbd);
786 xen_update_blkif_status(be->blkif);
823 err = xen_blkif_disconnect(be->blkif);
835 xen_blkif_disconnect(be->blkif);
838 xen_update_blkif_status(be->blkif);
846 xen_blkif_disconnect(be->blkif);
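
Note: lines 823-846 belong to the frontend state-change handler; disconnect appears three times because it is idempotent, serving both as a precondition before mapping new rings and as cleanup. A simplified sketch of the switch (reconstructed; only branches visible in this listing are shown):

switch (frontend_state) {
case XenbusStateInitialised:
case XenbusStateConnected:
	if (dev->state == XenbusStateConnected)
		break;

	/* Idempotent precondition before mapping new rings (line 823). */
	err = xen_blkif_disconnect(be->blkif);
	if (err) {
		xenbus_dev_fatal(dev, err, "pending I/O");
		break;
	}

	err = connect_ring(be);
	if (err) {
		/* Release ring resources on failure (line 835). */
		xen_blkif_disconnect(be->blkif);
		break;
	}
	xen_update_blkif_status(be->blkif);	/* line 838 */
	break;

case XenbusStateClosed:
	xen_blkif_disconnect(be->blkif);	/* line 846 */
	xenbus_switch_state(dev, XenbusStateClosed);
	break;
}
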
880 be->blkif->buffer_squeeze_end = jiffies +
907 xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
911 xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
914 be->blkif->vbd.feature_gnt_persistent_parm);
922 (unsigned long long)vbd_sz(&be->blkif->vbd));
931 be->blkif->vbd.type |
932 (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
940 bdev_logical_block_size(be->blkif->vbd.bdev));
947 bdev_physical_block_size(be->blkif->vbd.bdev));
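
Note: the feature and geometry writes at lines 907-947 sit inside a retried xenstore transaction, the standard pattern for xenbus backends. A sketch of the surrounding structure:

struct xenbus_transaction xbt;
int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* ... xenbus_printf() calls for flush-cache, barrier,
	 * persistent grants, sectors, info, and sector sizes go here,
	 * as at lines 907-947 ... */

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;		/* transaction raced; retry */
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");
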
976 struct xen_blkif *blkif = ring->blkif;
977 struct xenbus_device *dev = blkif->be->dev;
988 nr_grefs = blkif->nr_ring_pages;
998 if (blkif->multi_ref)
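
Note: read_per_ring_refs() reads the grant references and event channel for one ring; the multi_ref flag at line 998 selects between the legacy single "ring-ref" key and the "ring-ref%u" series used for multi-page rings. A sketch (RINGREF_NAME_LEN and the ring_ref array name are assumptions):

err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u", &evtchn);
if (err != 1)
	return -EINVAL;

if (blkif->multi_ref) {
	/* Negotiated multi-page ring: one key per grant reference. */
	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		snprintf(ring_ref_name, RINGREF_NAME_LEN,
			 "ring-ref%u", i);
		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);
		if (err != 1)
			return -EINVAL;
	}
} else {
	/* Legacy single-page ring. */
	err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
	if (err != 1)
		return -EINVAL;
}
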
1065 struct xen_blkif *blkif = be->blkif;
1076 blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
1082 blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
1084 blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
1086 blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
1092 blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
1093 blkif->vbd.feature_gnt_persistent =
1094 blkif->vbd.feature_gnt_persistent_parm &&
1097 blkif->vbd.overflow_max_grants = 0;
1113 blkif->nr_rings = requested_num_queues;
1114 if (xen_blkif_alloc_rings(blkif))
1118 blkif->nr_rings, blkif->blk_protocol, protocol,
1119 blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
1124 blkif->nr_ring_pages = 1;
1125 blkif->multi_ref = false;
1127 blkif->nr_ring_pages = 1 << ring_page_order;
1128 blkif->multi_ref = true;
1138 if (blkif->nr_rings == 1)
1139 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
1148 for (i = 0; i < blkif->nr_rings; i++) {
1151 err = read_per_ring_refs(&blkif->rings[i], xspath);
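
Note: connect_ring() ends by reading per-queue references: directly from otherend when there is a single ring (line 1139), or from per-queue subdirectories when multiqueue was negotiated (lines 1148-1151). A sketch of the multiqueue path (the "queue-%u" naming is the blkif multiqueue convention; the buffer sizing here is an assumption):

/* Sketch: each queue's refs live under "<otherend>/queue-<i>". */
size_t xspathsize = strlen(dev->otherend) + sizeof("/queue-NNN");
char *xspath = kmalloc(xspathsize, GFP_KERNEL);

if (!xspath)
	return -ENOMEM;

for (i = 0; i < blkif->nr_rings; i++) {
	snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
	err = read_per_ring_refs(&blkif->rings[i], xspath);
	if (err) {
		kfree(xspath);
		return err;
	}
}
kfree(xspath);
return 0;
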