Lines matching refs: pending_req

146 				struct pending_req *pending_req);
407 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
409 static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
411 struct pending_req *req = NULL;
416 req = list_entry(ring->pending_free.next, struct pending_req,
425 * Return the 'pending_req' structure back to the freepool. We also
428 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
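
The matches at 407-428 are the per-ring request pool (these identifiers are from the Xen block backend, drivers/block/xen-blkback/blkback.c): alloc_req() pops a preallocated pending_req off a locked free list and returns NULL when every slot is in flight, while free_req() pushes the request back and wakes the backend thread when the pool goes from empty to non-empty. A minimal userspace sketch of that pattern, assuming a pthreads lock and an intrusive next pointer in place of the kernel's spinlock, list_head, and wait queue:

#include <pthread.h>
#include <stddef.h>

/* Illustrative stand-in for the kernel's struct pending_req. */
struct pending_req {
	struct pending_req *next;	/* free-list linkage (assumption) */
	unsigned long id;
};

static struct pending_req *pending_free;	/* head of the free pool */
static pthread_mutex_t pending_free_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pending_free_wq = PTHREAD_COND_INITIALIZER;

/* Pop a free request, or return NULL when all slots are in flight. */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;

	pthread_mutex_lock(&pending_free_lock);
	if (pending_free) {
		req = pending_free;
		pending_free = req->next;
	}
	pthread_mutex_unlock(&pending_free_lock);
	return req;
}

/* Return a request to the pool; wake a waiter only on the
 * empty -> non-empty transition, as free_req() does. */
static void free_req(struct pending_req *req)
{
	int was_empty;

	pthread_mutex_lock(&pending_free_lock);
	was_empty = (pending_free == NULL);
	req->next = pending_free;
	pending_free = req;
	pthread_mutex_unlock(&pending_free_lock);
	if (was_empty)
		pthread_cond_signal(&pending_free_wq);
}
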
673 struct pending_req *pending_req = (struct pending_req *)(data->data);
674 struct xen_blkif_ring *ring = pending_req->ring;
682 make_response(ring, pending_req->id,
683 pending_req->operation, pending_req->status);
684 free_req(ring, pending_req);
703 static void xen_blkbk_unmap_and_respond(struct pending_req *req)
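
Lines 673-703 are the asynchronous completion path: xen_blkbk_unmap_and_respond() queues a batched grant unmap with the pending_req pointer stashed in the batch's opaque data field, and the callback that runs when the unmap finishes answers the frontend and recycles the request. A hedged condensation, with the queue-data struct approximated and make_response()/free_req() reduced to stubs:

struct pending_req {
	unsigned long id;
	unsigned short operation;
	int status;
};

/* Stand-ins for the real helpers matched at lines 682-684. */
static void make_response(unsigned long id, unsigned short op, int st)
{ (void)id; (void)op; (void)st; }
static void free_req(struct pending_req *req) { (void)req; }

/* Approximation of the kernel's unmap batch descriptor: the opaque
 * data pointer carries the pending_req across the asynchronous unmap. */
struct unmap_queue_data {
	void (*done)(struct unmap_queue_data *data);
	void *data;
};

/* Runs once the grant unmap completes: respond, then recycle. */
static void unmap_and_respond_callback(struct unmap_queue_data *data)
{
	struct pending_req *pending_req = data->data;

	make_response(pending_req->id, pending_req->operation,
		      pending_req->status);
	free_req(pending_req);
}
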
824 * so that when we access vaddr(pending_req,i) it has the contents of
902 static int xen_blkbk_map_seg(struct pending_req *pending_req)
906 rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
907 pending_req->nr_segs,
908 (pending_req->operation != BLKIF_OP_READ));
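
xen_blkbk_map_seg() (lines 902-908) grant-maps the request's data pages, and its last argument selects the mapping mode: the backend writes into a guest page only to satisfy a READ, so every other operation can be mapped read-only. Restated as a predicate (the opcode values match the blkif protocol, where BLKIF_OP_READ is 0):

#include <stdbool.h>

enum { BLKIF_OP_READ = 0, BLKIF_OP_WRITE = 1 };	/* from the blkif protocol */

/* The backend writes into the guest's page only for a READ, so
 * everything except READ can be grant-mapped read-only. */
static bool map_read_only(unsigned short operation)
{
	return operation != BLKIF_OP_READ;
}
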
914 struct pending_req *pending_req,
918 struct grant_page **pages = pending_req->indirect_pages;
919 struct xen_blkif_ring *ring = pending_req->ring;
923 nseg = pending_req->nr_segs;
945 pending_req->segments[n]->gref = segments[i].gref;
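
xen_blkbk_parse_indirect() (lines 914-945) handles indirect requests, whose segment array does not fit in a ring slot and instead arrives in extra granted pages (pending_req->indirect_pages). After those pages are mapped, the backend walks the flattened array and copies each grant reference into its per-request segment slot. A sketch of that copy, assuming 4 KiB pages and 8-byte segment entries (512 per indirect frame) and a simplified segment struct:

#define SEGS_PER_INDIRECT_FRAME 512	/* assumption: 4096 / sizeof(segment) */

/* Simplified: the real entry also carries first/last sector fields. */
struct blkif_request_segment { unsigned int gref; unsigned char pad[4]; };
struct grant_page { unsigned int gref; };

static void copy_indirect_grefs(struct blkif_request_segment **indirect_pages,
				struct grant_page **segments,
				unsigned int nseg)
{
	for (unsigned int n = 0; n < nseg; n++) {
		/* pick the indirect page, then the slot inside it */
		struct blkif_request_segment *segs =
			indirect_pages[n / SEGS_PER_INDIRECT_FRAME];
		segments[n]->gref = segs[n % SEGS_PER_INDIRECT_FRAME].gref;
	}
}
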
1011 struct pending_req *pending_req)
1013 free_req(ring, pending_req);
1036 static void __end_block_io_op(struct pending_req *pending_req,
1040 if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
1043 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
1044 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1045 } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
1048 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
1049 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1053 pending_req->status = BLKIF_RSP_ERROR;
1061 if (atomic_dec_and_test(&pending_req->pendcnt))
1062 xen_blkbk_unmap_and_respond(pending_req);
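
__end_block_io_op() (lines 1036-1062) is the per-bio completion: it downgrades failed flush/barrier requests to BLKIF_RSP_EOPNOTSUPP (disabling the feature via xen_blkbk_flush_diskcache() or xen_blkbk_barrier()), records errors, and drops one pendcnt reference; the bio that drops the last reference performs the single unmap-and-respond. A minimal C11 model of that reference count, with the kernel atomics replaced by stdatomic:

#include <stdatomic.h>

struct pending_req {
	atomic_int pendcnt;	/* one reference per in-flight bio */
	int status;
};

static void unmap_and_respond(struct pending_req *req) { (void)req; }

static void end_block_io_op(struct pending_req *req, int error)
{
	if (error)
		req->status = -1;	/* stand-in for BLKIF_RSP_ERROR */
	/* atomic_fetch_sub() returns the old value: seeing 1 means this
	 * completion dropped the last reference and must respond. */
	if (atomic_fetch_sub(&req->pendcnt, 1) == 1)
		unmap_and_respond(req);
}
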
1086 struct pending_req *pending_req;
1113 pending_req = alloc_req(ring);
1114 if (NULL == pending_req) {
1144 if (dispatch_rw_block_io(ring, &req, pending_req))
1148 free_req(ring, pending_req);
1153 if (dispatch_other_io(ring, &req, pending_req))
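
Lines 1086-1153 sit in the ring-consumption loop: each request copied off the shared ring is paired with a pending_req, and a NULL from alloc_req() means every slot is in flight, so consumption stops until free_req() wakes the thread again. The free_req() at line 1148 is the discard case, which needs no data pages and returns its pending_req immediately, whereas dispatch_rw_block_io() cleans up after itself on failure. An outline of that control flow, with the ring accessors reduced to illustrative stubs:

#include <stdbool.h>
#include <stddef.h>

/* All types and helpers below are illustrative stand-ins. */
struct ring;
struct pending_req;
struct request { int operation; };

enum { OP_RW, OP_DISCARD };

static bool ring_request_pending(struct ring *r) { (void)r; return false; }
static void copy_request(struct ring *r, struct request *rq) { (void)r; (void)rq; }
static struct pending_req *alloc_req(struct ring *r) { (void)r; return NULL; }
static void free_req(struct ring *r, struct pending_req *p) { (void)r; (void)p; }
static int dispatch_rw(struct ring *r, struct request *rq, struct pending_req *p)
{ (void)r; (void)rq; (void)p; return 0; }	/* cleans up p itself on failure */
static int dispatch_discard(struct ring *r, struct request *rq)
{ (void)r; (void)rq; return 0; }

static int consume_ring(struct ring *ring, bool *more_to_do)
{
	struct request req;
	struct pending_req *pending_req;

	while (ring_request_pending(ring)) {
		pending_req = alloc_req(ring);
		if (pending_req == NULL) {
			*more_to_do = true;	/* pool exhausted: re-poll later */
			break;
		}
		copy_request(ring, &req);

		switch (req.operation) {
		case OP_RW:
			if (dispatch_rw(ring, &req, pending_req))
				return -1;
			break;
		case OP_DISCARD:
			/* discard carries no data pages: recycle immediately */
			free_req(ring, pending_req);
			if (dispatch_discard(ring, &req))
				return -1;
			break;
		}
	}
	return 0;
}
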
1187 struct pending_req *pending_req)
1190 struct seg_buf *seg = pending_req->seg;
1193 struct bio **biolist = pending_req->biolist;
1199 struct grant_page **pages = pending_req->segments;
1252 pending_req->ring = ring;
1253 pending_req->id = req->u.rw.id;
1254 pending_req->operation = req_operation;
1255 pending_req->status = BLKIF_RSP_OKAY;
1256 pending_req->nr_segs = nseg;
1275 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1305 xen_blk_drain_io(pending_req->ring);
1313 if (xen_blkbk_map_seg(pending_req))
1337 bio->bi_private = pending_req;
1356 bio->bi_private = pending_req;
1361 atomic_set(&pending_req->pendcnt, nbio);
1378 xen_blkbk_unmap(ring, pending_req->segments,
1379 pending_req->nr_segs);
1383 free_req(ring, pending_req);
1390 atomic_set(&pending_req->pendcnt, 1);
1391 __end_block_io_op(pending_req, BLK_STS_RESOURCE);
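
The tail of dispatch_rw_block_io() (lines 1187-1391) fills in the pending_req, parses indirect segments, drains in-flight I/O before barriers, grant-maps the data pages, and takes one pendcnt reference per bio before submitting (line 1361). Its two failure paths differ in what must be unwound: with no bios submitted, it unmaps, responds with an error, and frees directly (lines 1378-1383); once pages are mapped but bio setup fails, it sets pendcnt to 1 and fails that single reference with BLK_STS_RESOURCE (lines 1390-1391) so the normal completion path performs the unmap and response. A condensed sketch, all helper names illustrative:

#include <stdatomic.h>
#include <stdbool.h>

struct pending_req {
	atomic_int pendcnt;
	int status;
};

/* Stand-ins for the real mapping, response, and bio helpers. */
static bool map_segments(struct pending_req *req)    { (void)req; return true; }
static void unmap_segments(struct pending_req *req)  { (void)req; }
static void respond_error(struct pending_req *req)   { (void)req; }
static void free_req(struct pending_req *req)        { (void)req; }
static void end_block_io_op(struct pending_req *req, int error)
{ (void)req; (void)error; }
static int  build_bios(struct pending_req *req)      { (void)req; return 1; }
static void submit_bios(struct pending_req *req, int nbio)
{ (void)req; (void)nbio; }

static int dispatch(struct pending_req *pending_req)
{
	int nbio;

	if (!map_segments(pending_req))
		goto fail_flush;	/* partial maps must be torn down */

	nbio = build_bios(pending_req);
	if (nbio < 0)
		goto fail_put_bio;

	/* One reference per bio, taken before any bio is submitted. */
	atomic_store(&pending_req->pendcnt, nbio);
	submit_bios(pending_req, nbio);
	return 0;

 fail_flush:
	unmap_segments(pending_req);
	/* No bios submitted yet: respond and recycle directly. */
	respond_error(pending_req);
	free_req(pending_req);
	return -1;

 fail_put_bio:
	/* Pages are mapped: fake a single in-flight reference and fail it,
	 * so the ordinary completion path does the unmap + response. */
	atomic_store(&pending_req->pendcnt, 1);
	end_block_io_op(pending_req, 1 /* stand-in for BLK_STS_RESOURCE */);
	return -1;
}
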