Lines matching refs: req
145 struct blkif_request *req,
411 struct pending_req *req = NULL;
416 req = list_entry(ring->pending_free.next, struct pending_req,
418 list_del(&req->free_list);
421 return req;
428 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
435 list_add(&req->free_list, &ring->pending_free);
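alloc_req() and free_req() above implement a fixed-size object pool: pending_req structures are recycled through ring->pending_free under ring->pending_free_lock, and free_req() wakes any thread sleeping on ring->pending_free_wq when the list had gone empty. A minimal standalone sketch of the same free-list pattern (names here are illustrative, not the driver's):

#include <stddef.h>

struct node {
        struct node *next;      /* the payload would follow */
};

static struct node *pool_head;  /* plays the role of ring->pending_free */

/* Pop one object; NULL means the pool is exhausted (blkback instead
 * sleeps on pending_free_wq until free_req() replenishes the list). */
static struct node *pool_alloc(void)
{
        struct node *n = pool_head;

        if (n)
                pool_head = n->next;
        return n;
}

/* Return the object to the pool; the driver additionally holds a
 * spinlock around both operations and wakes waiters here. */
static void pool_free(struct node *n)
{
        n->next = pool_head;
        pool_head = n;
}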
444 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
453 if (likely(req->nr_sects)) {
454 blkif_sector_t end = req->sector_number + req->nr_sects;
456 if (unlikely(end < req->sector_number))
462 req->dev = vbd->pdevice;
463 req->bdev = vbd->bdev;
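xen_vbd_translate() validates the request's sector range before filling in the target device. The end < req->sector_number comparison at lines 454-456 is the standard unsigned-overflow idiom: if sector_number + nr_sects wraps around 64 bits, end lands below the start. A self-contained sketch of that check:

#include <stdbool.h>
#include <stdint.h>

/* True when [start, start + len) fits below 'capacity' without
 * wrapping; mirrors the end < req->sector_number test above
 * (capacity stands in for the device size the driver compares against). */
static bool range_ok(uint64_t start, uint64_t len, uint64_t capacity)
{
        uint64_t end = start + len;

        if (end < start)        /* unsigned addition wrapped around */
                return false;
        return end <= capacity;
}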
703 static void xen_blkbk_unmap_and_respond(struct pending_req *req)
705 struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
706 struct xen_blkif_ring *ring = req->ring;
707 struct grant_page **pages = req->segments;
710 invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
711 req->unmap, req->unmap_pages);
713 work->data = req;
715 work->unmap_ops = req->unmap;
717 work->pages = req->unmap_pages;
720 gnttab_unmap_refs_async(&req->gnttab_unmap_data);
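xen_blkbk_unmap_and_respond() queues all of a request's grant unmaps as one asynchronous batch: xen_blkbk_unmap_prepare() fills req->unmap[] and req->unmap_pages[], and gnttab_unmap_refs_async() completes later through the callback stored in work->done (xen_blkbk_unmap_and_respond_callback in this file). The lines elided from this listing fill in the rest of the gntab_unmap_queue_data; they look approximately like this kernel-style fragment (not standalone):

	work->data       = req;         /* handed back to the callback */
	work->done       = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops  = req->unmap;
	work->kunmap_ops = NULL;        /* no kernel-side mappings to undo */
	work->pages      = req->unmap_pages;
	work->count      = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);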
913 static int xen_blkbk_parse_indirect(struct blkif_request *req,
928 pages[i]->gref = req->u.indirect.indirect_grefs[i];
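For BLKIF_OP_INDIRECT the segment descriptors do not fit in the ring slot itself: req->u.indirect.indirect_grefs[] holds grant references to extra shared pages, each packed with SEGS_PER_INDIRECT_FRAME blkif_request_segment entries. xen_blkbk_parse_indirect() records those grants, maps the pages, and copies each segment out, roughly (kernel-style fragment, illustrative only):

	/* record the grant for every indirect page, then map them all */
	for (i = 0; i < nr_indirect_pages; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	/* after mapping, segment j lives in
	 *   page j / SEGS_PER_INDIRECT_FRAME,
	 *   slot j % SEGS_PER_INDIRECT_FRAME of that page */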
967 struct blkif_request *req)
978 preq.sector_number = req->u.discard.sector_number;
979 preq.nr_sects = req->u.discard.nr_sectors;
991 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
994 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
995 req->u.discard.nr_sectors,
1004 make_response(ring, req->u.discard.id, req->operation, status);
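dispatch_discard_io() runs the range through xen_vbd_translate() and then hands it to the block layer; BLKIF_DISCARD_SECURE in the frontend's flag requests a secure erase, honored only when the vbd advertises discard_secure. A hedged sketch against the pre-5.19 blkdev_issue_discard() signature, which still took a flags argument (newer kernels split this into blkdev_issue_secure_erase()):

	unsigned long flags = 0;
	int status = BLKIF_RSP_OKAY;

	if (blkif->vbd.discard_secure &&
	    (req->u.discard.flag & BLKIF_DISCARD_SECURE))
		flags |= BLKDEV_DISCARD_SECURE;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, flags);
	if (err == -EOPNOTSUPP)
		status = BLKIF_RSP_EOPNOTSUPP;
	else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(ring, req->u.discard.id, req->operation, status);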
1010 struct blkif_request *req,
1014 make_response(ring, req->u.other.id, req->operation,
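Operations the backend does not implement fall through to dispatch_other_io(), which refuses them without touching the disk so the frontend can fall back gracefully; the body is essentially:

	free_req(ring, pending_req);
	make_response(ring, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;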
1085 struct blkif_request req;
1122 memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1125 blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1128 blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
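__do_block_io_op() copies every request out of the shared ring before looking at it, switching on the negotiated ABI because 32-bit and 64-bit x86 frontends lay out blkif_request differently. Working on a private copy also prevents the frontend from rewriting fields between validation and use (a shared-memory TOCTOU hazard). The full switch reads approximately:

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
		       sizeof(req));
		break;
	case BLKIF_PROTOCOL_X86_32:
		blkif_get_x86_32_req(&req,
				RING_GET_REQUEST(&blk_rings->x86_32, rc));
		break;
	case BLKIF_PROTOCOL_X86_64:
		blkif_get_x86_64_req(&req,
				RING_GET_REQUEST(&blk_rings->x86_64, rc));
		break;
	default:
		BUG();
	}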
1138 switch (req.operation) {
1144 if (dispatch_rw_block_io(ring, &req, pending_req))
1149 if (dispatch_discard_io(ring, &req))
1153 if (dispatch_other_io(ring, &req, pending_req))
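With the copy in hand, req.operation routes the request: reads, writes, barriers, flushes and indirect requests all go through dispatch_rw_block_io(); discards take dispatch_discard_io(), which needs no pending_req; anything else ends in dispatch_other_io(). Approximately:

	switch (req.operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
	case BLKIF_OP_INDIRECT:
		if (dispatch_rw_block_io(ring, &req, pending_req))
			goto done;
		break;
	case BLKIF_OP_DISCARD:
		free_req(ring, pending_req);
		if (dispatch_discard_io(ring, &req))
			goto done;
		break;
	default:
		if (dispatch_other_io(ring, &req, pending_req))
			goto done;
		break;
	}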
1186 struct blkif_request *req,
1202 req_operation = req->operation == BLKIF_OP_INDIRECT ?
1203 req->u.indirect.indirect_op : req->operation;
1205 if ((req->operation == BLKIF_OP_INDIRECT) &&
1237 nseg = req->operation == BLKIF_OP_INDIRECT ?
1238 req->u.indirect.nr_segments : req->u.rw.nr_segments;
1241 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1243 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
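dispatch_rw_block_io() first unwraps indirect requests (lines 1202-1203 pick the real opcode out of u.indirect.indirect_op), then bounds-checks the segment count before touching any grant: direct requests are capped at BLKIF_MAX_SEGMENTS_PER_REQUEST, indirect ones at the larger MAX_INDIRECT_SEGMENTS. Condensed, the validation is approximately (the driver additionally permits nseg == 0 for pure cache flushes):

	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0) ||
	    unlikely(req->operation != BLKIF_OP_INDIRECT &&
		     nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ||
	    unlikely(req->operation == BLKIF_OP_INDIRECT &&
		     nseg > MAX_INDIRECT_SEGMENTS))
		goto fail_response;	/* malformed frontend request */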
1253 pending_req->id = req->u.rw.id;
1258 if (req->operation != BLKIF_OP_INDIRECT) {
1259 preq.dev = req->u.rw.handle;
1260 preq.sector_number = req->u.rw.sector_number;
1262 pages[i]->gref = req->u.rw.seg[i].gref;
1263 seg[i].nsec = req->u.rw.seg[i].last_sect -
1264 req->u.rw.seg[i].first_sect + 1;
1265 seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1266 if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
1267 (req->u.rw.seg[i].last_sect <
1268 req->u.rw.seg[i].first_sect))
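Each direct segment names a grant plus a first_sect/last_sect pair inside that granted page; the backend turns this into a byte offset and sector count (sectors are 512 bytes, hence the << 9) and rejects descriptors that run backwards or past the page (XEN_PAGE_SIZE >> 9 sectors). A self-contained sketch of the arithmetic:

#include <stdbool.h>
#include <stdint.h>

#define XEN_PAGE_SIZE 4096u     /* one granted page */

struct seg_geom {
        uint32_t offset;        /* byte offset into the page */
        uint32_t nsec;          /* number of 512-byte sectors */
};

/* Mirrors the seg[i] computation and its validity check above. */
static bool seg_decode(uint8_t first_sect, uint8_t last_sect,
                       struct seg_geom *out)
{
        if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect)
                return false;   /* malformed descriptor */

        out->nsec   = last_sect - first_sect + 1u;
        out->offset = (uint32_t)first_sect << 9;
        return true;
}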
1273 preq.dev = req->u.indirect.handle;
1274 preq.sector_number = req->u.indirect.sector_number;
1275 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1382 make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);