Lines matching defs:request (drivers/block/xen-blkfront.c)
71 * therefore the maximum amount of data that a request can carry is
74 * Note that we only support one extra request. So the Linux page size
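
The arithmetic behind these two comments, worked out as a sketch (the
constants are assumed from the Xen block interface headers, where
BLKIF_MAX_SEGMENTS_PER_REQUEST is 11 and a Xen page is 4 KiB; the two
derived macro names below are hypothetical):

    #define XEN_PAGE_SIZE                  4096
    #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

    /* One ring request carries at most 11 * 4 KiB = 44 KiB of data. */
    #define MAX_DATA_PER_REQUEST (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE)

    /* Only one extra request is supported, so a Linux page must fit in
     * 2 * 44 KiB = 88 KiB. */
    #define MAX_SUPPORTED_PAGE_SIZE (2 * MAX_DATA_PER_REQUEST)
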
103 struct request *request;
113 * block I/O request
122 static inline struct blkif_req *blkif_req(struct request *rq)
301 if (rinfo->shadow[id].request == NULL)
304 rinfo->shadow[id].request = NULL;
534 struct request *req,
543 rinfo->shadow[id].request = req;
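
Lines 103, 301-304 and 534-543 are the shadow bookkeeping: each in-flight
ring slot id remembers its struct request. A minimal sketch of the
completion-side lookup (the helper name here is hypothetical):

    static struct request *shadow_complete(struct blkfront_ring_info *rinfo,
                                           unsigned long id)
    {
            struct request *req = rinfo->shadow[id].request;

            if (req == NULL)        /* cf. the check at line 301 */
                    return NULL;    /* stale or duplicate response */
            rinfo->shadow[id].request = NULL;       /* cf. line 304 */
            return req;
    }
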
552 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
571 /* Copy the request to the ring page. */
585 * Only used when a persistent grant is used and it's a write request
606 * We always use the shadow of the first request to store the list
607 * of grants associated with the block I/O request. This makes the
608 * completion easier to handle even if the block I/O request is
616 * We are using the second request, so set up grant_idx
639 * request. Therefore we have to use the global index.
689 * The second request is only present when the first request uses
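
The comments at lines 606-639 and 689 describe the case where one block
I/O is carried by two ring requests. A simplified sketch of the index
rebasing they imply (variable names assumed from the surrounding code):

    /* Segments past the first ring request go into the extra request,
     * with the per-request grant index rebased accordingly. */
    if (require_extra_req && grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            ring_req = extra_ring_req;      /* the second request */
            grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
    }
    /* The grant list itself lives in the shadow of the first request,
     * so lookups into it keep using the global, un-rebased index. */
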
702 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
718 * Used to store whether we are able to queue the request by just using
730 * for the indirect grefs used in the request.
788 * A barrier request is a superset of FUA, so we can
808 * Only the first request contains the scatter-gather
848 /* Copy request(s) to the ring page. */
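
After the copy at line 848, the producer index is advanced and the
backend is notified. A sketch using the standard Xen ring macros from
xen/interface/io/ring.h (error handling omitted):

    int notify;

    rinfo->ring.req_prod_pvt++;     /* slot filled via RING_GET_REQUEST() */
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
    if (notify)
            notify_remote_via_irq(rinfo->irq);
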
863 * Generate a Xen blkfront IO request from a blk layer request. Reads
866 * @req: a request struct
868 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
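
A plausible reconstruction of this entry point, dispatching to the two
queueing paths declared at lines 552 and 702 (a sketch, not verbatim):

    static int blkif_queue_request(struct request *req,
                                   struct blkfront_ring_info *rinfo)
    {
            if (unlikely(req_op(req) == REQ_OP_DISCARD ||
                         req_op(req) == REQ_OP_SECURE_ERASE))
                    return blkif_queue_discard_req(req, rinfo);
            return blkif_queue_rw_req(req, rinfo);
    }
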
890 static inline bool blkif_request_flush_invalid(struct request *req,
934 static void blkif_complete_rq(struct request *rq)
968 /* Each segment in a request is up to an aligned page in size. */
972 /* Ensure a merged request will fit in a single I/O ring slot. */
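
These two comments sit next to the block-layer limit setup; a sketch of
the calls they annotate (the segment count is assumed to come from the
features negotiated with the backend):

    blk_queue_max_segment_size(rq, PAGE_SIZE);  /* segment <= one aligned page */
    blk_queue_max_segments(rq, segments);       /* merged request fits one slot */
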
1122 * When indirect descriptor is not supported, the I/O request
1123 * will be split across multiple requests in the ring.
1124 * To avoid problems when sending the request, divide by
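
The comment at lines 1122-1124 ends by halving the blk-mq queue depth,
presumably with an assignment of this shape (a sketch):

    /* Two ring slots may be consumed per I/O, so expose half the ring. */
    info->tag_set.queue_depth = BLK_RING_SIZE(info) / 2;
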
1249 if (!rinfo->shadow[i].request)
1362 * Get the final status of the block request based on two ring responses
1399 /* The I/O request may be split in two. */
1421 * first request will store the scatter-gather list.
1430 * We don't need the second request anymore, so recycle
1434 WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
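
Lines 1362-1434 complete an I/O that was split in two. A sketch of how
the two per-ring-request statuses could collapse into one result (the
REQ_* status names are assumed from the driver's enum blk_req_status):

    static int blkif_get_final_status(enum blk_req_status s1,
                                      enum blk_req_status s2)
    {
            if (s1 == REQ_ERROR || s2 == REQ_ERROR)
                    return BLKIF_RSP_ERROR;  /* either half failing fails the I/O */
            if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
                    return BLKIF_RSP_EOPNOTSUPP;
            return BLKIF_RSP_OKAY;
    }
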
1515 struct request *req;
1558 pr_alert("%s: response references no pending request\n",
1564 req = rinfo->shadow[id].request;
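
A sketch of the validation around lines 1558-1564, assuming the shadow
slot's stored request pointer is what marks a slot as pending
(reconstructed, not verbatim):

    if (rinfo->shadow[id].request == NULL) {
            pr_alert("%s: response references no pending request\n",
                     info->gd->disk_name);
            goto err;       /* backend echoed a stale or bogus id */
    }
    req = rinfo->shadow[id].request;
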
1580 * I/O request is split in two
1639 "Bad return from blkdev data request: %#x\n",
2010 struct request *req, *n;
2077 if (!shadow[j].request)
2081 * Get the bios in the request so we can re-queue them.
2083 if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
2084 req_op(shadow[j].request) == REQ_OP_DISCARD ||
2085 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
2086 shadow[j].request->cmd_flags & REQ_FUA) {
2089 * we need to requeue the whole request
2094 list_add(&shadow[j].request->queuelist, &info->requests);
2097 merge_bio.head = shadow[j].request->bio;
2098 merge_bio.tail = shadow[j].request->biotail;
2100 shadow[j].request->bio = NULL;
2101 blk_mq_end_request(shadow[j].request, BLK_STS_OK);
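
Lines 2077-2101 are the two recovery strategies after a resume: ordered
requests are requeued whole, everything else has its bios stolen and
resubmitted. A condensed sketch of that control flow (simplified; the
bio_list plumbing follows the listed lines):

    if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
        req_op(shadow[j].request) == REQ_OP_DISCARD ||
        req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
        shadow[j].request->cmd_flags & REQ_FUA) {
            /* Ordering matters: requeue the whole request untouched. */
            list_add(&shadow[j].request->queuelist, &info->requests);
    } else {
            /* Steal the bios so they can be resubmitted one by one... */
            merge_bio.head = shadow[j].request->bio;
            merge_bio.tail = shadow[j].request->biotail;
            bio_list_merge(&info->bio_list, &merge_bio);
            /* ...then retire the now-empty request. */
            shadow[j].request->bio = NULL;
            blk_mq_end_request(shadow[j].request, BLK_STS_OK);
    }
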