Lines Matching defs:request
183 * When a sync request is dispatched, the queue that contains that
184 * request, and all the ancestor entities of that queue, are charged
185 * with the number of sectors of the request. In contrast, if the
186 * request is async, then the queue and its ancestor entities are
187 * charged with the number of sectors of the request, multiplied by
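A minimal userspace sketch of the charging rule described in the comment above: a sync request is charged its size in sectors, while an async request is charged that size multiplied by a penalty factor. The factor value and all names below are illustrative assumptions, not taken from the source being indexed.

    #include <stdbool.h>

    #define ASYNC_CHARGE_FACTOR 3  /* assumed async penalty, not the source's constant */

    /* charge for a request of nr_sectors sectors, per the rule quoted above */
    static unsigned long charge_for_request(unsigned long nr_sectors, bool sync)
    {
            return sync ? nr_sectors : nr_sectors * ASYNC_CHARGE_FACTOR;
    }
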
453 * @q: the request queue.
490 * We choose the request that is closer to the head right now. Distance
493 static struct request *bfq_choose_req(struct bfq_data *bfqd,
494 struct request *rq1,
495 struct request *rq2,
500 #define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
501 #define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
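The lines above concern picking whichever of two candidate requests is closer to the current head position, with flags marking requests that "wrap" behind the head. A hedged, standalone sketch of that selection rule follows; the wrap test, the tie-breaking and the names are assumptions made for illustration.

    typedef unsigned long long sector_t;

    static sector_t seek_dist(sector_t a, sector_t b)
    {
            return a > b ? a - b : b - a;
    }

    /* return the candidate start sector that is cheaper to reach from head */
    static sector_t choose_closer(sector_t head, sector_t s1, sector_t s2)
    {
            int wrap1 = s1 < head;  /* candidate 1 lies behind the head */
            int wrap2 = s2 < head;  /* candidate 2 lies behind the head */

            if (wrap1 != wrap2)
                    return wrap1 ? s2 : s1;  /* prefer the one ahead of the head */

            /* same side of the head: prefer the shorter seek */
            return seek_dist(s1, head) <= seek_dist(s2, head) ? s1 : s2;
    }
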
620 * If at some level entity is not even active, allow request
909 * 1) a request arrival has caused the queue to become both
996 static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
997 struct request *last)
999 struct request *rq;
1015 static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
1017 struct request *last)
1021 struct request *next, *prev = NULL;
1043 static unsigned long bfq_serv_to_charge(struct request *rq,
1058 * If the first request of a queue changes we make sure that the queue
1059 * has enough budget to serve at least its first request (if the
1060 * request has grown). We do this because if the queue has not enough
1061 * budget for its first request, it has to go through two dispatch
1068 struct request *next_rq = bfqq->next_rq;
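The comment above argues that when a queue's head request changes, the queue's budget should be topped up to cover at least that request, so the queue does not need an extra expire-and-reschedule cycle just to obtain more budget. A toy sketch of that invariant, with assumed structure and field names:

    struct toy_queue {
            unsigned long budget;   /* sectors the queue may still consume */
    };

    /* called whenever the queue's head request (and thus its charge) changes */
    static void update_budget_for_next_rq(struct toy_queue *q,
                                          unsigned long next_rq_charge)
    {
            if (q->budget < next_rq_charge)
                    q->budget = next_rq_charge;  /* grow budget to fit the head request */
    }
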
1321 * exact choice depends on the device and request pattern at
1494 * and did not make it to issue a new request before its last
1495 * request was served;
1498 * a new request before the expiration of the idling-time.
1547 * request (which implies that bfqq expired for one of the above two
1548 * reasons), and 2) such a request has arrived soon. The first
1821 struct request *rq,
1972 * request to arrive for the currently in-service queue, but
1993 * some request completion.
2000 * an injected I/O request may be higher than the think time
2001 * of bfqq, and therefore, if one request was injected when
2002 * bfqq remains empty, this injected request might delay the
2003 * service of the next I/O request for bfqq significantly. In
2011 * injection on request service times, and then to update the
2026 * a) on the bright side, keeping at most one request in
2029 * request is likely to be lower than the think time of bfqq;
2031 * expire before getting its next request. With this request
2084 * receive new I/O only right after some I/O request of the other
2101 * non empty right after a request of Q has been completed within given
2204 static void bfq_add_request(struct request *rq)
2208 struct request *next_rq, *prev;
2297 * Check if this request is a better next-serve candidate.
2334 * arrival time of the last request; as of now, this piece
2359 static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
2372 static sector_t get_sdist(sector_t last_pos, struct request *rq)
2381 struct request *rq)
2428 * Remove queue from request-position tree as it is empty.
2449 struct request *free = NULL;
2485 static int bfq_request_merge(struct request_queue *q, struct request **req,
2489 struct request *__rq;
2503 static void bfq_request_merged(struct request_queue *q, struct request *req,
2510 struct request, rb_node))) {
2513 struct request *prev, *next_rq;
2520 /* Reposition request in its sort_list */
2524 /* Choose next request to be served for bfqq */
2531 * fit the new request and the queue's position in its
2556 * bfq_queues, while BFQ is in use for the request queue q. In fact,
2560 static void bfq_requests_merged(struct request_queue *q, struct request *rq,
2561 struct request *next)
2591 /* Merged request may be in the IO scheduler. Remove it. */
2666 static sector_t bfq_io_struct_pos(void *io_struct, bool request)
2668 if (request)
2674 static int bfq_rq_close_to_sector(void *io_struct, bool request,
2677 return abs(bfq_io_struct_pos(io_struct, request) - sector) <=
2693 * First, if we find a request starting at the end of the last
2694 * request, choose it.
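Several of the lines above test whether a request or bio starts "close" to a reference sector, such as the end of the last request served. A self-contained sketch of such a closeness test, with an assumed threshold value:

    typedef unsigned long long sector_t;

    #define CLOSE_THR 1024ULL  /* assumed closeness window, in sectors */

    /* nonzero if pos lies within CLOSE_THR sectors of the reference sector */
    static int pos_close_to_sector(sector_t pos, sector_t sector)
    {
            sector_t d = pos > sector ? pos - sector : sector - pos;

            return d <= CLOSE_THR;
    }
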
2802 * requests close to the last request served and, by doing so,
2907 void *io_struct, bool request, struct bfq_io_cq *bic)
3013 bfq_rq_close_to_sector(io_struct, request,
3023 * queues. The only thing we need is that the bio/request is not
3027 bfq_io_struct_pos(io_struct, request));
3227 static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
3235 * Disallow merge of a sync bio into an async request.
3337 * request.
3433 struct request *rq)
3450 static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
3467 * If a new request completion has occurred after last
3566 * say exactly when a given dispatched request is served inside the
3571 * On the opposite end, the dispatch time of any request is trivially
3574 * function is to use what is known, namely request dispatch times
3575 * (plus, when useful, request completion times), to estimate what is
3576 * unknown, namely in-device request service rate.
3583 * (no request is silently dropped), the following obvious convergence
3589 * on every request dispatch.
3591 static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
3610 * request dispatch or completion
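The peak-rate lines above describe estimating the in-device service rate from what is actually observable, namely request dispatch (and, when useful, completion) times. A rough userspace sketch of that idea, sampling sectors dispatched over an observation interval and smoothing a peak estimate; the units, smoothing weights and names are assumptions:

    struct rate_estimator {
            unsigned long long sectors;    /* sectors dispatched in the current sample */
            unsigned long long start_ns;   /* start of the observation interval */
            unsigned long long peak_rate;  /* smoothed estimate, sectors per second */
    };

    static void rate_sample_end(struct rate_estimator *e, unsigned long long now_ns)
    {
            unsigned long long dur_ns = now_ns - e->start_ns;
            unsigned long long rate;

            if (dur_ns == 0)
                    return;

            rate = e->sectors * 1000000000ULL / dur_ns;  /* sectors/sec */

            /* exponential smoothing: 7/8 old estimate, 1/8 new sample */
            e->peak_rate = (e->peak_rate * 7 + rate) / 8;

            e->sectors = 0;
            e->start_ns = now_ns;
    }
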
3651 * Remove request from internal lists.
3653 static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
3659 * executed after removing the request from the queue and
3681 * to enqueue more than one request at a time, and hence
3684 * actual request service order. In particular, the critical
3692 * makes its decisions only on a per-request basis. Therefore,
3704 * (reads or writes), request sizes, greediness
3738 * some request already dispatched but still waiting for
3782 * request of Q arrives soon (see the comments on
3801 * most one request at a time, which implies that each queue
3804 * request. It follows that the two queues are served
3868 * queue is served until a new request arrives for bfqq, (4) when bfqq
3924 * backlog and no outstanding request; used by
3964 struct request *next_rq;
3995 * the budget: if there is no request of the
4009 * issued its next request just because it is
4058 * new request in time to enjoy timestamp
4077 * for request completions, or blocking for
4103 * sure that it is large enough for the next request. Since
4109 * it will be updated on the arrival of a new request.
4131 * an I/O request is processed by the device (apart from the trivial
4132 * I/O pattern where a new request is dispatched only after the
4213 * The next function is invoked on the completion of the last request of a
4215 * that, if the next request of the application does not arrive before
4218 * The second requirement is that the request pattern of the application is
4219 * isochronous, i.e., that, after issuing a request or a batch of requests,
4246 * of a request is waited for when a sync queue becomes idle,
4252 * because the latter issue their next request as soon as possible
4276 * likely to be constantly kept so high that any I/O request
4379 * requests, then the request pattern is isochronous
4387 * the request pattern is actually isochronous.
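The lines above treat an application as isochronous when its next request, or batch of requests, arrives within a bounded delay after the previous ones complete. A toy check along those lines, with an assumed bound and field names:

    #define MAX_GAP_NS (100ULL * 1000 * 1000)  /* assumed 100 ms arrival bound */

    struct pattern_state {
            unsigned long long last_completion_ns;
            int isochronous;
    };

    /* called on a new request arrival for the application being tracked */
    static void on_request_arrival(struct pattern_state *s,
                                   unsigned long long now_ns)
    {
            s->isochronous = (now_ns - s->last_completion_ns) <= MAX_GAP_NS;
    }
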
4422 /* mark bfqq as waiting a request only if a bic still points to it */
4459 * just checked on request arrivals and completions, as well as on
4469 * device idled) for the arrival of a new request, then we may incur
4512 * the request pattern for bfqq is I/O-bound and sequential, or
4514 * not NCQ-capable and the request pattern for bfqq is
4536 * When the request pool is saturated (e.g., in the presence
4540 * higher probability to get a request from the pool
4557 * requests from the request pool, before the busy
4634 * request for the queue.
4647 * I/O request to inject, if it finds a compatible queue. See the
4681 * then temporarily raise inject limit to one request.
4697 * its next request. In fact:
4712 * Allow for only one large in-flight request
4721 * request of the in-service queue wait for so
4725 * there is only one in-flight large request
4804 struct request *next_rq;
4838 * request served.
4851 * enough to serve the next request, even if
4859 * not disable disk idling even when a new request
4864 * If we get here: 1) at least a new request
4866 * timer because the request was too small,
4885 * for a new request, or has requests waiting for a completion and
4909 * pick an I/O request from.
4979 * injects a pending I/O request of the waker queue
5101 * Dispatch next request from bfqq.
5103 static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
5106 struct request *rq = bfqq->next_rq;
5159 static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
5162 struct request *rq = NULL;
5166 rq = list_first_entry(&bfqd->dispatch, struct request,
5188 * this request. So, to avoid unbalance, just start
5189 * this request, without incrementing tot_rq_in_driver. As
5191 * lower than it should be while this request is in
5197 * probably invoked also on this request. So, by
5217 * Force device to serve one request at a time if
5219 * currently the ONLY way to guarantee that the request
5222 * some unlucky request wait for as long as the device
5225 * Of course, serving one request at a time may cause loss of
5250 struct request *rq,
5276 * in_serv_queue contained some request when
5295 struct request *rq,
5300 static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
5303 struct request *rq;
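The dispatch-side lines above distinguish requests queued for immediate dispatch from the head request of the in-service queue. A schematic userspace model of that preference order; the types and the singly linked list handling are simplified assumptions:

    struct toy_rq { struct toy_rq *next; };

    struct toy_sched {
            struct toy_rq *dispatch_head;    /* bypass list, served first */
            struct toy_rq *in_service_head;  /* head of the in-service queue */
    };

    static struct toy_rq *toy_dispatch(struct toy_sched *s)
    {
            struct toy_rq *rq;

            if (s->dispatch_head) {
                    rq = s->dispatch_head;
                    s->dispatch_head = rq->next;
                    return rq;
            }

            rq = s->in_service_head;
            if (rq)
                    s->in_service_head = rq->next;
            return rq;
    }
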
5623 /* set end request to minus infinity from now */
5654 /* first request is almost certainly seeky */
5747 * the total per-request processing time, the above throughput boost
5917 struct request *rq)
6075 * Called when a new fs request (rq) is added to bfqq. Check if there's
6079 struct request *rq)
6092 * There is just this request queued: if
6093 * - the request is small, and
6099 * for a new request from the in-service queue, we
6101 * device to serve just a small request. In contrast
6112 * A large enough request arrived, or idling is being
6122 * The queue is not empty, because a new request just
6151 static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
6160 * Release the request's reference to the old bfqq
6168 * issuing this request still points to bfqq
6237 static struct bfq_queue *bfq_init_rq(struct request *rq);
6239 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
6285 * may disappear afterwards (for example, because of a request
6300 struct request *rq;
6302 rq = list_first_entry(list, struct request, queuelist);
6364 * no outstanding request; used by the weight-raising
6384 * If the request took rather long to complete, and, according
6385 * to the maximum request size recorded, this completion latency
6386 * implies that the request was certainly served at a very low
6393 * request dispatch or completion
6419 * If we are waiting to discover whether the request pattern
6452 * implies that, even if no request arrives
6500 * To counter this loss of throughput, BFQ implements a "request
6512 * which the inject limit is computed. We define as first request for
6513 * bfqq, an I/O request for bfqq that arrives while bfqq is in
6523 * time of an I/O request, the time that elapses since when the
6524 * request is enqueued into bfqq, to when it is completed. This
6527 * actually injected while bfqq is empty, and that a new request R
6534 * before R, some extra request still present in its queues. As a
6540 * first request of bfqq, the algorithm measures the total time of the
6541 * request only if one of the three cases below holds, and, for each
6544 * (1) If there is no in-flight request. This gives a baseline for the
6609 * request in flight, because this function is in the code
6610 * path that handles the completion of a request of bfqq, and,
6626 * No I/O injected and no request still in service in
6637 /* update complete, not waiting for any request completion any longer */
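The injection-limit lines above describe sampling the total service time of a request (from enqueue to completion), keeping a baseline from samples taken with no other I/O in flight, and raising or lowering the injection limit depending on how much injected I/O inflates that baseline. A simplified sketch of such an adaptation loop, with assumed threshold and step sizes:

    struct inject_ctl {
            unsigned long long baseline_ns;  /* service time measured with no injection */
            unsigned int limit;              /* max requests injected at a time */
    };

    /* called when a request completes; total_ns is its enqueue-to-completion time */
    static void inject_sample(struct inject_ctl *c,
                              unsigned long long total_ns,
                              unsigned int inflight_others)
    {
            if (inflight_others == 0) {
                    c->baseline_ns = total_ns;  /* refresh the no-injection baseline */
                    return;
            }

            /* tolerate up to ~50% inflation of the baseline service time */
            if (total_ns <= c->baseline_ns + c->baseline_ns / 2)
                    c->limit++;                 /* injection looks harmless */
            else if (c->limit > 0)
                    c->limit--;                 /* injection is hurting latency */
    }
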
6648 static void bfq_finish_requeue_request(struct request *rq)
6656 * requeued request that has not (yet) been re-inserted into
6685 * invoked again on this same request (see the check at the
6688 * or finish hooks of an elevator, for a request that is not
6692 * request-insertion logic if rq is re-inserted into a bfq
6703 static void bfq_finish_request(struct request *rq)
6805 * Only reset private fields. The actual request preparation will be
6810 static void bfq_prepare_request(struct request *rq)
6833 * rq, rq may still be transformed into a request with no icq, i.e., a
6834 * request not associated with any queue. No bfq hook is invoked to
6845 static struct bfq_queue *bfq_init_rq(struct request *rq)
6864 * events, a request cannot be manipulated any longer before
7009 * first request of the in-service queue arrives
7035 * different from the queue that was idling if a new request
7243 * access ranges from the request queue structure.
7355 /* We dispatch from request queue wide instead of hw queue */