Lines Matching defs:ops
398 if (e->type->ops.prepare_request)
399 e->type->ops.prepare_request(rq);
463 struct elevator_mq_ops *ops = &q->elevator->type->ops;
468 if (ops->limit_depth)
469 ops->limit_depth(data->cmd_flags, data);
690 q->elevator->type->ops.finish_request(rq);
2575 q->elevator->type->ops.insert_requests(hctx, &list, flags);
2795 this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
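The matches above (398 through 2795) are the blk-mq core calling into the I/O scheduler through q->elevator->type->ops, i.e. struct elevator_mq_ops: prepare_request and finish_request bracket a request's lifetime, limit_depth lets the scheduler shrink the tag depth for an allocation, and insert_requests hands a list of requests over for queueing. A minimal scheduler-side sketch of those hooks follows; the sketch_* names and callback bodies are hypothetical, only the signatures and the elevator_type layout follow the in-tree schedulers (mq-deadline, kyber), and since elevator.h is private to the block layer this would only build inside block/.

/* Hypothetical scheduler-side sketch of the hooks invoked above.
 * Bodies are placeholders; signatures follow struct elevator_mq_ops. */
#include <linux/blk-mq.h>
#include <linux/module.h>
#include "elevator.h"           /* block-layer private headers */
#include "blk-mq.h"

static void sketch_prepare_request(struct request *rq)
{
        rq->elv.priv[0] = NULL;         /* per-request scheduler data, if any */
}

static void sketch_finish_request(struct request *rq)
{
        /* release whatever prepare_request set up */
}

static void sketch_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
        if (!op_is_sync(opf))
                data->shallow_depth = 16;       /* throttle async tag allocation */
}

static void sketch_insert_requests(struct blk_mq_hw_ctx *hctx,
                                   struct list_head *list, blk_insert_t flags)
{
        /* move requests from *list onto internal scheduler queues */
}

static struct elevator_type sketch_sched = {
        .ops = {
                .prepare_request        = sketch_prepare_request,
                .finish_request         = sketch_finish_request,
                .limit_depth            = sketch_limit_depth,
                .insert_requests        = sketch_insert_requests,
                /* a real scheduler also needs init_sched, dispatch_request,
                 * has_work, ... */
        },
        .elevator_name  = "sketch-sched",
        .elevator_owner = THIS_MODULE,
};

A module init function would register this with elv_register(&sketch_sched) and remove it with elv_unregister(&sketch_sched).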
3297 if (tags->static_rqs && set->ops->exit_request) {
3305 set->ops->exit_request(set, rq, hctx_idx);
3403 if (set->ops->init_request) {
3404 ret = set->ops->init_request(set, rq, hctx_idx, node);
3464 * to additional allocations like via ops->init_request().
3658 if (set->ops->exit_request)
3659 set->ops->exit_request(set, flush_rq, hctx_idx);
3661 if (set->ops->exit_hctx)
3662 set->ops->exit_hctx(hctx, hctx_idx);
3699 if (set->ops->init_hctx &&
3700 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3713 if (set->ops->exit_request)
3714 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3716 if (set->ops->exit_hctx)
3717 set->ops->exit_hctx(hctx, hctx_idx);
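The matches in the 3297-3717 range are the tag-set lifecycle: as requests and hardware contexts are allocated or torn down, blk-mq calls the driver's init_request/exit_request and init_hctx/exit_hctx callbacks from struct blk_mq_ops, passing set->driver_data to init_hctx as line 3700 shows. A hedged driver-side sketch, with hypothetical my_* names and payload types, looks like this:

/* Hypothetical driver callbacks matching the call sites above.
 * struct my_cmd / struct my_hctx_data and the my_* names are illustrative;
 * the signatures follow struct blk_mq_ops. */
#include <linux/blk-mq.h>

struct my_cmd { void *dma_buf; };

static int my_init_request(struct blk_mq_tag_set *set, struct request *rq,
                           unsigned int hctx_idx, unsigned int numa_node)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);      /* pdu sized by set->cmd_size */

        cmd->dma_buf = NULL;    /* allocate per-request resources here */
        return 0;
}

static void my_exit_request(struct blk_mq_tag_set *set, struct request *rq,
                            unsigned int hctx_idx)
{
        struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->dma_buf = NULL;    /* free what init_request allocated */
}

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
                        unsigned int hctx_idx)
{
        /* driver_data is set->driver_data, as passed at line 3700 above */
        hctx->driver_data = driver_data;
        return 0;
}

static void my_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        hctx->driver_data = NULL;
}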
4280 q->mq_ops = set->ops;
4413 if (set->ops->map_queues && !is_kdump_kernel()) {
4433 set->ops->map_queues(set);
4493 if (!set->ops->queue_rq)
4496 if (!set->ops->get_budget ^ !set->ops->put_budget)
4582 const struct blk_mq_ops *ops, unsigned int queue_depth,
4586 set->ops = ops;
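Lines 4493 and 4496 are the sanity checks in blk_mq_alloc_tag_set(): queue_rq is the only mandatory callback, and the XOR of the two NULL tests rejects a tag set that supplies get_budget without put_budget or vice versa. Lines 4582 and 4586 appear to come from blk_mq_alloc_sq_tag_set(), the single-queue helper that copies the caller's ops into set->ops before running the same checks. A hedged sketch of the driver side (my_* names are hypothetical; the lifecycle callbacks are the ones sketched above):

/* Hypothetical tag-set setup; queue_rq is the one callback
 * blk_mq_alloc_tag_set() insists on. */
#include <linux/blk-mq.h>
#include <linux/string.h>
#include <linux/numa.h>

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);
        /* issue rq to hardware here; complete later via blk_mq_complete_request() */
        return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
        .queue_rq       = my_queue_rq,
        .init_request   = my_init_request,      /* from the sketch above */
        .exit_request   = my_exit_request,
        .init_hctx      = my_init_hctx,
        .exit_hctx      = my_exit_hctx,
        /* .get_budget and .put_budget, if used, must both be set */
};

static int my_create_tagset(struct blk_mq_tag_set *set, void *drv)
{
        memset(set, 0, sizeof(*set));
        set->ops         = &my_mq_ops;
        set->nr_hw_queues = 1;
        set->queue_depth  = 128;
        set->numa_node    = NUMA_NO_NODE;
        set->cmd_size     = sizeof(struct my_cmd);
        set->driver_data  = drv;

        return blk_mq_alloc_tag_set(set);       /* runs the checks quoted above */
}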
4655 if (q->elevator && q->elevator->type->ops.depth_updated)
4656 q->elevator->type->ops.depth_updated(hctx);
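The final two matches are the depth-update path (blk_mq_update_nr_requests()) telling the scheduler that the queue depth changed: if the elevator provides depth_updated, it can resize whatever it derived from q->nr_requests. A hedged continuation of the scheduler sketch above (sketch_sched_data is hypothetical; the recomputation and the sbitmap call are modeled on what mq-deadline does, assuming a recent tree where bitmap_tags is embedded in struct blk_mq_tags):

/* Hypothetical depth_updated hook: recompute a cached async-depth limit
 * whenever the per-hctx tag depth changes. */
struct sketch_sched_data { unsigned int async_depth; };

static void sketch_depth_updated(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct sketch_sched_data *sd = q->elevator->elevator_data;

        sd->async_depth = max(1UL, 3 * q->nr_requests / 4);
        sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
                                        sd->async_depth);
}

Wired into the elevator_type above as .depth_updated = sketch_depth_updated, this keeps the async limit used by limit_depth in step with the depth chosen by the user or driver.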