Lines matching references to rinfo

Each line below is prefixed with its line number in the source file being cross-referenced; the identifiers (blkfront_ring_info, blkif_queue_request, the xenbus_* calls) show this is the Xen blkfront block-device front end.
227 struct blkfront_ring_info *rinfo;
270 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
275 for ((ptr) = (info)->rinfo, (idx) = 0; \
283 return (void *)info->rinfo + i * info->rinfo_size;
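The matches at 275 and 283 come from the per-ring iteration helper and the get_rinfo() accessor: each ring's info struct ends in a flexible shadow array sized at run time (see the struct_size()/kvcalloc() calls at 1911-1913), so the rings are laid out back to back and addressed by byte stride rather than by ordinary array indexing. A minimal user-space model of that layout; the names (ring_info, dev_info, get_ring) are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for blkfront_ring_info: a fixed header plus a flexible
     * array whose length is only known once the ring size is negotiated. */
    struct ring_info {
        int id;
        size_t nr_shadow;
        unsigned long shadow[];     /* flexible array member */
    };

    struct dev_info {
        unsigned int nr_rings;
        size_t ring_size;           /* bytes per ring, shadow included */
        void *rings;                /* one allocation holds all rings */
    };

    /* Analogue of get_rinfo(): plain rings[i] would be wrong because the
     * element size is ring_size, not sizeof(struct ring_info). */
    static struct ring_info *get_ring(const struct dev_info *info, unsigned int i)
    {
        return (struct ring_info *)((char *)info->rings + i * info->ring_size);
    }

    int main(void)
    {
        struct dev_info info = { .nr_rings = 4 };
        const size_t nr_shadow = 32;

        info.ring_size = sizeof(struct ring_info) +
                         nr_shadow * sizeof(unsigned long);
        info.rings = calloc(info.nr_rings, info.ring_size);
        if (!info.rings)
            return 1;

        /* Walking all rings with the same stride mirrors for_each_rinfo(). */
        for (unsigned int i = 0; i < info.nr_rings; i++) {
            struct ring_info *r = get_ring(&info, i);

            r->id = (int)i;
            r->nr_shadow = nr_shadow;
        }
        printf("ring 2 starts %zu bytes into the block\n", 2 * info.ring_size);
        free(info.rings);
        return 0;
    }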
286 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
288 unsigned long free = rinfo->shadow_free;
290 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
291 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
292 rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
296 static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
299 if (rinfo->shadow[id].req.u.rw.id != id)
301 if (rinfo->shadow[id].request == NULL)
303 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
304 rinfo->shadow[id].request = NULL;
305 rinfo->shadow_free = id;
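The helpers at 286-305 manage a free list of shadow slots that is threaded through the slots themselves: while a slot is free, its req.u.rw.id field stores the index of the next free slot, and shadow_free holds the head; the loop at 1879-1880 seeds that chain. A stand-alone sketch of the same scheme (the struct names and poison values are illustrative; the driver additionally cross-checks the stored id on free):

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 8UL

    struct shadow_entry {
        unsigned long next_free;    /* reuses the request-id field while free */
        void *request;              /* NULL while the slot is free */
    };

    struct ring_state {
        unsigned long shadow_free;  /* index of the first free slot */
        struct shadow_entry shadow[RING_SIZE];
    };

    /* Mirrors the init at 1879-1880: slot j links to j + 1, last is a sentinel. */
    static void free_list_init(struct ring_state *r)
    {
        for (unsigned long j = 0; j < RING_SIZE; j++) {
            r->shadow[j].next_free = j + 1;
            r->shadow[j].request = NULL;
        }
        r->shadow[RING_SIZE - 1].next_free = 0x0fffffff;
        r->shadow_free = 0;
    }

    /* get_id_from_freelist(): pop the head, poison the link to catch misuse. */
    static unsigned long free_list_get(struct ring_state *r)
    {
        unsigned long free = r->shadow_free;

        assert(free < RING_SIZE);               /* BUG_ON() in the driver */
        r->shadow_free = r->shadow[free].next_free;
        r->shadow[free].next_free = 0x0fffffee; /* debug poison */
        return free;
    }

    /* add_id_to_freelist(): refuse slots that were never handed out. */
    static int free_list_put(struct ring_state *r, unsigned long id)
    {
        if (r->shadow[id].request == NULL)
            return -1;
        r->shadow[id].next_free = r->shadow_free;
        r->shadow[id].request = NULL;
        r->shadow_free = id;
        return 0;
    }

    int main(void)
    {
        struct ring_state r;
        int dummy;

        free_list_init(&r);
        unsigned long id = free_list_get(&r);
        r.shadow[id].request = &dummy;
        printf("allocated slot %lu, free head now %lu\n", id, r.shadow_free);
        return free_list_put(&r, id);
    }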
309 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
311 struct blkfront_info *info = rinfo->dev_info;
331 list_add(&gnt_list_entry->node, &rinfo->grants);
339 &rinfo->grants, node) {
350 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
354 BUG_ON(list_empty(&rinfo->grants));
355 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
360 rinfo->persistent_gnts_c--;
376 struct blkfront_ring_info *rinfo)
378 struct grant *gnt_list_entry = get_free_grant(rinfo);
379 struct blkfront_info *info = rinfo->dev_info;
400 struct blkfront_ring_info *rinfo)
402 struct grant *gnt_list_entry = get_free_grant(rinfo);
403 struct blkfront_info *info = rinfo->dev_info;
415 BUG_ON(list_empty(&rinfo->indirect_pages));
416 indirect_page = list_first_entry(&rinfo->indirect_pages,
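The matches from 309 through 416 are the grant cache: fill_grant_buffer() pre-populates rinfo->grants, get_free_grant() takes the first entry and decrements persistent_gnts_c when it hands out one that is still mapped, and the completion path (1471-1505) puts reusable entries back at the head and spent ones at the tail. A simplified single-threaded model of that reuse policy; cache_get/cache_put and the fields are illustrative:

    #include <stdio.h>

    #define REF_INVALID (-1)

    struct grant_entry {
        int ref;                      /* grant reference, REF_INVALID if unmapped */
        struct grant_entry *next;
    };

    struct grant_cache {
        struct grant_entry *head;     /* persistent (reusable) entries first */
        struct grant_entry **tail;    /* where spent entries are appended */
        unsigned int persistent_cnt;  /* entries still carrying a mapping */
    };

    static void cache_init(struct grant_cache *c)
    {
        c->head = NULL;
        c->tail = &c->head;
        c->persistent_cnt = 0;
    }

    /* get_free_grant(): always take the first entry; if it is still mapped
     * it can be reused without setting up a new grant. */
    static struct grant_entry *cache_get(struct grant_cache *c)
    {
        struct grant_entry *g = c->head;

        if (!g)
            return NULL;              /* the driver BUG_ON()s instead */
        c->head = g->next;
        if (!c->head)
            c->tail = &c->head;
        if (g->ref != REF_INVALID)
            c->persistent_cnt--;
        return g;
    }

    /* Completion: still-mapped entries go to the head so they are reused
     * first; unmapped ones are appended at the tail. */
    static void cache_put(struct grant_cache *c, struct grant_entry *g, int still_mapped)
    {
        if (still_mapped) {
            g->next = c->head;
            if (!c->head)
                c->tail = &g->next;
            c->head = g;
            c->persistent_cnt++;
        } else {
            g->ref = REF_INVALID;
            g->next = NULL;
            *c->tail = g;
            c->tail = &g->next;
        }
    }

    int main(void)
    {
        struct grant_cache c;
        struct grant_entry a = { .ref = 42 }, b = { .ref = REF_INVALID };

        cache_init(&c);
        cache_put(&c, &a, 1);         /* persistent, goes to the head */
        cache_put(&c, &b, 0);         /* spent, appended at the tail */
        printf("persistent entries cached: %u\n", c.persistent_cnt);
        printf("first entry ref: %d\n", cache_get(&c)->ref);   /* 42 */
        return 0;
    }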
492 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
493 schedule_work(&rinfo->work);
533 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
539 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
540 rinfo->ring.req_prod_pvt++;
542 id = get_id_from_freelist(rinfo);
543 rinfo->shadow[id].request = req;
544 rinfo->shadow[id].status = REQ_PROCESSING;
545 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
547 rinfo->shadow[id].req.u.rw.id = id;
552 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
554 struct blkfront_info *info = rinfo->dev_info;
559 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
560 ring_req = &rinfo->shadow[id].req;
573 rinfo->shadow[id].status = REQ_WAITING;
581 struct blkfront_ring_info *rinfo;
604 struct blkfront_ring_info *rinfo = setup->rinfo;
611 struct blk_shadow *shadow = &rinfo->shadow[setup->id];
629 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
635 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
702 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
704 struct blkfront_info *info = rinfo->dev_info;
713 .rinfo = rinfo,
735 if (rinfo->persistent_gnts_c < max_grefs) {
739 max_grefs - rinfo->persistent_gnts_c,
742 &rinfo->callback,
744 rinfo,
745 max_grefs - rinfo->persistent_gnts_c);
751 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
752 ring_req = &rinfo->shadow[id].req;
754 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
757 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
764 rinfo->shadow[id].num_sg = num_sg;
803 extra_id = blkif_ring_get_request(rinfo, req,
805 extra_ring_req = &rinfo->shadow[extra_id].req;
811 rinfo->shadow[extra_id].num_sg = 0;
816 rinfo->shadow[extra_id].associated_id = id;
817 rinfo->shadow[id].associated_id = extra_id;
828 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
850 rinfo->shadow[id].status = REQ_WAITING;
853 rinfo->shadow[extra_id].status = REQ_WAITING;
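Lines 803-817 (with the matching completion logic at 1401 and 1433) show that one block-layer request can consume two ring slots: the extra shadow entry records the first one's id in associated_id and vice versa, so whichever response arrives first can find its partner, and both ids are only released once the pair is complete. A minimal sketch of that pairing, using illustrative names:

    #include <stdio.h>

    #define NO_ASSOCIATED_ID ((unsigned long)-1)
    #define NSLOTS 8

    struct shadow {
        unsigned long associated_id;  /* partner slot, or NO_ASSOCIATED_ID */
        int completed;                /* set when the backend has answered */
    };

    static struct shadow shadow[NSLOTS];

    /* Queue a request that needs two slots: cross-link the shadow entries. */
    static void queue_pair(unsigned long id, unsigned long extra_id)
    {
        shadow[id].associated_id = extra_id;
        shadow[extra_id].associated_id = id;
        shadow[id].completed = 0;
        shadow[extra_id].completed = 0;
    }

    /* A response for one slot only finishes the request once its partner
     * (if any) has also completed; the caller then frees both ids. */
    static int complete_slot(unsigned long id)
    {
        unsigned long partner = shadow[id].associated_id;

        shadow[id].completed = 1;
        if (partner == NO_ASSOCIATED_ID)
            return 1;                        /* single-slot request: done */
        return shadow[partner].completed;    /* done only if both answered */
    }

    int main(void)
    {
        queue_pair(2, 5);
        printf("after slot 2: done=%d\n", complete_slot(2));   /* 0 */
        printf("after slot 5: done=%d\n", complete_slot(5));   /* 1 */
        return 0;
    }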
868 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
870 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
875 return blkif_queue_discard_req(req, rinfo);
877 return blkif_queue_rw_req(req, rinfo);
880 static inline void flush_requests(struct blkfront_ring_info *rinfo)
884 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
887 notify_remote_via_irq(rinfo->irq);
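flush_requests() at 880-887 publishes the privately built requests and only raises the event channel when the backend might otherwise miss them, which is the Xen shared-ring "push and check notify" pattern. A simplified single-process model of the notify decision (ring indices are free-running and compared with unsigned wrap-around arithmetic; the memory barriers of the real shared-ring macro are omitted here):

    #include <stdio.h>

    typedef unsigned int ring_idx_t;   /* free-running index, wraps naturally */

    struct ring {
        ring_idx_t req_prod;           /* last index published to the backend */
        ring_idx_t req_prod_pvt;       /* frontend's private producer index */
        ring_idx_t req_event;          /* backend asks for an event at this index */
    };

    /* Publish req_prod_pvt and decide whether an event must be sent:
     * notify only if req_event lies in the newly exposed range (old, new]. */
    static int push_requests_and_check_notify(struct ring *r)
    {
        ring_idx_t old_prod = r->req_prod;
        ring_idx_t new_prod = r->req_prod_pvt;

        /* a write barrier would go here before exposing the new requests */
        r->req_prod = new_prod;
        /* a full barrier would go here before reading req_event */
        return (ring_idx_t)(new_prod - r->req_event) <
               (ring_idx_t)(new_prod - old_prod);
    }

    int main(void)
    {
        struct ring r = { .req_prod = 4, .req_prod_pvt = 4, .req_event = 5 };

        r.req_prod_pvt += 2;           /* two new requests built privately */
        printf("notify: %d\n", push_requests_and_check_notify(&r));   /* 1 */

        r.req_event = 100;             /* backend not waiting yet */
        r.req_prod_pvt += 1;
        printf("notify: %d\n", push_requests_and_check_notify(&r));   /* 0 */
        return 0;
    }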
906 struct blkfront_ring_info *rinfo = NULL;
908 rinfo = get_rinfo(info, qid);
910 spin_lock_irqsave(&rinfo->ring_lock, flags);
911 if (RING_FULL(&rinfo->ring))
914 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
917 if (blkif_queue_request(qd->rq, rinfo))
920 flush_requests(rinfo);
921 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
925 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
930 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1183 /* Already hold rinfo->ring_lock. */
1184 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1186 if (!RING_FULL(&rinfo->ring))
1187 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1190 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1194 spin_lock_irqsave(&rinfo->ring_lock, flags);
1195 kick_pending_request_queues_locked(rinfo);
1196 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1201 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1203 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1204 kick_pending_request_queues(rinfo);
1207 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1210 struct blkfront_info *info = rinfo->dev_info;
1217 if (!list_empty(&rinfo->indirect_pages)) {
1221 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1228 if (!list_empty(&rinfo->grants)) {
1230 &rinfo->grants, node) {
1235 rinfo->persistent_gnts_c--;
1242 BUG_ON(rinfo->persistent_gnts_c != 0);
1249 if (!rinfo->shadow[i].request)
1252 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1253 rinfo->shadow[i].req.u.indirect.nr_segments :
1254 rinfo->shadow[i].req.u.rw.nr_segments;
1256 persistent_gnt = rinfo->shadow[i].grants_used[j];
1263 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1271 persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1278 kvfree(rinfo->shadow[i].grants_used);
1279 rinfo->shadow[i].grants_used = NULL;
1280 kvfree(rinfo->shadow[i].indirect_grants);
1281 rinfo->shadow[i].indirect_grants = NULL;
1282 kvfree(rinfo->shadow[i].sg);
1283 rinfo->shadow[i].sg = NULL;
1287 gnttab_cancel_free_callback(&rinfo->callback);
1290 flush_work(&rinfo->work);
1293 xenbus_teardown_ring((void **)&rinfo->ring.sring, info->nr_ring_pages,
1294 rinfo->ring_ref);
1296 if (rinfo->irq)
1297 unbind_from_irqhandler(rinfo->irq, rinfo);
1298 rinfo->evtchn = rinfo->irq = 0;
1304 struct blkfront_ring_info *rinfo;
1313 for_each_rinfo(info, rinfo, i)
1314 blkif_free_ring(rinfo);
1316 kvfree(info->rinfo);
1317 info->rinfo = NULL;
1384 struct blkfront_ring_info *rinfo,
1390 struct blkfront_info *info = rinfo->dev_info;
1391 struct blk_shadow *s = &rinfo->shadow[*id];
1401 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1433 if (add_id_to_freelist(rinfo, s->associated_id))
1471 list_add(&s->grants_used[i]->node, &rinfo->grants);
1472 rinfo->persistent_gnts_c++;
1480 list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1491 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1492 rinfo->persistent_gnts_c++;
1502 list_add(&indirect_page->lru, &rinfo->indirect_pages);
1505 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1519 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1520 struct blkfront_info *info = rinfo->dev_info;
1528 spin_lock_irqsave(&rinfo->ring_lock, flags);
1530 rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
1532 if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
1534 info->gd->disk_name, rp - rinfo->ring.rsp_cons);
1538 for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1544 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
1557 if (rinfo->shadow[id].status != REQ_WAITING) {
1563 rinfo->shadow[id].status = REQ_PROCESSING;
1564 req = rinfo->shadow[id].request;
1566 op = rinfo->shadow[id].req.operation;
1568 op = rinfo->shadow[id].req.u.indirect.indirect_op;
1582 ret = blkif_completion(&id, rinfo, &bret);
1589 if (add_id_to_freelist(rinfo, id)) {
1622 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1651 rinfo->ring.rsp_cons = i;
1653 if (i != rinfo->ring.req_prod_pvt) {
1655 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1659 rinfo->ring.sring->rsp_event = i + 1;
1661 kick_pending_request_queues_locked(rinfo);
1663 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1672 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
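The interrupt handler at 1519-1672 drains the response ring under ring_lock: it snapshots rsp_prod once, rejects an implausible producer index, copies each response out of the shared page before trusting it, checks that the returned id refers to a slot that is actually waiting, and finally advances rsp_cons and rearms rsp_event. A condensed user-space model of that consume loop; the response struct and the checks are illustrative stand-ins for the driver's validation:

    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 8u

    struct response {
        unsigned long id;
        int status;
    };

    struct resp_ring {
        unsigned int rsp_prod;             /* written by the "backend" */
        unsigned int rsp_cons;             /* consumed so far */
        struct response ring[RING_SIZE];   /* shared area */
    };

    enum slot_state { SLOT_FREE, SLOT_WAITING };
    static enum slot_state slot_state[RING_SIZE];

    /* Drain all published responses; returns the number consumed, or -1 if
     * the producer index is implausible (more pending than the ring holds). */
    static int drain_responses(struct resp_ring *r)
    {
        unsigned int rp = r->rsp_prod;     /* snapshot once, like READ_ONCE() */
        int handled = 0;

        if (rp - r->rsp_cons > RING_SIZE)
            return -1;                     /* corrupted shared state */

        for (unsigned int i = r->rsp_cons; i != rp; i++) {
            struct response rsp;

            /* copy out before validating, so the other side cannot change
             * the fields after they have been checked */
            memcpy(&rsp, &r->ring[i % RING_SIZE], sizeof(rsp));

            if (rsp.id >= RING_SIZE || slot_state[rsp.id] != SLOT_WAITING) {
                printf("bad or duplicate response id %lu\n", rsp.id);
                continue;
            }
            slot_state[rsp.id] = SLOT_FREE;
            handled++;
        }
        r->rsp_cons = rp;
        return handled;
    }

    int main(void)
    {
        struct resp_ring r = { 0 };

        slot_state[3] = SLOT_WAITING;
        r.ring[0].id = 3;
        r.rsp_prod = 1;
        printf("handled %d response(s)\n", drain_responses(&r));
        return 0;
    }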
1682 struct blkfront_ring_info *rinfo)
1686 struct blkfront_info *info = rinfo->dev_info;
1690 info->nr_ring_pages, rinfo->ring_ref);
1694 XEN_FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1696 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1700 err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
1701 0, "blkif", rinfo);
1707 rinfo->irq = err;
1720 struct blkfront_ring_info *rinfo, const char *dir)
1725 struct blkfront_info *info = rinfo->dev_info;
1728 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1739 "%u", rinfo->ring_ref[i]);
1747 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1778 struct blkfront_ring_info *rinfo;
1796 for_each_rinfo(info, rinfo, i) {
1798 err = setup_blkring(dev, rinfo);
1821 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
1843 for_each_rinfo(info, rinfo, i) {
1846 err = write_per_ring_nodes(xbt, rinfo, path);
1875 for_each_rinfo(info, rinfo, i) {
1879 rinfo->shadow[j].req.u.rw.id = j + 1;
1880 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1899 struct blkfront_ring_info *rinfo;
1911 info->rinfo_size = struct_size(info->rinfo, shadow,
1913 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
1914 if (!info->rinfo) {
1920 for_each_rinfo(info, rinfo, i) {
1921 INIT_LIST_HEAD(&rinfo->indirect_pages);
1922 INIT_LIST_HEAD(&rinfo->grants);
1923 rinfo->dev_info = info;
1924 INIT_WORK(&rinfo->work, blkif_restart_queue);
1925 spin_lock_init(&rinfo->ring_lock);
2014 struct blkfront_ring_info *rinfo;
2022 for_each_rinfo(info, rinfo, r_index) {
2023 rc = blkfront_setup_indirect(rinfo);
2032 for_each_rinfo(info, rinfo, r_index) {
2034 kick_pending_request_queues(rinfo);
2065 struct blkfront_ring_info *rinfo;
2071 for_each_rinfo(info, rinfo, i) {
2073 struct blk_shadow *shadow = rinfo->shadow;
2123 struct blkfront_ring_info *rinfo;
2135 for_each_rinfo(info, rinfo, i) {
2137 gnttab_cancel_free_callback(&rinfo->callback);
2140 flush_work(&rinfo->work);
2159 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2163 struct blkfront_info *info = rinfo->dev_info;
2183 err = fill_grant_buffer(rinfo,
2196 BUG_ON(!list_empty(&rinfo->indirect_pages));
2202 list_add(&indirect_page->lru, &rinfo->indirect_pages);
2207 rinfo->shadow[i].grants_used =
2209 sizeof(rinfo->shadow[i].grants_used[0]),
2211 rinfo->shadow[i].sg = kvcalloc(psegs,
2212 sizeof(rinfo->shadow[i].sg[0]),
2215 rinfo->shadow[i].indirect_grants =
2217 sizeof(rinfo->shadow[i].indirect_grants[0]),
2219 if ((rinfo->shadow[i].grants_used == NULL) ||
2220 (rinfo->shadow[i].sg == NULL) ||
2222 (rinfo->shadow[i].indirect_grants == NULL)))
2224 sg_init_table(rinfo->shadow[i].sg, psegs);
2233 kvfree(rinfo->shadow[i].grants_used);
2234 rinfo->shadow[i].grants_used = NULL;
2235 kvfree(rinfo->shadow[i].sg);
2236 rinfo->shadow[i].sg = NULL;
2237 kvfree(rinfo->shadow[i].indirect_grants);
2238 rinfo->shadow[i].indirect_grants = NULL;
2240 if (!list_empty(&rinfo->indirect_pages)) {
2242 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
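blkfront_setup_indirect() (2159-2242) allocates several parallel per-slot arrays and, if any allocation fails, walks every slot again and frees whatever was already set up, relying on the zero-initialised pointers and on kvfree(NULL) being a no-op. A compact user-space sketch of that allocate-then-unwind pattern; struct slot, setup_slots and the sizes are illustrative:

    #include <stdlib.h>
    #include <string.h>

    #define NR_SLOTS 4

    struct slot {
        void **grants_used;
        void *sg;
        void **indirect_grants;
    };

    /* Allocate the per-slot arrays; on any failure free everything that was
     * already allocated (earlier slots included) and report the error,
     * mirroring the out-of-memory unwind at 2233-2238. */
    static int setup_slots(struct slot *slots, size_t nsegs)
    {
        memset(slots, 0, NR_SLOTS * sizeof(*slots));

        for (int i = 0; i < NR_SLOTS; i++) {
            slots[i].grants_used = calloc(nsegs, sizeof(void *));
            slots[i].sg = calloc(nsegs, 64);   /* stand-in for the sg table */
            slots[i].indirect_grants = calloc(nsegs, sizeof(void *));
            if (!slots[i].grants_used || !slots[i].sg ||
                !slots[i].indirect_grants)
                goto out_of_memory;
        }
        return 0;

    out_of_memory:
        for (int i = 0; i < NR_SLOTS; i++) {
            free(slots[i].grants_used);
            slots[i].grants_used = NULL;
            free(slots[i].sg);
            slots[i].sg = NULL;
            free(slots[i].indirect_grants);
            slots[i].indirect_grants = NULL;
        }
        return -1;
    }

    int main(void)
    {
        struct slot slots[NR_SLOTS];

        return setup_slots(slots, 32);
    }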
2320 struct blkfront_ring_info *rinfo;
2375 for_each_rinfo(info, rinfo, i) {
2376 err = blkfront_setup_indirect(rinfo);
2397 for_each_rinfo(info, rinfo, i)
2398 kick_pending_request_queues(rinfo);
2528 struct blkfront_ring_info *rinfo;
2530 for_each_rinfo(info, rinfo, i) {
2534 spin_lock_irqsave(&rinfo->ring_lock, flags);
2536 if (rinfo->persistent_gnts_c == 0) {
2537 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2541 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
2548 rinfo->persistent_gnts_c--;
2553 list_splice_tail(&grants, &rinfo->grants);
2555 spin_unlock_irqrestore(&rinfo->ring_lock, flags);