Lines matching refs: rinfo (drivers/block/xen-blkfront.c)

225 	struct blkfront_ring_info *rinfo;
270 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
275 for ((ptr) = (info)->rinfo, (idx) = 0; \
283 return (void *)info->rinfo + i * info->rinfo_size;
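The references at 275 and 283 come from the per-ring bookkeeping: all blkfront_ring_info structures live in one flat allocation of nr_rings entries, each info->rinfo_size bytes long (the size is computed at runtime because the shadow[] flexible array is sized per ring, see 2011 below), so both the iterator macro and the lookup helper step through the array by byte offset. A sketch of the two helpers reconstructed around those fragments (details outside the fragments may differ between kernel versions):

#define for_each_rinfo(info, ptr, idx)                                  \
        for ((ptr) = (info)->rinfo, (idx) = 0;                          \
             (idx) < (info)->nr_rings;                                  \
             (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)

static inline struct blkfront_ring_info *
get_rinfo(const struct blkfront_info *info, unsigned int i)
{
        BUG_ON(i >= info->nr_rings);
        /* rinfo_size is not sizeof(*rinfo), so index by byte offset. */
        return (void *)info->rinfo + i * info->rinfo_size;
}
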
286 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
288 unsigned long free = rinfo->shadow_free;
290 BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
291 rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
292 rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
296 static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
299 if (rinfo->shadow[id].req.u.rw.id != id)
301 if (rinfo->shadow[id].request == NULL)
303 rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
304 rinfo->shadow[id].request = NULL;
305 rinfo->shadow_free = id;
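Lines 286-305 implement the shadow free list: unused shadow entries are chained through req.u.rw.id, with rinfo->shadow_free holding the index of the first free slot. A reconstruction of both helpers around the fragments shown (the sanity checks at 299/301 fail the call in the real driver; the exact error value used here is an assumption):

static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
        unsigned long free = rinfo->shadow_free;

        BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
        /* Pop the head of the list; the next free index is stored in-place. */
        rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
        rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
        return free;
}

static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
                              unsigned long id)
{
        /* Reject ids that were never handed out or were already freed. */
        if (rinfo->shadow[id].req.u.rw.id != id)
                return -EINVAL;
        if (rinfo->shadow[id].request == NULL)
                return -EINVAL;
        /* Push the entry back onto the head of the free list. */
        rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
        rinfo->shadow[id].request = NULL;
        rinfo->shadow_free = id;
        return 0;
}
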
309 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
311 struct blkfront_info *info = rinfo->dev_info;
331 list_add(&gnt_list_entry->node, &rinfo->grants);
339 &rinfo->grants, node) {
350 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
354 BUG_ON(list_empty(&rinfo->grants));
355 gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
360 rinfo->persistent_gnts_c--;
376 struct blkfront_ring_info *rinfo)
378 struct grant *gnt_list_entry = get_free_grant(rinfo);
379 struct blkfront_info *info = rinfo->dev_info;
400 struct blkfront_ring_info *rinfo)
402 struct grant *gnt_list_entry = get_free_grant(rinfo);
403 struct blkfront_info *info = rinfo->dev_info;
415 BUG_ON(list_empty(&rinfo->indirect_pages));
416 indirect_page = list_first_entry(&rinfo->indirect_pages,
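Lines 350-416 cover the per-ring grant cache: rinfo->grants is a list of struct grant entries, of which persistent_gnts_c still carry a live grant reference. get_free_grant() pops one entry; get_grant() and get_indirect_grant() (376/400) then either reuse the existing reference or set up a fresh one, and get_indirect_grant() additionally takes a backing page from rinfo->indirect_pages when persistent grants are not in use (415/416). A sketch of the common helper, reconstructed around the fragments:

static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
{
        struct grant *gnt_list_entry;

        BUG_ON(list_empty(&rinfo->grants));
        gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
                                          node);
        list_del(&gnt_list_entry->node);

        /* Entries with a valid gref are persistent grants being reused. */
        if (gnt_list_entry->gref != GRANT_INVALID_REF)
                rinfo->persistent_gnts_c--;

        return gnt_list_entry;
}
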
492 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
493 schedule_work(&rinfo->work);
545 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
551 *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
552 rinfo->ring.req_prod_pvt++;
554 id = get_id_from_freelist(rinfo);
555 rinfo->shadow[id].request = req;
556 rinfo->shadow[id].status = REQ_PROCESSING;
557 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
559 rinfo->shadow[id].req.u.rw.id = id;
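Lines 545-559 are the common request-allocation step: a slot is claimed on the shared ring, a shadow id is taken from the free list, and the shadow entry is initialised so the response handler can find the request later. A reconstruction around those fragments (the function returns the shadow id; the ring_req out-parameter points at the slot on the ring page):

static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
                                            struct request *req,
                                            struct blkif_request **ring_req)
{
        unsigned long id;

        *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
        rinfo->ring.req_prod_pvt++;

        id = get_id_from_freelist(rinfo);
        rinfo->shadow[id].request = req;
        rinfo->shadow[id].status = REQ_PROCESSING;
        rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

        rinfo->shadow[id].req.u.rw.id = id;

        return id;
}
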
564 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
566 struct blkfront_info *info = rinfo->dev_info;
571 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
572 ring_req = &rinfo->shadow[id].req;
585 rinfo->shadow[id].status = REQ_WAITING;
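Lines 564-585 are the discard path: the request is built in the shadow copy first (572) and only copied onto the ring page once complete, after which the shadow entry is marked REQ_WAITING (585). A hedged sketch of that flow; the u.discard field assignments follow the blkif protocol headers and are filled in from context:

static int blkif_queue_discard_req(struct request *req,
                                   struct blkfront_ring_info *rinfo)
{
        struct blkfront_info *info = rinfo->dev_info;
        struct blkif_request *ring_req, *final_ring_req;
        unsigned long id;

        id = blkif_ring_get_request(rinfo, req, &final_ring_req);
        ring_req = &rinfo->shadow[id].req;      /* build in the shadow copy */

        ring_req->operation = BLKIF_OP_DISCARD;
        ring_req->u.discard.id = id;
        ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
        ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
        ring_req->u.discard.flag =
                (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard) ?
                BLKIF_DISCARD_SECURE : 0;

        /* Publish to the ring page in one go, then arm the shadow entry. */
        *final_ring_req = *ring_req;
        rinfo->shadow[id].status = REQ_WAITING;

        return 0;
}
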
593 struct blkfront_ring_info *rinfo;
616 struct blkfront_ring_info *rinfo = setup->rinfo;
623 struct blk_shadow *shadow = &rinfo->shadow[setup->id];
641 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
647 gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
714 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
716 struct blkfront_info *info = rinfo->dev_info;
725 .rinfo = rinfo,
747 if (rinfo->persistent_gnts_c < max_grefs) {
751 max_grefs - rinfo->persistent_gnts_c,
754 &rinfo->callback,
756 rinfo,
757 max_grefs - rinfo->persistent_gnts_c);
763 id = blkif_ring_get_request(rinfo, req, &final_ring_req);
764 ring_req = &rinfo->shadow[id].req;
766 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
769 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
776 rinfo->shadow[id].num_sg = num_sg;
815 extra_id = blkif_ring_get_request(rinfo, req,
817 extra_ring_req = &rinfo->shadow[extra_id].req;
823 rinfo->shadow[extra_id].num_sg = 0;
828 rinfo->shadow[extra_id].associated_id = id;
829 rinfo->shadow[id].associated_id = extra_id;
840 for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
862 rinfo->shadow[id].status = REQ_WAITING;
865 rinfo->shadow[extra_id].status = REQ_WAITING;
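Lines 714-865 are the read/write path. Its first rinfo-dependent step (747-757) makes sure enough grant references are available before a ring slot is claimed: any shortfall beyond the cached persistent grants is allocated up front, and on failure a grant-table free callback (serviced by the callback at 492/493, which schedules rinfo->work) is registered so the queue can be restarted once references free up. A sketch of that step; new_persistent_gnts and setup.gref_head are local state of the surrounding function:

        if (rinfo->persistent_gnts_c < max_grefs) {
                new_persistent_gnts = true;

                /* Reserve the references the persistent-grant cache cannot cover. */
                if (gnttab_alloc_grant_references(
                    max_grefs - rinfo->persistent_gnts_c,
                    &setup.gref_head) < 0) {
                        gnttab_request_free_callback(
                                &rinfo->callback,
                                blkif_restart_queue_callback,
                                rinfo,
                                max_grefs - rinfo->persistent_gnts_c);
                        return 1;       /* caller stops the hardware queue */
                }
        } else
                new_persistent_gnts = false;
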
880 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
882 if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
887 return blkif_queue_discard_req(req, rinfo);
889 return blkif_queue_rw_req(req, rinfo);
892 static inline void flush_requests(struct blkfront_ring_info *rinfo)
896 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
899 notify_remote_via_irq(rinfo->irq);
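Lines 892-899 are the doorbell: after requests have been copied onto the ring, the producer index is published and the backend is only interrupted when the ring macros say a notification is actually needed. A reconstruction around those fragments:

static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
        int notify;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);

        if (notify)
                notify_remote_via_irq(rinfo->irq);
}
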
918 struct blkfront_ring_info *rinfo = NULL;
920 rinfo = get_rinfo(info, qid);
922 spin_lock_irqsave(&rinfo->ring_lock, flags);
923 if (RING_FULL(&rinfo->ring))
926 if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
929 if (blkif_queue_request(qd->rq, rinfo))
932 flush_requests(rinfo);
933 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
937 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
942 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
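Lines 918-942 are from the blk-mq queue_rq handler: the hardware queue number selects the ring via get_rinfo(), and everything from the RING_FULL check to flush_requests() runs under rinfo->ring_lock. A sketch of the overall flow (labels and return codes reconstructed from context; a full ring or failed queue attempt stops the hardware queue and reports a resource-busy status, an invalid flush reports an I/O error):

static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *qd)
{
        unsigned long flags;
        int qid = hctx->queue_num;
        struct blkfront_info *info = hctx->queue->queuedata;
        struct blkfront_ring_info *rinfo = NULL;

        rinfo = get_rinfo(info, qid);
        blk_mq_start_request(qd->rq);
        spin_lock_irqsave(&rinfo->ring_lock, flags);
        if (RING_FULL(&rinfo->ring))
                goto out_busy;

        if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
                goto out_err;

        if (blkif_queue_request(qd->rq, rinfo))
                goto out_busy;

        flush_requests(rinfo);
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
        return BLK_STS_OK;

out_err:
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
        return BLK_STS_IOERR;

out_busy:
        blk_mq_stop_hw_queue(hctx);
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
        return BLK_STS_DEV_RESOURCE;
}
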
1219 struct blkfront_ring_info *rinfo;
1227 for_each_rinfo(info, rinfo, i) {
1229 gnttab_cancel_free_callback(&rinfo->callback);
1232 flush_work(&rinfo->work);
1249 /* Already hold rinfo->ring_lock. */
1250 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1252 if (!RING_FULL(&rinfo->ring))
1253 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1256 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1260 spin_lock_irqsave(&rinfo->ring_lock, flags);
1261 kick_pending_request_queues_locked(rinfo);
1262 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1267 struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1269 if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1270 kick_pending_request_queues(rinfo);
1273 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1276 struct blkfront_info *info = rinfo->dev_info;
1283 if (!list_empty(&rinfo->indirect_pages)) {
1287 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1294 if (!list_empty(&rinfo->grants)) {
1296 &rinfo->grants, node) {
1301 rinfo->persistent_gnts_c--;
1308 BUG_ON(rinfo->persistent_gnts_c != 0);
1315 if (!rinfo->shadow[i].request)
1318 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1319 rinfo->shadow[i].req.u.indirect.nr_segments :
1320 rinfo->shadow[i].req.u.rw.nr_segments;
1322 persistent_gnt = rinfo->shadow[i].grants_used[j];
1329 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1337 persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1344 kvfree(rinfo->shadow[i].grants_used);
1345 rinfo->shadow[i].grants_used = NULL;
1346 kvfree(rinfo->shadow[i].indirect_grants);
1347 rinfo->shadow[i].indirect_grants = NULL;
1348 kvfree(rinfo->shadow[i].sg);
1349 rinfo->shadow[i].sg = NULL;
1353 gnttab_cancel_free_callback(&rinfo->callback);
1356 flush_work(&rinfo->work);
1360 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1361 gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
1362 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1365 free_pages_exact(rinfo->ring.sring,
1367 rinfo->ring.sring = NULL;
1369 if (rinfo->irq)
1370 unbind_from_irqhandler(rinfo->irq, rinfo);
1371 rinfo->evtchn = rinfo->irq = 0;
1377 struct blkfront_ring_info *rinfo;
1386 for_each_rinfo(info, rinfo, i)
1387 blkif_free_ring(rinfo);
1389 kvfree(info->rinfo);
1390 info->rinfo = NULL;
1457 struct blkfront_ring_info *rinfo,
1463 struct blkfront_info *info = rinfo->dev_info;
1464 struct blk_shadow *s = &rinfo->shadow[*id];
1474 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1506 if (add_id_to_freelist(rinfo, s->associated_id))
1544 list_add(&s->grants_used[i]->node, &rinfo->grants);
1545 rinfo->persistent_gnts_c++;
1553 list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1564 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1565 rinfo->persistent_gnts_c++;
1575 list_add(&indirect_page->lru, &rinfo->indirect_pages);
1578 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1592 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1593 struct blkfront_info *info = rinfo->dev_info;
1601 spin_lock_irqsave(&rinfo->ring_lock, flags);
1603 rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
1605 if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
1607 info->gd->disk_name, rp - rinfo->ring.rsp_cons);
1611 for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1617 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
1630 if (rinfo->shadow[id].status != REQ_WAITING) {
1636 rinfo->shadow[id].status = REQ_PROCESSING;
1637 req = rinfo->shadow[id].request;
1639 op = rinfo->shadow[id].req.operation;
1641 op = rinfo->shadow[id].req.u.indirect.indirect_op;
1655 ret = blkif_completion(&id, rinfo, &bret);
1662 if (add_id_to_freelist(rinfo, id)) {
1695 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1724 rinfo->ring.rsp_cons = i;
1726 if (i != rinfo->ring.req_prod_pvt) {
1728 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1732 rinfo->ring.sring->rsp_event = i + 1;
1734 kick_pending_request_queues_locked(rinfo);
1736 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1745 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
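Lines 1592-1745 are the interrupt handler. The response loop runs under rinfo->ring_lock: it snapshots rsp_prod, copies each response off the shared page with RING_COPY_RESPONSE (so the backend cannot change it after validation), checks that the id maps to a shadow entry in REQ_WAITING state, completes the request, and returns the id to the free list; afterwards it either rescans (if new requests were posted meanwhile) or re-arms rsp_event. A condensed sketch with the per-operation handling and the error paths elided:

        spin_lock_irqsave(&rinfo->ring_lock, flags);
 again:
        rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
        virt_rmb();     /* see the responses that belong to rp */
        if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp))
                goto err;       /* backend produced too many responses */

        for (i = rinfo->ring.rsp_cons; i != rp; i++) {
                unsigned long id;

                RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
                id = bret.id;

                if (id >= BLK_RING_SIZE(info) ||
                    rinfo->shadow[id].status != REQ_WAITING)
                        goto err;       /* bogus or duplicated id */

                rinfo->shadow[id].status = REQ_PROCESSING;
                req = rinfo->shadow[id].request;

                /* ... per-operation handling, blkif_completion(&id, rinfo, &bret) ... */

                if (add_id_to_freelist(rinfo, id))
                        goto err;

                blk_mq_complete_request(req);
        }

        rinfo->ring.rsp_cons = i;

        if (i != rinfo->ring.req_prod_pvt) {
                int more_to_do;

                RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
                if (more_to_do)
                        goto again;
        } else
                rinfo->ring.sring->rsp_event = i + 1;

        kick_pending_request_queues_locked(rinfo);
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
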
1755 struct blkfront_ring_info *rinfo)
1759 struct blkfront_info *info = rinfo->dev_info;
1764 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1772 FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1774 err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1777 rinfo->ring.sring = NULL;
1781 rinfo->ring_ref[i] = gref[i];
1783 err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1787 err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
1788 0, "blkif", rinfo);
1794 rinfo->irq = err;
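Lines 1755-1794 build one ring: the shared page(s) are allocated and initialised, granted to the backend with xenbus_grant_ring(), and an event channel is allocated and bound to blkif_interrupt with a lateeoi handler. A hedged sketch of the sequence (error handling, including the blkif_free() unwind and xenbus_dev_fatal() reporting, is abbreviated):

static int setup_blkring(struct xenbus_device *dev,
                         struct blkfront_ring_info *rinfo)
{
        struct blkfront_info *info = rinfo->dev_info;
        unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
        grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
        struct blkif_sring *sring;
        unsigned int i;
        int err;

        for (i = 0; i < info->nr_ring_pages; i++)
                rinfo->ring_ref[i] = GRANT_INVALID_REF;

        sring = alloc_pages_exact(ring_size, GFP_NOIO);
        if (!sring)
                return -ENOMEM;
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&rinfo->ring, sring, ring_size);

        err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
        if (err < 0) {
                free_pages_exact(sring, ring_size);
                rinfo->ring.sring = NULL;
                return err;
        }
        for (i = 0; i < info->nr_ring_pages; i++)
                rinfo->ring_ref[i] = gref[i];

        err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
        if (err)
                return err;

        err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
                                                0, "blkif", rinfo);
        if (err <= 0)
                return err;
        rinfo->irq = err;

        return 0;
}
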
1807 struct blkfront_ring_info *rinfo, const char *dir)
1812 struct blkfront_info *info = rinfo->dev_info;
1815 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1826 "%u", rinfo->ring_ref[i]);
1834 err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1871 struct blkfront_ring_info *rinfo;
1889 for_each_rinfo(info, rinfo, i) {
1891 err = setup_blkring(dev, rinfo);
1914 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
1936 for_each_rinfo(info, rinfo, i) {
1939 err = write_per_ring_nodes(xbt, rinfo, path);
1968 for_each_rinfo(info, rinfo, i) {
1972 rinfo->shadow[j].req.u.rw.id = j + 1;
1973 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
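Lines 1968-1973 (re)initialise the shadow free list for every ring before talking to the backend: entry j is chained to entry j + 1 through req.u.rw.id, and the last entry carries an end-of-list sentinel. A sketch of that loop as it appears in talk_to_blkback():

        for_each_rinfo(info, rinfo, i) {
                unsigned int j;

                /* Chain every shadow entry onto the free list ... */
                for (j = 0; j < BLK_RING_SIZE(info); j++)
                        rinfo->shadow[j].req.u.rw.id = j + 1;
                /* ... and terminate the chain with a sentinel value. */
                rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
        }
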
1999 struct blkfront_ring_info *rinfo;
2011 info->rinfo_size = struct_size(info->rinfo, shadow,
2013 info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
2014 if (!info->rinfo) {
2020 for_each_rinfo(info, rinfo, i) {
2021 INIT_LIST_HEAD(&rinfo->indirect_pages);
2022 INIT_LIST_HEAD(&rinfo->grants);
2023 rinfo->dev_info = info;
2024 INIT_WORK(&rinfo->work, blkif_restart_queue);
2025 spin_lock_init(&rinfo->ring_lock);
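Lines 1999-2025 are from negotiate_mq(): the whole rinfo array is a single kvcalloc() of nr_rings elements, each struct_size(info->rinfo, shadow, BLK_RING_SIZE(info)) bytes (which is why 275/283 index by byte offset), and every ring then gets its lists, work item and lock initialised. A sketch of that sequence (error reporting abbreviated):

        info->rinfo_size = struct_size(info->rinfo, shadow,
                                       BLK_RING_SIZE(info));
        info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
        if (!info->rinfo) {
                info->nr_rings = 0;
                return -ENOMEM;
        }

        for_each_rinfo(info, rinfo, i) {
                INIT_LIST_HEAD(&rinfo->indirect_pages);
                INIT_LIST_HEAD(&rinfo->grants);
                rinfo->dev_info = info;
                INIT_WORK(&rinfo->work, blkif_restart_queue);
                spin_lock_init(&rinfo->ring_lock);
        }
        return 0;
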
2114 struct blkfront_ring_info *rinfo;
2122 for_each_rinfo(info, rinfo, r_index) {
2123 rc = blkfront_setup_indirect(rinfo);
2132 for_each_rinfo(info, rinfo, r_index) {
2134 kick_pending_request_queues(rinfo);
2165 struct blkfront_ring_info *rinfo;
2171 for_each_rinfo(info, rinfo, i) {
2173 struct blk_shadow *shadow = rinfo->shadow;
2270 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2274 struct blkfront_info *info = rinfo->dev_info;
2294 err = fill_grant_buffer(rinfo,
2307 BUG_ON(!list_empty(&rinfo->indirect_pages));
2313 list_add(&indirect_page->lru, &rinfo->indirect_pages);
2318 rinfo->shadow[i].grants_used =
2320 sizeof(rinfo->shadow[i].grants_used[0]),
2322 rinfo->shadow[i].sg = kvcalloc(psegs,
2323 sizeof(rinfo->shadow[i].sg[0]),
2326 rinfo->shadow[i].indirect_grants =
2328 sizeof(rinfo->shadow[i].indirect_grants[0]),
2330 if ((rinfo->shadow[i].grants_used == NULL) ||
2331 (rinfo->shadow[i].sg == NULL) ||
2333 (rinfo->shadow[i].indirect_grants == NULL)))
2335 sg_init_table(rinfo->shadow[i].sg, psegs);
2344 kvfree(rinfo->shadow[i].grants_used);
2345 rinfo->shadow[i].grants_used = NULL;
2346 kvfree(rinfo->shadow[i].sg);
2347 rinfo->shadow[i].sg = NULL;
2348 kvfree(rinfo->shadow[i].indirect_grants);
2349 rinfo->shadow[i].indirect_grants = NULL;
2351 if (!list_empty(&rinfo->indirect_pages)) {
2353 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
2432 struct blkfront_ring_info *rinfo;
2487 for_each_rinfo(info, rinfo, i) {
2488 err = blkfront_setup_indirect(rinfo);
2509 for_each_rinfo(info, rinfo, i)
2510 kick_pending_request_queues(rinfo);
2745 struct blkfront_ring_info *rinfo;
2747 for_each_rinfo(info, rinfo, i) {
2750 spin_lock_irqsave(&rinfo->ring_lock, flags);
2752 if (rinfo->persistent_gnts_c == 0) {
2753 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2757 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
2764 rinfo->persistent_gnts_c--;
2766 list_add_tail(&gnt_list_entry->node, &rinfo->grants);
2769 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
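Lines 2745-2769 walk every ring's grant cache under ring_lock and hand unused persistent grants back, which is how blkfront caps the memory pinned by persistent grants. A condensed sketch of the per-ring loop; the test for grants the backend still has mapped is elided because it varies between kernel versions:

        for_each_rinfo(info, rinfo, i) {
                struct grant *gnt_list_entry, *tmp;

                spin_lock_irqsave(&rinfo->ring_lock, flags);

                if (rinfo->persistent_gnts_c == 0) {
                        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
                        continue;
                }

                list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
                                         node) {
                        if (gnt_list_entry->gref == GRANT_INVALID_REF)
                                continue;
                        /* (grants still mapped by the backend are skipped here) */

                        list_del(&gnt_list_entry->node);
                        rinfo->persistent_gnts_c--;
                        gnt_list_entry->gref = GRANT_INVALID_REF;
                        /* Keep the entry, now without a grant, for later reuse. */
                        list_add_tail(&gnt_list_entry->node, &rinfo->grants);
                }

                spin_unlock_irqrestore(&rinfo->ring_lock, flags);
        }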