Lines Matching refs:ring
73 * to fill the ring, but since this might become too high, especially with
109 * Maximum order of pages to be used for the shared ring between front and
114 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
143 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
144 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
147 static void make_response(struct xen_blkif_ring *ring, u64 id,
168 static int add_persistent_gnt(struct xen_blkif_ring *ring,
173 struct xen_blkif *blkif = ring->blkif;
175 if (ring->persistent_gnt_c >= max_pgrants) {
181 new = &ring->persistent_gnts.rb_node;
199 rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
200 ring->persistent_gnt_c++;
201 atomic_inc(&ring->persistent_gnt_in_use);
205 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
211 node = ring->persistent_gnts.rb_node;
225 atomic_inc(&ring->persistent_gnt_in_use);
232 static void put_persistent_gnt(struct xen_blkif_ring *ring,
239 atomic_dec(&ring->persistent_gnt_in_use);
242 static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
273 gnttab_page_cache_put(&ring->free_pages, pages,
291 struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
298 while (!list_empty(&ring->persistent_purge_list)) {
299 persistent_gnt = list_first_entry(&ring->persistent_purge_list,
314 gnttab_page_cache_put(&ring->free_pages, pages,
323 gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
327 static void purge_persistent_gnt(struct xen_blkif_ring *ring)
335 if (work_busy(&ring->persistent_purge_work)) {
340 if (ring->persistent_gnt_c < max_pgrants ||
341 (ring->persistent_gnt_c == max_pgrants &&
342 !ring->blkif->vbd.overflow_max_grants)) {
346 num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
347 num_clean = min(ring->persistent_gnt_c, num_clean);
363 BUG_ON(!list_empty(&ring->persistent_purge_list));
364 root = &ring->persistent_gnts;
379 &ring->persistent_purge_list);
394 ring->persistent_gnt_c -= total;
395 ring->blkif->vbd.overflow_max_grants = 0;
398 schedule_work(&ring->persistent_purge_work);
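
The persistent-grant helpers above (add_persistent_gnt(), get_persistent_gnt(), put_persistent_gnt() and the purge work) keep one tree per ring, keyed by grant reference, with a count capped at max_pgrants and an "in use" marker per entry. The kernel stores the nodes in an rb-tree via rb_link_node()/rb_insert_color() and tracks usage with atomics and flag bits; the user-space sketch below only illustrates the keyed insert/lookup/release pattern with a plain binary search tree, and every name in it (pgnt, MAX_PGRANTS, add_pgnt(), ...) is made up for the example.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

#define MAX_PGRANTS 8			/* stand-in for the max_pgrants parameter */

struct pgnt {
	uint32_t gref;			/* grant reference, used as the tree key */
	bool in_use;			/* like the PERSISTENT_GNT_ACTIVE flag   */
	struct pgnt *left, *right;
};

static struct pgnt *root;		/* like ring->persistent_gnts            */
static unsigned int pgnt_count;		/* like ring->persistent_gnt_c           */
static unsigned int pgnt_in_use;	/* like ring->persistent_gnt_in_use      */

/* Insert a new grant; refuse once the cap is hit or on a duplicate key. */
static int add_pgnt(uint32_t gref)
{
	struct pgnt **link = &root, *node;

	if (pgnt_count >= MAX_PGRANTS)
		return -1;		/* the overflow_max_grants case  */

	while (*link) {
		if (gref < (*link)->gref)
			link = &(*link)->left;
		else if (gref > (*link)->gref)
			link = &(*link)->right;
		else
			return -1;	/* grant already tracked         */
	}
	node = calloc(1, sizeof(*node));
	if (!node)
		return -1;
	node->gref = gref;
	node->in_use = true;
	*link = node;
	pgnt_count++;
	pgnt_in_use++;
	return 0;
}

/* Look a grant up by reference and mark it busy, or fail if it already is. */
static struct pgnt *get_pgnt(uint32_t gref)
{
	struct pgnt *n = root;

	while (n) {
		if (gref < n->gref)
			n = n->left;
		else if (gref > n->gref)
			n = n->right;
		else {
			if (n->in_use)
				return NULL;
			n->in_use = true;
			pgnt_in_use++;
			return n;
		}
	}
	return NULL;
}

/* Release a grant once the request using it has completed. */
static void put_pgnt(struct pgnt *gnt)
{
	gnt->in_use = false;
	pgnt_in_use--;
}

int main(void)
{
	add_pgnt(42);
	add_pgnt(7);
	put_pgnt(root);				/* root holds gref 42 here   */
	struct pgnt *g = get_pgnt(42);		/* a later request reuses it */
	printf("tracked=%u in_use=%u reused gref=%u\n",
	       pgnt_count, pgnt_in_use, g ? g->gref : 0);
	return 0;
}
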
409 static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
414 spin_lock_irqsave(&ring->pending_free_lock, flags);
415 if (!list_empty(&ring->pending_free)) {
416 req = list_entry(ring->pending_free.next, struct pending_req,
420 spin_unlock_irqrestore(&ring->pending_free_lock, flags);
428 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
433 spin_lock_irqsave(&ring->pending_free_lock, flags);
434 was_empty = list_empty(&ring->pending_free);
435 list_add(&req->free_list, &ring->pending_free);
436 spin_unlock_irqrestore(&ring->pending_free_lock, flags);
438 wake_up(&ring->pending_free_wq);
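
alloc_req() and free_req() above hand pending_req structures out of a per-ring free list guarded by pending_free_lock, and free_req() wakes pending_free_wq only when the list was previously empty, so the scheduler thread is woken exactly when requests become available again. A minimal user-space analogue of that pattern, using a pthread mutex and condition variable in place of the spinlock and wait queue, might look like this (all names are invented for the sketch):

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

struct req {
	struct req *next;
	int id;
};

static struct req *free_list;		/* plays the role of ring->pending_free */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t free_cond = PTHREAD_COND_INITIALIZER;

/* Take a request from the pool; NULL means "out of requests" (the driver
 * then bumps ring->st_oo_req and retries once the pool refills). */
static struct req *alloc_req(void)
{
	struct req *r;

	pthread_mutex_lock(&free_lock);
	r = free_list;
	if (r)
		free_list = r->next;
	pthread_mutex_unlock(&free_lock);
	return r;
}

/* Return a request; wake waiters only when the pool was empty, the same
 * was_empty test that guards wake_up(&ring->pending_free_wq). */
static void free_req(struct req *r)
{
	int was_empty;

	pthread_mutex_lock(&free_lock);
	was_empty = (free_list == NULL);
	r->next = free_list;
	free_list = r;
	pthread_mutex_unlock(&free_lock);
	if (was_empty)
		pthread_cond_broadcast(&free_cond);
}

/* Sleep until at least one request is free, like the wait on
 * pending_free_wq in the scheduler thread. */
static void wait_for_free_req(void)
{
	pthread_mutex_lock(&free_lock);
	while (free_list == NULL)
		pthread_cond_wait(&free_cond, &free_lock);
	pthread_mutex_unlock(&free_lock);
}

int main(void)
{
	static struct req pool[4];

	for (int i = 0; i < 4; i++) {
		pool[i].id = i;
		free_req(&pool[i]);
	}
	wait_for_free_req();
	struct req *r = alloc_req();
	printf("got request %d\n", r ? r->id : -1);
	return 0;
}
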
518 static void blkif_notify_work(struct xen_blkif_ring *ring)
520 ring->waiting_reqs = 1;
521 wake_up(&ring->wq);
534 static void print_stats(struct xen_blkif_ring *ring)
538 current->comm, ring->st_oo_req,
539 ring->st_rd_req, ring->st_wr_req,
540 ring->st_f_req, ring->st_ds_req,
541 ring->persistent_gnt_c, max_pgrants);
542 ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
543 ring->st_rd_req = 0;
544 ring->st_wr_req = 0;
545 ring->st_oo_req = 0;
546 ring->st_ds_req = 0;
551 struct xen_blkif_ring *ring = arg;
552 struct xen_blkif *blkif = ring->blkif;
569 ring->wq,
570 ring->waiting_reqs || kthread_should_stop(),
575 ring->pending_free_wq,
576 !list_empty(&ring->pending_free) ||
582 do_eoi = ring->waiting_reqs;
584 ring->waiting_reqs = 0;
587 ret = do_block_io_op(ring, &eoi_flags);
589 ring->waiting_reqs = 1;
591 wait_event_interruptible(ring->shutdown_wq,
594 if (do_eoi && !ring->waiting_reqs) {
595 xen_irq_lateeoi(ring->irq, eoi_flags);
601 time_after(jiffies, ring->next_lru)) {
602 purge_persistent_gnt(ring);
603 ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
608 gnttab_page_cache_shrink(&ring->free_pages, 0);
610 gnttab_page_cache_shrink(&ring->free_pages,
613 if (log_stats && time_after(jiffies, ring->st_print))
614 print_stats(ring);
618 flush_work(&ring->persistent_purge_work);
621 print_stats(ring);
623 ring->xenblkd = NULL;
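
The scheduler-thread fragments above (xen_blkif_schedule()) follow a common worker shape: sleep until waiting_reqs is set or the thread is asked to stop, clear the flag before draining the ring, and set it again when do_block_io_op() reports more outstanding requests so the loop makes another pass without sleeping. The pthread-based sketch below shows only that shape; the lateeoi handling, LRU purge and page-cache shrinking of the real loop are left out, and all names are assumptions.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool waiting_reqs;		/* mirrors ring->waiting_reqs    */
static bool should_stop;		/* mirrors kthread_should_stop() */

/* Pretend to drain the ring; returns true while more requests remain. */
static bool process_ring(void)
{
	static int batches = 2;

	printf("processing one batch of requests\n");
	return --batches > 0;
}

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!waiting_reqs && !should_stop)
			pthread_cond_wait(&wake, &lock);
		if (should_stop) {
			pthread_mutex_unlock(&lock);
			break;
		}
		waiting_reqs = false;		/* clear before draining */
		pthread_mutex_unlock(&lock);

		if (process_ring()) {		/* ring not empty yet?   */
			pthread_mutex_lock(&lock);
			waiting_reqs = true;	/* run another pass      */
			pthread_mutex_unlock(&lock);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/* blkif_notify_work(): flag work and wake the thread. */
	pthread_mutex_lock(&lock);
	waiting_reqs = true;
	pthread_cond_broadcast(&wake);
	pthread_mutex_unlock(&lock);

	/* Ask the worker to stop; depending on scheduling it may exit
	 * before draining everything, which is fine for the demo. */
	pthread_mutex_lock(&lock);
	should_stop = true;
	pthread_cond_broadcast(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}
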
631 void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
634 if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
635 free_persistent_gnts(ring, &ring->persistent_gnts,
636 ring->persistent_gnt_c);
638 BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
639 ring->persistent_gnt_c = 0;
642 gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
646 struct xen_blkif_ring *ring,
656 put_persistent_gnt(ring, pages[i]->persistent_gnt);
674 struct xen_blkif_ring *ring = pending_req->ring;
675 struct xen_blkif *blkif = ring->blkif;
681 gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
682 make_response(ring, pending_req->id,
684 free_req(ring, pending_req);
697 if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
706 struct xen_blkif_ring *ring = req->ring;
710 invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
731 static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
743 invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
748 gnttab_page_cache_put(&ring->free_pages, unmap_pages,
756 static int xen_blkbk_map(struct xen_blkif_ring *ring,
769 struct xen_blkif *blkif = ring->blkif;
784 ring,
796 if (gnttab_page_cache_get(&ring->free_pages,
798 gnttab_page_cache_put(&ring->free_pages,
833 gnttab_page_cache_put(&ring->free_pages,
844 ring->persistent_gnt_c < max_pgrants) {
862 if (add_persistent_gnt(ring,
870 persistent_gnt->gnt, ring->persistent_gnt_c,
906 rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
919 struct xen_blkif_ring *ring = pending_req->ring;
930 rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
962 xen_blkbk_unmap(ring, pages, indirect_grefs);
966 static int dispatch_discard_io(struct xen_blkif_ring *ring,
971 struct xen_blkif *blkif = ring->blkif;
988 ring->st_ds_req++;
1004 make_response(ring, req->u.discard.id, req->operation, status);
1009 static int dispatch_other_io(struct xen_blkif_ring *ring,
1013 free_req(ring, pending_req);
1014 make_response(ring, req->u.other.id, req->operation,
1019 static void xen_blk_drain_io(struct xen_blkif_ring *ring)
1021 struct xen_blkif *blkif = ring->blkif;
1025 if (atomic_read(&ring->inflight) == 0)
1043 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
1048 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
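
The completion fragments above show the in-flight accounting: dispatch bumps ring->inflight, each completion drops it, and when a drain is requested the completion path wakes the drainer once the count reaches zero, which is what xen_blk_drain_io() waits for before handling flush/barrier requests on a device that needs draining. A condition-variable sketch of that drain pattern (the kernel uses atomics and a completion; names and framing here are assumptions):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int inflight;			/* mirrors ring->inflight         */

static void request_submitted(void)
{
	pthread_mutex_lock(&lock);
	inflight++;			/* atomic_inc(&ring->inflight)    */
	pthread_mutex_unlock(&lock);
}

static void request_completed(void)
{
	pthread_mutex_lock(&lock);
	if (--inflight == 0)		/* atomic_dec_and_test(...)       */
		pthread_cond_broadcast(&drained);
	pthread_mutex_unlock(&lock);
}

/* Wait until every outstanding request has completed, as xen_blk_drain_io()
 * does before a flush/barrier on a device that requires draining. */
static void drain_io(void)
{
	pthread_mutex_lock(&lock);
	while (inflight != 0)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	request_submitted();
	request_completed();
	drain_io();
	printf("drained, inflight=%d\n", inflight);
	return 0;
}
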
1059 * the proper response on the ring.
1077 * Function to copy from the ring buffer the 'struct blkif_request'
1082 __do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1084 union blkif_back_rings *blk_rings = &ring->blk_rings;
1096 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1097 rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1113 pending_req = alloc_req(ring);
1115 ring->st_oo_req++;
1120 switch (ring->blkif->blk_protocol) {
1144 if (dispatch_rw_block_io(ring, &req, pending_req))
1148 free_req(ring, pending_req);
1149 if (dispatch_discard_io(ring, &req))
1153 if (dispatch_other_io(ring, &req, pending_req))
1166 do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1168 union blkif_back_rings *blk_rings = &ring->blk_rings;
1172 more_to_do = __do_block_io_op(ring, eoi_flags);
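
__do_block_io_op()/do_block_io_op() above consume requests between the backend's consumer index and the producer index published by the frontend, and the "bogus ring requests (%d - %d = %d)" warning fires when that difference claims more outstanding requests than the ring can hold. The sketch below shows just that consumer-side bound check on a simplified ring; the real code uses the Xen ring.h macros, a memory barrier before reading requests, and per-protocol request layouts, and all names and sizes here are assumptions.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 32u			/* entries; always a power of two */

struct demo_ring {
	uint32_t req_prod;		/* written by the frontend        */
	uint32_t req_cons;		/* backend-private consumer index */
};

/* Return false (and stop processing) when the published producer index
 * claims more outstanding requests than the ring can hold. */
static bool ring_requests_sane(const struct demo_ring *r, uint32_t *todo)
{
	uint32_t outstanding = r->req_prod - r->req_cons;  /* wraps safely */

	if (outstanding > RING_SIZE) {
		fprintf(stderr, "bogus ring requests (%u - %u = %u), halting\n",
			(unsigned)r->req_prod, (unsigned)r->req_cons,
			(unsigned)outstanding);
		return false;
	}
	*todo = outstanding;
	return true;
}

int main(void)
{
	struct demo_ring r = { .req_prod = 40, .req_cons = 12 };
	uint32_t todo;

	if (ring_requests_sane(&r, &todo))
		printf("%u requests to consume, slots %u..%u (mod %u)\n",
		       (unsigned)todo,
		       (unsigned)(r.req_cons % RING_SIZE),
		       (unsigned)((r.req_prod - 1) % RING_SIZE),
		       (unsigned)RING_SIZE);
	return 0;
}
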
1185 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1214 ring->st_rd_req++;
1218 ring->st_wr_req++;
1226 ring->st_f_req++;
1252 pending_req->ring = ring;
1279 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1284 ring->blkif->vbd.pdevice);
1296 ring->blkif->domid);
1305 xen_blk_drain_io(pending_req->ring);
1320 xen_blkif_get(ring->blkif);
1321 atomic_inc(&ring->inflight);
1371 ring->st_rd_sect += preq.nr_sects;
1373 ring->st_wr_sect += preq.nr_sects;
1378 xen_blkbk_unmap(ring, pending_req->segments,
1382 make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1383 free_req(ring, pending_req);
1399 * Put a response on the ring reporting how the operation fared.
1401 static void make_response(struct xen_blkif_ring *ring, u64 id,
1409 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1410 blk_rings = &ring->blk_rings;
1411 /* Place on the response ring for the relevant domain. */
1412 switch (ring->blkif->blk_protocol) {
1435 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
1437 notify_remote_via_irq(ring->irq);
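
make_response() above is the producer side: under blk_ring_lock it writes a response for the negotiated protocol at the private producer index, pushes the index with the ring macros, and notifies the frontend through the event-channel irq when needed. A simplified single-protocol sketch of that publish-then-notify ordering, with a C11 release store standing in for the barrier inside the ring push macro (layout, names and the unconditional notify are assumptions):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 32u

struct demo_response {
	uint64_t id;			/* copied back from the request   */
	uint8_t  operation;
	int16_t  status;		/* e.g. OK vs. error              */
};

struct demo_ring {
	struct demo_response ring[RING_SIZE];
	_Atomic uint32_t rsp_prod;	/* index shared with the frontend */
	uint32_t rsp_prod_pvt;		/* backend-private producer index */
};

static void make_response(struct demo_ring *r, uint64_t id,
			  uint8_t op, int16_t status)
{
	struct demo_response *rsp = &r->ring[r->rsp_prod_pvt % RING_SIZE];

	rsp->id = id;
	rsp->operation = op;
	rsp->status = status;
	r->rsp_prod_pvt++;

	/* Publish the payload before the index; the release store stands in
	 * for the barrier inside the ring push macro. */
	atomic_store_explicit(&r->rsp_prod, r->rsp_prod_pvt,
			      memory_order_release);

	/* The real code notifies only when the push macro asks for it;
	 * notify_remote_via_irq(ring->irq) would go here. */
	printf("pushed response for id %llu\n", (unsigned long long)id);
}

int main(void)
{
	static struct demo_ring r;

	make_response(&r, 1234, 0 /* read */, 0 /* okay */);
	return 0;
}
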