Lines Matching refs:ring
73 * to fill the ring, but since this might become too high, especially with
109 * Maximum order of pages to be used for the shared ring between front and
114 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
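Lines 109-114 declare the max_ring_page_order module parameter that bounds the size of the shared ring. For orientation, a minimal stand-alone sketch of that declaration pattern follows; the backing variable name, its zero default, and the read-only 0444 permission are assumptions of this sketch, not the driver's actual values.

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Order of pages granted for the shared ring; 0 means a single-page ring. */
static unsigned int example_max_ring_page_order;

/* Expose it as max_ring_page_order, readable from sysfs but not writable. */
module_param_named(max_ring_page_order, example_max_ring_page_order, uint, 0444);
MODULE_PARM_DESC(max_ring_page_order,
		 "Maximum order of pages to be used for the shared ring");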
143 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
144 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
147 static void make_response(struct xen_blkif_ring *ring, u64 id,
168 static int add_persistent_gnt(struct xen_blkif_ring *ring,
173 struct xen_blkif *blkif = ring->blkif;
175 if (ring->persistent_gnt_c >= max_pgrants) {
181 new = &ring->persistent_gnts.rb_node;
199 rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
200 ring->persistent_gnt_c++;
201 atomic_inc(&ring->persistent_gnt_in_use);
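Lines 168-201 add a grant to the per-ring persistent_gnts red-black tree and bump persistent_gnt_c and persistent_gnt_in_use. A minimal sketch of that rb-tree insertion, keyed by the grant reference, is shown below; struct example_gnt and example_add_gnt are hypothetical stand-ins for the driver's types.

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>

/* Hypothetical node: one persistently mapped grant, keyed by its reference. */
struct example_gnt {
	u32 gnt;
	struct rb_node node;
};

/* Insert @new into @root; return -EEXIST if the grant is already tracked. */
static int example_add_gnt(struct rb_root *root, struct example_gnt *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct example_gnt *this;

	while (*link) {
		parent = *link;
		this = rb_entry(parent, struct example_gnt, node);
		if (new->gnt < this->gnt)
			link = &(*link)->rb_left;
		else if (new->gnt > this->gnt)
			link = &(*link)->rb_right;
		else
			return -EEXIST;
	}
	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, root);
	return 0;
}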
205 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
211 node = ring->persistent_gnts.rb_node;
225 atomic_inc(&ring->persistent_gnt_in_use);
232 static void put_persistent_gnt(struct xen_blkif_ring *ring,
239 atomic_dec(&ring->persistent_gnt_in_use);
242 static void free_persistent_gnts(struct xen_blkif_ring *ring)
244 struct rb_root *root = &ring->persistent_gnts;
276 gnttab_page_cache_put(&ring->free_pages, pages,
283 ring->persistent_gnt_c--;
286 BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
287 BUG_ON(ring->persistent_gnt_c != 0);
296 struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
303 while (!list_empty(&ring->persistent_purge_list)) {
304 persistent_gnt = list_first_entry(&ring->persistent_purge_list,
319 gnttab_page_cache_put(&ring->free_pages, pages,
328 gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
332 static void purge_persistent_gnt(struct xen_blkif_ring *ring)
340 if (work_busy(&ring->persistent_purge_work)) {
345 if (ring->persistent_gnt_c < max_pgrants ||
346 (ring->persistent_gnt_c == max_pgrants &&
347 !ring->blkif->vbd.overflow_max_grants)) {
351 num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
352 num_clean = min(ring->persistent_gnt_c, num_clean);
368 BUG_ON(!list_empty(&ring->persistent_purge_list));
369 root = &ring->persistent_gnts;
384 &ring->persistent_purge_list);
399 ring->persistent_gnt_c -= total;
400 ring->blkif->vbd.overflow_max_grants = 0;
403 schedule_work(&ring->persistent_purge_work);
414 static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
419 spin_lock_irqsave(&ring->pending_free_lock, flags);
420 if (!list_empty(&ring->pending_free)) {
421 req = list_entry(ring->pending_free.next, struct pending_req,
425 spin_unlock_irqrestore(&ring->pending_free_lock, flags);
433 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
438 spin_lock_irqsave(&ring->pending_free_lock, flags);
439 was_empty = list_empty(&ring->pending_free);
440 list_add(&req->free_list, &ring->pending_free);
441 spin_unlock_irqrestore(&ring->pending_free_lock, flags);
443 wake_up(&ring->pending_free_wq);
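Lines 414-443 implement the pending_req pool: a spinlock-protected free list plus a wait queue that the service thread sleeps on when the pool runs dry. A self-contained sketch of that alloc/free pattern, with hypothetical example_* names in place of the driver's structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct example_req {
	struct list_head free_list;
};

struct example_pool {
	struct list_head  pending_free;
	spinlock_t        pending_free_lock;
	wait_queue_head_t pending_free_wq;
};

/* Pop a request from the free list, or return NULL if none is available. */
static struct example_req *example_alloc_req(struct example_pool *pool)
{
	struct example_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->pending_free_lock, flags);
	if (!list_empty(&pool->pending_free)) {
		req = list_first_entry(&pool->pending_free,
				       struct example_req, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&pool->pending_free_lock, flags);
	return req;
}

/* Return a request; wake waiters only on the empty-to-non-empty transition. */
static void example_free_req(struct example_pool *pool, struct example_req *req)
{
	unsigned long flags;
	bool was_empty;

	spin_lock_irqsave(&pool->pending_free_lock, flags);
	was_empty = list_empty(&pool->pending_free);
	list_add(&req->free_list, &pool->pending_free);
	spin_unlock_irqrestore(&pool->pending_free_lock, flags);
	if (was_empty)
		wake_up(&pool->pending_free_wq);
}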
523 static void blkif_notify_work(struct xen_blkif_ring *ring)
525 ring->waiting_reqs = 1;
526 wake_up(&ring->wq);
539 static void print_stats(struct xen_blkif_ring *ring)
543 current->comm, ring->st_oo_req,
544 ring->st_rd_req, ring->st_wr_req,
545 ring->st_f_req, ring->st_ds_req,
546 ring->persistent_gnt_c, max_pgrants);
547 ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
548 ring->st_rd_req = 0;
549 ring->st_wr_req = 0;
550 ring->st_oo_req = 0;
551 ring->st_ds_req = 0;
556 struct xen_blkif_ring *ring = arg;
557 struct xen_blkif *blkif = ring->blkif;
574 ring->wq,
575 ring->waiting_reqs || kthread_should_stop(),
580 ring->pending_free_wq,
581 !list_empty(&ring->pending_free) ||
587 do_eoi = ring->waiting_reqs;
589 ring->waiting_reqs = 0;
592 ret = do_block_io_op(ring, &eoi_flags);
594 ring->waiting_reqs = 1;
596 wait_event_interruptible(ring->shutdown_wq,
599 if (do_eoi && !ring->waiting_reqs) {
600 xen_irq_lateeoi(ring->irq, eoi_flags);
606 time_after(jiffies, ring->next_lru)) {
607 purge_persistent_gnt(ring);
608 ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
613 gnttab_page_cache_shrink(&ring->free_pages, 0);
615 gnttab_page_cache_shrink(&ring->free_pages,
618 if (log_stats && time_after(jiffies, ring->st_print))
619 print_stats(ring);
623 flush_work(&ring->persistent_purge_work);
626 print_stats(ring);
628 ring->xenblkd = NULL;
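Lines 556-628 are the body of the per-ring kernel thread: it sleeps until the frontend kicks the ring or the thread is told to stop, clears waiting_reqs before processing so a concurrent notification is not lost, and periodically purges the persistent-grant LRU and prints stats. A stripped-down sketch of just the wake/clear/process loop, with illustrative names and without the EOI, LRU, and stats handling:

#include <linux/kthread.h>
#include <linux/wait.h>

/* Hypothetical per-ring state; the real structure carries far more fields. */
struct example_ring {
	wait_queue_head_t wq;
	bool waiting_reqs;
};

/* Stub for the request-processing path; returns true if more work remains. */
static bool example_do_work(struct example_ring *ring)
{
	return false;
}

static int example_ring_thread(void *arg)
{
	struct example_ring *ring = arg;

	while (!kthread_should_stop()) {
		wait_event_interruptible(ring->wq,
					 ring->waiting_reqs ||
					 kthread_should_stop());

		/* Clear the flag *before* processing; a notification that
		 * races with us simply sets it again and we loop once more. */
		ring->waiting_reqs = false;
		smp_mb();

		if (example_do_work(ring))
			ring->waiting_reqs = true;
	}
	return 0;
}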
636 void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
639 free_persistent_gnts(ring);
642 gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
646 struct xen_blkif_ring *ring,
656 put_persistent_gnt(ring, pages[i]->persistent_gnt);
674 struct xen_blkif_ring *ring = pending_req->ring;
675 struct xen_blkif *blkif = ring->blkif;
681 gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
682 make_response(ring, pending_req->id,
684 free_req(ring, pending_req);
697 if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
706 struct xen_blkif_ring *ring = req->ring;
710 invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
731 static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
743 invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
748 gnttab_page_cache_put(&ring->free_pages, unmap_pages,
756 static int xen_blkbk_map(struct xen_blkif_ring *ring,
769 struct xen_blkif *blkif = ring->blkif;
784 ring,
796 if (gnttab_page_cache_get(&ring->free_pages,
798 gnttab_page_cache_put(&ring->free_pages,
833 gnttab_page_cache_put(&ring->free_pages,
844 ring->persistent_gnt_c < max_pgrants) {
862 if (add_persistent_gnt(ring,
870 persistent_gnt->gnt, ring->persistent_gnt_c,
906 rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
919 struct xen_blkif_ring *ring = pending_req->ring;
930 rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
962 xen_blkbk_unmap(ring, pages, indirect_grefs);
966 static int dispatch_discard_io(struct xen_blkif_ring *ring,
971 struct xen_blkif *blkif = ring->blkif;
987 ring->st_ds_req++;
1005 make_response(ring, req->u.discard.id, req->operation, status);
1010 static int dispatch_other_io(struct xen_blkif_ring *ring,
1014 free_req(ring, pending_req);
1015 make_response(ring, req->u.other.id, req->operation,
1020 static void xen_blk_drain_io(struct xen_blkif_ring *ring)
1022 struct xen_blkif *blkif = ring->blkif;
1026 if (atomic_read(&ring->inflight) == 0)
1044 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
1049 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
1060 * the proper response on the ring.
1182 * Function to copy the 'struct blkif_request' from the ring buffer
1187 __do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1189 union blkif_back_rings *blk_rings = &ring->blk_rings;
1201 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1202 rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1218 pending_req = alloc_req(ring);
1220 ring->st_oo_req++;
1225 switch (ring->blkif->blk_protocol) {
1249 if (dispatch_rw_block_io(ring, &req, pending_req))
1253 free_req(ring, pending_req);
1254 if (dispatch_discard_io(ring, &req))
1258 if (dispatch_other_io(ring, &req, pending_req))
1271 do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1273 union blkif_back_rings *blk_rings = &ring->blk_rings;
1277 more_to_do = __do_block_io_op(ring, eoi_flags);
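Lines 1182-1277 pull requests off the shared ring: __do_block_io_op snapshots each blkif_request out of shared memory before acting on it, and do_block_io_op uses RING_FINAL_CHECK_FOR_REQUESTS to catch requests that arrived while the ring was being drained. A sketch of that consumption loop, assuming only the native-protocol blkif_back_ring (the driver itself switches over a union of native/32-bit/64-bit rings) and with dispatch left as a comment:

#include <xen/interface/io/blkif.h>
#include <xen/interface/io/ring.h>

static int example_consume_requests(struct blkif_back_ring *ring)
{
	struct blkif_request req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();	/* ensure we see requests up to rp */

	while (rc != rp) {
		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;

		/* Copy the request out of the shared page so the frontend
		 * cannot change it after it has been validated. */
		RING_COPY_REQUEST(ring, rc, &req);
		ring->req_cons = ++rc;

		/* ... hand &req to the dispatch path ... */
	}

	RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
	return more_to_do;
}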
1290 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1319 ring->st_rd_req++;
1323 ring->st_wr_req++;
1331 ring->st_f_req++;
1357 pending_req->ring = ring;
1384 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1389 ring->blkif->vbd.pdevice);
1401 ring->blkif->domid);
1410 xen_blk_drain_io(pending_req->ring);
1425 xen_blkif_get(ring->blkif);
1426 atomic_inc(&ring->inflight);
1467 ring->st_rd_sect += preq.nr_sects;
1469 ring->st_wr_sect += preq.nr_sects;
1474 xen_blkbk_unmap(ring, pending_req->segments,
1478 make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1479 free_req(ring, pending_req);
1487 * Put a response on the ring indicating how the operation fared.
1489 static void make_response(struct xen_blkif_ring *ring, u64 id,
1497 spin_lock_irqsave(&ring->blk_ring_lock, flags);
1498 blk_rings = &ring->blk_rings;
1499 /* Place on the response ring for the relevant domain. */
1500 switch (ring->blkif->blk_protocol) {
1523 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
1525 notify_remote_via_irq(ring->irq);
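Lines 1489-1525 post the completion back to the frontend under blk_ring_lock and kick the event channel if needed. A sketch of that response path, again assuming the native-protocol blkif_back_ring only and omitting the lock and protocol switch; the irq argument stands for the ring's bound event-channel irq:

#include <linux/types.h>
#include <xen/events.h>
#include <xen/interface/io/blkif.h>

static void example_make_response(struct blkif_back_ring *ring, int irq,
				  u64 id, unsigned short op, int status)
{
	struct blkif_response *resp;
	int notify;

	/* Fill the next free response slot on the shared ring. */
	resp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
	resp->id        = id;
	resp->operation = op;
	resp->status    = status;

	ring->rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(irq);
}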