Lines matching refs:ioend
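(These hits follow struct iomap_ioend through the iomap writeback path; in mainline kernels this code lives in fs/iomap/buffered-io.c. The sketches interleaved below reconstruct each function from the matched lines plus stated assumptions; they are illustrative, not verbatim source.)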
1471 * We're now finished for good with this ioend structure. Update the page
1473 * ioend after this.
1476 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1478 struct inode *inode = ioend->io_inode;
1479 struct bio *bio = &ioend->io_inline_bio;
1480 struct bio *last = ioend->io_bio, *next;
1482 loff_t offset = ioend->io_offset;
1486 for (bio = &ioend->io_inline_bio; bio; bio = next) {
1490 * For the last bio, bi_private points to the ioend, so we
1506 /* The ioend has been freed by bio_put() */
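Lines 1471-1506 are iomap_finish_ioend(). A minimal sketch of the walk they describe, assuming the mainline layout where the ioend is embedded in its first bio (io_inline_bio), chained bios link through bi_private, and the last bio's bi_private points back at the ioend for the end_io handler:

static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
        struct inode *inode = ioend->io_inode;
        struct bio *last = ioend->io_bio;       /* most recently chained bio */
        struct bio *bio, *next;
        u32 folio_count = 0;

        for (bio = &ioend->io_inline_bio; bio; bio = next) {
                struct folio_iter fi;

                /*
                 * For the last bio, bi_private points back at the ioend
                 * (for the end_io handler), so end the iteration here
                 * instead of following it.
                 */
                if (bio == last)
                        next = NULL;
                else
                        next = bio->bi_private;

                /* End writeback state on every folio in this bio. */
                bio_for_each_folio_all(fi, bio) {
                        iomap_finish_folio_write(inode, fi.folio, fi.length,
                                        error);
                        folio_count++;
                }
                bio_put(bio);
        }
        /* The ioend has been freed by bio_put() on the inline bio. */
        return folio_count;
}

The folio count is returned so that callers draining long merge lists can reschedule periodically, as the next function shows.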
1525 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1532 list_replace_init(&ioend->io_list, &tmp);
1533 completions = iomap_finish_ioend(ioend, error);
1540 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1541 list_del_init(&ioend->io_list);
1542 completions += iomap_finish_ioend(ioend, error);
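iomap_finish_ioends() (lines 1525-1542) drains the list of ioends that were merged onto the one passed in. A sketch; the exact reschedule threshold here is an assumption based on recent mainline:

void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
        struct list_head tmp;
        u32 completions;

        might_sleep();

        /* Detach any ioends merged onto this one onto a private list. */
        list_replace_init(&ioend->io_list, &tmp);
        completions = iomap_finish_ioend(ioend, error);

        while (!list_empty(&tmp)) {
                /* Yield periodically; one merged chain can cover a lot. */
                if (completions > IOEND_BATCH_SIZE * 8) {
                        cond_resched();
                        completions = 0;
                }
                ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
                list_del_init(&ioend->io_list);
                completions += iomap_finish_ioend(ioend, error);
        }
}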
1551 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1553 if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1555 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1558 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1561 if (ioend->io_offset + ioend->io_size != next->io_offset)
1573 if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
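All five merge criteria are visible in lines 1551-1573. Stitched back together as one predicate (the comments paraphrase the kernel's rationale; in particular, physically discontiguous ioends are not merged because completion would still have to iterate the discontiguities):

static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
        /* Both must have completed with the same status. */
        if (ioend->io_bio->bi_status != next->io_bio->bi_status)
                return false;
        /* Shared (COW) extents must not mix with non-shared ones. */
        if ((ioend->io_flags & IOMAP_F_SHARED) ^
            (next->io_flags & IOMAP_F_SHARED))
                return false;
        /* Unwritten extents need conversion at completion; don't mix. */
        if ((ioend->io_type == IOMAP_UNWRITTEN) ^
            (next->io_type == IOMAP_UNWRITTEN))
                return false;
        /* Must be logically contiguous in the file ... */
        if (ioend->io_offset + ioend->io_size != next->io_offset)
                return false;
        /*
         * ... and physically contiguous on disk. io_size is in bytes,
         * io_sector in 512-byte sectors, hence the >> 9.
         */
        if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
                return false;
        return true;
}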
1579 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1583 INIT_LIST_HEAD(&ioend->io_list);
1587 if (!iomap_ioend_can_merge(ioend, next))
1589 list_move_tail(&next->io_list, &ioend->io_list);
1590 ioend->io_size += next->io_size;
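iomap_ioend_try_merge() (lines 1579-1590) absorbs mergeable neighbours from a list onto the head ioend. A sketch, assuming more_ioends is ordered by file offset so the scan can stop at the first entry that fails the merge test:

void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
        struct iomap_ioend *next;

        INIT_LIST_HEAD(&ioend->io_list);

        while ((next = list_first_entry_or_null(more_ioends,
                        struct iomap_ioend, io_list))) {
                if (!iomap_ioend_can_merge(ioend, next))
                        break;
                /* Absorb next: move it under ioend, grow the byte count. */
                list_move_tail(&next->io_list, &ioend->io_list);
                ioend->io_size += next->io_size;
        }
}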
1618 struct iomap_ioend *ioend = bio->bi_private;
1620 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1624 * Submit the final bio for an ioend.
1632 iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1635 ioend->io_bio->bi_private = ioend;
1636 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1639 error = wpc->ops->prepare_ioend(ioend, error);
1642 * If we're failing the IO now, just mark the ioend with an
1644 * as there is only one reference to the ioend at this point in
1647 ioend->io_bio->bi_status = errno_to_blk_status(error);
1648 bio_endio(ioend->io_bio);
1652 submit_bio(ioend->io_bio);
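Lines 1618-1652 wire completion to submission: iomap_writepage_end_bio() recovers the ioend from bi_private and finishes it, and iomap_submit_ioend() gives the filesystem one last hook before the bio goes down. A sketch of the submit path; the NULL check guarding prepare_ioend sits on an unmatched line and is an assumption:

static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
                int error)
{
        /* Completion will find the ioend again via bi_private. */
        ioend->io_bio->bi_private = ioend;
        ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

        /* Let the filesystem adjust the ioend or inject an error. */
        if (wpc->ops->prepare_ioend)
                error = wpc->ops->prepare_ioend(ioend, error);
        if (error) {
                /*
                 * If we're failing the IO now, mark the bio with the error
                 * and complete it. This runs IO completion immediately,
                 * which is safe as there is only one reference to the
                 * ioend at this point in time.
                 */
                ioend->io_bio->bi_status = errno_to_blk_status(error);
                bio_endio(ioend->io_bio);
                return error;
        }

        submit_bio(ioend->io_bio);
        return 0;
}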
1660 struct iomap_ioend *ioend;
1669 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1670 INIT_LIST_HEAD(&ioend->io_list);
1671 ioend->io_type = wpc->iomap.type;
1672 ioend->io_flags = wpc->iomap.flags;
1673 ioend->io_inode = inode;
1674 ioend->io_size = 0;
1675 ioend->io_folios = 0;
1676 ioend->io_offset = offset;
1677 ioend->io_bio = bio;
1678 ioend->io_sector = sector;
1679 return ioend;
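The container_of() at line 1669 is the trick that makes ioend allocation cheap: struct iomap_ioend embeds its first bio as io_inline_bio, so allocating a bio from a bioset with enough front padding yields the enclosing ioend as well. A sketch of the constructor; the bio_alloc_bioset() arguments reflect current mainline and may differ in older kernels:

static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
                loff_t offset, sector_t sector, struct writeback_control *wbc)
{
        struct iomap_ioend *ioend;
        struct bio *bio;

        /* The bioset's front_pad makes room for the enclosing ioend. */
        bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
                               REQ_OP_WRITE | wbc_to_write_flags(wbc),
                               GFP_NOFS, &iomap_ioend_bioset);
        bio->bi_iter.bi_sector = sector;
        wbc_init_bio(wbc, bio);

        /* Step back from the embedded bio to the ioend wrapping it. */
        ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
        INIT_LIST_HEAD(&ioend->io_list);
        ioend->io_type = wpc->iomap.type;
        ioend->io_flags = wpc->iomap.flags;
        ioend->io_inode = inode;
        ioend->io_size = 0;
        ioend->io_folios = 0;
        ioend->io_offset = offset;
        ioend->io_bio = bio;
        ioend->io_sector = sector;
        return ioend;
}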
1709 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1711 if (wpc->iomap.type != wpc->ioend->io_type)
1713 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1715 if (sector != bio_end_sector(wpc->ioend->io_bio))
1718 * Limit ioend bio chain lengths to minimise IO completion latency. This
1720 * folios in the ioend.
1722 if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
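Lines 1709-1722 are the checks deciding whether new blocks may join the cached ioend. Reassembled below; the left half of the first comparison (against wpc->iomap.flags) sits on an unmatched line and is inferred:

static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
                sector_t sector)
{
        /* Shared (COW) state of the new mapping must match the ioend. */
        if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
            (wpc->ioend->io_flags & IOMAP_F_SHARED))
                return false;
        /* Extent type (e.g. unwritten vs. mapped) must match. */
        if (wpc->iomap.type != wpc->ioend->io_type)
                return false;
        /* Must extend the ioend contiguously in file offset ... */
        if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
                return false;
        /* ... and contiguously on disk. */
        if (sector != bio_end_sector(wpc->ioend->io_bio))
                return false;
        /*
         * Cap the chain length to bound IO completion latency and the
         * tight loop that ends writeback on every folio in the ioend.
         */
        if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
                return false;
        return true;
}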
1728 * Test to see if we have an existing ioend structure that we could append to
1729 * first; otherwise finish off the current ioend and start another.
1740 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
1741 if (wpc->ioend)
1742 list_add(&wpc->ioend->io_list, iolist);
1743 wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
1746 if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1747 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1748 bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
1753 wpc->ioend->io_size += len;
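Lines 1728-1753 are the append-or-switch core of iomap_add_to_ioend(): reuse the cached ioend when the new range continues it, otherwise queue the old one on iolist and open a fresh ioend; and when the current bio is full, chain another bio instead of failing. A sketch of just that core, with the local variables (folio, len, poff) assumed from the visible calls:

        /* Append to the cached ioend if possible, else swap in a new one. */
        if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
                if (wpc->ioend)
                        list_add(&wpc->ioend->io_list, iolist);
                wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
        }

        /* Bio full? Chain a new one; adding to an empty bio cannot fail. */
        if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
                wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
                bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
        }

        wpc->ioend->io_size += len;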
1758 * We implement an immediate ioend submission policy here to avoid needing to
1760 * the forward progress guarantees we need to provide. The current ioend we're
1762 * doesn't append to the cached ioend, it will create a new ioend and cache that
1765 * If a new ioend is created and cached, the old ioend is returned and queued
1770 * At the end of a writeback pass, there will be a cached ioend remaining on the
1779 struct iomap_ioend *ioend, *next;
1817 wpc->ioend->io_folios++;
1819 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1825 * We cannot cancel the ioend directly here on error. We may have
1847 * If the page hasn't been added to the ioend, it won't be affected by
1860 * submission errors here and propagate into subsequent ioend
1863 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1866 list_del_init(&ioend->io_list);
1867 error2 = iomap_submit_ioend(wpc, ioend, error);
1994 if (!wpc->ioend)
1996 return iomap_submit_ioend(wpc, wpc->ioend, ret);
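Lines 1994-1996 close iomap_writepages(): after the dirty-page walk, the ioend still cached on the wpc (see the comment at line 1770) gets its final submission, with any earlier error passed through so completion sees it. A sketch of the wrapper, assuming the mainline signature and the iomap_do_writepage callback:

int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
                struct iomap_writepage_ctx *wpc,
                const struct iomap_writeback_ops *ops)
{
        int ret;

        wpc->ops = ops;
        ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
        /* Submit the ioend left cached by the final iteration, if any. */
        if (!wpc->ioend)
                return ret;
        return iomap_submit_ioend(wpc, wpc->ioend, ret);
}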