Lines matching refs: ifs

47 		struct iomap_folio_state *ifs)
51 return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
54 static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
57 return test_bit(block, ifs->state);
61 struct iomap_folio_state *ifs, size_t off, size_t len)
69 spin_lock_irqsave(&ifs->state_lock, flags);
70 bitmap_set(ifs->state, first_blk, nr_blks);
71 if (ifs_is_fully_uptodate(folio, ifs))
73 spin_unlock_irqrestore(&ifs->state_lock, flags);
79 struct iomap_folio_state *ifs = folio->private;
81 if (ifs)
82 ifs_set_range_uptodate(folio, ifs, off, len);
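The uptodate fragments above capture the core technique: the folio's private state keeps one bit per filesystem block, a byte range (off, len) is converted to a first-block/count pair, the bits are set under the state_lock, and the folio itself is only marked uptodate once the bitmap is full. A minimal, runnable userspace model of that conversion and locking follows; the names (blk_state, set_range_uptodate), the single-word bitmap, and the pthread mutex standing in for the spinlock are illustrative assumptions, not the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model: bit i of 'state' means "block i is uptodate". */
struct blk_state {
	pthread_mutex_t lock;		/* stands in for ifs->state_lock */
	unsigned int nr_blocks;		/* blocks per folio, assumed < 64 here */
	unsigned int block_shift;	/* log2 of the block size */
	unsigned long state;
};

/* Set the uptodate bits covering the byte range [off, off + len). */
static bool set_range_uptodate(struct blk_state *bs, size_t off, size_t len)
{
	unsigned int first_blk = off >> bs->block_shift;
	unsigned int last_blk = (off + len - 1) >> bs->block_shift;
	unsigned long full_mask = (1UL << bs->nr_blocks) - 1;
	bool uptodate;

	pthread_mutex_lock(&bs->lock);
	for (unsigned int i = first_blk; i <= last_blk; i++)
		bs->state |= 1UL << i;
	uptodate = (bs->state & full_mask) == full_mask;
	pthread_mutex_unlock(&bs->lock);

	/* The caller would now mark the whole folio uptodate. */
	return uptodate;
}

int main(void)
{
	/* e.g. a 4 KiB folio on a filesystem with 1 KiB blocks */
	struct blk_state bs = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_blocks = 4,
		.block_shift = 10,
	};

	printf("first half read:  fully uptodate? %d\n",
	       set_range_uptodate(&bs, 0, 2048));
	printf("second half read: fully uptodate? %d\n",
	       set_range_uptodate(&bs, 2048, 2048));
	return 0;
}

(Compile with -pthread.) A read that completes piecewise can call this repeatedly; only the call that fills the last gap reports the folio as fully uptodate.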
88 struct iomap_folio_state *ifs, int block)
93 return test_bit(block + blks_per_folio, ifs->state);
97 struct iomap_folio_state *ifs, size_t off, size_t len)
106 spin_lock_irqsave(&ifs->state_lock, flags);
107 bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
108 spin_unlock_irqrestore(&ifs->state_lock, flags);
113 struct iomap_folio_state *ifs = folio->private;
115 if (ifs)
116 ifs_clear_range_dirty(folio, ifs, off, len);
120 struct iomap_folio_state *ifs, size_t off, size_t len)
129 spin_lock_irqsave(&ifs->state_lock, flags);
130 bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
131 spin_unlock_irqrestore(&ifs->state_lock, flags);
136 struct iomap_folio_state *ifs = folio->private;
138 if (ifs)
139 ifs_set_range_dirty(folio, ifs, off, len);
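The dirty-tracking fragments reuse the same allocation: dirty bits live in the second half of ifs->state, offset by the number of blocks per folio, so one bitmap carries both sets of flags. Below is a small userspace model of that dual layout, with locking omitted for brevity (the fragments take state_lock with irqsave, as in the sketch above); the helper names and the single 64-bit word are assumptions.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative model of a dual-purpose bitmap:
 *   bits [0, nr_blocks)             - uptodate
 *   bits [nr_blocks, 2 * nr_blocks) - dirty
 */
struct blk_state {
	unsigned int nr_blocks;
	unsigned int block_shift;
	unsigned long state;	/* assumes 2 * nr_blocks <= 64 */
};

static bool block_is_dirty(const struct blk_state *bs, unsigned int block)
{
	return bs->state & (1UL << (block + bs->nr_blocks));
}

static void set_range_dirty(struct blk_state *bs, size_t off, size_t len)
{
	unsigned int first_blk = off >> bs->block_shift;
	unsigned int last_blk = (off + len - 1) >> bs->block_shift;

	for (unsigned int i = first_blk; i <= last_blk; i++)
		bs->state |= 1UL << (i + bs->nr_blocks);
}

static void clear_range_dirty(struct blk_state *bs, size_t off, size_t len)
{
	unsigned int first_blk = off >> bs->block_shift;
	unsigned int last_blk = (off + len - 1) >> bs->block_shift;

	for (unsigned int i = first_blk; i <= last_blk; i++)
		bs->state &= ~(1UL << (i + bs->nr_blocks));
}

int main(void)
{
	struct blk_state bs = { .nr_blocks = 4, .block_shift = 10 };

	set_range_dirty(&bs, 1024, 2048);	/* dirties blocks 1 and 2 */
	for (unsigned int i = 0; i < bs.nr_blocks; i++)
		printf("block %u dirty: %d\n", i, block_is_dirty(&bs, i));

	clear_range_dirty(&bs, 0, 4096);	/* writeback cleaned everything */
	printf("block 1 dirty after clear: %d\n", block_is_dirty(&bs, 1));
	return 0;
}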
145 struct iomap_folio_state *ifs = folio->private;
149 if (ifs || nr_blocks <= 1)
150 return ifs;
158 * ifs->state tracks two sets of state flags when the
163 ifs = kzalloc(struct_size(ifs, state,
165 if (!ifs)
166 return ifs;
168 spin_lock_init(&ifs->state_lock);
170 bitmap_set(ifs->state, 0, nr_blocks);
172 bitmap_set(ifs->state, nr_blocks, nr_blocks);
173 folio_attach_private(folio, ifs);
175 return ifs;
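The allocation fragments add two points: per-folio state is only allocated when the folio spans more than one block, and the bitmap is sized for two bits per block, seeded from the folio's existing uptodate and dirty flags. The following is a hedged sketch of that lazy allocation using a flexible array member; the struct layout and helper names are illustrative, not the kernel definitions (the fragments use kzalloc() with struct_size() and then attach the result with folio_attach_private()).

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Illustrative per-folio block state with a flexible bitmap. */
struct blk_state {
	unsigned int nr_blocks;
	unsigned long state[];	/* 2 * nr_blocks bits: uptodate then dirty */
};

static void bitmap_set_range(unsigned long *map, unsigned int start,
			     unsigned int count)
{
	for (unsigned int i = start; i < start + count; i++)
		map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

/*
 * Allocate state lazily: a folio that holds a single block needs no
 * per-block tracking at all.  'uptodate' and 'dirty' model the folio
 * flags used to seed the bitmap.
 */
static struct blk_state *blk_state_alloc(unsigned int nr_blocks,
					 bool uptodate, bool dirty)
{
	struct blk_state *bs;
	size_t words;

	if (nr_blocks <= 1)
		return NULL;

	words = (2 * nr_blocks + BITS_PER_LONG - 1) / BITS_PER_LONG;
	bs = calloc(1, sizeof(*bs) + words * sizeof(unsigned long));
	if (!bs)
		return NULL;

	bs->nr_blocks = nr_blocks;
	if (uptodate)
		bitmap_set_range(bs->state, 0, nr_blocks);
	if (dirty)
		bitmap_set_range(bs->state, nr_blocks, nr_blocks);
	return bs;
}

int main(void)
{
	struct blk_state *bs = blk_state_alloc(16, true, false);

	if (bs) {
		printf("allocated state for %u blocks\n", bs->nr_blocks);
		free(bs);
	}
	printf("single-block folio gets no state: %p\n",
	       (void *)blk_state_alloc(1, true, false));
	return 0;
}

The matching teardown in the fragments just below detaches and frees the state, warning if read or write bytes are still pending or if the bitmap disagrees with the folio's uptodate flag.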
180 struct iomap_folio_state *ifs = folio_detach_private(folio);
182 if (!ifs)
184 WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
185 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
186 WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
188 kfree(ifs);
197 struct iomap_folio_state *ifs = folio->private;
212 if (ifs) {
217 if (!ifs_block_is_uptodate(ifs, i))
227 if (ifs_block_is_uptodate(ifs, i)) {
254 struct iomap_folio_state *ifs = folio->private;
263 if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
340 struct iomap_folio_state *ifs;
349 ifs = ifs_alloc(iter->inode, folio, iter->flags);
361 if (ifs)
362 atomic_add(plen, &ifs->read_bytes_pending);
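The read-path fragments layer a second mechanism on top of the bitmap: an atomic read_bytes_pending counter. Each sub-range submitted for I/O adds its length; each completion subtracts it, and only the completion that drops the counter to zero finishes the folio. A compact userspace model of that last-completion-wins pattern, using C11 atomics in place of the kernel's atomic_t helpers, is sketched below; the type and function names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of per-folio pending-byte accounting. */
struct folio_read_state {
	atomic_size_t read_bytes_pending;
};

/* Called when a sub-range of the folio is handed to the block layer. */
static void submit_range(struct folio_read_state *frs, size_t len)
{
	atomic_fetch_add(&frs->read_bytes_pending, len);
}

/*
 * Called when a sub-range completes.  Returns true only for the
 * completion that brings the counter back to zero - the one that
 * must finish (unlock) the folio.
 */
static bool complete_range(struct folio_read_state *frs, size_t len)
{
	return atomic_fetch_sub(&frs->read_bytes_pending, len) == len;
}

int main(void)
{
	struct folio_read_state frs;

	atomic_init(&frs.read_bytes_pending, 0);

	submit_range(&frs, 4096);	/* two discontiguous extents ... */
	submit_range(&frs, 8192);

	printf("first completion finishes folio:  %d\n",
	       complete_range(&frs, 4096));
	printf("second completion finishes folio: %d\n",
	       complete_range(&frs, 8192));
	return 0;
}

The writeback-completion fragments further down apply the same pattern to write_bytes_pending via atomic_sub_and_test().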
516 struct iomap_folio_state *ifs = folio->private;
520 if (!ifs)
531 if (!ifs_block_is_uptodate(ifs, i))
634 struct iomap_folio_state *ifs;
653 ifs = ifs_alloc(iter->inode, folio, iter->flags);
654 if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
991 struct iomap_folio_state *ifs;
1000 ifs = folio->private;
1001 if (!ifs)
1009 if (!ifs_block_is_dirty(folio, ifs, i)) {
1456 struct iomap_folio_state *ifs = folio->private;
1463 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1464 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1466 if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1733 struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc,
1751 if (ifs)
1752 atomic_add(len, &ifs->write_bytes_pending);
1778 struct iomap_folio_state *ifs = folio->private;
1788 if (!ifs && nblocks > 1) {
1789 ifs = ifs_alloc(inode, folio, 0);
1793 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0);
1801 if (ifs && !ifs_block_is_dirty(folio, ifs, i))
1812 iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
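The final writeback fragments tie the pieces together: the mapping code allocates the per-folio state if it is missing, walks the folio block by block, skips blocks whose dirty bit is clear, and accounts every block it queues into write_bytes_pending so completion can tell when the whole folio is done. The sketch below models only that scan; the names, the separate dirty word (the fragments keep dirty bits in the upper half of ifs->state), and the stubbed-out queueing step are all assumptions for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative per-folio state: dirty bits plus writeback accounting. */
struct blk_state {
	unsigned int nr_blocks;	/* assumed <= 64 for this model */
	unsigned long dirty;	/* bit i == block i is dirty */
	atomic_size_t write_bytes_pending;
};

static bool block_is_dirty(const struct blk_state *bs, unsigned int block)
{
	return bs->dirty & (1UL << block);
}

/* Stand-in for queueing one block onto an ioend for writeback. */
static void add_to_ioend(struct blk_state *bs, unsigned int block,
			 size_t block_size)
{
	atomic_fetch_add(&bs->write_bytes_pending, block_size);
	printf("queue block %u (%zu bytes) for writeback\n", block, block_size);
}

/* Walk the folio and only write back the blocks that are actually dirty. */
static void writeback_scan(struct blk_state *bs, size_t block_size)
{
	for (unsigned int i = 0; i < bs->nr_blocks; i++) {
		if (!block_is_dirty(bs, i))
			continue;	/* clean block: nothing to write */
		add_to_ioend(bs, i, block_size);
	}
}

int main(void)
{
	struct blk_state bs;

	bs.nr_blocks = 8;
	bs.dirty = (1UL << 2) | (1UL << 3) | (1UL << 6);	/* sparse dirtying */
	atomic_init(&bs.write_bytes_pending, 0);

	writeback_scan(&bs, 1024);
	printf("bytes pending writeback: %zu\n",
	       atomic_load(&bs.write_bytes_pending));
	return 0;
}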