Lines Matching refs:inode
10 * inode itself is not handled here.
13 * Split out of fs/inode.c
59 * If an inode is constantly having its pages dirtied, but then the
61 * possible for the worst case time between when an inode has its
70 static inline struct inode *wb_inode(struct list_head *head)
72 return list_entry(head, struct inode, i_io_list);
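
The wb_inode() helper above (line 72) recovers the owning inode from its embedded i_io_list node via list_entry(), i.e. the container_of pattern. Below is a minimal, self-contained userspace sketch of the same idiom; struct demo_inode and its field names are invented for illustration, only the list_entry() mapping mirrors the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-ins for the kernel's list_head and list_entry(), demo only. */
    struct list_head { struct list_head *next, *prev; };

    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical miniature inode with an embedded list node like i_io_list. */
    struct demo_inode {
        unsigned long ino;
        struct list_head io_list;
    };

    /* Same idea as wb_inode(): map a list node back to its containing inode. */
    static struct demo_inode *demo_wb_inode(struct list_head *head)
    {
        return list_entry(head, struct demo_inode, io_list);
    }

    int main(void)
    {
        struct demo_inode inode = { .ino = 42 };

        printf("ino = %lu\n", demo_wb_inode(&inode.io_list)->ino);
        return 0;
    }
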
109 * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
110 * @inode: inode to be moved
114 * Move @inode->i_io_list to @list of @wb and set %WB_has_dirty_io.
115 * Returns %true if @inode is the first occupant of the !dirty_time IO
118 static bool inode_io_list_move_locked(struct inode *inode,
124 list_move(&inode->i_io_list, head);
135 * inode_io_list_del_locked - remove an inode from its bdi_writeback IO list
136 * @inode: inode to be removed
137 * @wb: bdi_writeback @inode is being removed from
139 * Remove @inode which may be on one of @wb->b_{dirty|io|more_io} lists and
142 static void inode_io_list_del_locked(struct inode *inode,
146 assert_spin_locked(&inode->i_lock);
148 inode->i_state &= ~I_SYNC_QUEUED;
149 list_del_init(&inode->i_io_list);
215 * Parameters for foreign inode detection, see wbc_detach_inode() to see
219 * itself is fuzzy. All we want to do is detach an inode from the
223 * cgroups writing to the same inode concurrently is very rare and a mode
225 * taking too long when a different cgroup takes over an inode while
238 #define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */
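
Lines 215-238 describe the knobs for foreign-inode detection; i_wb_frn_history in particular is a 16-bit shift register of recent writeback rounds. The sketch below is a loose userspace illustration of such a history word; the one-bit-per-round accounting is a simplification (the kernel weights slots by IO time) and the helper names are invented.

    #include <stdio.h>
    #include <stdint.h>

    #define HIST_SLOTS 16   /* like WB_FRN_HIST_SLOTS: the history word is 16 bits */

    /* Push one round into the history: bit set when the round was "foreign",
     * i.e. dominated by a cgroup other than the inode's current wb. */
    static uint16_t hist_push(uint16_t history, int foreign)
    {
        return (uint16_t)((history << 1) | (foreign ? 1 : 0));
    }

    /* How many of the last HIST_SLOTS rounds were foreign. */
    static int hist_weight(uint16_t history)
    {
        int n = 0;

        for (int i = 0; i < HIST_SLOTS; i++)
            n += (history >> i) & 1;
        return n;
    }

    int main(void)
    {
        uint16_t h = 0;

        for (int round = 0; round < 12; round++)
            h = hist_push(h, round % 2);    /* alternate local/foreign rounds */
        printf("history 0x%04x, foreign slots %d/%d\n", h, hist_weight(h), HIST_SLOTS);
        return 0;
    }
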
250 void __inode_attach_wb(struct inode *inode, struct page *page)
252 struct backing_dev_info *bdi = inode_to_bdi(inode);
255 if (inode_cgwb_enabled(inode)) {
274 * update the same inode. Use cmpxchg() to tell the winner.
276 if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
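
__inode_attach_wb() (lines 250-276) tolerates concurrent attachers by letting cmpxchg() pick the winner: only the task that swings i_wb from NULL gets to install its wb. A compact userspace analogue using C11 atomics follows; the struct and function names are made up for the demo.

    #include <stdatomic.h>
    #include <stdio.h>

    struct wb { int id; };

    /* Stand-in for inode->i_wb: NULL until the first attacher wins. */
    static _Atomic(struct wb *) i_wb;

    /* Returns whichever wb ends up attached, ours or a racing winner's. */
    static struct wb *attach_wb(struct wb *mine)
    {
        struct wb *expected = NULL;

        /* Like cmpxchg(&inode->i_wb, NULL, wb): only the first store sticks. */
        if (atomic_compare_exchange_strong(&i_wb, &expected, mine))
            return mine;        /* we won the race */
        return expected;        /* someone attached first; use theirs */
    }

    int main(void)
    {
        struct wb a = { .id = 1 }, b = { .id = 2 };

        printf("attached: %d\n", attach_wb(&a)->id);    /* 1 */
        printf("attached: %d\n", attach_wb(&b)->id);    /* still 1 */
        return 0;
    }
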
282 * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
283 * @inode: inode of interest with i_lock held
285 * Returns @inode's wb with its list_lock held. @inode->i_lock must be
287 * to stay @inode's associated wb until its list_lock is released.
290 locked_inode_to_wb_and_lock_list(struct inode *inode)
291 __releases(&inode->i_lock)
295 struct bdi_writeback *wb = inode_to_wb(inode);
299 * @inode->i_lock and @wb->list_lock but list_lock nests
304 spin_unlock(&inode->i_lock);
308 if (likely(wb == inode->i_wb)) {
309 wb_put(wb); /* @inode already has ref */
316 spin_lock(&inode->i_lock);
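
locked_inode_to_wb_and_lock_list() (lines 282-316) has to respect the lock order list_lock before i_lock, so with only i_lock held it samples i_wb, drops i_lock, takes list_lock, and then re-checks that the association did not change; on a mismatch it retries. The pthread sketch below models only that retry shape, not the kernel's wb_get()/wb_put() pinning or cpu_relax(); all names are illustrative.

    #include <pthread.h>

    struct wb { pthread_mutex_t list_lock; };

    struct inode {
        pthread_mutex_t i_lock;
        struct wb *i_wb;        /* may be switched while i_lock is dropped */
    };

    /* Called with i_lock held; returns with list_lock held, i_lock released. */
    static struct wb *lock_wb_list(struct inode *inode)
    {
        for (;;) {
            struct wb *wb = inode->i_wb;            /* sampled under i_lock */

            pthread_mutex_unlock(&inode->i_lock);
            pthread_mutex_lock(&wb->list_lock);

            /* The association is stable under either lock, so this check
             * is meaningful even though i_lock has been dropped. */
            if (wb == inode->i_wb)
                return wb;

            /* Lost a race with a wb switch: start over from i_lock. */
            pthread_mutex_unlock(&wb->list_lock);
            pthread_mutex_lock(&inode->i_lock);
        }
    }

    int main(void)
    {
        struct wb wb0 = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
        struct inode ino = { .i_lock = PTHREAD_MUTEX_INITIALIZER, .i_wb = &wb0 };

        pthread_mutex_lock(&ino.i_lock);
        pthread_mutex_unlock(&lock_wb_list(&ino)->list_lock);
        return 0;
    }
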
321 * inode_to_wb_and_lock_list - determine an inode's wb and lock it
322 * @inode: inode of interest
324 * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
327 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
330 spin_lock(&inode->i_lock);
331 return locked_inode_to_wb_and_lock_list(inode);
335 struct inode *inode;
356 struct inode *inode = isw->inode;
357 struct backing_dev_info *bdi = inode_to_bdi(inode);
358 struct address_space *mapping = inode->i_mapping;
359 struct bdi_writeback *old_wb = inode->i_wb;
366 * If @inode switches cgwb membership while sync_inodes_sb() is
377 * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
378 * gives us exclusion against all wb related operations on @inode
388 spin_lock(&inode->i_lock);
393 * the inode and we shouldn't modify ->i_io_list.
395 if (unlikely(inode->i_state & I_FREEING))
398 trace_inode_switch_wbs(inode, old_wb, new_wb);
423 * @inode was on is ignored and the inode is put on ->b_dirty which
425 * preserves @inode->dirtied_when ordering.
427 if (!list_empty(&inode->i_io_list)) {
428 struct inode *pos;
430 inode_io_list_del_locked(inode, old_wb);
431 inode->i_wb = new_wb;
433 if (time_after_eq(inode->dirtied_when,
436 inode_io_list_move_locked(inode, new_wb, pos->i_io_list.prev);
438 inode->i_wb = new_wb;
442 inode->i_wb_frn_winner = 0;
443 inode->i_wb_frn_avg_time = 0;
444 inode->i_wb_frn_history = 0;
451 smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
454 spin_unlock(&inode->i_lock);
466 iput(inode);
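
When the switch work function moves a dirty inode to the new wb (lines 423-438), it keeps b_dirty sorted with the most recently dirtied inodes at the head, inserting the migrating inode in front of the first entry that is not newer. The self-contained sketch below shows only that ordering rule on a plain singly linked list; jiffies wraparound (time_after_eq) is ignored and the names are invented.

    #include <stdio.h>

    /* One entry on a toy "b_dirty" list, newest dirtied_when first. */
    struct node {
        unsigned long dirtied_when;
        struct node *next;
    };

    /* Insert n in front of the first entry that is not newer than it,
     * preserving the newest-first ->dirtied_when ordering. */
    static void insert_ordered(struct node **head, struct node *n)
    {
        struct node **pos = head;

        while (*pos && (*pos)->dirtied_when > n->dirtied_when)
            pos = &(*pos)->next;    /* skip entries strictly newer than n */
        n->next = *pos;
        *pos = n;
    }

    int main(void)
    {
        struct node a = { 30, NULL }, b = { 10, NULL }, c = { 20, NULL };
        struct node *head = NULL;

        insert_ordered(&head, &a);
        insert_ordered(&head, &b);
        insert_ordered(&head, &c);
        for (struct node *p = head; p; p = p->next)
            printf("%lu ", p->dirtied_when);    /* 30 20 10: newest first */
        printf("\n");
        return 0;
    }
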
483 * inode_switch_wbs - change the wb association of an inode
484 * @inode: target inode
487 * Switch @inode's wb association to the wb identified by @new_wb_id. The
490 static void inode_switch_wbs(struct inode *inode, int new_wb_id)
492 struct backing_dev_info *bdi = inode_to_bdi(inode);
497 if (inode->i_state & I_WB_SWITCH)
525 spin_lock(&inode->i_lock);
526 if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
527 inode->i_state & (I_WB_SWITCH | I_FREEING) ||
528 inode_to_wb(inode) == isw->new_wb) {
529 spin_unlock(&inode->i_lock);
532 inode->i_state |= I_WB_SWITCH;
533 __iget(inode);
534 spin_unlock(&inode->i_lock);
536 isw->inode = inode;
555 * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
557 * @inode: target inode
559 * @inode is locked and about to be written back under the control of @wbc.
560 * Record @inode's writeback context into @wbc and unlock the i_lock. On
565 struct inode *inode)
567 if (!inode_cgwb_enabled(inode)) {
568 spin_unlock(&inode->i_lock);
572 wbc->wb = inode_to_wb(inode);
573 wbc->inode = inode;
576 wbc->wb_lcand_id = inode->i_wb_frn_winner;
583 spin_unlock(&inode->i_lock);
593 inode_switch_wbs(inode, wbc->wb_id);
598 * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
601 * To be called after a writeback attempt of an inode finishes and undoes
604 * As concurrent write sharing of an inode is expected to be very rare and
607 * per-inode. While the support for concurrent write sharing of an inode
608 * is deemed unnecessary, an inode being written to by different cgroups at
615 * an inode and transfers the ownership to it. To avoid unnecessary
631 * inode->i_wb_frn_history. If the amount of recorded foreign IO time is
637 struct inode *inode = wbc->inode;
645 history = inode->i_wb_frn_history;
646 avg_time = inode->i_wb_frn_avg_time;
663 * into the running average kept per inode. If the consumed IO
694 trace_inode_foreign_history(inode, wbc, history);
699 * inode may switch across them repeatedly over time, which
700 * is okay. The main goal is avoiding keeping an inode on
704 inode_switch_wbs(inode, max_id);
711 inode->i_wb_frn_winner = max_id;
712 inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
713 inode->i_wb_frn_history = history;
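
The foreign-detection bookkeeping in wbc_detach_inode() (lines 637-713) keeps i_wb_frn_avg_time as a shift-based running average of the winning cgroup's IO time. The snippet below demonstrates that style of exponential moving average in plain C; the shift width is a stand-in, not necessarily the kernel's WB_FRN_TIME_AVG_SHIFT.

    #include <stdio.h>

    /* Shift-based running average: new_avg = avg + sample/2^k - avg/2^k.
     * AVG_SHIFT is an illustrative value, not the kernel's constant. */
    #define AVG_SHIFT 3

    static unsigned long avg_update(unsigned long avg, unsigned long sample)
    {
        return avg + (sample >> AVG_SHIFT) - (avg >> AVG_SHIFT);
    }

    int main(void)
    {
        unsigned long avg = 0;
        unsigned long samples[] = { 800, 800, 800, 100, 100, 100 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            avg = avg_update(avg, samples[i]);
            printf("sample %4lu -> avg %4lu\n", samples[i], avg);
        }
        return 0;
    }
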
721 * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
727 * controlled by @wbc. Keep the book for foreign inode detection. See
737 * pageout() path doesn't attach @wbc to the inode being written
746 /* dead cgroups shouldn't contribute to inode ownership arbitration */
771 * inode_congested - test whether an inode is congested
772 * @inode: inode to test for congestion (may be NULL)
775 * Tests whether @inode is congested. @cong_bits is the mask of congestion
778 * If cgroup writeback is enabled for @inode, the congestion state is
780 * associated with @inode is congested; otherwise, the root wb's congestion
783 * @inode is allowed to be NULL as this function is often called on
786 int inode_congested(struct inode *inode, int cong_bits)
789 * Once set, ->i_wb never becomes NULL while the inode is alive.
792 if (inode && inode_to_wb_is_valid(inode)) {
797 wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
799 unlocked_inode_to_wb_end(inode, &lock_cookie);
803 return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
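
inode_congested() (lines 771-803) is meant to be cheap to call from reclaim- or readahead-style paths that want to back off optional IO. A hypothetical caller might look like the kernel-style fragment below (not from this file); it assumes the WB_async_congested bit from backing-dev-defs.h is the appropriate one for deferrable background work.

    #include <linux/types.h>
    #include <linux/backing-dev.h>
    #include <linux/fs.h>

    /* Hypothetical helper (not from fs/fs-writeback.c): skip deferrable
     * background IO against an inode whose wb is async-congested. */
    static bool demo_should_defer_background_io(struct inode *inode)
    {
        return inode_congested(inode, 1 << WB_async_congested);
    }
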
1001 * cgroup_writeback_umount - flush inode wb switches for umount
1004 * flushes in-flight inode wb switches. An inode wb switch goes through
1037 locked_inode_to_wb_and_lock_list(struct inode *inode)
1038 __releases(&inode->i_lock)
1041 struct bdi_writeback *wb = inode_to_wb(inode);
1043 spin_unlock(&inode->i_lock);
1048 static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1051 struct bdi_writeback *wb = inode_to_wb(inode);
1077 * Add in the number of potentially dirty inodes, because each inode
1128 * Remove the inode from the writeback list it is on.
1130 void inode_io_list_del(struct inode *inode)
1134 wb = inode_to_wb_and_lock_list(inode);
1135 spin_lock(&inode->i_lock);
1136 inode_io_list_del_locked(inode, wb);
1137 spin_unlock(&inode->i_lock);
1143 * mark an inode as under writeback on the sb
1145 void sb_mark_inode_writeback(struct inode *inode)
1147 struct super_block *sb = inode->i_sb;
1150 if (list_empty(&inode->i_wb_list)) {
1152 if (list_empty(&inode->i_wb_list)) {
1153 list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1154 trace_sb_mark_inode_writeback(inode);
1161 * clear an inode as under writeback on the sb
1163 void sb_clear_inode_writeback(struct inode *inode)
1165 struct super_block *sb = inode->i_sb;
1168 if (!list_empty(&inode->i_wb_list)) {
1170 if (!list_empty(&inode->i_wb_list)) {
1171 list_del_init(&inode->i_wb_list);
1172 trace_sb_clear_inode_writeback(inode);
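
sb_mark_inode_writeback() and sb_clear_inode_writeback() (lines 1145-1172) both use the "check outside the lock, re-check under it" idiom so the common case avoids taking the per-sb list lock at all. Below is a userspace model of the mark side; a boolean flag stands in for list membership and every name is invented. The clear side is symmetric.

    #include <pthread.h>
    #include <stdbool.h>

    struct demo_sb {
        pthread_mutex_t list_lock;
        int nr_writeback;           /* stands in for the s_inodes_wb list */
    };

    struct demo_inode {
        struct demo_sb *sb;
        bool on_wb_list;            /* stands in for !list_empty(&i_wb_list) */
    };

    static void mark_inode_writeback(struct demo_inode *inode)
    {
        struct demo_sb *sb = inode->sb;

        if (!inode->on_wb_list) {               /* cheap unlocked check */
            pthread_mutex_lock(&sb->list_lock);
            if (!inode->on_wb_list) {           /* re-check under the lock */
                inode->on_wb_list = true;
                sb->nr_writeback++;
            }
            pthread_mutex_unlock(&sb->list_lock);
        }
    }

    int main(void)
    {
        struct demo_sb sb = { .list_lock = PTHREAD_MUTEX_INITIALIZER };
        struct demo_inode inode = { .sb = &sb };

        mark_inode_writeback(&inode);
        mark_inode_writeback(&inode);   /* second call is a no-op */
        return sb.nr_writeback == 1 ? 0 : 1;
    }
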
1179 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1180 * furthest end of its superblock's dirty-inode list.
1182 * Before stamping the inode's ->dirtied_when, we check to see whether it is
1183 * already the most-recently-dirtied inode on the b_dirty list. If that is
1184 * the case then the inode must have been redirtied while it was being written
1187 static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1189 assert_spin_locked(&inode->i_lock);
1192 struct inode *tail;
1195 if (time_before(inode->dirtied_when, tail->dirtied_when))
1196 inode->dirtied_when = jiffies;
1198 inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1199 inode->i_state &= ~I_SYNC_QUEUED;
1202 static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1204 spin_lock(&inode->i_lock);
1205 redirty_tail_locked(inode, wb);
1206 spin_unlock(&inode->i_lock);
1210 * requeue inode for re-scanning after bdi->b_io list is exhausted.
1212 static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1214 inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1217 static void inode_sync_complete(struct inode *inode)
1219 inode->i_state &= ~I_SYNC;
1220 /* If inode is clean and unused, put it into LRU now... */
1221 inode_add_lru(inode);
1224 wake_up_bit(&inode->i_state, __I_SYNC);
1227 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1229 bool ret = time_after(inode->dirtied_when, t);
1237 ret = ret && time_before_eq(inode->dirtied_when, jiffies);
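
inode_dirtied_after() (lines 1227-1237) combines time_after() with a time_before_eq() guard so a dirtied_when that appears to be in the future, which can happen after jiffies wraps on 32-bit, still counts as expired (the kernel only needs the extra guard on 32-bit). The standalone sketch below reimplements the two wrap-safe comparison macros in the spirit of include/linux/jiffies.h and mirrors that check.

    #include <stdio.h>

    /* Wrap-safe comparisons in the spirit of include/linux/jiffies.h:
     * the subtraction is evaluated as a signed quantity. */
    #define time_after(a, b)        ((long)((b) - (a)) < 0)
    #define time_before_eq(a, b)    ((long)((a) - (b)) <= 0)

    /* Mirrors inode_dirtied_after(): dirtied after t, but never trust a
     * timestamp claiming to be in the future relative to "now". */
    static int dirtied_after(unsigned long dirtied_when, unsigned long t,
                             unsigned long now)
    {
        int ret = time_after(dirtied_when, t);

        return ret && time_before_eq(dirtied_when, now);
    }

    int main(void)
    {
        unsigned long now = 1000;

        printf("%d\n", dirtied_after(900, 800, now));   /* 1: newer than cutoff */
        printf("%d\n", dirtied_after(700, 800, now));   /* 0: older than cutoff */
        printf("%d\n", dirtied_after(5000, 800, now));  /* 0: bogus future stamp */
        return 0;
    }
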
1255 struct inode *inode;
1260 inode = wb_inode(delaying_queue->prev);
1261 if (inode_dirtied_after(inode, dirtied_before))
1263 list_move(&inode->i_io_list, &tmp);
1265 spin_lock(&inode->i_lock);
1266 inode->i_state |= I_SYNC_QUEUED;
1267 spin_unlock(&inode->i_lock);
1268 if (sb_is_blkdev_sb(inode->i_sb))
1270 if (sb && sb != inode->i_sb)
1272 sb = inode->i_sb;
1285 inode = wb_inode(pos);
1286 if (inode->i_sb == sb)
1287 list_move(&inode->i_io_list, dispatch_queue);
1323 static int write_inode(struct inode *inode, struct writeback_control *wbc)
1327 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1328 trace_writeback_write_inode_start(inode, wbc);
1329 ret = inode->i_sb->s_op->write_inode(inode, wbc);
1330 trace_writeback_write_inode(inode, wbc);
1337 * Wait for writeback on an inode to complete. Called with i_lock held.
1338 * Caller must make sure inode cannot go away when we drop i_lock.
1340 static void __inode_wait_for_writeback(struct inode *inode)
1341 __releases(inode->i_lock)
1342 __acquires(inode->i_lock)
1344 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1347 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1348 while (inode->i_state & I_SYNC) {
1349 spin_unlock(&inode->i_lock);
1352 spin_lock(&inode->i_lock);
1357 * Wait for writeback on an inode to complete. Caller must have inode pinned.
1359 void inode_wait_for_writeback(struct inode *inode)
1361 spin_lock(&inode->i_lock);
1362 __inode_wait_for_writeback(inode);
1363 spin_unlock(&inode->i_lock);
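
__inode_wait_for_writeback() (lines 1340-1352) sleeps on the __I_SYNC bit waitqueue, dropping i_lock around each sleep and retaking it before re-testing the flag; inode_wait_for_writeback() is the locking wrapper. A rough userspace analogue using a condition variable (rather than the kernel's bit_waitqueue/wake_up_bit machinery) is sketched below with invented names.

    #include <pthread.h>
    #include <stdbool.h>

    struct demo_inode {
        pthread_mutex_t i_lock;
        pthread_cond_t sync_done;
        bool i_sync;                /* stands in for I_SYNC in i_state */
    };

    /* Called with i_lock held; returns with i_lock held and i_sync clear.
     * pthread_cond_wait() drops the mutex while sleeping, which mirrors
     * the unlock/sleep/relock loop in __inode_wait_for_writeback(). */
    static void wait_for_writeback(struct demo_inode *inode)
    {
        while (inode->i_sync)
            pthread_cond_wait(&inode->sync_done, &inode->i_lock);
    }

    /* Completion side, cf. inode_sync_complete() + wake_up_bit(). */
    static void sync_complete(struct demo_inode *inode)
    {
        pthread_mutex_lock(&inode->i_lock);
        inode->i_sync = false;
        pthread_cond_broadcast(&inode->sync_done);
        pthread_mutex_unlock(&inode->i_lock);
    }

    int main(void)
    {
        struct demo_inode ino = {
            .i_lock = PTHREAD_MUTEX_INITIALIZER,
            .sync_done = PTHREAD_COND_INITIALIZER,
            .i_sync = false,
        };

        sync_complete(&ino);            /* clear + wakeup, here a no-op */
        pthread_mutex_lock(&ino.i_lock);
        wait_for_writeback(&ino);       /* returns immediately */
        pthread_mutex_unlock(&ino.i_lock);
        return 0;
    }
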
1368 * held and drops it. It is intended for callers not holding any inode reference
1369 * so once i_lock is dropped, inode can go away.
1371 static void inode_sleep_on_writeback(struct inode *inode)
1372 __releases(inode->i_lock)
1375 wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1379 sleep = inode->i_state & I_SYNC;
1380 spin_unlock(&inode->i_lock);
1387 * Find proper writeback list for the inode depending on its current state and
1394 static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1397 if (inode->i_state & I_FREEING)
1401 * Sync livelock prevention. Each inode is tagged and synced in one
1405 if ((inode->i_state & I_DIRTY) &&
1407 inode->dirtied_when = jiffies;
1412 * buffers. Skip this inode for now.
1414 redirty_tail_locked(inode, wb);
1418 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1425 requeue_io(inode, wb);
1429 * congestion. Delay the inode for some time to
1431 * retrying writeback of the dirty page/inode
1434 redirty_tail_locked(inode, wb);
1436 } else if (inode->i_state & I_DIRTY) {
1438 * Filesystems can dirty the inode during writeback operations,
1442 redirty_tail_locked(inode, wb);
1443 } else if (inode->i_state & I_DIRTY_TIME) {
1444 inode->dirtied_when = jiffies;
1445 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1446 inode->i_state &= ~I_SYNC_QUEUED;
1448 /* The inode is clean. Remove from writeback lists. */
1449 inode_io_list_del_locked(inode, wb);
1454 * Write out an inode and its dirty pages. Do not update the writeback list
1459 __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1461 struct address_space *mapping = inode->i_mapping;
1466 WARN_ON(!(inode->i_state & I_SYNC));
1468 trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1477 * inode metadata is written back correctly.
1486 * If the inode has dirty timestamps and we need to write them, call
1490 if ((inode->i_state & I_DIRTY_TIME) &&
1492 time_after(jiffies, inode->dirtied_time_when +
1494 trace_writeback_lazytime(inode);
1495 mark_inode_dirty_sync(inode);
1499 * Some filesystems may redirty the inode during the writeback
1503 spin_lock(&inode->i_lock);
1504 dirty = inode->i_state & I_DIRTY;
1505 inode->i_state &= ~dirty;
1511 * inode.
1521 inode->i_state |= I_DIRTY_PAGES;
1523 spin_unlock(&inode->i_lock);
1525 /* Don't write the inode if only I_DIRTY_PAGES was set */
1527 int err = write_inode(inode, wbc);
1531 trace_writeback_single_inode(inode, wbc, nr_to_write);
1536 * Write out an inode's dirty pages. Either the caller has an active reference
1537 * on the inode or the inode has I_WILL_FREE set.
1539 * This function is designed to be called for writing back one inode which
1543 static int writeback_single_inode(struct inode *inode,
1549 spin_lock(&inode->i_lock);
1550 if (!atomic_read(&inode->i_count))
1551 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1553 WARN_ON(inode->i_state & I_WILL_FREE);
1555 if (inode->i_state & I_SYNC) {
1560 * inode reference or inode has I_WILL_FREE set, it cannot go
1563 __inode_wait_for_writeback(inode);
1565 WARN_ON(inode->i_state & I_SYNC);
1567 * Skip inode if it is clean and we have no outstanding writeback in
1570 * parallel and if we move the inode, it could get skipped. So here we
1571 * make sure inode is on some writeback list and leave it there unless
1572 * we have completely cleaned the inode.
1574 if (!(inode->i_state & I_DIRTY_ALL) &&
1576 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1578 inode->i_state |= I_SYNC;
1579 wbc_attach_and_unlock_inode(wbc, inode);
1581 ret = __writeback_single_inode(inode, wbc);
1585 wb = inode_to_wb_and_lock_list(inode);
1586 spin_lock(&inode->i_lock);
1588 * If inode is clean, remove it from writeback lists. Otherwise don't
1591 if (!(inode->i_state & I_DIRTY_ALL))
1592 inode_io_list_del_locked(inode, wb);
1594 inode_sync_complete(inode);
1596 spin_unlock(&inode->i_lock);
1614 * write_cache_pages() <== called once for each inode
1637 * unlock and relock that for each inode it ends up doing
1659 struct inode *inode = wb_inode(wb->b_io.prev);
1663 if (inode->i_sb != sb) {
1670 redirty_tail(inode, wb);
1675 * The inode belongs to a different superblock.
1687 spin_lock(&inode->i_lock);
1688 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1689 redirty_tail_locked(inode, wb);
1690 spin_unlock(&inode->i_lock);
1693 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1695 * If this inode is locked for writeback and we are not
1700 * We'll have another go at writing back this inode
1703 spin_unlock(&inode->i_lock);
1704 requeue_io(inode, wb);
1705 trace_writeback_sb_inodes_requeue(inode);
1711 * We already requeued the inode if it had I_SYNC set and we
1715 if (inode->i_state & I_SYNC) {
1717 inode_sleep_on_writeback(inode);
1722 inode->i_state |= I_SYNC;
1723 wbc_attach_and_unlock_inode(&wbc, inode);
1730 * We use I_SYNC to pin the inode in memory. While it is set
1731 * evict_inode() will wait so the inode cannot be freed.
1733 __writeback_single_inode(inode, &wbc);
1755 * Requeue @inode if still dirty. Be careful as @inode may
1758 tmp_wb = inode_to_wb_and_lock_list(inode);
1759 spin_lock(&inode->i_lock);
1760 if (!(inode->i_state & I_DIRTY_ALL))
1762 requeue_inode(inode, tmp_wb, &wbc);
1763 inode_sync_complete(inode);
1764 spin_unlock(&inode->i_lock);
1792 struct inode *inode = wb_inode(wb->b_io.prev);
1793 struct super_block *sb = inode->i_sb;
1799 * requeue_io() to avoid busy retrying the inode/sb.
1801 redirty_tail(inode, wb);
1844 * Define "old": the first time one of an inode's pages is dirtied, we mark the
1845 * dirtying-time in the inode's address_space. So this periodic writeback code
1846 * just walks the superblock inode list, writing back any inodes which are
1862 struct inode *inode;
1931 * Nothing written. Wait for some inode to
1936 inode = wb_inode(wb->b_more_io.prev);
1937 spin_lock(&inode->i_lock);
1940 inode_sleep_on_writeback(inode);
2170 * happened on the file system is a dirtytime inode caused by an atime
2171 * update, we need this infrastructure below to make sure that inode
2214 * @inode: inode to mark
2217 * Mark an inode as dirty. Callers should use mark_inode_dirty or
2220 * Put the inode on the super block's dirty list.
2230 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2231 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
2232 * the kernel-internal blockdev inode represents the dirtying time of the
2235 * blockdev inode.
2237 void __mark_inode_dirty(struct inode *inode, int flags)
2239 struct super_block *sb = inode->i_sb;
2242 trace_writeback_mark_inode_dirty(inode, flags);
2246 * dirty the inode itself
2249 trace_writeback_dirty_inode_start(inode, flags);
2252 sb->s_op->dirty_inode(inode, flags);
2254 trace_writeback_dirty_inode(inode, flags);
2266 if (((inode->i_state & flags) == flags) ||
2267 (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2270 spin_lock(&inode->i_lock);
2271 if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2273 if ((inode->i_state & flags) != flags) {
2274 const int was_dirty = inode->i_state & I_DIRTY;
2276 inode_attach_wb(inode, NULL);
2279 inode->i_state &= ~I_DIRTY_TIME;
2280 inode->i_state |= flags;
2283 * If the inode is queued for writeback by flush worker, just
2285 * the inode it will place it on the appropriate superblock
2288 if (inode->i_state & I_SYNC_QUEUED)
2295 if (!S_ISBLK(inode->i_mode)) {
2296 if (inode_unhashed(inode))
2299 if (inode->i_state & I_FREEING)
2303 * If the inode was already on b_dirty/b_io/b_more_io, don't
2311 wb = locked_inode_to_wb_and_lock_list(inode);
2313 inode->dirtied_when = jiffies;
2315 inode->dirtied_time_when = jiffies;
2317 if (inode->i_state & I_DIRTY)
2322 wakeup_bdi = inode_io_list_move_locked(inode, wb,
2326 trace_writeback_dirty_inode_enqueue(inode);
2329 * If this is the first dirty inode for this bdi,
2341 spin_unlock(&inode->i_lock);
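
Filesystems normally reach __mark_inode_dirty() (lines 2237-2341) through the mark_inode_dirty()/mark_inode_dirty_sync() wrappers after changing in-core inode state. The fragment below is a hypothetical filesystem snippet, not taken from this file, showing the usual pattern of updating fields and then dirtying the inode.

    #include <linux/fs.h>

    /* Hypothetical example: grow a file and tell writeback about it.
     * mark_inode_dirty() expands to __mark_inode_dirty(inode, I_DIRTY). */
    static void demofs_update_size(struct inode *inode, loff_t newsize)
    {
        i_size_write(inode, newsize);
        inode->i_mtime = inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
    }
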
2382 * writeout started before we write it out. In which case, the inode
2387 struct inode *inode = list_first_entry(&sync_list, struct inode,
2389 struct address_space *mapping = inode->i_mapping;
2392 * Move each inode back to the wb list before we drop the lock
2395 * the inode from either list once the writeback tag is cleared.
2397 list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2409 spin_lock(&inode->i_lock);
2410 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2411 spin_unlock(&inode->i_lock);
2416 __iget(inode);
2417 spin_unlock(&inode->i_lock);
2429 iput(inode);
2512 * sync_inodes_sb - sync sb inode pages
2515 * This function writes and waits on any dirty inode belonging to this
2541 /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2552 * write_inode_now - write an inode to disk
2553 * @inode: inode to write to disk
2556 * This function commits an inode to disk immediately if it is dirty. This is
2559 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2561 int write_inode_now(struct inode *inode, int sync)
2570 if (!mapping_can_writeback(inode->i_mapping))
2574 return writeback_single_inode(inode, &wbc);
2579 * sync_inode - write an inode and its pages to disk.
2580 * @inode: the inode to sync
2583 * sync_inode() will write an inode and its pages to disk. It will also
2584 * correctly update the inode on its superblock's dirty inode lists and will
2585 * update inode->i_state.
2587 * The caller must have a ref on the inode.
2589 int sync_inode(struct inode *inode, struct writeback_control *wbc)
2591 return writeback_single_inode(inode, wbc);
2596 * sync_inode_metadata - write an inode to disk
2597 * @inode: the inode to sync
2600 * Write an inode to disk and adjust its dirty state after completion.
2602 * Note: only writes the actual inode, no associated data or other metadata.
2604 int sync_inode_metadata(struct inode *inode, int wait)
2611 return sync_inode(inode, &wbc);
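
sync_inode() and sync_inode_metadata() (lines 2589-2611) are thin wrappers around writeback_single_inode(); write_inode_now() (line 2561) is the heavier alternative that also writes dirty pages. As a closing illustration, a hypothetical caller could flush just the inode's metadata synchronously as below; the function and filesystem names are invented and data pages are assumed to have been written already.

    #include <linux/fs.h>
    #include <linux/writeback.h>

    /* Hypothetical: push only the inode itself out, synchronously. */
    static int demofs_flush_inode_metadata(struct inode *inode)
    {
        return sync_inode_metadata(inode, 1 /* wait */);
    }
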