Lines matching references to `ip` (struct xfs_inode *) in the XFS inode cache code, apparently fs/xfs/xfs_icache.c; the leading number on each line is its line number in that source file.

75 struct xfs_inode *ip;
81 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
83 if (inode_init_always(mp->m_super, VFS_I(ip))) {
84 kmem_cache_free(xfs_inode_cache, ip);
89 VFS_I(ip)->i_mode = 0;
90 VFS_I(ip)->i_state = 0;
91 mapping_set_large_folios(VFS_I(ip)->i_mapping);
94 ASSERT(atomic_read(&ip->i_pincount) == 0);
95 ASSERT(ip->i_ino == 0);
98 ip->i_ino = ino;
99 ip->i_mount = mp;
100 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
101 ip->i_cowfp = NULL;
102 memset(&ip->i_af, 0, sizeof(ip->i_af));
103 ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
104 memset(&ip->i_df, 0, sizeof(ip->i_df));
105 ip->i_flags = 0;
106 ip->i_delayed_blks = 0;
107 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
108 ip->i_nblocks = 0;
109 ip->i_forkoff = 0;
110 ip->i_sick = 0;
111 ip->i_checked = 0;
112 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
113 INIT_LIST_HEAD(&ip->i_ioend_list);
114 spin_lock_init(&ip->i_ioend_lock);
115 ip->i_next_unlinked = NULLAGINO;
116 ip->i_prev_unlinked = 0;
118 return ip;
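
The matches above (source lines 75-118) are the inode allocation path, xfs_inode_alloc(): the XFS inode comes out of its own slab, the embedded VFS inode is reinitialized, and every XFS-private field is reset because slab memory may carry state from a prior user. A minimal sketch of that allocate-then-reinitialize shape, condensed from the listing (the function name is hypothetical, not the exact xfs_icache.c code):

/* Sketch only: condensed from the matches above, not the full function. */
static struct xfs_inode *
example_inode_alloc(struct xfs_mount *mp, xfs_ino_t ino)
{
	struct xfs_inode	*ip;

	/* __GFP_NOFAIL means this never returns NULL; it loops instead. */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache,
			GFP_KERNEL | __GFP_NOFAIL);

	/* The VFS portion must be reinitialized on every (re)use. */
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* Reset identity and state inherited from a previous occupant. */
	ip->i_ino = ino;
	ip->i_mount = mp;
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	return ip;
}
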
126 struct xfs_inode *ip = XFS_I(inode);
128 switch (VFS_I(ip)->i_mode & S_IFMT) {
132 xfs_idestroy_fork(&ip->i_df);
136 xfs_ifork_zap_attr(ip);
138 if (ip->i_cowfp) {
139 xfs_idestroy_fork(ip->i_cowfp);
140 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
142 if (ip->i_itemp) {
144 &ip->i_itemp->ili_item.li_flags));
145 xfs_inode_item_destroy(ip);
146 ip->i_itemp = NULL;
149 kmem_cache_free(xfs_inode_cache, ip);
154 struct xfs_inode *ip)
157 ASSERT(atomic_read(&ip->i_pincount) == 0);
158 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
159 XFS_STATS_DEC(ip->i_mount, vn_active);
161 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
166 struct xfs_inode *ip)
168 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
173 * free state. The ip->i_flags_lock provides the barrier against lookup
176 spin_lock(&ip->i_flags_lock);
177 ip->i_flags = XFS_IRECLAIM;
178 ip->i_ino = 0;
179 spin_unlock(&ip->i_flags_lock);
181 __xfs_inode_free(ip);
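
xfs_inode_free() (source lines 166-181) never frees the structure directly: lookups walk the per-AG radix tree under rcu_read_lock(), so the memory must stay valid until a grace period passes. The XFS_IRECLAIM/i_ino = 0 store under i_flags_lock is what a racing lookup checks to detect a dead inode. The idiom, roughly (helper names are illustrative):

/* Sketch of the RCU-deferred free visible above. */
static void
example_free_callback(struct rcu_head *head)
{
	struct inode	*inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(xfs_inode_cache, XFS_I(inode));
}

static void
example_inode_free(struct xfs_inode *ip)
{
	/* Mark the inode dead so concurrent lookups back off... */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	/* ...and only free once every current RCU reader is done. */
	call_rcu(&VFS_I(ip)->i_rcu, example_free_callback);
}
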
337 struct xfs_inode *ip) __releases(&ip->i_flags_lock)
339 struct xfs_mount *mp = ip->i_mount;
340 struct inode *inode = VFS_I(ip);
343 trace_xfs_iget_recycle(ip);
345 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
354 ip->i_flags |= XFS_IRECLAIM;
356 spin_unlock(&ip->i_flags_lock);
361 xfs_iunlock(ip, XFS_ILOCK_EXCL);
368 spin_lock(&ip->i_flags_lock);
369 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
370 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
371 spin_unlock(&ip->i_flags_lock);
374 trace_xfs_iget_recycle_fail(ip);
379 spin_lock(&ip->i_flags_lock);
386 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
387 ip->i_flags |= XFS_INEW;
388 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
391 spin_unlock(&ip->i_flags_lock);
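
xfs_iget_recycle() (source lines 337-391) moves a reclaimable inode back into service: claim it with XFS_IRECLAIM, reinitialize the VFS inode, then either restore it for reclaim on failure or set XFS_INEW and clear the per-AG reclaim tag on success. Condensed into a sketch (error handling simplified, locking as in the listing):

/* Sketch of the recycle state transitions. */
static int
example_recycle(struct xfs_perag *pag, struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_IRECLAIM;		/* claim it for recycling */
	spin_unlock(&ip->i_flags_lock);

	/* Re-run VFS init; the inode was torn down when it went idle. */
	error = inode_init_always(mp->m_super, VFS_I(ip));
	if (error) {
		/* Failed: put it back so reclaim can retry later. */
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		spin_unlock(&ip->i_flags_lock);
		return error;
	}

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;		/* live again, still in setup */
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}
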
409 struct xfs_inode *ip,
414 if (VFS_I(ip)->i_mode != 0) {
415 xfs_warn(ip->i_mount,
417 ip->i_ino, VFS_I(ip)->i_mode);
421 if (ip->i_nblocks != 0) {
422 xfs_warn(ip->i_mount,
424 ip->i_ino);
431 if (VFS_I(ip)->i_mode == 0)
484 struct xfs_inode *ip,
489 struct inode *inode = VFS_I(ip);
490 struct xfs_mount *mp = ip->i_mount;
500 spin_lock(&ip->i_flags_lock);
501 if (ip->i_ino != ino)
522 if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
525 if (ip->i_flags & XFS_NEED_INACTIVE) {
527 if (VFS_I(ip)->i_nlink == 0) {
538 error = xfs_iget_check_free_state(ip, flags);
544 (ip->i_flags & XFS_IRECLAIMABLE))
548 if (ip->i_flags & XFS_IRECLAIMABLE) {
550 error = xfs_iget_recycle(pag, ip);
561 spin_unlock(&ip->i_flags_lock);
563 trace_xfs_iget_hit(ip);
567 xfs_ilock(ip, lock_flags);
570 xfs_iflags_clear(ip, XFS_ISTALE);
576 trace_xfs_iget_skip(ip);
580 spin_unlock(&ip->i_flags_lock);
585 spin_unlock(&ip->i_flags_lock);
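
In xfs_iget_cache_hit() (source lines 484-585) everything is decided under ip->i_flags_lock. The ip->i_ino != ino recheck matters because the RCU lookup may hand back a pointer whose slab memory was freed and recycled for a different inode; only the identity check under the lock proves it is still the inode that was asked for. Roughly (the -EAGAIN retry convention is an assumption for the sketch):

/* Sketch of the revalidation done under i_flags_lock after an RCU lookup. */
static int
example_cache_hit(struct xfs_inode *ip, xfs_ino_t ino)
{
	int	error = -EAGAIN;	/* "skip and retry the lookup" */

	spin_lock(&ip->i_flags_lock);

	/* Slab memory may have been recycled: recheck the inode number. */
	if (ip->i_ino != ino)
		goto out_skip;

	/* Inodes in a transient state are skipped, not waited on. */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	error = 0;
out_skip:
	spin_unlock(&ip->i_flags_lock);
	return error;
}
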
606 struct xfs_inode *ip;
611 ip = xfs_inode_alloc(mp, ino);
612 if (!ip)
615 error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
631 VFS_I(ip)->i_generation = get_random_u32();
635 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
639 error = xfs_inode_from_disk(ip,
640 xfs_buf_offset(bp, ip->i_imap.im_boffset));
649 trace_xfs_iget_miss(ip);
655 error = xfs_iget_check_free_state(ip, flags);
675 if (!xfs_ilock_nowait(ip, lock_flags))
684 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
690 d_mark_dontcache(VFS_I(ip));
691 ip->i_udquot = NULL;
692 ip->i_gdquot = NULL;
693 ip->i_pdquot = NULL;
694 xfs_iflags_set(ip, iflags);
698 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
708 *ipp = ip;
715 xfs_iunlock(ip, lock_flags);
717 __destroy_inode(VFS_I(ip));
718 xfs_inode_free(ip);
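
xfs_iget_cache_miss() (source lines 606-718) reads the inode from disk and then publishes it in the per-AG radix tree; -EEXIST from radix_tree_insert() means another thread won the race and the caller retries the lookup. Note the teardown order on failure: __destroy_inode() for the VFS part, then xfs_inode_free() for the XFS part. The publish step, roughly (the GFP flag and helper name are assumptions):

/* Sketch: inserting a freshly read inode into the per-AG cache. */
static int
example_publish(struct xfs_perag *pag, xfs_agino_t agino,
		struct xfs_inode *ip)
{
	int	error;

	/* Preload node memory so the insert under the lock cannot sleep. */
	error = radix_tree_preload(GFP_KERNEL);
	if (error)
		return error;

	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	/* -EEXIST: someone else inserted it first; caller should retry. */
	return error == -EEXIST ? -EAGAIN : error;
}
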
743 struct xfs_inode *ip;
763 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
765 if (ip) {
766 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
777 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
784 *ipp = ip;
791 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
792 xfs_setup_existing_inode(ip);
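
xfs_iget() itself (source lines 743-792) ties the two paths together: RCU-protected lookup, hit or miss handler, and a retry when either side reports a race. The shape, reusing the sketches above (example_cache_miss is a hypothetical stand-in for the miss path):

/* Sketch of the lookup/retry loop. */
static int
example_iget(struct xfs_mount *mp, struct xfs_perag *pag, xfs_ino_t ino,
		struct xfs_inode **ipp)
{
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	struct xfs_inode	*ip;
	int			error;

again:
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (ip) {
		/* Validate under i_flags_lock while still inside RCU. */
		error = example_cache_hit(ip, ino);
		rcu_read_unlock();
	} else {
		rcu_read_unlock();
		error = example_cache_miss(mp, pag, ino, &ip);
	}
	if (error == -EAGAIN)
		goto again;		/* raced with reclaim or recycle */
	if (!error)
		*ipp = ip;
	return error;
}
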
824 struct xfs_inode *ip,
829 spin_lock(&ip->i_flags_lock);
830 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
831 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
833 spin_unlock(&ip->i_flags_lock);
838 if (ip->i_sick &&
840 spin_unlock(&ip->i_flags_lock);
844 __xfs_iflags_set(ip, XFS_IRECLAIM);
845 spin_unlock(&ip->i_flags_lock);
863 struct xfs_inode *ip,
866 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
868 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
870 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
880 if (xlog_is_shutdown(ip->i_mount->m_log)) {
881 xfs_iunpin_wait(ip);
882 xfs_iflush_shutdown_abort(ip);
885 if (xfs_ipincount(ip))
887 if (!xfs_inode_clean(ip))
890 xfs_iflags_clear(ip, XFS_IFLUSHING);
892 trace_xfs_inode_reclaiming(ip);
904 spin_lock(&ip->i_flags_lock);
905 ip->i_flags = XFS_IRECLAIM;
906 ip->i_ino = 0;
907 ip->i_sick = 0;
908 ip->i_checked = 0;
909 spin_unlock(&ip->i_flags_lock);
911 ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
912 xfs_iunlock(ip, XFS_ILOCK_EXCL);
914 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
924 XFS_INO_TO_AGINO(ip->i_mount, ino)))
937 xfs_ilock(ip, XFS_ILOCK_EXCL);
938 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
939 xfs_iunlock(ip, XFS_ILOCK_EXCL);
940 ASSERT(xfs_inode_clean(ip));
942 __xfs_inode_free(ip);
946 xfs_iflags_clear(ip, XFS_IFLUSHING);
948 xfs_iunlock(ip, XFS_ILOCK_EXCL);
950 xfs_iflags_clear(ip, XFS_IRECLAIM);
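
xfs_reclaim_inode() (source lines 863-950) is built from trylocks: any contention means "skip it, the next scan will catch it". Once committed, ip->i_ino is zeroed under i_flags_lock before ILOCK is dropped, which is why the radix_tree_delete index is saved in the local `ino` first. The gate at the top, roughly (helper name is illustrative):

/* Sketch of the no-waiting reclaim gate: skip on any contention. */
static bool
example_reclaim_gate(struct xfs_inode *ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return false;			/* locked elsewhere: skip */
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING)) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return false;			/* already being flushed */
	}
	if (xfs_ipincount(ip) || !xfs_inode_clean(ip)) {
		xfs_iflags_clear(ip, XFS_IFLUSHING);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return false;			/* pinned or dirty: skip */
	}
	return true;				/* safe to reclaim */
}
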
1029 struct xfs_inode *ip,
1033 !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1037 !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1041 ip->i_projid != icw->icw_prid)
1053 struct xfs_inode *ip,
1057 uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1061 gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1065 ip->i_projid == icw->icw_prid)
1072 * Is this inode @ip eligible for eof/cow block reclamation, given some
1078 struct xfs_inode *ip,
1087 match = xfs_icwalk_match_id_union(ip, icw);
1089 match = xfs_icwalk_match_id(ip, icw);
1095 XFS_ISIZE(ip) < icw->icw_min_file_size)
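
Source lines 1029-1095 implement two match policies for inode-cache walks: xfs_icwalk_match_id() requires every requested criterion to hold (AND), while xfs_icwalk_match_id_union() accepts any one of them (OR). In the real code each criterion is also gated on whether the caller asked for it; that gating is elided in this distillation:

/* Hypothetical distillation of the AND vs. OR match policies. */
static bool
example_icwalk_match(bool is_union, bool uid_ok, bool gid_ok, bool prid_ok)
{
	if (is_union)
		return uid_ok || gid_ok || prid_ok;	/* any criterion */
	return uid_ok && gid_ok && prid_ok;		/* all criteria */
}
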
1120 struct xfs_inode *ip,
1128 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1135 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1138 if (!xfs_icwalk_match(ip, icw))
1145 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1152 if (xfs_can_free_eofblocks(ip, false))
1153 return xfs_free_eofblocks(ip);
1156 trace_xfs_inode_free_eofblocks_invalid(ip);
1157 xfs_inode_clear_eofblocks_tag(ip);
1163 struct xfs_inode *ip,
1166 struct xfs_mount *mp = ip->i_mount;
1175 if (ip->i_flags & iflag)
1177 spin_lock(&ip->i_flags_lock);
1178 ip->i_flags |= iflag;
1179 spin_unlock(&ip->i_flags_lock);
1181 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1184 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1193 xfs_inode_t *ip)
1195 trace_xfs_inode_set_eofblocks_tag(ip);
1196 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1201 struct xfs_inode *ip,
1204 struct xfs_mount *mp = ip->i_mount;
1210 spin_lock(&ip->i_flags_lock);
1211 ip->i_flags &= ~iflag;
1212 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1213 spin_unlock(&ip->i_flags_lock);
1218 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1221 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1230 xfs_inode_t *ip)
1232 trace_xfs_inode_clear_eofblocks_tag(ip);
1233 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
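
xfs_blockgc_set_iflag()/xfs_blockgc_clear_iflag() (source lines 1163-1233) keep a per-inode flag and a per-AG radix tree tag in sync, so the background blockgc worker only scans AGs that actually contain tagged inodes; per line 1212 above, the tag is only dropped once both XFS_IEOFBLOCKS and XFS_ICOWBLOCKS are clear. The set side, roughly (helper name is illustrative):

/* Sketch of mirroring a per-inode flag into a per-AG radix tree tag. */
static void
example_set_iflag(struct xfs_inode *ip, unsigned long iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (ip->i_flags & iflag)
		return;				/* unlocked fast path */

	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	/* Tag the AG so the background worker knows to scan it. */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
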
1243 struct xfs_inode *ip)
1249 if (!xfs_inode_has_cow_data(ip)) {
1250 trace_xfs_inode_free_cowblocks_invalid(ip);
1251 xfs_inode_clear_cowblocks_tag(ip);
1259 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1260 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1261 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1262 atomic_read(&VFS_I(ip)->i_dio_count))
1282 struct xfs_inode *ip,
1291 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1294 if (!xfs_prep_free_cowblocks(ip))
1297 if (!xfs_icwalk_match(ip, icw))
1305 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1312 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1323 if (xfs_prep_free_cowblocks(ip))
1324 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
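
Freeing COW blocks (source lines 1282-1324) needs both IOLOCK and MMAPLOCK, taken in that order, and only via trylock so a busy inode is deferred rather than stalled on; xfs_prep_free_cowblocks() is then rechecked because the state may have changed while the locks were being acquired. The lock pair as a sketch:

/* Sketch of the ordered trylock pair used before cancelling COW blocks. */
static bool
example_trylock_both(struct xfs_inode *ip)
{
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
		return false;
	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		return false;
	}
	return true;
}
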
1330 xfs_inode_t *ip)
1332 trace_xfs_inode_set_cowblocks_tag(ip);
1333 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1338 xfs_inode_t *ip)
1340 trace_xfs_inode_clear_cowblocks_tag(ip);
1341 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
1383 * Decide if the given @ip is eligible for garbage collection of speculative
1389 struct xfs_inode *ip)
1391 struct inode *inode = VFS_I(ip);
1396 spin_lock(&ip->i_flags_lock);
1397 if (!ip->i_ino)
1400 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1402 spin_unlock(&ip->i_flags_lock);
1405 if (xfs_is_shutdown(ip->i_mount))
1416 spin_unlock(&ip->i_flags_lock);
1423 struct xfs_inode *ip,
1429 error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
1433 error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
1436 xfs_iunlock(ip, lockflags);
1437 xfs_irele(ip);
1565 struct xfs_inode *ip,
1568 return xfs_blockgc_free_dquots(ip->i_mount,
1569 xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1570 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1571 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
1592 struct xfs_inode *ip,
1597 return xfs_blockgc_igrab(ip);
1599 return xfs_reclaim_igrab(ip, icw);
1612 struct xfs_inode *ip,
1620 error = xfs_blockgc_scan_inode(ip, icw);
1623 xfs_reclaim_inode(ip, pag);
1675 struct xfs_inode *ip = batch[i];
1677 if (done || !xfs_icwalk_igrab(goal, ip, icw))
1692 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1694 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1695 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
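
The batched walk (source lines 1675-1695) processes inodes one gang lookup at a time and advances the cursor past the last inode it saw; the `first_index <` comparison detects agino wraparound, which marks the AG as fully scanned. As a helper (name hypothetical):

/* Sketch: advance the per-AG scan cursor; returns true once the AG wraps. */
static bool
example_advance_cursor(struct xfs_mount *mp, struct xfs_inode *ip,
		xfs_agino_t *first_index)
{
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	return *first_index < XFS_INO_TO_AGINO(mp, ip->i_ino);
}
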
1770 struct xfs_inode *ip,
1773 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1777 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1781 xfs_warn(ip->i_mount,
1783 ip->i_ino,
1790 #define xfs_check_delalloc(ip, whichfork) do { } while (0)
1796 struct xfs_inode *ip)
1798 struct xfs_mount *mp = ip->i_mount;
1801 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1802 xfs_check_delalloc(ip, XFS_DATA_FORK);
1803 xfs_check_delalloc(ip, XFS_COW_FORK);
1807 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1809 spin_lock(&ip->i_flags_lock);
1811 trace_xfs_inode_set_reclaimable(ip);
1812 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1813 ip->i_flags |= XFS_IRECLAIMABLE;
1814 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1817 spin_unlock(&ip->i_flags_lock);
1829 struct xfs_inode *ip)
1833 trace_xfs_inode_inactivating(ip);
1834 error = xfs_inactive(ip);
1835 xfs_inodegc_set_reclaimable(ip);
1847 struct xfs_inode *ip, *n;
1872 ip = llist_entry(node, struct xfs_inode, i_gclist);
1876 llist_for_each_entry_safe(ip, n, node, i_gclist) {
1879 xfs_iflags_set(ip, XFS_INACTIVATING);
1880 error = xfs_inodegc_inactivate(ip);
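
The inodegc worker (source lines 1847-1880) drains a lock-free llist: llist_del_all() atomically detaches everything queued so far, and the _safe iterator is required because each inode may be freed during the walk. The drain shape (helper name is illustrative):

/* Sketch of draining the lock-free inactivation list. */
static void
example_inodegc_drain(struct llist_head *list)
{
	struct llist_node	*node = llist_del_all(list);
	struct xfs_inode	*ip, *n;

	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		xfs_iflags_set(ip, XFS_INACTIVATING);
		/* inactivate, then hand back to reclaim (elided here) */
	}
}
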
1967 struct xfs_inode *ip)
1969 struct xfs_mount *mp = ip->i_mount;
1971 if (!XFS_IS_REALTIME_INODE(ip))
1982 # define xfs_inodegc_want_queue_rt_file(ip) (false)
1994 struct xfs_inode *ip,
1997 struct xfs_mount *mp = ip->i_mount;
2007 if (xfs_inodegc_want_queue_rt_file(ip))
2010 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
2013 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
2016 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2039 struct xfs_inode *ip,
2062 struct xfs_inode *ip)
2064 struct xfs_mount *mp = ip->i_mount;
2071 trace_xfs_inode_set_need_inactive(ip);
2072 spin_lock(&ip->i_flags_lock);
2073 ip->i_flags |= XFS_NEED_INACTIVE;
2074 spin_unlock(&ip->i_flags_lock);
2078 llist_add(&ip->i_gclist, &gc->list);
2101 if (xfs_inodegc_want_queue_work(ip, items))
2109 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
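
Queueing (source lines 1994-2109) is the producer side: mark the inode XFS_NEED_INACTIVE under i_flags_lock, push it onto the per-CPU llist, then decide from the heuristics above (item count, realtime files, dquots near enforcement, shrinker hits) whether to kick or flush the worker. A minimal producer; struct example_gc is a hypothetical stand-in for the real per-CPU context, and the unconditional queue_work() simplifies the real heuristics:

/* Sketch of the producer side of inodegc. */
struct example_gc {
	struct llist_head	list;
	struct work_struct	work;
};

static void
example_inodegc_queue(struct xfs_inode *ip, struct example_gc *gc)
{
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	llist_add(&ip->i_gclist, &gc->list);	  /* lock-free enqueue */
	queue_work(system_unbound_wq, &gc->work); /* simplified kick */
}
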
2127 struct xfs_inode *ip)
2129 struct xfs_mount *mp = ip->i_mount;
2137 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2139 need_inactive = xfs_inode_needs_inactive(ip);
2141 xfs_inodegc_queue(ip);
2146 xfs_qm_dqdetach(ip);
2147 xfs_inodegc_set_reclaimable(ip);
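
Finally, the mark-reclaimable path (source lines 2127-2147) is the fork in the road when the VFS drops an inode: if it needs inactivation it goes to the inodegc queue, otherwise the dquots are detached and it is handed straight to reclaim. The decision, reusing the producer sketch above:

/* Sketch of the destroy-time routing. */
static void
example_mark_reclaimable(struct xfs_inode *ip, struct example_gc *gc)
{
	if (xfs_inode_needs_inactive(ip)) {
		example_inodegc_queue(ip, gc);	/* defer to the worker */
	} else {
		xfs_qm_dqdetach(ip);		/* no inactivation needed */
		xfs_inodegc_set_reclaimable(ip);
	}
}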