Lines matching refs:ip — every line in the file that references the identifier `ip`, each prefixed with its source line number. Judging by the functions the matches fall in (xfs_inode_alloc, __xfs_inode_free, xfs_iget_cache_hit/_miss, the reclaim walker, the eofblocks/cowblocks tagging), this is the XFS inode cache, fs/xfs/xfs_icache.c in the Linux kernel, apparently around v5.10 (XFS_IFLUSHING and d_mark_dontcache are both present).
37 struct xfs_inode *ip;
43 ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);
45 if (inode_init_always(mp->m_super, VFS_I(ip))) {
46 kmem_cache_free(xfs_inode_zone, ip);
51 VFS_I(ip)->i_mode = 0;
52 VFS_I(ip)->i_state = 0;
55 ASSERT(atomic_read(&ip->i_pincount) == 0);
56 ASSERT(ip->i_ino == 0);
59 ip->i_ino = ino;
60 ip->i_mount = mp;
61 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
62 ip->i_afp = NULL;
63 ip->i_cowfp = NULL;
64 memset(&ip->i_df, 0, sizeof(ip->i_df));
65 ip->i_flags = 0;
66 ip->i_delayed_blks = 0;
67 memset(&ip->i_d, 0, sizeof(ip->i_d));
68 ip->i_sick = 0;
69 ip->i_checked = 0;
70 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
71 INIT_LIST_HEAD(&ip->i_ioend_list);
72 spin_lock_init(&ip->i_ioend_lock);
74 return ip;
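xfs_inode_alloc() above follows the standard slab-cache discipline: kmem_cache_alloc() may hand back a recycled object, so every field a previous owner could have dirtied is reinitialized by hand (lines 59-72), and __GFP_NOFAIL means the allocation itself cannot fail, which is why only inode_init_always() is error-checked. A minimal userspace sketch of the same reuse discipline, with a one-off free list standing in for the slab cache (struct obj, alloc_obj and free_obj are invented for illustration):

#include <stdlib.h>

/* Invented example type: an object whose state survives recycling. */
struct obj {
        struct obj      *free_next;     /* free-list link */
        unsigned long   ino;
        unsigned int    flags;
};

static struct obj *obj_cache;           /* single-threaded free list */

static struct obj *alloc_obj(unsigned long ino)
{
        struct obj *o = obj_cache;

        if (o)
                obj_cache = o->free_next;  /* recycled: fields are stale */
        else
                o = malloc(sizeof(*o));
        if (!o)
                return NULL;

        /* Like xfs_inode_alloc(): reinitialize every field by hand. */
        o->ino = ino;
        o->flags = 0;
        return o;
}

static void free_obj(struct obj *o)
{
        o->free_next = obj_cache;       /* no memset: next alloc reinits */
        obj_cache = o;
}

int main(void)
{
        struct obj *o = alloc_obj(42);

        if (!o)
                return 1;
        free_obj(o);
        o = alloc_obj(43);              /* same memory, fresh contents */
        return o->ino == 43 ? 0 : 1;
}

Leaving free_obj() cheap and pushing all reinitialization onto the allocation path mirrors what the slab allocator does, and is why the allocation side above is so verbose.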
82 struct xfs_inode *ip = XFS_I(inode);
84 switch (VFS_I(ip)->i_mode & S_IFMT) {
88 xfs_idestroy_fork(&ip->i_df);
92 if (ip->i_afp) {
93 xfs_idestroy_fork(ip->i_afp);
94 kmem_cache_free(xfs_ifork_zone, ip->i_afp);
96 if (ip->i_cowfp) {
97 xfs_idestroy_fork(ip->i_cowfp);
98 kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
100 if (ip->i_itemp) {
102 &ip->i_itemp->ili_item.li_flags));
103 xfs_inode_item_destroy(ip);
104 ip->i_itemp = NULL;
107 kmem_cache_free(xfs_inode_zone, ip);
112 struct xfs_inode *ip)
115 ASSERT(atomic_read(&ip->i_pincount) == 0);
116 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
117 XFS_STATS_DEC(ip->i_mount, vn_active);
119 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
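__xfs_inode_free() (lines 112-119) never frees the memory directly: it queues the embedded rcu_head with call_rcu(), so the inode is reclaimed only after all in-flight RCU-protected radix-tree lookups have finished, and the callback (xfs_inode_free_callback, lines 82-107) recovers the inode from the bare rcu_head pointer. A compilable sketch of that container_of() recovery step; struct thing and its members are invented, and invoking the callback directly stands in for the grace-period delay a real call_rcu() provides:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head {                       /* stand-in for the kernel's */
        void (*func)(struct rcu_head *);
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct thing {                          /* invented; the inode uses i_rcu */
        int             payload;
        struct rcu_head rcu;
};

/* The callback sees only the rcu_head; recover the enclosing object. */
static void thing_free_cb(struct rcu_head *head)
{
        struct thing *t = container_of(head, struct thing, rcu);

        printf("freeing payload %d\n", t->payload);
        free(t);
}

int main(void)
{
        struct thing *t = malloc(sizeof(*t));

        if (!t)
                return 1;
        t->payload = 42;
        t->rcu.func = thing_free_cb;
        /* call_rcu() would defer this until a grace period elapses;
         * calling it directly keeps the sketch self-contained. */
        t->rcu.func(&t->rcu);
        return 0;
}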
124 struct xfs_inode *ip)
126 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
131 * free state. The ip->i_flags_lock provides the barrier against lookup
134 spin_lock(&ip->i_flags_lock);
135 ip->i_flags = XFS_IRECLAIM;
136 ip->i_ino = 0;
137 spin_unlock(&ip->i_flags_lock);
139 __xfs_inode_free(ip);
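Before xfs_inode_free() hands the inode to RCU it poisons its identity under ip->i_flags_lock: XFS_IRECLAIM is set and i_ino is zeroed, so a concurrent lookup that raced to the same pointer will take the same lock, see that the inode number no longer matches, and back off — precisely the ip->i_ino != ino recheck at line 350 in xfs_iget_cache_hit() below. A pthreads sketch of the invalidate-under-lock handshake (names invented):

#include <pthread.h>
#include <stdbool.h>

struct obj {                            /* invented */
        pthread_spinlock_t      lock;
        unsigned long           ino;    /* 0 means "being torn down" */
};

/* Freeing side: poison the identity before the memory goes away. */
static void obj_kill(struct obj *o)
{
        pthread_spin_lock(&o->lock);
        o->ino = 0;
        pthread_spin_unlock(&o->lock);
        /* ...defer the actual free (RCU, in the kernel)... */
}

/* Lookup side: validate identity under the same lock, or back off. */
static bool obj_grab(struct obj *o, unsigned long expected_ino)
{
        bool ok;

        pthread_spin_lock(&o->lock);
        ok = (o->ino == expected_ino);
        pthread_spin_unlock(&o->lock);
        return ok;
}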
207 struct xfs_inode *ip)
209 struct xfs_mount *mp = ip->i_mount;
212 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
214 spin_lock(&ip->i_flags_lock);
216 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
219 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
221 spin_unlock(&ip->i_flags_lock);
239 struct xfs_inode *ip)
241 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
242 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
246 if (!xfs_iflags_test(ip, XFS_INEW))
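Lines 239-246 are the INEW wait: a thread that finds XFS_INEW set parks itself on a bit waitqueue keyed by __XFS_INEW_BIT, and whoever finishes (or aborts) initialization clears the flag and calls wake_up_bit(), as at lines 416-419 below. A userspace analogue using a condition variable in place of the kernel's bit waitqueues (all names invented):

#include <pthread.h>

#define FLAG_NEW        0x1

struct obj {                            /* invented */
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        unsigned int    flags;
};

/* Waiter: sleep until the "still initializing" bit clears. */
static void obj_wait_ready(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        while (o->flags & FLAG_NEW)
                pthread_cond_wait(&o->cond, &o->lock);
        pthread_mutex_unlock(&o->lock);
}

/* Initializer: clear the bit, then wake everyone parked on it. */
static void obj_finish_init(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        o->flags &= ~FLAG_NEW;
        pthread_cond_broadcast(&o->cond);
        pthread_mutex_unlock(&o->lock);
}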
299 struct xfs_inode *ip,
304 if (VFS_I(ip)->i_mode != 0) {
305 xfs_warn(ip->i_mount,
307 ip->i_ino, VFS_I(ip)->i_mode);
311 if (ip->i_d.di_nblocks != 0) {
312 xfs_warn(ip->i_mount,
314 ip->i_ino);
321 if (VFS_I(ip)->i_mode == 0)
333 struct xfs_inode *ip,
338 struct inode *inode = VFS_I(ip);
339 struct xfs_mount *mp = ip->i_mount;
349 spin_lock(&ip->i_flags_lock);
350 if (ip->i_ino != ino) {
351 trace_xfs_iget_skip(ip);
368 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
369 trace_xfs_iget_skip(ip);
379 error = xfs_iget_check_free_state(ip, flags);
387 if (ip->i_flags & XFS_IRECLAIMABLE) {
388 trace_xfs_iget_reclaim(ip);
401 ip->i_flags |= XFS_IRECLAIM;
403 spin_unlock(&ip->i_flags_lock);
415 spin_lock(&ip->i_flags_lock);
416 wake = !!__xfs_iflags_test(ip, XFS_INEW);
417 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
419 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
420 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
421 trace_xfs_iget_reclaim_fail(ip);
426 spin_lock(&ip->i_flags_lock);
433 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
434 ip->i_flags |= XFS_INEW;
435 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
437 ip->i_sick = 0;
438 ip->i_checked = 0;
440 spin_unlock(&ip->i_flags_lock);
445 trace_xfs_iget_skip(ip);
451 spin_unlock(&ip->i_flags_lock);
453 trace_xfs_iget_hit(ip);
457 xfs_ilock(ip, lock_flags);
460 xfs_iflags_clear(ip, XFS_ISTALE);
466 spin_unlock(&ip->i_flags_lock);
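The cache-hit path above (lines 333-466) is a revalidation dance: having found a pointer under RCU, it retakes i_flags_lock and rechecks that the inode still carries the expected number, is not mid-initialization (XFS_INEW) or mid-teardown (XFS_IRECLAIM), and resurrects XFS_IRECLAIMABLE inodes by reinitializing them and clearing the reclaim tag; transient states make the caller retry the whole lookup. A sketch of the recheck step, using -EAGAIN as the "retry the lookup" signal (names invented):

#include <errno.h>
#include <pthread.h>

#define FLAG_NEW        0x1
#define FLAG_RECLAIM    0x2

struct obj {                            /* invented */
        pthread_spinlock_t      lock;
        unsigned long           ino;
        unsigned int            flags;
        unsigned int            refcount;
};

/* Revalidate a pointer found in the cache; -EAGAIN means "retry". */
static int cache_hit(struct obj *o, unsigned long ino)
{
        pthread_spin_lock(&o->lock);
        if (o->ino != ino)                      /* freed and recycled */
                goto out_again;
        if (o->flags & (FLAG_NEW | FLAG_RECLAIM))
                goto out_again;                 /* transient state */
        o->refcount++;                          /* stand-in for igrab() */
        pthread_spin_unlock(&o->lock);
        return 0;

out_again:
        pthread_spin_unlock(&o->lock);
        return -EAGAIN;
}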
482 struct xfs_inode *ip;
487 ip = xfs_inode_alloc(mp, ino);
488 if (!ip)
491 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
507 VFS_I(ip)->i_generation = prandom_u32();
512 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
516 error = xfs_inode_from_disk(ip, dip);
525 trace_xfs_iget_miss(ip);
531 error = xfs_iget_check_free_state(ip, flags);
551 if (!xfs_ilock_nowait(ip, lock_flags))
560 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
566 d_mark_dontcache(VFS_I(ip));
567 ip->i_udquot = NULL;
568 ip->i_gdquot = NULL;
569 ip->i_pdquot = NULL;
570 xfs_iflags_set(ip, iflags);
574 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
584 *ipp = ip;
591 xfs_iunlock(ip, lock_flags);
593 __destroy_inode(VFS_I(ip));
594 xfs_inode_free(ip);
619 struct xfs_inode *ip;
639 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
641 if (ip) {
642 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
653 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
660 *ipp = ip;
666 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
667 xfs_setup_existing_inode(ip);
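xfs_iget() itself (lines 619-667) is the classic lookup-or-create loop: search the per-AG radix tree; on a hit run the revalidation above; on a miss allocate a new inode, read it from disk, and radix_tree_insert() it, freeing the candidate and retrying if another thread won the insert race. A single-lock userspace sketch of that shape (tiny fixed hash table, invented names; the kernel version also retries on -EAGAIN from the hit path):

#include <pthread.h>
#include <stdlib.h>

#define NBUCKETS 64

struct node {                           /* invented */
        unsigned long   ino;
        struct node     *next;
};

static struct node *table[NBUCKETS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *lookup_locked(unsigned long ino)
{
        struct node *n;

        for (n = table[ino % NBUCKETS]; n; n = n->next)
                if (n->ino == ino)
                        return n;
        return NULL;
}

/* Find-or-create, tolerating a racing inserter, like xfs_iget(). */
static struct node *iget(unsigned long ino)
{
        struct node *n, *fresh;

        pthread_mutex_lock(&table_lock);
        n = lookup_locked(ino);
        pthread_mutex_unlock(&table_lock);
        if (n)
                return n;                       /* cache hit */

        fresh = calloc(1, sizeof(*fresh));      /* "read from disk" here */
        if (!fresh)
                return NULL;
        fresh->ino = ino;

        pthread_mutex_lock(&table_lock);
        n = lookup_locked(ino);                 /* recheck: insert race */
        if (n) {
                pthread_mutex_unlock(&table_lock);
                free(fresh);                    /* lost the race */
                return n;
        }
        fresh->next = table[ino % NBUCKETS];
        table[ino % NBUCKETS] = fresh;
        pthread_mutex_unlock(&table_lock);
        return fresh;
}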
705 struct xfs_inode *ip;
708 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
712 *inuse = !!(VFS_I(ip)->i_mode);
713 xfs_irele(ip);
726 * Decide if the given @ip is eligible to be a part of the inode walk, and
732 struct xfs_inode *ip,
735 struct inode *inode = VFS_I(ip);
741 spin_lock(&ip->i_flags_lock);
742 if (!ip->i_ino)
746 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
747 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
749 spin_unlock(&ip->i_flags_lock);
752 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
763 spin_unlock(&ip->i_flags_lock);
775 int (*execute)(struct xfs_inode *ip, void *args),
818 struct xfs_inode *ip = batch[i];
820 if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
835 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
837 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
838 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
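The inode-walk code above pulls references out of the radix tree in batches, skips anything owned by another AG, and then advances its cursor past the highest inode number it saw; the `first_index < XFS_INO_TO_AGINO(...)` test at line 838 detects 32-bit agino wraparound at the top of the AG so the loop terminates instead of restarting from zero. The cursor-advance idiom in isolation:

#include <stdbool.h>
#include <stdint.h>

/*
 * Advance a 32-bit cursor past `last`, the highest index seen in the
 * current batch.  Returns false if the increment wrapped, meaning the
 * walk reached the end of the index space and must stop.
 */
static bool advance_cursor(uint32_t *cursor, uint32_t last)
{
        uint32_t next = last + 1;

        if (next < last)                /* wrapped past UINT32_MAX */
                return false;
        *cursor = next;
        return true;
}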
896 int (*execute)(struct xfs_inode *ip, void *args),
1001 struct xfs_inode *ip)
1005 spin_lock(&ip->i_flags_lock);
1006 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1007 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1009 spin_unlock(&ip->i_flags_lock);
1012 __xfs_iflags_set(ip, XFS_IRECLAIM);
1013 spin_unlock(&ip->i_flags_lock);
1031 struct xfs_inode *ip,
1034 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
1036 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
1038 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
1041 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1042 xfs_iunpin_wait(ip);
1043 xfs_iflush_abort(ip);
1046 if (xfs_ipincount(ip))
1048 if (!xfs_inode_clean(ip))
1051 xfs_iflags_clear(ip, XFS_IFLUSHING);
1064 spin_lock(&ip->i_flags_lock);
1065 ip->i_flags = XFS_IRECLAIM;
1066 ip->i_ino = 0;
1067 spin_unlock(&ip->i_flags_lock);
1069 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1071 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1081 XFS_INO_TO_AGINO(ip->i_mount, ino)))
1094 xfs_ilock(ip, XFS_ILOCK_EXCL);
1095 xfs_qm_dqdetach(ip);
1096 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1097 ASSERT(xfs_inode_clean(ip));
1099 __xfs_inode_free(ip);
1103 xfs_iflags_clear(ip, XFS_IFLUSHING);
1105 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1107 xfs_iflags_clear(ip, XFS_IRECLAIM);
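xfs_reclaim_inode() (lines 1031-1107) is built from trylocks and backout paths: if it cannot get ILOCK_EXCL, or the inode is already flushing, pinned, or dirty, it undoes whatever flags it set and returns so the background reclaim worker can retry later; only a clean, unpinned inode has its identity erased, is deleted from the radix tree, and is freed. A sketch of the trylock-with-backout shape (names invented):

#include <pthread.h>
#include <stdbool.h>

struct obj {                            /* invented */
        pthread_mutex_t lock;
        bool            dirty;
        bool            flushing;
};

/* Best-effort reclaim: every failure path restores what it changed. */
static bool try_reclaim(struct obj *o)
{
        if (pthread_mutex_trylock(&o->lock))
                return false;                   /* contended: try later */
        if (o->flushing)
                goto out_unlock;
        o->flushing = true;                     /* claim the flush state */
        if (o->dirty)
                goto out_clear;

        /* ...erase identity, drop from the index, free... */
        pthread_mutex_unlock(&o->lock);
        return true;

out_clear:
        o->flushing = false;
out_unlock:
        pthread_mutex_unlock(&o->lock);
        return false;
}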
1157 struct xfs_inode *ip = batch[i];
1159 if (done || !xfs_reclaim_inode_grab(ip))
1176 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1179 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1180 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1257 struct xfs_inode *ip,
1261 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1265 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1269 ip->i_d.di_projid != eofb->eof_prid)
1281 struct xfs_inode *ip,
1285 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1289 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1293 ip->i_d.di_projid == eofb->eof_prid)
1300 * Is this inode @ip eligible for eof/cow block reclamation, given some
1306 struct xfs_inode *ip,
1315 match = xfs_inode_match_id_union(ip, eofb);
1317 match = xfs_inode_match_id(ip, eofb);
1323 XFS_ISIZE(ip) < eofb->eof_min_file_size)
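xfs_inode_matches_eofb() composes two owner filters: xfs_inode_match_id() (lines 1257-1269) requires every criterion present in the filter — uid, gid, project id — to match, while xfs_inode_match_id_union() (lines 1281-1293) accepts the inode if any one of them matches; a minimum-file-size cut is applied afterwards. A compilable sketch of the AND/OR predicate pair (struct filter and the MATCH_* flags are invented):

#include <stdbool.h>
#include <sys/types.h>

#define MATCH_UID       0x1
#define MATCH_GID       0x2

struct filter {                         /* invented */
        unsigned int    flags;
        uid_t           uid;
        gid_t           gid;
};

/* Intersection: every requested id must match (like *_match_id). */
static bool match_all(const struct filter *f, uid_t uid, gid_t gid)
{
        if ((f->flags & MATCH_UID) && uid != f->uid)
                return false;
        if ((f->flags & MATCH_GID) && gid != f->gid)
                return false;
        return true;
}

/* Union: any requested id may match (like *_match_id_union). */
static bool match_any(const struct filter *f, uid_t uid, gid_t gid)
{
        if ((f->flags & MATCH_UID) && uid == f->uid)
                return true;
        if ((f->flags & MATCH_GID) && gid == f->gid)
                return true;
        return false;
}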
1349 struct xfs_inode *ip,
1358 if (!xfs_can_free_eofblocks(ip, false)) {
1360 trace_xfs_inode_free_eofblocks_invalid(ip);
1361 xfs_inode_clear_eofblocks_tag(ip);
1369 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1372 if (!xfs_inode_matches_eofb(ip, eofb))
1379 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1385 ret = xfs_free_eofblocks(ip);
1386 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1408 struct xfs_inode *ip,
1422 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1423 dq = xfs_inode_dquot(ip, XFS_DQTYPE_USER);
1425 eofb.eof_uid = VFS_I(ip)->i_uid;
1431 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1432 dq = xfs_inode_dquot(ip, XFS_DQTYPE_GROUP);
1434 eofb.eof_gid = VFS_I(ip)->i_gid;
1441 execute(ip->i_mount, &eofb);
1448 struct xfs_inode *ip)
1450 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1470 xfs_inode_t *ip,
1476 struct xfs_mount *mp = ip->i_mount;
1484 if (ip->i_flags & xfs_iflag_for_tag(tag))
1486 spin_lock(&ip->i_flags_lock);
1487 ip->i_flags |= xfs_iflag_for_tag(tag);
1488 spin_unlock(&ip->i_flags_lock);
1490 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1495 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1498 spin_lock(&ip->i_mount->m_perag_lock);
1499 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1500 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1502 spin_unlock(&ip->i_mount->m_perag_lock);
1505 execute(ip->i_mount);
1507 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
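__xfs_inode_set_blocks_tag() pushes state up three levels: a flag on the inode, a tag on the inode's slot in the per-AG radix tree, and — only when the AG previously had no tagged inodes — a tag on the AG's slot in the per-mount tree plus a one-time kick of the background worker. The kernel detects "previously untagged" via the radix tree's own tag bookkeeping; the sketch below gets the same first-setter-notifies behaviour with a plain counter (names invented):

#include <pthread.h>

struct group {                          /* invented: think "per-AG" */
        pthread_mutex_t lock;
        unsigned int    ntagged;        /* tagged members in this group */
};

/*
 * Returns 1 when the caller is the first to tag a member, and so must
 * tag the parent level and kick the worker, as lines 1495-1505 do.
 */
static int group_tag_member(struct group *g)
{
        int first;

        pthread_mutex_lock(&g->lock);
        first = (g->ntagged++ == 0);
        pthread_mutex_unlock(&g->lock);
        return first;
}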
1516 xfs_inode_t *ip)
1518 trace_xfs_inode_set_eofblocks_tag(ip);
1519 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1526 xfs_inode_t *ip,
1531 struct xfs_mount *mp = ip->i_mount;
1534 spin_lock(&ip->i_flags_lock);
1535 ip->i_flags &= ~xfs_iflag_for_tag(tag);
1536 spin_unlock(&ip->i_flags_lock);
1538 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1542 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1545 spin_lock(&ip->i_mount->m_perag_lock);
1546 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1547 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1549 spin_unlock(&ip->i_mount->m_perag_lock);
1550 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1559 xfs_inode_t *ip)
1561 trace_xfs_inode_clear_eofblocks_tag(ip);
1562 return __xfs_inode_clear_blocks_tag(ip,
1573 struct xfs_inode *ip)
1579 if (!xfs_inode_has_cow_data(ip)) {
1580 trace_xfs_inode_free_cowblocks_invalid(ip);
1581 xfs_inode_clear_cowblocks_tag(ip);
1589 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1590 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1591 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1592 atomic_read(&VFS_I(ip)->i_dio_count))
1612 struct xfs_inode *ip,
1618 if (!xfs_prep_free_cowblocks(ip))
1621 if (!xfs_inode_matches_eofb(ip, eofb))
1625 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1626 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1632 if (xfs_prep_free_cowblocks(ip))
1633 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1635 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1636 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
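xfs_inode_free_cowblocks() (lines 1612-1636) is a check/lock/recheck pattern: xfs_prep_free_cowblocks() runs once unlocked as a cheap filter, then again after IOLOCK and MMAPLOCK are taken, because dirty pages or in-flight direct I/O can appear while we sleep on the locks. The shape in miniature (names invented):

#include <pthread.h>
#include <stdbool.h>

struct obj {                            /* invented */
        pthread_mutex_t lock;
        bool            busy;           /* dirty pages / in-flight I/O */
};

static bool worth_cleaning(struct obj *o)
{
        return !o->busy;
}

static void maybe_clean(struct obj *o)
{
        if (!worth_cleaning(o))         /* cheap unlocked filter */
                return;

        pthread_mutex_lock(&o->lock);
        if (worth_cleaning(o)) {        /* recheck now that we hold it */
                /* ...cancel COW extents here... */
        }
        pthread_mutex_unlock(&o->lock);
}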
1652 struct xfs_inode *ip)
1654 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1659 xfs_inode_t *ip)
1661 trace_xfs_inode_set_cowblocks_tag(ip);
1662 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1669 xfs_inode_t *ip)
1671 trace_xfs_inode_clear_cowblocks_tag(ip);
1672 return __xfs_inode_clear_blocks_tag(ip,