Lines Matching refs:inode
4 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
31 * inode->i_lock protects:
32 * inode->i_state, inode->i_hash, __iget()
34 * inode->i_sb->s_inode_lru, inode->i_lru
35 * inode->i_sb->s_inode_list_lock protects:
36 * inode->i_sb->s_inodes, inode->i_sb_list
38 * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
40 * inode_hashtable, inode->i_hash
44 * inode->i_sb->s_inode_list_lock
45 * inode->i_lock
49 * inode->i_lock
52 * inode->i_sb->s_inode_list_lock
53 * inode->i_lock
120 static int no_open(struct inode *inode, struct file *file)
126 * inode_init_always - perform inode structure initialisation
127 * @sb: superblock inode belongs to
128 * @inode: inode to initialise
130 * These are initializations that need to be done on every inode
133 int inode_init_always(struct super_block *sb, struct inode *inode)
137 struct address_space *const mapping = &inode->i_data;
139 inode->i_sb = sb;
140 inode->i_blkbits = sb->s_blocksize_bits;
141 inode->i_flags = 0;
142 atomic64_set(&inode->i_sequence, 0);
143 atomic_set(&inode->i_count, 1);
144 inode->i_op = &empty_iops;
145 inode->i_fop = &no_open_fops;
146 inode->__i_nlink = 1;
147 inode->i_opflags = 0;
149 inode->i_opflags |= IOP_XATTR;
150 i_uid_write(inode, 0);
151 i_gid_write(inode, 0);
152 atomic_set(&inode->i_writecount, 0);
153 inode->i_size = 0;
154 inode->i_write_hint = WRITE_LIFE_NOT_SET;
155 inode->i_blocks = 0;
156 inode->i_bytes = 0;
157 inode->i_generation = 0;
158 inode->i_pipe = NULL;
159 inode->i_bdev = NULL;
160 inode->i_cdev = NULL;
161 inode->i_link = NULL;
162 inode->i_dir_seq = 0;
163 inode->i_rdev = 0;
164 inode->dirtied_when = 0;
167 inode->i_wb_frn_winner = 0;
168 inode->i_wb_frn_avg_time = 0;
169 inode->i_wb_frn_history = 0;
172 spin_lock_init(&inode->i_lock);
173 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
175 init_rwsem(&inode->i_rwsem);
176 lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key);
178 atomic_set(&inode->i_dio_count, 0);
181 mapping->host = inode;
193 inode->i_private = NULL;
194 inode->i_mapping = mapping;
195 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
197 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
201 inode->i_fsnotify_mask = 0;
203 inode->i_flctx = NULL;
205 if (unlikely(security_inode_alloc(inode)))
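inode_init_always() is the per-use half of inode setup: alloc_inode() runs it on every new inode, and a filesystem that recycles inode objects out of a private cache must run it again on each reuse. A minimal sketch, with examplefs_* as hypothetical names:

    #include <linux/fs.h>

    /* Re-initialise a recycled inode object exactly as alloc_inode()
     * would a fresh one; inode_init_always() only fails when
     * security_inode_alloc() does. */
    static struct inode *examplefs_reuse_inode(struct super_block *sb,
                                               struct inode *inode)
    {
            if (inode_init_always(sb, inode))
                    return NULL;
            return inode;
    }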
213 void free_inode_nonrcu(struct inode *inode)
215 kmem_cache_free(inode_cachep, inode);
221 struct inode *inode = container_of(head, struct inode, i_rcu);
222 if (inode->free_inode)
223 inode->free_inode(inode);
225 free_inode_nonrcu(inode);
228 static struct inode *alloc_inode(struct super_block *sb)
231 struct inode *inode;
234 inode = ops->alloc_inode(sb);
236 inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);
238 if (!inode)
241 if (unlikely(inode_init_always(sb, inode))) {
243 ops->destroy_inode(inode);
247 inode->free_inode = ops->free_inode;
248 i_callback(&inode->i_rcu);
252 return inode;
255 void __destroy_inode(struct inode *inode)
257 BUG_ON(inode_has_buffers(inode));
258 inode_detach_wb(inode);
259 security_inode_free(inode);
260 fsnotify_inode_delete(inode);
261 locks_free_lock_context(inode);
262 xpm_delete_cache_node_hook(inode);
263 if (!inode->i_nlink) {
264 WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
265 atomic_long_dec(&inode->i_sb->s_remove_count);
269 if (inode->i_acl && !is_uncached_acl(inode->i_acl))
270 posix_acl_release(inode->i_acl);
271 if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl))
272 posix_acl_release(inode->i_default_acl);
278 static void destroy_inode(struct inode *inode)
280 const struct super_operations *ops = inode->i_sb->s_op;
282 BUG_ON(!list_empty(&inode->i_lru));
283 __destroy_inode(inode);
285 ops->destroy_inode(inode);
289 inode->free_inode = ops->free_inode;
290 call_rcu(&inode->i_rcu, i_callback);
294 * drop_nlink - directly drop an inode's link count
295 * @inode: inode
304 void drop_nlink(struct inode *inode)
306 WARN_ON(inode->i_nlink == 0);
307 inode->__i_nlink--;
308 if (!inode->i_nlink)
309 atomic_long_inc(&inode->i_sb->s_remove_count);
314 * clear_nlink - directly zero an inode's link count
315 * @inode: inode
321 void clear_nlink(struct inode *inode)
323 if (inode->i_nlink) {
324 inode->__i_nlink = 0;
325 atomic_long_inc(&inode->i_sb->s_remove_count);
331 * set_nlink - directly set an inode's link count
332 * @inode: inode
338 void set_nlink(struct inode *inode, unsigned int nlink)
341 clear_nlink(inode);
344 if (inode->i_nlink == 0)
345 atomic_long_dec(&inode->i_sb->s_remove_count);
347 inode->__i_nlink = nlink;
353 * inc_nlink - directly increment an inode's link count
354 * @inode: inode
360 void inc_nlink(struct inode *inode)
362 if (unlikely(inode->i_nlink == 0)) {
363 WARN_ON(!(inode->i_state & I_LINKABLE));
364 atomic_long_dec(&inode->i_sb->s_remove_count);
367 inode->__i_nlink++;
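The four nlink helpers exist so that filesystems never store to inode->__i_nlink directly: each one keeps sb->s_remove_count balanced as the link count crosses zero. A hedged sketch of the usual call sites (examplefs_* names are hypothetical):

    #include <linux/fs.h>

    /* Seed the count when reading an inode from disk: */
    static void examplefs_fill_inode(struct inode *inode,
                                     unsigned int disk_nlink)
    {
            set_nlink(inode, disk_nlink);
    }

    /* Drop one link in ->unlink, once the directory entry is gone: */
    static int examplefs_unlink(struct inode *dir, struct dentry *dentry)
    {
            struct inode *inode = d_inode(dentry);

            /* ...remove the on-disk directory entry first... */
            drop_nlink(inode);              /* WARNs if already zero */
            mark_inode_dirty(inode);
            return 0;
    }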
390 * of the inode, so let the slab be aware of that.
392 void inode_init_once(struct inode *inode)
394 memset(inode, 0, sizeof(*inode));
395 INIT_HLIST_NODE(&inode->i_hash);
396 INIT_LIST_HEAD(&inode->i_devices);
397 INIT_LIST_HEAD(&inode->i_io_list);
398 INIT_LIST_HEAD(&inode->i_wb_list);
399 INIT_LIST_HEAD(&inode->i_lru);
400 __address_space_init_once(&inode->i_data);
401 i_size_ordered_init(inode);
407 struct inode *inode = (struct inode *) foo;
409 inode_init_once(inode);
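inode_init_once() is designed to run from a slab constructor, once per slab object rather than once per allocation, since everything it initialises stays valid across reuse. A sketch of the conventional registration, with examplefs_* hypothetical:

    #include <linux/fs.h>
    #include <linux/slab.h>

    struct examplefs_inode {
            unsigned long on_disk_flags;    /* fs-private state */
            struct inode vfs_inode;         /* embedded VFS inode */
    };

    static struct kmem_cache *examplefs_inode_cachep;

    static void examplefs_inode_ctor(void *obj)
    {
            struct examplefs_inode *ei = obj;

            inode_init_once(&ei->vfs_inode);
    }

    static int __init examplefs_init_cache(void)
    {
            examplefs_inode_cachep = kmem_cache_create("examplefs_inode",
                            sizeof(struct examplefs_inode), 0,
                            SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
                            examplefs_inode_ctor);
            return examplefs_inode_cachep ? 0 : -ENOMEM;
    }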
413 * inode->i_lock must be held
415 void __iget(struct inode *inode)
417 atomic_inc(&inode->i_count);
421 * get additional reference to inode; caller must already hold one.
423 void ihold(struct inode *inode)
425 WARN_ON(atomic_inc_return(&inode->i_count) < 2);
429 static void inode_lru_list_add(struct inode *inode)
431 if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
434 inode->i_state |= I_REFERENCED;
438 * Add inode to LRU if needed (inode is unused and clean).
440 * Needs inode->i_lock held.
442 void inode_add_lru(struct inode *inode)
444 if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
446 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
447 inode_lru_list_add(inode);
451 static void inode_lru_list_del(struct inode *inode)
454 if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
458 static void inode_pin_lru_isolating(struct inode *inode)
460 lockdep_assert_held(&inode->i_lock);
461 WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
462 inode->i_state |= I_LRU_ISOLATING;
465 static void inode_unpin_lru_isolating(struct inode *inode)
467 spin_lock(&inode->i_lock);
468 WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
469 inode->i_state &= ~I_LRU_ISOLATING;
471 wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
472 spin_unlock(&inode->i_lock);
475 static void inode_wait_for_lru_isolating(struct inode *inode)
477 spin_lock(&inode->i_lock);
478 if (inode->i_state & I_LRU_ISOLATING) {
479 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
482 wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
483 spin_unlock(&inode->i_lock);
485 spin_lock(&inode->i_lock);
486 WARN_ON(inode->i_state & I_LRU_ISOLATING);
488 spin_unlock(&inode->i_lock);
492 * inode_sb_list_add - add inode to the superblock list of inodes
493 * @inode: inode to add
495 void inode_sb_list_add(struct inode *inode)
497 spin_lock(&inode->i_sb->s_inode_list_lock);
498 list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
499 spin_unlock(&inode->i_sb->s_inode_list_lock);
503 static inline void inode_sb_list_del(struct inode *inode)
505 if (!list_empty(&inode->i_sb_list)) {
506 spin_lock(&inode->i_sb->s_inode_list_lock);
507 list_del_init(&inode->i_sb_list);
508 spin_unlock(&inode->i_sb->s_inode_list_lock);
523 * __insert_inode_hash - hash an inode
524 * @inode: unhashed inode
528 * Add an inode to the inode hash for this superblock.
530 void __insert_inode_hash(struct inode *inode, unsigned long hashval)
532 struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);
535 spin_lock(&inode->i_lock);
536 hlist_add_head_rcu(&inode->i_hash, b);
537 spin_unlock(&inode->i_lock);
543 * __remove_inode_hash - remove an inode from the hash
544 * @inode: inode to unhash
546 * Remove an inode from the superblock.
548 void __remove_inode_hash(struct inode *inode)
551 spin_lock(&inode->i_lock);
552 hlist_del_init_rcu(&inode->i_hash);
553 spin_unlock(&inode->i_lock);
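A filesystem that builds inodes by hand, outside of iget_locked(), still hashes them so later lookups and writeback can find them. The common wrapper insert_inode_hash(inode) is just __insert_inode_hash(inode, inode->i_ino); a minimal sketch, assuming inode came from new_inode() and ino is its stable number:

    inode->i_ino = ino;
    insert_inode_hash(inode);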
558 void clear_inode(struct inode *inode)
565 xa_lock_irq(&inode->i_data.i_pages);
566 BUG_ON(inode->i_data.nrpages);
567 BUG_ON(inode->i_data.nrexceptional);
568 xa_unlock_irq(&inode->i_data.i_pages);
569 BUG_ON(!list_empty(&inode->i_data.private_list));
570 BUG_ON(!(inode->i_state & I_FREEING));
571 BUG_ON(inode->i_state & I_CLEAR);
572 BUG_ON(!list_empty(&inode->i_wb_list));
574 inode->i_state = I_FREEING | I_CLEAR;
579 * Free the inode passed in, removing it from the lists it is still connected
580 * to. We remove any pages still attached to the inode and wait for any IO that
581 * is still in progress before finally destroying the inode.
583 * An inode must already be marked I_FREEING so that we avoid the inode being
587 * An inode must already be removed from the LRU list before being evicted from
591 static void evict(struct inode *inode)
593 const struct super_operations *op = inode->i_sb->s_op;
595 BUG_ON(!(inode->i_state & I_FREEING));
596 BUG_ON(!list_empty(&inode->i_lru));
598 if (!list_empty(&inode->i_io_list))
599 inode_io_list_del(inode);
601 inode_sb_list_del(inode);
603 inode_wait_for_lru_isolating(inode);
606 * Wait for flusher thread to be done with the inode so that filesystem
608 * the inode has I_FREEING set, flusher thread won't start new work on
609 * the inode. We just have to wait for running writeback to finish.
611 inode_wait_for_writeback(inode);
614 op->evict_inode(inode);
616 truncate_inode_pages_final(&inode->i_data);
617 clear_inode(inode);
619 if (S_ISBLK(inode->i_mode) && inode->i_bdev)
620 bd_forget(inode);
621 if (S_ISCHR(inode->i_mode) && inode->i_cdev)
622 cd_forget(inode);
624 remove_inode_hash(inode);
626 spin_lock(&inode->i_lock);
627 wake_up_bit(&inode->i_state, __I_NEW);
628 BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
629 spin_unlock(&inode->i_lock);
631 destroy_inode(inode);
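When a filesystem provides ->evict_inode, evict() calls it instead of the default truncate_inode_pages_final()/clear_inode() pair, so the method has to do both itself. A hedged sketch; examplefs_free_on_disk_inode() is hypothetical:

    static void examplefs_evict_inode(struct inode *inode)
    {
            truncate_inode_pages_final(&inode->i_data);
            clear_inode(inode);             /* leaves I_FREEING | I_CLEAR set */
            if (!inode->i_nlink)
                    examplefs_free_on_disk_inode(inode);
    }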
644 struct inode *inode;
646 inode = list_first_entry(head, struct inode, i_lru);
647 list_del_init(&inode->i_lru);
649 evict(inode);
660 * so any inode reaching zero refcount during or after that call will
665 struct inode *inode, *next;
670 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
671 if (atomic_read(&inode->i_count))
674 spin_lock(&inode->i_lock);
675 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
676 spin_unlock(&inode->i_lock);
680 inode->i_state |= I_FREEING;
681 inode_lru_list_del(inode);
682 spin_unlock(&inode->i_lock);
683 list_add(&inode->i_lru, &dispose);
716 struct inode *inode, *next;
721 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
722 spin_lock(&inode->i_lock);
723 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
724 spin_unlock(&inode->i_lock);
727 if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
728 spin_unlock(&inode->i_lock);
732 if (atomic_read(&inode->i_count)) {
733 spin_unlock(&inode->i_lock);
738 inode->i_state |= I_FREEING;
739 inode_lru_list_del(inode);
740 spin_unlock(&inode->i_lock);
741 list_add(&inode->i_lru, &dispose);
757 * Isolate the inode from the LRU in preparation for freeing it.
760 * pagecache removed. If the inode has metadata buffers attached to
763 * If the inode has the I_REFERENCED flag set, then it means that it has been
765 * inode, clear the flag and move it to the back of the LRU so it gets another
775 struct inode *inode = container_of(item, struct inode, i_lru);
778 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
781 if (!spin_trylock(&inode->i_lock))
788 if (atomic_read(&inode->i_count) ||
789 (inode->i_state & ~I_REFERENCED)) {
790 list_lru_isolate(lru, &inode->i_lru);
791 spin_unlock(&inode->i_lock);
797 if (inode->i_state & I_REFERENCED) {
798 inode->i_state &= ~I_REFERENCED;
799 spin_unlock(&inode->i_lock);
803 if (inode_has_buffers(inode) || inode->i_data.nrpages) {
804 inode_pin_lru_isolating(inode);
805 spin_unlock(&inode->i_lock);
807 if (remove_inode_buffers(inode)) {
809 reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
817 inode_unpin_lru_isolating(inode);
822 WARN_ON(inode->i_state & I_NEW);
823 inode->i_state |= I_FREEING;
824 list_lru_isolate_move(lru, &inode->i_lru, freeable);
825 spin_unlock(&inode->i_lock);
832 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
848 static void __wait_on_freeing_inode(struct inode *inode);
850 * Called with the inode lock held.
852 static struct inode *find_inode(struct super_block *sb,
854 int (*test)(struct inode *, void *),
857 struct inode *inode = NULL;
860 hlist_for_each_entry(inode, head, i_hash) {
861 if (inode->i_sb != sb)
863 if (!test(inode, data))
865 spin_lock(&inode->i_lock);
866 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
867 __wait_on_freeing_inode(inode);
870 if (unlikely(inode->i_state & I_CREATING)) {
871 spin_unlock(&inode->i_lock);
874 __iget(inode);
875 spin_unlock(&inode->i_lock);
876 return inode;
885 static struct inode *find_inode_fast(struct super_block *sb,
888 struct inode *inode = NULL;
891 hlist_for_each_entry(inode, head, i_hash) {
892 if (inode->i_ino != ino)
894 if (inode->i_sb != sb)
896 spin_lock(&inode->i_lock);
897 if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
898 __wait_on_freeing_inode(inode);
901 if (unlikely(inode->i_state & I_CREATING)) {
902 spin_unlock(&inode->i_lock);
905 __iget(inode);
906 spin_unlock(&inode->i_lock);
907 return inode;
918 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
945 /* get_next_ino should not provide a 0 inode number */
955 * new_inode_pseudo - obtain an inode
958 * Allocates a new inode for given superblock.
964 struct inode *new_inode_pseudo(struct super_block *sb)
966 struct inode *inode = alloc_inode(sb);
968 if (inode) {
969 spin_lock(&inode->i_lock);
970 inode->i_state = 0;
971 spin_unlock(&inode->i_lock);
972 INIT_LIST_HEAD(&inode->i_sb_list);
974 return inode;
978 * new_inode - obtain an inode
981 * Allocates a new inode for given superblock. The default gfp_mask
982 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
986 * newly created inode's mapping
989 struct inode *new_inode(struct super_block *sb)
991 struct inode *inode;
995 inode = new_inode_pseudo(sb);
996 if (inode)
997 inode_sb_list_add(inode);
998 return inode;
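A sketch of the common new_inode() pattern in pseudo filesystems (names are illustrative): the helper links the inode into sb->s_inodes, and the caller fills in its identity before exposing it.

    #include <linux/fs.h>

    static struct inode *examplefs_make_inode(struct super_block *sb,
                                              umode_t mode)
    {
            struct inode *inode = new_inode(sb);

            if (!inode)
                    return NULL;
            inode->i_ino = get_next_ino();  /* fine without stable inos */
            inode->i_mode = mode;
            inode->i_atime = inode->i_mtime = inode->i_ctime =
                            current_time(inode);
            return inode;
    }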
1003 void lockdep_annotate_inode_mutex_key(struct inode *inode)
1005 if (S_ISDIR(inode->i_mode)) {
1006 struct file_system_type *type = inode->i_sb->s_type;
1009 if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) {
1013 // mutex_destroy(&inode->i_mutex);
1014 init_rwsem(&inode->i_rwsem);
1015 lockdep_set_class(&inode->i_rwsem,
1025 * @inode: new inode to unlock
1027 * Called when the inode is fully initialised to clear the new state of the
1028 * inode and wake up anyone waiting for the inode to finish initialisation.
1030 void unlock_new_inode(struct inode *inode)
1032 lockdep_annotate_inode_mutex_key(inode);
1033 spin_lock(&inode->i_lock);
1034 WARN_ON(!(inode->i_state & I_NEW));
1035 inode->i_state &= ~I_NEW & ~I_CREATING;
1037 wake_up_bit(&inode->i_state, __I_NEW);
1038 spin_unlock(&inode->i_lock);
1042 void discard_new_inode(struct inode *inode)
1044 lockdep_annotate_inode_mutex_key(inode);
1045 spin_lock(&inode->i_lock);
1046 WARN_ON(!(inode->i_state & I_NEW));
1047 inode->i_state &= ~I_NEW;
1049 wake_up_bit(&inode->i_state, __I_NEW);
1050 spin_unlock(&inode->i_lock);
1051 iput(inode);
1062 * @inode1: first inode to lock
1063 * @inode2: second inode to lock
1064 * @subclass1: inode lock subclass for the first lock obtained
1065 * @subclass2: inode lock subclass for the second lock obtained
1067 void lock_two_inodes(struct inode *inode1, struct inode *inode2,
1103 * @inode1: first inode to lock
1104 * @inode2: second inode to lock
1106 void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
1120 * @inode1: first inode to unlock
1121 * @inode2: second inode to unlock
1123 void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
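A short sketch of the pairing, as used by code that operates on two regular files at once (both inodes assumed distinct and non-directories):

    lock_two_nondirectories(inode1, inode2);
    /* ...modify both files under their i_rwsem... */
    unlock_two_nondirectories(inode1, inode2);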
1133 * inode_insert5 - obtain an inode from a mounted file system
1134 * @inode: pre-allocated inode to use for insert to cache
1135 * @hashval: hash value (usually inode number) to get
1137 * @set: callback used to initialize a new struct inode
1140 * Search for the inode specified by @hashval and @data in the inode cache,
1143 * allocation of inode.
1145 * If the inode is not in cache, insert the pre-allocated inode to cache and
1152 struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
1153 int (*test)(struct inode *, void *),
1154 int (*set)(struct inode *, void *), void *data)
1156 struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
1157 struct inode *old;
1158 bool creating = inode->i_state & I_CREATING;
1162 old = find_inode(inode->i_sb, head, test, data);
1165 * Uhhuh, somebody else created the same inode under us.
1166 * Use the old inode instead of the preallocated one.
1179 if (set && unlikely(set(inode, data))) {
1180 inode = NULL;
1185 * Return the locked inode with I_NEW set, the
1188 spin_lock(&inode->i_lock);
1189 inode->i_state |= I_NEW;
1190 hlist_add_head_rcu(&inode->i_hash, head);
1191 spin_unlock(&inode->i_lock);
1193 inode_sb_list_add(inode);
1197 return inode;
1202 * iget5_locked - obtain an inode from a mounted file system
1204 * @hashval: hash value (usually inode number) to get
1206 * @set: callback used to initialize a new struct inode
1209 * Search for the inode specified by @hashval and @data in the inode cache,
1211 * a generalized version of iget_locked() for file systems where the inode
1212 * number is not sufficient for unique identification of an inode.
1214 * If the inode is not in cache, allocate a new inode and return it locked,
1221 struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
1222 int (*test)(struct inode *, void *),
1223 int (*set)(struct inode *, void *), void *data)
1225 struct inode *inode = ilookup5(sb, hashval, test, data);
1227 if (!inode) {
1228 struct inode *new = alloc_inode(sb);
1232 inode = inode_insert5(new, hashval, test, set, data);
1233 if (unlikely(inode != new))
1237 return inode;
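A hedged sketch of the usual test/set pair for iget5_locked(), for a filesystem whose inode number alone is ambiguous; everything examplefs_* is hypothetical:

    #include <linux/err.h>
    #include <linux/fs.h>

    struct examplefs_key {
            unsigned long object_id;
    };

    static int examplefs_test(struct inode *inode, void *data)
    {
            struct examplefs_key *key = data;

            /* real filesystems compare more than i_ino here */
            return inode->i_ino == key->object_id;
    }

    static int examplefs_set(struct inode *inode, void *data)
    {
            struct examplefs_key *key = data;

            inode->i_ino = key->object_id;
            return 0;
    }

    static struct inode *examplefs_iget(struct super_block *sb,
                                        unsigned long object_id)
    {
            struct examplefs_key key = { .object_id = object_id };
            struct inode *inode;

            inode = iget5_locked(sb, object_id, examplefs_test,
                                 examplefs_set, &key);
            if (!inode)
                    return ERR_PTR(-ENOMEM);
            if (inode->i_state & I_NEW) {
                    /* ...read the object from disk... */
                    unlock_new_inode(inode);
            }
            return inode;
    }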
1242 * iget_locked - obtain an inode from a mounted file system
1244 * @ino: inode number to get
1246 * Search for the inode specified by @ino in the inode cache and if present
1248 * where the inode number is sufficient for unique identification of an inode.
1250 * If the inode is not in cache, allocate a new inode and return it locked,
1254 struct inode *iget_locked(struct super_block *sb, unsigned long ino)
1257 struct inode *inode;
1260 inode = find_inode_fast(sb, head, ino);
1262 if (inode) {
1263 if (IS_ERR(inode))
1265 wait_on_inode(inode);
1266 if (unlikely(inode_unhashed(inode))) {
1267 iput(inode);
1270 return inode;
1273 inode = alloc_inode(sb);
1274 if (inode) {
1275 struct inode *old;
1281 inode->i_ino = ino;
1282 spin_lock(&inode->i_lock);
1283 inode->i_state = I_NEW;
1284 hlist_add_head_rcu(&inode->i_hash, head);
1285 spin_unlock(&inode->i_lock);
1286 inode_sb_list_add(inode);
1289 /* Return the locked inode with I_NEW set, the
1292 return inode;
1296 * Uhhuh, somebody else created the same inode under
1297 * us. Use the old inode instead of the one we just
1301 destroy_inode(inode);
1304 inode = old;
1305 wait_on_inode(inode);
1306 if (unlikely(inode_unhashed(inode))) {
1307 iput(inode);
1311 return inode;
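The canonical caller-side pattern for iget_locked(): only the thread that sees I_NEW initialises the inode, everyone else receives a fully constructed one. examplefs_read_disk_inode() is a hypothetical stand-in for the filesystem's read path:

    #include <linux/err.h>
    #include <linux/fs.h>

    static struct inode *examplefs_iget(struct super_block *sb,
                                        unsigned long ino)
    {
            struct inode *inode = iget_locked(sb, ino);

            if (!inode)
                    return ERR_PTR(-ENOMEM);
            if (!(inode->i_state & I_NEW))
                    return inode;           /* cache hit, already set up */

            if (examplefs_read_disk_inode(inode)) {
                    iget_failed(inode);     /* unhashes and releases it */
                    return ERR_PTR(-EIO);
            }
            unlock_new_inode(inode);
            return inode;
    }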
1316 * search the inode cache for a matching inode number.
1317 * If we find one, then the inode number we are trying to
1320 * Returns 1 if the inode number is unique, 0 if it is not.
1325 struct inode *inode;
1327 hlist_for_each_entry_rcu(inode, b, i_hash) {
1328 if (inode->i_ino == ino && inode->i_sb == sb)
1335 * iunique - get a unique inode number
1337 * @max_reserved: highest reserved inode number
1339 * Obtain an inode number that is unique on the system for a given
1341 * permanent inode numbering system. An inode number is returned that
1373 struct inode *igrab(struct inode *inode)
1375 spin_lock(&inode->i_lock);
1376 if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
1377 __iget(inode);
1378 spin_unlock(&inode->i_lock);
1380 spin_unlock(&inode->i_lock);
1384 * while the inode is getting freed.
1386 inode = NULL;
1388 return inode;
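igrab() is the safe way to take a reference from a context that does not already hold one, since it returns NULL once freeing has begun. A minimal sketch around a hypothetical private list walk:

    static void examplefs_poke_inode(struct inode *candidate)
    {
            struct inode *inode = igrab(candidate);

            if (!inode)
                    return;                 /* already being freed */
            /* ...safe to use the inode here... */
            iput(inode);
    }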
1393 * ilookup5_nowait - search for an inode in the inode cache
1395 * @hashval: hash value (usually inode number) to search for
1399 * Search for the inode specified by @hashval and @data in the inode cache.
1400 * If the inode is in the cache, the inode is returned with an incremented
1404 * with the returned inode. You probably should be using ilookup5() instead.
1408 struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
1409 int (*test)(struct inode *, void *), void *data)
1412 struct inode *inode;
1415 inode = find_inode(sb, head, test, data);
1418 return IS_ERR(inode) ? NULL : inode;
1423 * ilookup5 - search for an inode in the inode cache
1425 * @hashval: hash value (usually inode number) to search for
1429 * Search for the inode specified by @hashval and @data in the inode cache,
1430 * and if the inode is in the cache, return the inode with an incremented
1431 * reference count. Waits on I_NEW before returning the inode.
1435 * inode number is not sufficient for unique identification of an inode.
1439 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
1440 int (*test)(struct inode *, void *), void *data)
1442 struct inode *inode;
1444 inode = ilookup5_nowait(sb, hashval, test, data);
1445 if (inode) {
1446 wait_on_inode(inode);
1447 if (unlikely(inode_unhashed(inode))) {
1448 iput(inode);
1452 return inode;
1457 * ilookup - search for an inode in the inode cache
1459 * @ino: inode number to search for
1461 * Search for the inode @ino in the inode cache, and if the inode is in the
1462 * cache, the inode is returned with an incremented reference count.
1464 struct inode *ilookup(struct super_block *sb, unsigned long ino)
1467 struct inode *inode;
1470 inode = find_inode_fast(sb, head, ino);
1473 if (inode) {
1474 if (IS_ERR(inode))
1476 wait_on_inode(inode);
1477 if (unlikely(inode_unhashed(inode))) {
1478 iput(inode);
1482 return inode;
1487 * find_inode_nowait - find an inode in the inode cache
1489 * @hashval: hash value (usually inode number) to search for
1493 * Search for the inode specified by @hashval and @data in the inode
1494 * cache, where the helper function @match will return 0 if the inode
1495 * does not match, 1 if the inode does match, and -1 if the search
1497 * taking the i_lock spin_lock and checking i_state for an inode being
1506 * inode eviction. The tradeoff is that the @match function must be
1509 struct inode *find_inode_nowait(struct super_block *sb,
1511 int (*match)(struct inode *, unsigned long,
1516 struct inode *inode, *ret_inode = NULL;
1520 hlist_for_each_entry(inode, head, i_hash) {
1521 if (inode->i_sb != sb)
1523 mval = match(inode, hashval, data);
1527 ret_inode = inode;
1537 * find_inode_rcu - find an inode in the inode cache
1540 * @test: Function to test match on an inode
1543 * Search for the inode specified by @hashval and @data in the inode cache,
1544 * where the helper function @test will return 0 if the inode does not match
1546 * i_lock spin_lock and checking i_state for an inode being freed or being
1549 * If successful, this will return the inode for which the @test function
1552 * The @test function is not permitted to take a ref on any inode presented.
1557 struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
1558 int (*test)(struct inode *, void *), void *data)
1561 struct inode *inode;
1566 hlist_for_each_entry_rcu(inode, head, i_hash) {
1567 if (inode->i_sb == sb &&
1568 !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
1569 test(inode, data))
1570 return inode;
1577 * find_inode_by_ino_rcu - Find an inode in the inode cache
1579 * @ino: The inode number to match
1581 * Search for the inode specified by @ino in the inode cache. The search
1582 * is done without taking i_lock; i_state is instead checked directly
1584 * for an inode being freed or being initialized.
1587 * If successful, this will return the matching inode and NULL otherwise.
1590 * No reference is taken on the inode returned; the caller must hold the RCU read lock.
1595 struct inode *find_inode_by_ino_rcu(struct super_block *sb,
1599 struct inode *inode;
1604 hlist_for_each_entry_rcu(inode, head, i_hash) {
1605 if (inode->i_ino == ino &&
1606 inode->i_sb == sb &&
1607 !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
1608 return inode;
1614 int insert_inode_locked(struct inode *inode)
1616 struct super_block *sb = inode->i_sb;
1617 ino_t ino = inode->i_ino;
1621 struct inode *old = NULL;
1636 spin_lock(&inode->i_lock);
1637 inode->i_state |= I_NEW | I_CREATING;
1638 hlist_add_head_rcu(&inode->i_hash, head);
1639 spin_unlock(&inode->i_lock);
1661 int insert_inode_locked4(struct inode *inode, unsigned long hashval,
1662 int (*test)(struct inode *, void *), void *data)
1664 struct inode *old;
1666 inode->i_state |= I_CREATING;
1667 old = inode_insert5(inode, hashval, test, NULL, data);
1669 if (old != inode) {
1678 int generic_delete_inode(struct inode *inode)
1686 * to an inode.
1690 * us to evict inode, do so. Otherwise, retain inode
1694 static void iput_final(struct inode *inode)
1696 struct super_block *sb = inode->i_sb;
1697 const struct super_operations *op = inode->i_sb->s_op;
1701 WARN_ON(inode->i_state & I_NEW);
1704 drop = op->drop_inode(inode);
1706 drop = generic_drop_inode(inode);
1709 !(inode->i_state & I_DONTCACHE) &&
1711 inode_add_lru(inode);
1712 spin_unlock(&inode->i_lock);
1716 state = inode->i_state;
1718 WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
1719 spin_unlock(&inode->i_lock);
1721 write_inode_now(inode, 1);
1723 spin_lock(&inode->i_lock);
1724 state = inode->i_state;
1729 WRITE_ONCE(inode->i_state, state | I_FREEING);
1730 if (!list_empty(&inode->i_lru))
1731 inode_lru_list_del(inode);
1732 spin_unlock(&inode->i_lock);
1734 evict(inode);
1738 * iput - put an inode
1739 * @inode: inode to put
1741 * Puts an inode, dropping its usage count. If the inode use count hits
1742 * zero, the inode is then freed and may also be destroyed.
1746 void iput(struct inode *inode)
1748 if (!inode)
1750 BUG_ON(inode->i_state & I_CLEAR);
1752 if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
1753 if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
1754 atomic_inc(&inode->i_count);
1755 spin_unlock(&inode->i_lock);
1756 trace_writeback_lazytime_iput(inode);
1757 mark_inode_dirty_sync(inode);
1760 iput_final(inode);
1768 * @inode: inode owning the block number being requested
1773 * That is, asked for block 4 of inode 1 the function will replace the
1780 int bmap(struct inode *inode, sector_t *block)
1782 if (!inode->i_mapping->a_ops->bmap)
1785 *block = inode->i_mapping->a_ops->bmap(inode->i_mapping, *block);
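Because bmap() reports failure through its return value and passes the result back through the pointer, a FIBMAP-style lookup of, say, file block 4 reads as in this sketch:

    sector_t block = 4;
    int error = bmap(inode, &block);

    if (error)
            return error;   /* -EINVAL: the mapping has no ->bmap */
    /* 'block' now holds the device block number, or 0 for a hole */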
1796 static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1805 if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
1810 if (timespec64_compare(&inode->i_ctime, &inode->i_atime) >= 0)
1817 if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
1825 int generic_update_time(struct inode *inode, struct timespec64 *time, int flags)
1831 inode->i_atime = *time;
1833 dirty = inode_maybe_inc_iversion(inode, false);
1835 inode->i_ctime = *time;
1837 inode->i_mtime = *time;
1839 !(inode->i_sb->s_flags & SB_LAZYTIME))
1844 __mark_inode_dirty(inode, iflags);
1853 int inode_update_time(struct inode *inode, struct timespec64 *time, int flags)
1855 if (inode->i_op->update_time)
1856 return inode->i_op->update_time(inode, time, flags);
1857 return generic_update_time(inode, time, flags);
1864 * @inode: inode to update
1866 * Update the accessed time on an inode and mark it for writeback.
1868 * as well as the "noatime" flag and inode specific "noatime" markers.
1870 bool atime_needs_update(const struct path *path, struct inode *inode)
1875 if (inode->i_flags & S_NOATIME)
1881 if (HAS_UNMAPPED_ID(inode))
1884 if (IS_NOATIME(inode))
1886 if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
1891 if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
1894 now = current_time(inode);
1896 if (!relatime_need_update(mnt, inode, now))
1899 if (timespec64_equal(&inode->i_atime, &now))
1908 struct inode *inode = d_inode(path->dentry);
1911 if (!atime_needs_update(path, inode))
1914 if (!sb_start_write_trylock(inode->i_sb))
1921 * allocate new space to modify an inode (such is the case for
1928 now = current_time(inode);
1929 inode_update_time(inode, &now, S_ATIME);
1932 sb_end_write(inode->i_sb);
1943 struct inode *inode = d_inode(dentry);
1947 if (IS_NOSEC(inode))
1950 mask = setattr_should_drop_suidgid(inode);
1978 struct inode *inode = file_inode(file);
1988 if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1997 inode_has_no_xattr(inode);
2007 * Update the mtime and ctime members of an inode and mark the inode
2011 * S_NOCMTIME inode flag, e.g. for network filesystem where these
2013 * file systems who need to allocate space in order to update an inode.
2018 struct inode *inode = file_inode(file);
2024 if (IS_NOCMTIME(inode))
2027 now = current_time(inode);
2028 if (!timespec64_equal(&inode->i_mtime, &now))
2031 if (!timespec64_equal(&inode->i_ctime, &now))
2034 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
2044 ret = inode_update_time(inode, &now, sync_it);
2051 /* Caller must hold the file's inode lock */
2071 int inode_needs_sync(struct inode *inode)
2073 if (IS_SYNC(inode))
2075 if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
2082 * If we try to find an inode in the inode hash while it is being
2086 * to recheck inode state.
2089 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
2092 static void __wait_on_freeing_inode(struct inode *inode)
2095 DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
2096 wq = bit_waitqueue(&inode->i_state, __I_NEW);
2098 spin_unlock(&inode->i_lock);
2116 * Initialize the waitqueues and inode hash table.
2140 /* inode slab cache */
2142 sizeof(struct inode),
2164 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
2166 inode->i_mode = mode;
2168 inode->i_fop = &def_chr_fops;
2169 inode->i_rdev = rdev;
2171 inode->i_fop = &def_blk_fops;
2172 inode->i_rdev = rdev;
2174 inode->i_fop = &pipefifo_fops;
2179 " inode %s:%lu\n", mode, inode->i_sb->s_id,
2180 inode->i_ino);
2185 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
2186 * @inode: New inode
2187 * @dir: Directory inode
2188 * @mode: mode of the new inode
2190 void inode_init_owner(struct inode *inode, const struct inode *dir,
2193 inode->i_uid = current_fsuid();
2195 inode->i_gid = dir->i_gid;
2201 inode->i_gid = current_fsgid();
2202 inode->i_mode = mode;
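A hedged sketch tying the two helpers above together in a ramfs-style ->mknod: inode_init_owner() applies the POSIX ownership rules, then init_special_inode() installs the device stubs (mode is assumed to be a special-file mode):

    #include <linux/fs.h>

    static int examplefs_mknod(struct inode *dir, struct dentry *dentry,
                               umode_t mode, dev_t dev)
    {
            struct inode *inode = new_inode(dir->i_sb);

            if (!inode)
                    return -ENOMEM;
            inode->i_ino = get_next_ino();
            inode_init_owner(inode, dir, mode);
            init_special_inode(inode, mode, dev);
            d_instantiate(dentry, inode);
            dget(dentry);                   /* pin the dentry, as ramfs does */
            return 0;
    }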
2207 * inode_owner_or_capable - check current task permissions to inode
2208 * @inode: inode being checked
2211 * inode owner uid mapped, or owns the file.
2213 bool inode_owner_or_capable(const struct inode *inode)
2217 if (uid_eq(current_fsuid(), inode->i_uid))
2221 if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
2230 static void __inode_dio_wait(struct inode *inode)
2232 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
2233 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
2237 if (atomic_read(&inode->i_dio_count))
2239 } while (atomic_read(&inode->i_dio_count));
2245 * @inode: inode to wait for
2251 * to i_dio_count, usually by inode->i_mutex.
2253 void inode_dio_wait(struct inode *inode)
2255 if (atomic_read(&inode->i_dio_count))
2256 __inode_dio_wait(inode);
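A sketch of the typical caller, a truncate-style path draining direct I/O before changing the file size, per the locking rule described above:

    inode_lock(inode);              /* keeps new i_dio_count users out */
    inode_dio_wait(inode);          /* wait out in-flight direct I/O */
    /* ...safe to shrink the file here... */
    inode_unlock(inode);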
2261 * inode_set_flags - atomically set some inode flags
2264 * they have exclusive access to the inode structure (i.e., while the
2265 * inode is being instantiated). The reason for the cmpxchg() loop
2276 void inode_set_flags(struct inode *inode, unsigned int flags,
2280 set_mask_bits(&inode->i_flags, mask, flags);
2284 void inode_nohighmem(struct inode *inode)
2286 mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
2293 * @inode: inode being updated
2296 * containing the inode. Always rounds down. gran must
2299 struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
2301 struct super_block *sb = inode->i_sb;
2323 * @inode: inode.
2328 * Note that inode and inode->sb cannot be NULL.
2331 struct timespec64 current_time(struct inode *inode)
2337 if (unlikely(!inode->i_sb)) {
2338 WARN(1, "current_time() called with uninitialized super_block in the inode");
2342 return timestamp_truncate(now, inode);
2351 * exclusive access to the inode structure.
2353 int vfs_ioc_setflags_prepare(struct inode *inode, unsigned int oldflags,
2366 return fscrypt_prepare_setflags(inode, oldflags, flags);
2375 * exclusive access to the inode structure.
2377 int vfs_ioc_fssetxattr_check(struct inode *inode, const struct fsxattr *old_fa,
2403 if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
2407 !S_ISDIR(inode->i_mode))
2411 !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
2419 !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
2434 * @inode: inode
2436 * Set the inode->i_ctime to the current value for the inode. Returns
2439 struct timespec64 inode_set_ctime_current(struct inode *inode)
2441 struct timespec64 now = current_time(inode);
2443 inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
2450 * @inode: inode to check
2451 * @gid: the new/current gid of @inode
2454 * privileged with CAP_FSETID over @inode. This can be used to determine
2459 bool in_group_or_capable(const struct inode *inode, kgid_t gid)
2463 if (capable_wrt_inode_uidgid(inode, CAP_FSETID))
2470 * @dir: parent directory inode
2481 umode_t mode_strip_sgid(const struct inode *dir, umode_t mode)