Lines matching references to s_umount
56 down_write(&sb->s_umount);
58 down_read(&sb->s_umount);
64 up_write(&sb->s_umount);
66 up_read(&sb->s_umount);
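
The hits at lines 56-66 come from the small wrapper pair that takes or drops s_umount in either shared or exclusive mode. A minimal sketch of that pattern in kernel C; the helper names mirror fs/super.c, but the bodies here are illustrative only:

    #include <linux/fs.h>
    #include <linux/rwsem.h>

    /* Take or drop @sb->s_umount, shared or exclusive. */
    static void __super_lock(struct super_block *sb, bool excl)
    {
            if (excl)
                    down_write(&sb->s_umount);      /* line 56 */
            else
                    down_read(&sb->s_umount);       /* line 58 */
    }

    static void super_unlock(struct super_block *sb, bool excl)
    {
            if (excl)
                    up_write(&sb->s_umount);        /* line 64 */
            else
                    up_read(&sb->s_umount);         /* line 66 */
    }
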
109 * set. The function acquires s_umount and returns with it held.
114 lockdep_assert_not_held(&sb->s_umount);
138 * Just reacquire @sb->s_umount for the caller.
143 /* wait and acquire read-side of @sb->s_umount */
149 /* wait and acquire write-side of @sb->s_umount */
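
Lines 109-149 describe the blocking variant: wait until the superblock's fate is known, then return with s_umount held in the requested mode (read side at 143, write side at 149). A condensed sketch of that shape, reusing the wrappers above and a hypothetical super_settled() predicate; the real super_lock() in fs/super.c is more careful about memory ordering and is woken via wake_up_var() when SB_BORN or SB_DYING gets set:

    #include <linux/fs.h>
    #include <linux/lockdep.h>
    #include <linux/wait_bit.h>

    /* Hypothetical predicate: the superblock is either fully born or dying. */
    static bool super_settled(struct super_block *sb)
    {
            return sb->s_flags & (SB_BORN | SB_DYING);
    }

    /* Wait for @sb and acquire s_umount; false means the sb is dying. */
    static bool super_lock(struct super_block *sb, bool excl)
    {
            lockdep_assert_not_held(&sb->s_umount);         /* line 114 */

            for (;;) {
                    __super_lock(sb, excl);
                    if (sb->s_flags & SB_DYING)
                            return false;
                    if (sb->s_flags & SB_BORN)
                            return true;
                    /* Not settled yet: drop the lock, wait, then retry. */
                    super_unlock(sb, excl);
                    wait_var_event(&sb->s_flags, super_settled(sb));
            }
    }

    /* Lines 143/149: the wait-and-acquire read- and write-side wrappers. */
    static bool super_lock_shared(struct super_block *sb)
    {
            return super_lock(sb, false);
    }

    static bool super_lock_excl(struct super_block *sb)
    {
            return super_lock(sb, true);
    }
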
335 init_rwsem(&s->s_umount);
336 lockdep_set_class(&s->s_umount, &type->s_umount_key);
338 * sget() can have s_umount recursion.
344 * In case that succeeds, it will acquire the s_umount
352 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
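
Lines 335-352 are the setup side in alloc_super(): initialise the rwsem, give it a per-filesystem-type lockdep class, and take it with SINGLE_DEPTH_NESTING because sget() may go on to acquire the s_umount of an existing superblock while this new, not-yet-visible one is still locked. A sketch of just those steps (sketch_init_s_umount is a made-up name; the real alloc_super() does much more):

    #include <linux/fs.h>
    #include <linux/lockdep.h>
    #include <linux/rwsem.h>

    static void sketch_init_s_umount(struct super_block *s,
                                     struct file_system_type *type)
    {
            init_rwsem(&s->s_umount);                               /* line 335 */
            lockdep_set_class(&s->s_umount, &type->s_umount_key);   /* line 336 */

            /*
             * sget() can recurse on s_umount: after locking this new sb it
             * may still acquire the s_umount of a suitable old sb it finds
             * (lines 338-344). The locks are distinct and this object is
             * not exposed yet, so annotate the acquisition with a separate
             * subclass to keep lockdep quiet (line 352).
             */
            down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
    }
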
439 lockdep_assert_not_held(&sb->s_umount);
458 * We don't need @sb->s_umount here as every concurrent caller
579 lockdep_assert_held(&sb->s_umount);
583 lockdep_assert_not_held(&sb->s_umount);
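
The hits at 439, 458, 579 and 583 are lockdep annotations rather than locking: they document whether the caller is expected to hold s_umount at that point, and they compile away when lockdep is disabled. A tiny illustrative pair with hypothetical function names:

    #include <linux/fs.h>
    #include <linux/lockdep.h>

    /* Hypothetical helper that must be entered with s_umount held. */
    static void touches_locked_state(struct super_block *sb)
    {
            lockdep_assert_held(&sb->s_umount);             /* as at line 579 */
            /* ... modify state protected by s_umount ... */
    }

    /* Hypothetical helper that must not be called with s_umount held. */
    static void may_take_s_umount(struct super_block *sb)
    {
            lockdep_assert_not_held(&sb->s_umount);         /* as at 439/583 */
            /* ... may block or take s_umount itself ... */
    }
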
589 * super_trylock_shared - try to grab ->s_umount shared
595 * false if we cannot acquire s_umount or if we lose the race and
596 * filesystem already got into shutdown, and returns true with the s_umount
598 * the caller must drop the s_umount lock when done.
607 if (down_read_trylock(&sb->s_umount)) {
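
Lines 589-607 are super_trylock_shared(): opportunistically take s_umount shared, but give it back and report failure if the superblock is not usable. A sketch that follows the contract spelled out in the comment; the SB_BORN/SB_DYING/s_root checks are the usual "usable here" test in fs/super.c:

    #include <linux/fs.h>

    /*
     * Returns false if s_umount cannot be taken or the filesystem already
     * got into shutdown; returns true with s_umount held shared otherwise,
     * and the caller must drop the lock when done.
     */
    static bool sketch_super_trylock_shared(struct super_block *sb)
    {
            if (down_read_trylock(&sb->s_umount)) {         /* line 607 */
                    if (!(sb->s_flags & SB_DYING) && sb->s_root &&
                        (sb->s_flags & SB_BORN))
                            return true;
                    up_read(&sb->s_umount);
            }
            return false;
    }

A caller pairs a true return with up_read(&sb->s_umount) once it is done with the superblock.
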
1568 * We drop s_umount here because we need to open the bdev and
1569 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
1620 * We drop s_umount here because we need to open the bdev and
1621 * bdev->open_mutex ranks above s_umount (blkdev_put() ->
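
The two comments around 1568-1621 make the same lock-ordering point: bdev->open_mutex ranks above s_umount (blkdev_put() takes open_mutex), so the block device has to be opened with s_umount dropped and the superblock state re-checked afterwards. A hedged sketch of that drop-open-retake shape; open_backing_bdev() is a made-up stand-in for the real block-layer call:

    #include <linux/errno.h>
    #include <linux/fs.h>

    /* Hypothetical stand-in for opening the backing block device. */
    static int open_backing_bdev(struct super_block *sb)
    {
            return 0;       /* placeholder body */
    }

    static int sketch_setup_bdev(struct super_block *sb)
    {
            int err;

            /*
             * bdev->open_mutex ranks above s_umount (lines 1569/1621), so
             * the open must not happen while s_umount is held.
             */
            up_write(&sb->s_umount);
            err = open_backing_bdev(sb);
            down_write(&sb->s_umount);
            if (err)
                    return err;

            /*
             * s_umount was dropped, so the superblock may have started
             * dying in the meantime; re-check before going on.
             */
            if (sb->s_flags & SB_DYING)
                    return -ENODEV;
            return 0;
    }
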
1885 up_write(&sb->s_umount);
1888 down_write(&sb->s_umount);
1938 * sb->s_writers.frozen is protected by sb->s_umount.
1991 /* Release s_umount to preserve sb_start_write -> s_umount ordering */
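
The last cluster (1885-1991) is the freeze machinery: sb->s_writers.frozen is protected by s_umount (1938), yet the established ordering is sb_start_write before s_umount, so the code cycles the lock (the up_write/down_write pair at 1885/1888) and drops it before waiting for writers ("Release s_umount to preserve sb_start_write -> s_umount ordering", 1991). A compressed sketch of that choreography; the real freeze_super() handles several freeze levels, freeze holders and error unwinding, and sb_wait_write() is internal to fs/super.c:

    #include <linux/fs.h>

    static int sketch_freeze_writes(struct super_block *sb)
    {
            down_write(&sb->s_umount);

            /* s_writers.frozen is protected by s_umount (line 1938). */
            sb->s_writers.frozen = SB_FREEZE_WRITE;

            /*
             * Writers call sb_start_write() before ever touching s_umount,
             * so waiting for them must happen with s_umount released to
             * preserve that ordering (line 1991).
             */
            up_write(&sb->s_umount);
            sb_wait_write(sb, SB_FREEZE_WRITE);     /* fs/super.c internal */
            down_write(&sb->s_umount);

            /* ... freeze the deeper levels, then release the lock ... */
            up_write(&sb->s_umount);
            return 0;
    }
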