Lines matching refs: files (fs/file.c, Linux kernel fd-table code)
51 * space if any. This does not copy the file pointers. Called with the files
74 * clear the extra space. Called with the files spinlock held for write.
167 * The files->file_lock should be held on entry, and will be held on exit.
169 static int expand_fdtable(struct files_struct *files, unsigned int nr)
170 __releases(files->file_lock)
171 __acquires(files->file_lock)
175 spin_unlock(&files->file_lock);
181 if (atomic_read(&files->count) > 1)
184 spin_lock(&files->file_lock);
195 cur_fdt = files_fdtable(files);
198 rcu_assign_pointer(files->fdt, new_fdt);
199 if (cur_fdt != &files->fdtab)
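The expand_fdtable() lines above show the resize discipline: file_lock is dropped across the blocking allocation, retaken, and the new table is published with rcu_assign_pointer() so lockless readers always see either the old or the new table. Below is a minimal userspace sketch of the same drop-allocate-retake-publish shape, with hypothetical names and a pthread mutex standing in for files->file_lock (RCU-safe reads are not modeled; readers in this sketch would need the lock too):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct table { size_t cap; void **slots; };
struct owner { pthread_mutex_t lock; struct table *tbl; };

/* Grow o->tbl to hold at least 'want' slots. Called with o->lock held;
 * drops it around the allocation, retakes it, then publishes. */
static int expand_table(struct owner *o, size_t want)
{
	struct table *nt, *cur;

	pthread_mutex_unlock(&o->lock);		/* allocation may block */
	nt = malloc(sizeof(*nt));
	if (nt) {
		nt->cap = want * 2;
		nt->slots = calloc(nt->cap, sizeof(void *));
	}
	pthread_mutex_lock(&o->lock);
	if (!nt || !nt->slots) {
		free(nt);
		return -1;
	}
	cur = o->tbl;
	if (cur->cap >= want) {			/* raced: someone else grew it */
		free(nt->slots);
		free(nt);
		return 0;
	}
	memcpy(nt->slots, cur->slots, cur->cap * sizeof(void *));
	o->tbl = nt;				/* publish under the lock */
	free(cur->slots);
	free(cur);
	return 1;
}
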
207 * Expand files.
210 * Return <0 error code on error; 0 when nothing done; 1 when files were
212 * The files->file_lock should be held on entry, and will be held on exit.
214 static int expand_files(struct files_struct *files, unsigned int nr)
215 __releases(files->file_lock)
216 __acquires(files->file_lock)
222 fdt = files_fdtable(files);
232 if (unlikely(files->resize_in_progress)) {
233 spin_unlock(&files->file_lock);
235 wait_event(files->resize_wait, !files->resize_in_progress);
236 spin_lock(&files->file_lock);
241 files->resize_in_progress = true;
242 expanded = expand_fdtable(files, nr);
243 files->resize_in_progress = false;
245 wake_up_all(&files->resize_wait);
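expand_files() serializes concurrent expanders with the resize_in_progress flag and a wait queue (lines 232-245 above): latecomers drop file_lock, sleep in wait_event() until the flag clears, retake the lock and retry; the winner sets the flag, resizes, clears it and wake_up_all()s. A rough userspace analogue using a condition variable (hypothetical fragment; the kernel version additionally re-checks whether expansion is still needed after waking):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t resize_wait = PTHREAD_COND_INITIALIZER;
static bool resize_in_progress;

/* Called with 'lock' held; returns with it held. */
static void resize_serialized(void (*do_resize)(void))
{
	while (resize_in_progress)			/* another expander won */
		pthread_cond_wait(&resize_wait, &lock);	/* drops + retakes lock */

	resize_in_progress = true;
	do_resize();					/* may drop the lock inside */
	resize_in_progress = false;
	pthread_cond_broadcast(&resize_wait);		/* wake_up_all() analogue */
}
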
312 * Allocate a new files structure and copy contents from the
313 * passed in files structure.
389 * instantiated in the files array if a sibling thread
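Lines 312-389 are from dup_fd(), which fork() and unshare() use to clone a files_struct: the descriptor table itself is copied, but both copies reference the same open file descriptions. That distinction is observable from userspace (runnable demo; assumes /etc/hostname exists):

#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	char c;

	if (fd < 0)
		return 1;
	if (fork() == 0) {		/* child gets a copy of the fd table */
		read(fd, &c, 1);	/* ...but the file offset is shared */
		_exit(0);
	}
	wait(NULL);
	/* Prints 1: the child's read advanced the shared file position. */
	printf("offset after child read: %ld\n", (long)lseek(fd, 0, SEEK_CUR));
	return 0;
}
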
412 static struct fdtable *close_files(struct files_struct * files)
417 * files structure.
419 struct fdtable *fdt = rcu_dereference_raw(files->fdt);
432 filp_close(file, files);
444 void put_files_struct(struct files_struct *files)
446 if (atomic_dec_and_test(&files->count)) {
447 struct fdtable *fdt = close_files(files);
450 if (fdt != &files->fdtab)
452 kmem_cache_free(files_cachep, files);
458 struct files_struct * files = tsk->files;
460 if (files) {
462 tsk->files = NULL;
464 put_files_struct(files);
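put_files_struct() (line 444) drops a reference with atomic_dec_and_test() and tears the table down only on the final put; exit_files() (lines 458-464) detaches tsk->files before putting it. A self-contained C11 sketch of the same last-put-frees pattern, with hypothetical names:

#include <stdatomic.h>
#include <stdlib.h>

struct shared {
	atomic_int count;
	/* ...payload... */
};

static struct shared *shared_get(struct shared *s)
{
	atomic_fetch_add(&s->count, 1);
	return s;
}

static void shared_put(struct shared *s)
{
	/* Mirrors atomic_dec_and_test(): only the last put frees. */
	if (atomic_fetch_sub(&s->count, 1) == 1)
		free(s);
}

int main(void)
{
	struct shared *s = calloc(1, sizeof(*s));

	atomic_init(&s->count, 1);
	shared_get(s);
	shared_put(s);		/* 2 -> 1: still live */
	shared_put(s);		/* 1 -> 0: freed */
	return 0;
}
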
501 struct files_struct *files = current->files;
506 spin_lock(&files->file_lock);
508 fdt = files_fdtable(files);
510 if (fd < files->next_fd)
511 fd = files->next_fd;
517 * N.B. For clone tasks sharing a files structure, this test
518 * will limit the total number of files that can be opened.
524 error = expand_files(files, fd);
535 if (start <= files->next_fd)
536 files->next_fd = fd + 1;
553 spin_unlock(&files->file_lock);
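The allocator above (lines 501-553) scans from the files->next_fd hint and advances it after a successful allocation; the externally visible contract is that a new descriptor always lands in the lowest free slot. Observable from userspace:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int a = open("/dev/null", O_RDONLY);	/* typically 3 */
	int b = open("/dev/null", O_RDONLY);	/* typically 4 */

	printf("a=%d b=%d\n", a, b);
	close(a);				/* free the lower slot */
	/* The next allocation reuses the lowest free slot: a's number. */
	printf("reopened=%d\n", open("/dev/null", O_RDONLY));
	close(b);
	return 0;
}
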
568 static void __put_unused_fd(struct files_struct *files, unsigned int fd)
570 struct fdtable *fdt = files_fdtable(files);
572 if (fd < files->next_fd)
573 files->next_fd = fd;
578 struct files_struct *files = current->files;
579 spin_lock(&files->file_lock);
580 __put_unused_fd(files, fd);
581 spin_unlock(&files->file_lock);
589 * The VFS is full of places where we drop the files lock between
604 struct files_struct *files = current->files;
609 if (unlikely(files->resize_in_progress)) {
611 spin_lock(&files->file_lock);
612 fdt = files_fdtable(files);
615 spin_unlock(&files->file_lock);
620 fdt = rcu_dereference_sched(files->fdt);
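fd_install() (lines 589-620) publishes a struct file into a previously reserved slot, taking file_lock only when a resize is in flight. The usual in-kernel calling pattern is reserve, create, install; a sketch of that shape follows (kernel context assumed, not standalone-buildable; demo_create_fd and the "[demo]" name are hypothetical):

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>

/* Hypothetical: hand a new anonymous file to userspace. */
static int demo_create_fd(const struct file_operations *fops, void *priv)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve a slot */
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("[demo]", fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);		/* give the slot back */
		return PTR_ERR(file);
	}

	fd_install(fd, file);			/* fd is live from here on */
	return fd;
}
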
630 * @files: file struct to retrieve file from
637 static struct file *pick_file(struct files_struct *files, unsigned fd)
639 struct fdtable *fdt = files_fdtable(files);
649 __put_unused_fd(files, fd);
656 struct files_struct *files = current->files;
659 spin_lock(&files->file_lock);
660 file = pick_file(files, fd);
661 spin_unlock(&files->file_lock);
665 return filp_close(file, files);
696 static inline void __range_close(struct files_struct *files, unsigned int fd,
702 spin_lock(&files->file_lock);
703 n = last_fd(files_fdtable(files));
707 file = pick_file(files, fd);
709 spin_unlock(&files->file_lock);
710 filp_close(file, files);
712 spin_lock(&files->file_lock);
714 spin_unlock(&files->file_lock);
716 spin_lock(&files->file_lock);
719 spin_unlock(&files->file_lock);
735 struct files_struct *cur_fds = me->files, *fds = NULL;
783 * We're done closing the files we were supposed to. Time to install
787 me->files = cur_fds;
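Lines 696-787 implement close_range(2): __range_close() repeatedly picks files under file_lock and filp_close()s each one with the lock dropped, and the syscall can also operate on an unshared copy of the table that it installs at the end (me->files = cur_fds). From userspace (Linux 5.9+; glibc 2.34+ provides the wrapper):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/null", O_RDONLY);
	char c;

	printf("opened fd %d\n", fd);
	/* Close every descriptor above stderr in one call. */
	if (close_range(3, ~0U, 0) != 0)
		perror("close_range");
	/* fd is gone: this read fails with EBADF. */
	if (read(fd, &c, 1) < 0)
		perror("read");
	return 0;
}
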
796 * See close_fd_get_file() below, this variant assumes current->files->file_lock
801 return pick_file(current->files, fd);
810 struct files_struct *files = current->files;
813 spin_lock(&files->file_lock);
814 file = pick_file(files, fd);
815 spin_unlock(&files->file_lock);
820 void do_close_on_exec(struct files_struct *files)
826 spin_lock(&files->file_lock);
830 fdt = files_fdtable(files);
845 __put_unused_fd(files, fd);
846 spin_unlock(&files->file_lock);
847 filp_close(file, files);
849 spin_lock(&files->file_lock);
853 spin_unlock(&files->file_lock);
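do_close_on_exec() (lines 820-853) is the exec-time sweep over the close_on_exec bitmap, again dropping file_lock around each filp_close(). The bit it honors is the one userspace sets with O_CLOEXEC or fcntl(F_SETFD, FD_CLOEXEC), demonstrable like so:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int keep = open("/dev/null", O_RDONLY);
	int drop = open("/dev/null", O_RDONLY | O_CLOEXEC);

	/* The fcntl() route sets the same per-fd bit as O_CLOEXEC: */
	fcntl(drop, F_SETFD, fcntl(drop, F_GETFD) | FD_CLOEXEC);

	printf("keep=%d drop=%d\n", keep, drop);
	/* After exec, 'drop' is closed by do_close_on_exec(); 'keep'
	 * survives, so only 'keep' appears in the listing below. */
	execlp("ls", "ls", "-l", "/proc/self/fd", (char *)NULL);
	perror("execlp");
	return 1;
}
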
856 static inline struct file *__fget_files_rcu(struct files_struct *files,
861 struct fdtable *fdt = rcu_dereference_raw(files->fdt);
896 if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
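__fget_files_rcu() (lines 856-896) is the lockless fast path: load the file pointer under RCU, try to take a reference, then re-check that files->fdt and the slot still match before trusting the result, retrying otherwise. A simplified C11 sketch of that load/ref/revalidate loop follows (hypothetical types; the kernel relies on RCU deferred freeing to make the speculative refcount touch safe, which this fragment does not provide):

#include <stdatomic.h>
#include <stddef.h>

struct object { atomic_int refs; };
struct slot { _Atomic(struct object *) obj; };

static struct object *get_object(struct slot *s)
{
	for (;;) {
		struct object *obj = atomic_load(&s->obj);

		if (!obj)
			return NULL;
		if (atomic_fetch_add(&obj->refs, 1) == 0) {
			/* Raced with the final put (get_file_rcu() failing
			 * in the kernel): back out and retry. */
			atomic_fetch_sub(&obj->refs, 1);
			continue;
		}
		if (atomic_load(&s->obj) != obj) {
			/* Slot retargeted under us, like the recheck at
			 * line 896: drop the ref and retry. */
			atomic_fetch_sub(&obj->refs, 1);
			continue;
		}
		return obj;
	}
}
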
910 static struct file *__fget_files(struct files_struct *files, unsigned int fd,
916 file = __fget_files_rcu(files, fd, mask);
924 return __fget_files(current->files, fd, mask);
944 if (task->files)
945 file = __fget_files(task->files, fd, 0);
954 struct files_struct *files;
958 files = task->files;
959 if (files)
960 file = files_lookup_fd_rcu(files, fd);
969 struct files_struct *files;
974 files = task->files;
975 if (files) {
976 for (; fd < files_fdtable(files)->max_fds; fd++) {
977 file = files_lookup_fd_rcu(files, fd);
1006 struct files_struct *files = current->files;
1018 if (atomic_read_acquire(&files->count) == 1) {
1019 file = files_lookup_fd_raw(files, fd);
1082 struct files_struct *files = current->files;
1084 spin_lock(&files->file_lock);
1085 fdt = files_fdtable(files);
1090 spin_unlock(&files->file_lock);
1095 struct files_struct *files = current->files;
1099 fdt = files_fdtable(files);
1105 static int do_dup2(struct files_struct *files,
1107 __releases(&files->file_lock)
1118 * fget() treats larval files as absent. Potentially interesting,
1126 fdt = files_fdtable(files);
1137 spin_unlock(&files->file_lock);
1140 filp_close(tofree, files);
1145 spin_unlock(&files->file_lock);
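do_dup2() (lines 1105-1145) swaps the new file into the slot under file_lock, then filp_close()s whatever it displaced only after dropping the lock (tofree, line 1140). That displaced-file-is-closed behavior is the dup2() contract userspace relies on for redirection:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("out.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return 1;
	/* Atomically replace fd 1; the old stdout is closed by the kernel
	 * inside the same dup2() call (the 'tofree' path above). */
	dup2(fd, STDOUT_FILENO);
	close(fd);
	printf("this line lands in out.txt\n");
	return 0;
}
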
1152 struct files_struct *files = current->files;
1160 spin_lock(&files->file_lock);
1161 err = expand_files(files, fd);
1164 return do_dup2(files, file, fd, flags);
1167 spin_unlock(&files->file_lock);
1236 struct files_struct *files = current->files;
1247 spin_lock(&files->file_lock);
1248 err = expand_files(files, newfd);
1249 file = files_lookup_fd_locked(files, oldfd);
1257 return do_dup2(files, file, newfd, flags);
1262 spin_unlock(&files->file_lock);
1274 struct files_struct *files = current->files;
1278 if (!files_lookup_fd_rcu(files, oldfd))
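Lines 1274-1278 are dup2()'s special case: when oldfd == newfd it skips the dup machinery and merely verifies oldfd is open (the files_lookup_fd_rcu() check), returning it; dup3() has no such case and fails with EINVAL instead. Both behaviors are easy to observe:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* dup2(fd, fd) on an open fd is a validated no-op... */
	printf("dup2(1, 1)    -> %d\n", dup2(1, 1));		/* 1 */

	/* ...dup3() rejects the same thing outright... */
	errno = 0;
	printf("dup3(1, 1, 0) -> %d, errno=%d (EINVAL)\n",
	       dup3(1, 1, 0), errno);

	/* ...and a closed oldfd fails the lookup with EBADF. */
	errno = 0;
	printf("dup2(99, 99)  -> %d, errno=%d (EBADF)\n",
	       dup2(99, 99), errno);
	return 0;
}
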
1315 int iterate_fd(struct files_struct *files, unsigned n,
1321 if (!files)
1323 spin_lock(&files->file_lock);
1324 for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
1326 file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
1333 spin_unlock(&files->file_lock);
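iterate_fd() (lines 1315-1333) walks the table under file_lock, handing each installed file to a callback and stopping at the first nonzero return. There is no direct userspace equivalent, but /proc/self/fd supports the same kind of walk (note the DIR stream contributes one fd of its own to the listing):

#include <dirent.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	DIR *d = opendir("/proc/self/fd");
	struct dirent *e;

	if (!d)
		return 1;
	while ((e = readdir(d)) != NULL) {
		char path[64], target[256];
		ssize_t n;

		if (e->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "/proc/self/fd/%s", e->d_name);
		n = readlink(path, target, sizeof(target) - 1);
		target[n > 0 ? n : 0] = '\0';
		printf("fd %s -> %s\n", e->d_name, target);
	}
	closedir(d);
	return 0;
}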