Lines matching refs:fl (references to the struct file_lock pointer fl in the Linux kernel file-locking code, shown with their original source line numbers)

73 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
74 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
75 #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
76 #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
77 #define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
79 static bool lease_breaking(struct file_lock *fl)
81 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
84 static int target_leasetype(struct file_lock *fl)
86 if (fl->fl_flags & FL_UNLOCK_PENDING)
88 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
90 return fl->fl_type;
159 * In addition, it also protects the fl->fl_blocked_requests list, and the
160 * fl->fl_blocker pointer for file_lock structures that are acting as lock
208 struct file_lock *fl;
210 list_for_each_entry(fl, list, fl_list) {
211 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
236 struct file_lock *fl;
239 list_for_each_entry(fl, list, fl_list)
240 if (fl->fl_file == filp)
245 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
259 static void locks_init_lock_heads(struct file_lock *fl)
261 INIT_HLIST_NODE(&fl->fl_link);
262 INIT_LIST_HEAD(&fl->fl_list);
263 INIT_LIST_HEAD(&fl->fl_blocked_requests);
264 INIT_LIST_HEAD(&fl->fl_blocked_member);
265 init_waitqueue_head(&fl->fl_wait);
271 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
273 if (fl)
274 locks_init_lock_heads(fl);
276 return fl;
280 void locks_release_private(struct file_lock *fl)
282 BUG_ON(waitqueue_active(&fl->fl_wait));
283 BUG_ON(!list_empty(&fl->fl_list));
284 BUG_ON(!list_empty(&fl->fl_blocked_requests));
285 BUG_ON(!list_empty(&fl->fl_blocked_member));
286 BUG_ON(!hlist_unhashed(&fl->fl_link));
288 if (fl->fl_ops) {
289 if (fl->fl_ops->fl_release_private)
290 fl->fl_ops->fl_release_private(fl);
291 fl->fl_ops = NULL;
294 if (fl->fl_lmops) {
295 if (fl->fl_lmops->lm_put_owner) {
296 fl->fl_lmops->lm_put_owner(fl->fl_owner);
297 fl->fl_owner = NULL;
299 fl->fl_lmops = NULL;
316 struct file_lock *fl;
319 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
320 if (fl->fl_owner != owner)
322 if (!list_empty(&fl->fl_blocked_requests)) {
333 void locks_free_lock(struct file_lock *fl)
335 locks_release_private(fl);
336 kmem_cache_free(filelock_cache, fl);
343 struct file_lock *fl;
346 fl = list_first_entry(dispose, struct file_lock, fl_list);
347 list_del_init(&fl->fl_list);
348 locks_free_lock(fl);
352 void locks_init_lock(struct file_lock *fl)
354 memset(fl, 0, sizeof(struct file_lock));
355 locks_init_lock_heads(fl);
362 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
364 new->fl_owner = fl->fl_owner;
365 new->fl_pid = fl->fl_pid;
367 new->fl_flags = fl->fl_flags;
368 new->fl_type = fl->fl_type;
369 new->fl_start = fl->fl_start;
370 new->fl_end = fl->fl_end;
371 new->fl_lmops = fl->fl_lmops;
374 if (fl->fl_lmops) {
375 if (fl->fl_lmops->lm_get_owner)
376 fl->fl_lmops->lm_get_owner(fl->fl_owner);
381 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
386 locks_copy_conflock(new, fl);
388 new->fl_file = fl->fl_file;
389 new->fl_ops = fl->fl_ops;
391 if (fl->fl_ops) {
392 if (fl->fl_ops->fl_copy_lock)
393 fl->fl_ops->fl_copy_lock(new, fl);
398 static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
407 if (list_empty(&fl->fl_blocked_requests))
410 list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
429 static void flock_make_lock(struct file *filp, struct file_lock *fl, int type)
431 locks_init_lock(fl);
433 fl->fl_file = filp;
434 fl->fl_owner = filp;
435 fl->fl_pid = current->tgid;
436 fl->fl_flags = FL_FLOCK;
437 fl->fl_type = type;
438 fl->fl_end = OFFSET_MAX;
441 static int assign_type(struct file_lock *fl, int type)
447 fl->fl_type = type;
455 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
460 fl->fl_start = 0;
463 fl->fl_start = filp->f_pos;
466 fl->fl_start = i_size_read(file_inode(filp));
471 if (l->l_start > OFFSET_MAX - fl->fl_start)
473 fl->fl_start += l->l_start;
474 if (fl->fl_start < 0)
480 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
482 fl->fl_end = fl->fl_start + (l->l_len - 1);
485 if (fl->fl_start + l->l_len < 0)
487 fl->fl_end = fl->fl_start - 1;
488 fl->fl_start += l->l_len;
490 fl->fl_end = OFFSET_MAX;
492 fl->fl_owner = current->files;
493 fl->fl_pid = current->tgid;
494 fl->fl_file = filp;
495 fl->fl_flags = FL_POSIX;
496 fl->fl_ops = NULL;
497 fl->fl_lmops = NULL;
499 return assign_type(fl, l->l_type);
505 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
515 return flock64_to_posix_lock(filp, fl, &ll);
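The flock64_to_posix_lock() lines above resolve l_whence against 0, the current file position, or the inode size, then range-check the result. A minimal userspace sketch of that conversion as seen by the caller, locking the last 100 bytes of a file (the path and sizes are made-up examples, not part of the listing):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/demo.dat", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 1000) < 0) { perror("setup"); return 1; }

	/* SEEK_END is resolved against i_size_read() and the negative
	 * l_start is added to it, as in flock64_to_posix_lock() above;
	 * a resulting start before byte 0 would be rejected with EINVAL. */
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_END,
		.l_start  = -100,
		.l_len    = 100,
	};
	if (fcntl(fd, F_SETLK, &fl) == -1)
		perror("F_SETLK");
	close(fd);
	return 0;
}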
520 lease_break_callback(struct file_lock *fl)
522 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
527 lease_setup(struct file_lock *fl, void **priv)
529 struct file *filp = fl->fl_file;
537 if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
552 static int lease_init(struct file *filp, int type, struct file_lock *fl)
554 if (assign_type(fl, type) != 0)
557 fl->fl_owner = filp;
558 fl->fl_pid = current->tgid;
560 fl->fl_file = filp;
561 fl->fl_flags = FL_LEASE;
562 fl->fl_start = 0;
563 fl->fl_end = OFFSET_MAX;
564 fl->fl_ops = NULL;
565 fl->fl_lmops = &lease_manager_ops;
572 struct file_lock *fl = locks_alloc_lock();
575 if (fl == NULL)
578 error = lease_init(filp, type, fl);
580 locks_free_lock(fl);
583 return fl;
603 static void locks_insert_global_locks(struct file_lock *fl)
610 fl->fl_link_cpu = smp_processor_id();
611 hlist_add_head(&fl->fl_link, &fll->hlist);
616 static void locks_delete_global_locks(struct file_lock *fl)
627 if (hlist_unhashed(&fl->fl_link))
630 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
632 hlist_del_init(&fl->fl_link);
637 posix_owner_key(struct file_lock *fl)
639 return (unsigned long)fl->fl_owner;
760 struct file_lock *fl;
764 list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
765 if (conflict(fl, waiter)) {
766 blocker = fl;
815 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
817 list_add_tail(&fl->fl_list, before);
818 locks_insert_global_locks(fl);
822 locks_unlink_lock_ctx(struct file_lock *fl)
824 locks_delete_global_locks(fl);
825 list_del_init(&fl->fl_list);
826 locks_wake_up_blocks(fl);
830 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
832 locks_unlink_lock_ctx(fl);
834 list_add(&fl->fl_list, dispose);
836 locks_free_lock(fl);
902 posix_test_lock(struct file *filp, struct file_lock *fl)
912 fl->fl_type = F_UNLCK;
919 if (!posix_test_locks_conflict(fl, cfl))
931 locks_copy_conflock(fl, cfl);
934 fl->fl_type = F_UNLCK;
979 struct file_lock *fl;
981 hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
982 if (posix_same_owner(fl, block_fl)) {
983 while (fl->fl_blocker)
984 fl = fl->fl_blocker;
985 return fl;
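The blocked_hash walk above (what_owner_is_waiting_for()) is how posix_locks_deadlock() follows the chain of waiters; a circular F_SETLKW wait is failed with EDEADLK. A rough two-process sketch, assuming a scratch path; the sleep()-based ordering is a simplification and detection is best-effort, so EDEADLK is expected but not guaranteed:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int lock_byte(int fd, off_t off, int cmd)
{
	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = off, .l_len = 1,
	};
	return fcntl(fd, cmd, &fl);
}

int main(void)
{
	int fd = open("/tmp/deadlock.demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 2) < 0) { perror("setup"); return 1; }

	if (fork() == 0) {		/* child: take byte 1, then want byte 0 */
		lock_byte(fd, 1, F_SETLK);
		sleep(2);		/* let the parent block on byte 1 first */
		if (lock_byte(fd, 0, F_SETLKW) == -1)
			printf("child: %s\n", strerror(errno)); /* expect EDEADLK */
		_exit(0);		/* exiting releases the child's lock */
	}

	lock_byte(fd, 0, F_SETLK);	/* parent: take byte 0 */
	sleep(1);			/* let the child take byte 1 */
	if (lock_byte(fd, 1, F_SETLKW) == 0)	/* blocks until the child exits */
		printf("parent: got byte 1\n");
	wait(NULL);
	return 0;
}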
1025 struct file_lock *fl;
1049 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1050 if (request->fl_file != fl->fl_file)
1052 if (request->fl_type == fl->fl_type)
1055 locks_delete_lock_ctx(fl, &dispose);
1066 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1067 if (!flock_locks_conflict(request, fl))
1073 locks_insert_block(fl, request, flock_locks_conflict);
1097 struct file_lock *fl, *tmp;
1135 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1136 if (!posix_locks_conflict(request, fl))
1138 if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable
1139 && (*fl->fl_lmops->lm_lock_expirable)(fl)) {
1140 owner = fl->fl_lmops->lm_mod_owner;
1141 func = fl->fl_lmops->lm_expire_lock;
1150 locks_copy_conflock(conflock, fl);
1165 if (likely(!posix_locks_deadlock(request, fl))) {
1167 __locks_insert_block(fl, request,
1181 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1182 if (posix_same_owner(request, fl))
1187 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1188 if (!posix_same_owner(request, fl))
1192 if (request->fl_type == fl->fl_type) {
1197 if (fl->fl_end < request->fl_start - 1)
1202 if (fl->fl_start - 1 > request->fl_end)
1210 if (fl->fl_start > request->fl_start)
1211 fl->fl_start = request->fl_start;
1213 request->fl_start = fl->fl_start;
1214 if (fl->fl_end < request->fl_end)
1215 fl->fl_end = request->fl_end;
1217 request->fl_end = fl->fl_end;
1219 locks_delete_lock_ctx(fl, &dispose);
1222 request = fl;
1228 if (fl->fl_end < request->fl_start)
1230 if (fl->fl_start > request->fl_end)
1234 if (fl->fl_start < request->fl_start)
1235 left = fl;
1239 if (fl->fl_end > request->fl_end) {
1240 right = fl;
1243 if (fl->fl_start >= request->fl_start) {
1248 locks_delete_lock_ctx(fl, &dispose);
1265 locks_insert_lock_ctx(request, &fl->fl_list);
1266 locks_delete_lock_ctx(fl, &dispose);
1295 locks_insert_lock_ctx(new_fl, &fl->fl_list);
1296 fl = new_fl;
1307 locks_insert_lock_ctx(left, &fl->fl_list);
1335 * @fl: The lock to be applied
1346 int posix_lock_file(struct file *filp, struct file_lock *fl,
1349 return posix_lock_inode(file_inode(filp), fl, conflock);
1356 * @fl: The lock to be applied
1360 static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1365 error = posix_lock_inode(inode, fl, NULL);
1368 error = wait_event_interruptible(fl->fl_wait,
1369 list_empty(&fl->fl_blocked_member));
1373 locks_delete_block(fl);
1377 static void lease_clear_pending(struct file_lock *fl, int arg)
1381 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1384 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1389 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1391 int error = assign_type(fl, arg);
1395 lease_clear_pending(fl, arg);
1396 locks_wake_up_blocks(fl);
1398 struct file *filp = fl->fl_file;
1402 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1403 if (fl->fl_fasync != NULL) {
1404 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1405 fl->fl_fasync = NULL;
1407 locks_delete_lock_ctx(fl, dispose);
1424 struct file_lock *fl, *tmp;
1428 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1429 trace_time_out_leases(inode, fl);
1430 if (past_time(fl->fl_downgrade_time))
1431 lease_modify(fl, F_RDLCK, dispose);
1432 if (past_time(fl->fl_break_time))
1433 lease_modify(fl, F_UNLCK, dispose);
1463 struct file_lock *fl;
1467 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1468 if (leases_conflict(fl, breaker))
1491 struct file_lock *new_fl, *fl, *tmp;
1523 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1524 if (!leases_conflict(fl, new_fl))
1527 if (fl->fl_flags & FL_UNLOCK_PENDING)
1529 fl->fl_flags |= FL_UNLOCK_PENDING;
1530 fl->fl_break_time = break_time;
1532 if (lease_breaking(fl))
1534 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1535 fl->fl_downgrade_time = break_time;
1537 if (fl->fl_lmops->lm_break(fl))
1538 locks_delete_lock_ctx(fl, &dispose);
1551 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1552 break_time = fl->fl_break_time;
1557 locks_insert_block(fl, new_fl, leases_conflict);
1605 struct file_lock *fl;
1610 fl = list_first_entry_or_null(&ctx->flc_lease,
1612 if (fl && (fl->fl_type == F_WRLCK))
1647 struct file_lock *fl;
1658 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1659 if (fl->fl_file != filp)
1661 type = target_leasetype(fl);
1721 struct file_lock *fl, *my_fl = NULL, *lease;
1763 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1764 if (fl->fl_file == filp &&
1765 fl->fl_owner == lease->fl_owner) {
1766 my_fl = fl;
1780 if (fl->fl_flags & FL_UNLOCK_PENDING)
1830 struct file_lock *fl, *victim = NULL;
1843 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1844 if (fl->fl_file == filp &&
1845 fl->fl_owner == owner) {
1846 victim = fl;
1852 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
1966 struct file_lock *fl;
1970 fl = lease_alloc(filp, arg);
1971 if (IS_ERR(fl))
1972 return PTR_ERR(fl);
1976 locks_free_lock(fl);
1981 error = vfs_setlease(filp, arg, &fl, (void **)&new);
1982 if (fl)
1983 locks_free_lock(fl);
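The do_fcntl_add_lease() lines above allocate the lease with lease_alloc() and install it through vfs_setlease(); lease_break_callback() delivers SIGIO to the holder when a conflicting open arrives. A hedged userspace sketch of taking and releasing a read lease (the path is illustrative; F_SETLEASE requires owning the file or CAP_LEASE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t lease_broken;

static void on_sigio(int sig)
{
	(void)sig;
	lease_broken = 1;
}

int main(void)
{
	int fd = open("/tmp/lease.demo", O_RDONLY | O_CREAT, 0644);
	if (fd < 0) { perror("open"); return 1; }

	signal(SIGIO, on_sigio);	/* lease_break_callback() sends SIGIO by default */

	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1) {
		perror("F_SETLEASE");
		return 1;
	}
	printf("lease type: %s\n",
	       fcntl(fd, F_GETLEASE) == F_RDLCK ? "read" : "other");

	/* Wait for another process to open the file for writing; the kernel
	 * then breaks the lease and we must release or downgrade it within
	 * /proc/sys/fs/lease-break-time seconds.  (A real program would use
	 * sigsuspend() to avoid the check/pause signal race.) */
	while (!lease_broken)
		pause();
	fcntl(fd, F_SETLEASE, F_UNLCK);
	return 0;
}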
2009 * @fl: The lock to be applied
2013 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2018 error = flock_lock_inode(inode, fl);
2021 error = wait_event_interruptible(fl->fl_wait,
2022 list_empty(&fl->fl_blocked_member));
2026 locks_delete_block(fl);
2033 * @fl: The lock to be applied
2037 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2040 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2042 res = posix_lock_inode_wait(inode, fl);
2045 res = flock_lock_inode_wait(inode, fl);
2072 struct file_lock fl;
2100 flock_make_lock(f.file, &fl, type);
2102 error = security_file_lock(f.file, fl.fl_type);
2108 fl.fl_flags |= FL_SLEEP;
2113 &fl);
2115 error = locks_lock_file_wait(f.file, &fl);
2117 locks_release_private(&fl);
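The flock(2) syscall path above builds the request on the stack with flock_make_lock(), with fl_owner set to the struct file, which is why these locks belong to the open file description rather than the process. A minimal userspace sketch (the path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/flock.demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0) { perror("open"); return 1; }

	/* Whole-file advisory lock; because the owner is the open file
	 * description (fl_owner = filp above), dup()'d and inherited
	 * descriptors share the same lock. */
	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
		perror("flock");	/* EWOULDBLOCK if another holder exists */
		return 1;
	}
	/* ... critical section ... */
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}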
2127 * @fl: The lock to test; also used to hold result
2132 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2134 WARN_ON_ONCE(filp != fl->fl_file);
2136 return filp->f_op->lock(filp, F_GETLK, fl);
2137 posix_test_lock(filp, fl);
2144 * @fl: The file_lock whose fl_pid should be translated
2149 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2154 if (IS_OFDLCK(fl))
2156 if (IS_REMOTELCK(fl))
2157 return fl->fl_pid;
2164 return (pid_t)fl->fl_pid;
2167 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2173 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2175 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2181 if (fl->fl_start > OFFT_OFFSET_MAX)
2183 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2186 flock->l_start = fl->fl_start;
2187 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2188 fl->fl_end - fl->fl_start + 1;
2190 flock->l_type = fl->fl_type;
2195 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2197 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2198 flock->l_start = fl->fl_start;
2199 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2200 fl->fl_end - fl->fl_start + 1;
2202 flock->l_type = fl->fl_type;
2211 struct file_lock *fl;
2214 fl = locks_alloc_lock();
2215 if (fl == NULL)
2222 error = flock_to_posix_lock(filp, fl, flock);
2231 fl->fl_flags |= FL_OFDLCK;
2232 fl->fl_owner = filp;
2235 error = vfs_test_lock(filp, fl);
2239 flock->l_type = fl->fl_type;
2240 if (fl->fl_type != F_UNLCK) {
2241 error = posix_lock_to_flock(flock, fl);
2246 locks_free_lock(fl);
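fcntl_getlk() above converts the userspace struct flock, runs vfs_test_lock()/posix_test_lock(), and copies any conflicting lock back with posix_lock_to_flock(). A small probe from userspace, assuming a scratch file:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/getlk.demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0) { perror("open"); return 1; }

	/* Ask whether a write lock on the whole file would be granted.
	 * l_len == 0 means "to end of file" (fl_end == OFFSET_MAX above). */
	struct flock probe = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = 0, .l_len = 0,
	};
	if (fcntl(fd, F_GETLK, &probe) == -1) { perror("F_GETLK"); return 1; }

	if (probe.l_type == F_UNLCK)
		printf("no conflicting lock\n");
	else
		printf("conflict: pid %d holds a lock starting at %lld\n",
		       (int)probe.l_pid, (long long)probe.l_start);
	close(fd);
	return 0;
}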
2254 * @fl: The lock to be applied
2283 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2285 WARN_ON_ONCE(filp != fl->fl_file);
2287 return filp->f_op->lock(filp, cmd, fl);
2289 return posix_lock_file(filp, fl, conf);
2294 struct file_lock *fl)
2298 error = security_file_lock(filp, fl->fl_type);
2303 error = vfs_lock_file(filp, cmd, fl, NULL);
2306 error = wait_event_interruptible(fl->fl_wait,
2307 list_empty(&fl->fl_blocked_member));
2311 locks_delete_block(fl);
2316 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2318 check_fmode_for_setlk(struct file_lock *fl)
2320 switch (fl->fl_type) {
2322 if (!(fl->fl_file->f_mode & FMODE_READ))
2326 if (!(fl->fl_file->f_mode & FMODE_WRITE))
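check_fmode_for_setlk() above is why a read lock needs a descriptor opened for reading and a write lock one opened for writing; do_lock_file_wait() then sleeps for F_SETLKW. A minimal caller-side sketch (the path is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Open read-write so both F_RDLCK and F_WRLCK pass the f_mode check. */
	int fd = open("/tmp/setlk.demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0) { perror("open"); return 1; }

	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = 0, .l_len = 10,	/* lock bytes 0-9 */
	};

	/* F_SETLK fails with EAGAIN/EACCES instead of blocking;
	 * F_SETLKW waits in do_lock_file_wait() until the lock is granted. */
	if (fcntl(fd, F_SETLKW, &fl) == -1) { perror("F_SETLKW"); return 1; }

	fl.l_type = F_UNLCK;		/* release the same range */
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return 0;
}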
2418 struct file_lock *fl;
2421 fl = locks_alloc_lock();
2422 if (fl == NULL)
2430 error = flock64_to_posix_lock(filp, fl, flock);
2439 fl->fl_flags |= FL_OFDLCK;
2440 fl->fl_owner = filp;
2443 error = vfs_test_lock(filp, fl);
2447 flock->l_type = fl->fl_type;
2448 if (fl->fl_type != F_UNLCK)
2449 posix_lock_to_flock64(flock, fl);
2452 locks_free_lock(fl);
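The F_OFD_GETLK branch above marks the request FL_OFDLCK and keys ownership to the struct file rather than the process. A hedged sketch of open file description locks from userspace (glibc exposes F_OFD_* under _GNU_SOURCE; the path is illustrative):

#define _GNU_SOURCE		/* for F_OFD_SETLK / F_OFD_GETLK */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/ofd.demo", O_RDWR | O_CREAT, 0644);
	if (fd < 0) { perror("open"); return 1; }

	/* Open file description locks are owned by the open file description
	 * (fl_owner = filp above), so they survive other descriptors being
	 * closed and conflict between two independent opens even within one
	 * process.  l_pid must be 0 on input. */
	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = 0, .l_len = 0, .l_pid = 0,
	};
	if (fcntl(fd, F_OFD_SETLK, &fl) == -1) { perror("F_OFD_SETLK"); return 1; }

	/* Probing from the same description sees no conflict with its own
	 * lock; a conflict held elsewhere is reported with l_pid = -1,
	 * cf. locks_translate_pid() above for IS_OFDLCK locks. */
	fl.l_type = F_RDLCK;
	if (fcntl(fd, F_OFD_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
		printf("no conflict seen from this open file description\n");
	close(fd);
	return 0;
}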
2579 struct file_lock fl;
2585 flock_make_lock(filp, &fl, F_UNLCK);
2586 fl.fl_flags |= FL_CLOSE;
2589 filp->f_op->flock(filp, F_SETLKW, &fl);
2591 flock_lock_inode(inode, &fl);
2593 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2594 fl.fl_ops->fl_release_private(&fl);
2601 struct file_lock *fl, *tmp;
2609 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2610 if (filp == fl->fl_file)
2611 lease_modify(fl, F_UNLCK, &dispose);
2648 * @fl: The lock to be unblocked
2652 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2654 WARN_ON_ONCE(filp != fl->fl_file);
2656 return filp->f_op->lock(filp, F_CANCELLK, fl);
2693 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2701 fl_pid = locks_translate_pid(fl, proc_pidns);
2708 if (fl->fl_file != NULL)
2709 inode = file_inode(fl->fl_file);
2716 if (IS_POSIX(fl)) {
2717 if (fl->fl_flags & FL_ACCESS)
2719 else if (IS_OFDLCK(fl))
2726 } else if (IS_FLOCK(fl)) {
2728 } else if (IS_LEASE(fl)) {
2729 if (fl->fl_flags & FL_DELEG)
2734 if (lease_breaking(fl))
2736 else if (fl->fl_file)
2743 type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2755 if (IS_POSIX(fl)) {
2756 if (fl->fl_end == OFFSET_MAX)
2757 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2759 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2831 struct file_lock *fl;
2833 list_for_each_entry(fl, head, fl_list) {
2835 if (filp != fl->fl_file)
2837 if (fl->fl_owner != files &&
2838 fl->fl_owner != filp)
2843 lock_get_status(f, fl, *id, "", 0);
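lock_get_status() above is the formatter behind /proc/locks (and, via __show_fd_locks(), the lock: lines in /proc/<pid>/fdinfo/<fd>). Roughly, each record carries the lock class, the advisory/mandatory flag or lease state, the access type, the translated pid, device:inode, and the byte range. An illustrative, made-up excerpt of what such output looks like (field spacing and values are approximate):

1: POSIX  ADVISORY  WRITE 1234 08:01:131090 0 EOF
2: OFDLCK ADVISORY  READ  -1 08:01:131091 0 9
3: FLOCK  ADVISORY  WRITE 5678 08:02:262144 0 EOF
4: LEASE  ACTIVE    READ  910 08:01:131092 0 EOF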