Lines matching refs:fl in fs/locks.c

177 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
178 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
179 #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT))
180 #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK)
181 #define IS_REMOTELCK(fl) (fl->fl_pid <= 0)
183 static bool lease_breaking(struct file_lock *fl)
185 return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
188 static int target_leasetype(struct file_lock *fl)
190 if (fl->fl_flags & FL_UNLOCK_PENDING)
192 if (fl->fl_flags & FL_DOWNGRADE_PENDING)
194 return fl->fl_type;
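
These predicates classify a struct file_lock by its fl_flags: FL_POSIX for fcntl() byte-range locks (with FL_OFDLCK marking the open-file-description variant), FL_FLOCK for flock(2) locks, and the FL_LEASE/FL_DELEG/FL_LAYOUT family for leases, while lease_breaking() and target_leasetype() report pending lease breaks. The IS_*() macros are local to fs/locks.c, so a sketch of the same classification from outside the file would test fl_flags directly; fl_kind() below is a hypothetical helper, not part of the kernel:

#include <linux/fs.h>

/* Hypothetical: map fl_flags to the lock class names used in this file. */
static const char *fl_kind(const struct file_lock *fl)
{
        if (fl->fl_flags & FL_POSIX)
                return (fl->fl_flags & FL_OFDLCK) ? "OFD" : "POSIX";
        if (fl->fl_flags & FL_FLOCK)
                return "FLOCK";
        if (fl->fl_flags & (FL_LEASE | FL_DELEG | FL_LAYOUT))
                return "LEASE";
        return "UNKNOWN";
}
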
234 * In addition, it also protects the fl->fl_blocked_requests list, and the
235 * fl->fl_blocker pointer for file_lock structures that are acting as lock
283 struct file_lock *fl;
285 list_for_each_entry(fl, list, fl_list) {
286 pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
311 struct file_lock *fl;
314 list_for_each_entry(fl, list, fl_list)
315 if (fl->fl_file == filp)
320 fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid);
334 static void locks_init_lock_heads(struct file_lock *fl)
336 INIT_HLIST_NODE(&fl->fl_link);
337 INIT_LIST_HEAD(&fl->fl_list);
338 INIT_LIST_HEAD(&fl->fl_blocked_requests);
339 INIT_LIST_HEAD(&fl->fl_blocked_member);
340 init_waitqueue_head(&fl->fl_wait);
346 struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
348 if (fl)
349 locks_init_lock_heads(fl);
351 return fl;
355 void locks_release_private(struct file_lock *fl)
357 BUG_ON(waitqueue_active(&fl->fl_wait));
358 BUG_ON(!list_empty(&fl->fl_list));
359 BUG_ON(!list_empty(&fl->fl_blocked_requests));
360 BUG_ON(!list_empty(&fl->fl_blocked_member));
361 BUG_ON(!hlist_unhashed(&fl->fl_link));
363 if (fl->fl_ops) {
364 if (fl->fl_ops->fl_release_private)
365 fl->fl_ops->fl_release_private(fl);
366 fl->fl_ops = NULL;
369 if (fl->fl_lmops) {
370 if (fl->fl_lmops->lm_put_owner) {
371 fl->fl_lmops->lm_put_owner(fl->fl_owner);
372 fl->fl_owner = NULL;
374 fl->fl_lmops = NULL;
380 void locks_free_lock(struct file_lock *fl)
382 locks_release_private(fl);
383 kmem_cache_free(filelock_cache, fl);
390 struct file_lock *fl;
393 fl = list_first_entry(dispose, struct file_lock, fl_list);
394 list_del_init(&fl->fl_list);
395 locks_free_lock(fl);
399 void locks_init_lock(struct file_lock *fl)
401 memset(fl, 0, sizeof(struct file_lock));
402 locks_init_lock_heads(fl);
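
locks_alloc_lock(), locks_release_private(), locks_free_lock() and locks_init_lock() make up the allocation lifecycle: a lock is zero-allocated from filelock_cache with its list heads and wait queue initialised, and must be off every list before it is freed (the BUG_ON checks above). A minimal sketch of that lifecycle, assuming a GPL kernel context that may call these exported helpers; demo_lock_lifecycle() is hypothetical:

#include <linux/fs.h>
#include <linux/fcntl.h>

static int demo_lock_lifecycle(void)
{
        struct file_lock *fl = locks_alloc_lock();      /* zeroed, heads initialised */

        if (!fl)
                return -ENOMEM;

        fl->fl_type  = F_WRLCK;
        fl->fl_start = 0;
        fl->fl_end   = 4095;            /* bytes 0..4095 */

        /* ... normally handed to posix_lock_file()/vfs_lock_file() here ... */

        locks_free_lock(fl);            /* runs locks_release_private() first */
        return 0;
}
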
409 void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
411 new->fl_owner = fl->fl_owner;
412 new->fl_pid = fl->fl_pid;
414 new->fl_flags = fl->fl_flags;
415 new->fl_type = fl->fl_type;
416 new->fl_start = fl->fl_start;
417 new->fl_end = fl->fl_end;
418 new->fl_lmops = fl->fl_lmops;
421 if (fl->fl_lmops) {
422 if (fl->fl_lmops->lm_get_owner)
423 fl->fl_lmops->lm_get_owner(fl->fl_owner);
428 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
433 locks_copy_conflock(new, fl);
435 new->fl_file = fl->fl_file;
436 new->fl_ops = fl->fl_ops;
438 if (fl->fl_ops) {
439 if (fl->fl_ops->fl_copy_lock)
440 fl->fl_ops->fl_copy_lock(new, fl);
445 static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
454 if (list_empty(&fl->fl_blocked_requests))
457 list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
479 flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
486 if (fl == NULL) {
487 fl = locks_alloc_lock();
488 if (fl == NULL)
491 locks_init_lock(fl);
494 fl->fl_file = filp;
495 fl->fl_owner = filp;
496 fl->fl_pid = current->tgid;
497 fl->fl_flags = FL_FLOCK;
498 fl->fl_type = type;
499 fl->fl_end = OFFSET_MAX;
501 return fl;
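
flock_make_lock() builds the whole-file FL_FLOCK lock behind the flock(2) system call: owner and file are both the struct file, the range is 0..OFFSET_MAX, and LOCK_SH/LOCK_EX/LOCK_UN arrive here as F_RDLCK/F_WRLCK/F_UNLCK. The userspace counterpart, purely for illustration (the path is arbitrary):

#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/demo.lock", O_CREAT | O_RDWR, 0600);

        if (fd < 0)
                return 1;

        if (flock(fd, LOCK_EX) == 0) {  /* whole-file FL_FLOCK lock, F_WRLCK type */
                /* ... critical section ... */
                flock(fd, LOCK_UN);     /* released via flock_make_lock(filp, LOCK_UN, ...) */
        }
        close(fd);
        return 0;
}
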
504 static int assign_type(struct file_lock *fl, long type)
510 fl->fl_type = type;
518 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
523 fl->fl_start = 0;
526 fl->fl_start = filp->f_pos;
529 fl->fl_start = i_size_read(file_inode(filp));
534 if (l->l_start > OFFSET_MAX - fl->fl_start)
536 fl->fl_start += l->l_start;
537 if (fl->fl_start < 0)
543 if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
545 fl->fl_end = fl->fl_start + (l->l_len - 1);
548 if (fl->fl_start + l->l_len < 0)
550 fl->fl_end = fl->fl_start - 1;
551 fl->fl_start += l->l_len;
553 fl->fl_end = OFFSET_MAX;
555 fl->fl_owner = current->files;
556 fl->fl_pid = current->tgid;
557 fl->fl_file = filp;
558 fl->fl_flags = FL_POSIX;
559 fl->fl_ops = NULL;
560 fl->fl_lmops = NULL;
562 return assign_type(fl, l->l_type);
568 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
578 return flock64_to_posix_lock(filp, fl, &ll);
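
flock64_to_posix_lock() maps the userspace struct flock onto fl_start/fl_end: l_whence picks the base (start of file, current f_pos, or i_size), a positive l_len gives fl_end = fl_start + l_len - 1, a negative l_len covers the bytes before the base, and l_len == 0 means "to end of file" (fl_end = OFFSET_MAX). A userspace example of the common case; lock_range() is illustrative and assumes fd was opened for writing:

#include <fcntl.h>

/* Write-lock bytes 100..199 of fd, waiting if another owner holds them. */
static int lock_range(int fd)
{
        struct flock fl = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,   /* base = start of file       */
                .l_start  = 100,        /* fl_start = 100             */
                .l_len    = 100,        /* fl_end   = 100 + 100 - 1   */
        };

        return fcntl(fd, F_SETLKW, &fl);
}
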
583 lease_break_callback(struct file_lock *fl)
585 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
590 lease_setup(struct file_lock *fl, void **priv)
592 struct file *filp = fl->fl_file;
600 if (!fasync_insert_entry(fa->fa_fd, filp, &fl->fl_fasync, fa))
615 static int lease_init(struct file *filp, long type, struct file_lock *fl)
617 if (assign_type(fl, type) != 0)
620 fl->fl_owner = filp;
621 fl->fl_pid = current->tgid;
623 fl->fl_file = filp;
624 fl->fl_flags = FL_LEASE;
625 fl->fl_start = 0;
626 fl->fl_end = OFFSET_MAX;
627 fl->fl_ops = NULL;
628 fl->fl_lmops = &lease_manager_ops;
635 struct file_lock *fl = locks_alloc_lock();
638 if (fl == NULL)
641 error = lease_init(filp, type, fl);
643 locks_free_lock(fl);
646 return fl;
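
lease_init() and lease_alloc() back fcntl(F_SETLEASE): a lease is a whole-file FL_LEASE lock whose break handler (lease_break_callback() above) notifies the holder with SIGIO through the fasync entry installed by lease_setup(). The userspace side, roughly; take_read_lease() is illustrative and skips error handling and the usual requirement that the caller own the file (or hold CAP_LEASE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>

static void on_lease_break(int sig)
{
        (void)sig;      /* real code would release or downgrade the lease here */
}

static int take_read_lease(int fd)
{
        signal(SIGIO, on_lease_break);          /* SIGIO arrives on a lease break */
        return fcntl(fd, F_SETLEASE, F_RDLCK);  /* becomes an FL_LEASE, F_RDLCK lock */
}
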
666 static void locks_insert_global_locks(struct file_lock *fl)
673 fl->fl_link_cpu = smp_processor_id();
674 hlist_add_head(&fl->fl_link, &fll->hlist);
679 static void locks_delete_global_locks(struct file_lock *fl)
690 if (hlist_unhashed(&fl->fl_link))
693 fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu);
695 hlist_del_init(&fl->fl_link);
700 posix_owner_key(struct file_lock *fl)
702 return (unsigned long)fl->fl_owner;
823 struct file_lock *fl;
827 list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
828 if (conflict(fl, waiter)) {
829 blocker = fl;
878 locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before)
880 list_add_tail(&fl->fl_list, before);
881 locks_insert_global_locks(fl);
885 locks_unlink_lock_ctx(struct file_lock *fl)
887 locks_delete_global_locks(fl);
888 list_del_init(&fl->fl_list);
889 locks_wake_up_blocks(fl);
893 locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
895 locks_unlink_lock_ctx(fl);
897 list_add(&fl->fl_list, dispose);
899 locks_free_lock(fl);
952 posix_test_lock(struct file *filp, struct file_lock *fl)
960 fl->fl_type = F_UNLCK;
966 if (posix_locks_conflict(fl, cfl)) {
967 locks_copy_conflock(fl, cfl);
971 fl->fl_type = F_UNLCK;
1016 struct file_lock *fl;
1018 hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
1019 if (posix_same_owner(fl, block_fl)) {
1020 while (fl->fl_blocker)
1021 fl = fl->fl_blocker;
1022 return fl;
1062 struct file_lock *fl;
1086 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1087 if (request->fl_file != fl->fl_file)
1089 if (request->fl_type == fl->fl_type)
1092 locks_delete_lock_ctx(fl, &dispose);
1103 list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
1104 if (!flock_locks_conflict(request, fl))
1110 locks_insert_block(fl, request, flock_locks_conflict);
1134 struct file_lock *fl, *tmp;
1169 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1170 if (!posix_locks_conflict(request, fl))
1173 locks_copy_conflock(conflock, fl);
1188 if (likely(!posix_locks_deadlock(request, fl))) {
1190 __locks_insert_block(fl, request,
1204 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1205 if (posix_same_owner(request, fl))
1210 list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
1211 if (!posix_same_owner(request, fl))
1215 if (request->fl_type == fl->fl_type) {
1220 if (fl->fl_end < request->fl_start - 1)
1225 if (fl->fl_start - 1 > request->fl_end)
1233 if (fl->fl_start > request->fl_start)
1234 fl->fl_start = request->fl_start;
1236 request->fl_start = fl->fl_start;
1237 if (fl->fl_end < request->fl_end)
1238 fl->fl_end = request->fl_end;
1240 request->fl_end = fl->fl_end;
1242 locks_delete_lock_ctx(fl, &dispose);
1245 request = fl;
1251 if (fl->fl_end < request->fl_start)
1253 if (fl->fl_start > request->fl_end)
1257 if (fl->fl_start < request->fl_start)
1258 left = fl;
1262 if (fl->fl_end > request->fl_end) {
1263 right = fl;
1266 if (fl->fl_start >= request->fl_start) {
1271 locks_delete_lock_ctx(fl, &dispose);
1288 locks_insert_lock_ctx(request, &fl->fl_list);
1289 locks_delete_lock_ctx(fl, &dispose);
1318 locks_insert_lock_ctx(new_fl, &fl->fl_list);
1319 fl = new_fl;
1330 locks_insert_lock_ctx(left, &fl->fl_list);
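
The pass above is what gives POSIX locks their per-owner semantics: adjacent or overlapping locks of the same type and owner are coalesced, and an unlock or a lock of a different type landing in the middle of an existing range splits it. The observable effect from userspace, as an illustration:

#include <fcntl.h>

static void coalesce_and_split_demo(int fd)
{
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
                            .l_start = 0, .l_len = 100 };

        fcntl(fd, F_SETLK, &fl);        /* one record: bytes 0-99 */

        fl.l_type  = F_UNLCK;
        fl.l_start = 40;
        fl.l_len   = 20;
        fcntl(fd, F_SETLK, &fl);        /* splits it into 0-39 and 60-99 */
}
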
1358 * @fl: The lock to be applied
1369 int posix_lock_file(struct file *filp, struct file_lock *fl,
1372 return posix_lock_inode(locks_inode(filp), fl, conflock);
1379 * @fl: The lock to be applied
1383 static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
1388 error = posix_lock_inode(inode, fl, NULL);
1391 error = wait_event_interruptible(fl->fl_wait,
1392 list_empty(&fl->fl_blocked_member));
1396 locks_delete_block(fl);
1413 struct file_lock *fl;
1424 list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
1425 if (fl->fl_owner != current->files &&
1426 fl->fl_owner != file) {
1448 struct file_lock fl;
1452 locks_init_lock(&fl);
1453 fl.fl_pid = current->tgid;
1454 fl.fl_file = filp;
1455 fl.fl_flags = FL_POSIX | FL_ACCESS;
1458 fl.fl_type = type;
1459 fl.fl_start = start;
1460 fl.fl_end = end;
1464 fl.fl_owner = filp;
1465 fl.fl_flags &= ~FL_SLEEP;
1466 error = posix_lock_inode(inode, &fl, NULL);
1472 fl.fl_flags |= FL_SLEEP;
1473 fl.fl_owner = current->files;
1474 error = posix_lock_inode(inode, &fl, NULL);
1477 error = wait_event_interruptible(fl.fl_wait,
1478 list_empty(&fl.fl_blocked_member));
1490 locks_delete_block(&fl);
1497 static void lease_clear_pending(struct file_lock *fl, int arg)
1501 fl->fl_flags &= ~FL_UNLOCK_PENDING;
1504 fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
1509 int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
1511 int error = assign_type(fl, arg);
1515 lease_clear_pending(fl, arg);
1516 locks_wake_up_blocks(fl);
1518 struct file *filp = fl->fl_file;
1522 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
1523 if (fl->fl_fasync != NULL) {
1524 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
1525 fl->fl_fasync = NULL;
1527 locks_delete_lock_ctx(fl, dispose);
1544 struct file_lock *fl, *tmp;
1548 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1549 trace_time_out_leases(inode, fl);
1550 if (past_time(fl->fl_downgrade_time))
1551 lease_modify(fl, F_RDLCK, dispose);
1552 if (past_time(fl->fl_break_time))
1553 lease_modify(fl, F_UNLCK, dispose);
1583 struct file_lock *fl;
1587 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1588 if (leases_conflict(fl, breaker))
1611 struct file_lock *new_fl, *fl, *tmp;
1643 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
1644 if (!leases_conflict(fl, new_fl))
1647 if (fl->fl_flags & FL_UNLOCK_PENDING)
1649 fl->fl_flags |= FL_UNLOCK_PENDING;
1650 fl->fl_break_time = break_time;
1652 if (lease_breaking(fl))
1654 fl->fl_flags |= FL_DOWNGRADE_PENDING;
1655 fl->fl_downgrade_time = break_time;
1657 if (fl->fl_lmops->lm_break(fl))
1658 locks_delete_lock_ctx(fl, &dispose);
1671 fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
1672 break_time = fl->fl_break_time;
1677 locks_insert_block(fl, new_fl, leases_conflict);
1725 struct file_lock *fl;
1730 fl = list_first_entry_or_null(&ctx->flc_lease,
1732 if (fl && (fl->fl_type == F_WRLCK))
1767 struct file_lock *fl;
1778 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1779 if (fl->fl_file != filp)
1781 type = target_leasetype(fl);
1838 struct file_lock *fl, *my_fl = NULL, *lease;
1887 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1888 if (fl->fl_file == filp &&
1889 fl->fl_owner == lease->fl_owner) {
1890 my_fl = fl;
1904 if (fl->fl_flags & FL_UNLOCK_PENDING)
1954 struct file_lock *fl, *victim = NULL;
1967 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
1968 if (fl->fl_file == filp &&
1969 fl->fl_owner == owner) {
1970 victim = fl;
1976 error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
2114 struct file_lock *fl;
2118 fl = lease_alloc(filp, arg);
2119 if (IS_ERR(fl))
2120 return PTR_ERR(fl);
2124 locks_free_lock(fl);
2129 error = vfs_setlease(filp, arg, &fl, (void **)&new);
2130 if (fl)
2131 locks_free_lock(fl);
2157 * @fl: The lock to be applied
2161 static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2166 error = flock_lock_inode(inode, fl);
2169 error = wait_event_interruptible(fl->fl_wait,
2170 list_empty(&fl->fl_blocked_member));
2174 locks_delete_block(fl);
2181 * @fl: The lock to be applied
2185 int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
2188 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
2190 res = posix_lock_inode_wait(inode, fl);
2193 res = flock_lock_inode_wait(inode, fl);
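
locks_lock_inode_wait() dispatches on fl_flags and, for FL_SLEEP requests, blocks in the wait_event_interruptible()/locks_delete_block() pattern shown above until the lock is granted or a signal arrives. A sketch of an in-kernel caller taking a whole-file flock-style lock this way; demo_flock_wait() is hypothetical, the field setup simply mirrors flock_make_lock() above, and it assumes a context where these exported helpers and OFFSET_MAX are visible:

#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/sched.h>

static int demo_flock_wait(struct file *filp)
{
        struct file_lock fl;

        locks_init_lock(&fl);
        fl.fl_file  = filp;
        fl.fl_owner = filp;             /* flock locks are owned by the open file */
        fl.fl_pid   = current->tgid;
        fl.fl_flags = FL_FLOCK | FL_SLEEP;
        fl.fl_type  = F_WRLCK;          /* LOCK_EX equivalent */
        fl.fl_end   = OFFSET_MAX;       /* whole file */

        return locks_lock_inode_wait(locks_inode(filp), &fl);
}
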
2270 * @fl: The lock to test; also used to hold result
2275 int vfs_test_lock(struct file *filp, struct file_lock *fl)
2278 return filp->f_op->lock(filp, F_GETLK, fl);
2279 posix_test_lock(filp, fl);
2286 * @fl: The file_lock who's fl_pid should be translated
2291 static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
2296 if (IS_OFDLCK(fl))
2298 if (IS_REMOTELCK(fl))
2299 return fl->fl_pid;
2306 return (pid_t)fl->fl_pid;
2309 pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
2315 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
2317 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2323 if (fl->fl_start > OFFT_OFFSET_MAX)
2325 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
2328 flock->l_start = fl->fl_start;
2329 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2330 fl->fl_end - fl->fl_start + 1;
2332 flock->l_type = fl->fl_type;
2337 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
2339 flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
2340 flock->l_start = fl->fl_start;
2341 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
2342 fl->fl_end - fl->fl_start + 1;
2344 flock->l_type = fl->fl_type;
2353 struct file_lock *fl;
2356 fl = locks_alloc_lock();
2357 if (fl == NULL)
2363 error = flock_to_posix_lock(filp, fl, flock);
2373 fl->fl_flags |= FL_OFDLCK;
2374 fl->fl_owner = filp;
2377 error = vfs_test_lock(filp, fl);
2381 flock->l_type = fl->fl_type;
2382 if (fl->fl_type != F_UNLCK) {
2383 error = posix_lock_to_flock(flock, fl);
2388 locks_free_lock(fl);
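
fcntl_getlk() is the F_GETLK/F_OFD_GETLK path: it allocates a file_lock, converts the userspace struct flock, marks the OFD variant with FL_OFDLCK and fl_owner = filp, asks vfs_test_lock() for a conflict, and converts any conflicting lock back with posix_lock_to_flock(). From userspace the OFD query looks like this (l_pid must be 0 on input; on a conflict held by an OFD or remote owner it comes back as -1, matching IS_REMOTELCK() and locks_translate_pid() above); would_block() is illustrative:

#define _GNU_SOURCE
#include <fcntl.h>

/* Ask whether a whole-file write lock on fd would conflict with anything. */
static int would_block(int fd)
{
        struct flock fl = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,          /* 0 = to end of file */
                .l_pid    = 0,          /* required for the F_OFD_* commands */
        };

        if (fcntl(fd, F_OFD_GETLK, &fl) < 0)
                return -1;

        return fl.l_type != F_UNLCK;    /* details of the blocker are left in fl */
}
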
2396 * @fl: The lock to be applied
2425 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
2428 return filp->f_op->lock(filp, cmd, fl);
2430 return posix_lock_file(filp, fl, conf);
2435 struct file_lock *fl)
2439 error = security_file_lock(filp, fl->fl_type);
2444 error = vfs_lock_file(filp, cmd, fl, NULL);
2447 error = wait_event_interruptible(fl->fl_wait,
2448 list_empty(&fl->fl_blocked_member));
2452 locks_delete_block(fl);
2457 /* Ensure that fl->fl_file has compatible f_mode for F_SETLK calls */
2459 check_fmode_for_setlk(struct file_lock *fl)
2461 switch (fl->fl_type) {
2463 if (!(fl->fl_file->f_mode & FMODE_READ))
2467 if (!(fl->fl_file->f_mode & FMODE_WRITE))
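
check_fmode_for_setlk() rejects lock types the descriptor's open mode cannot justify: F_RDLCK needs FMODE_READ and F_WRLCK needs FMODE_WRITE, otherwise F_SETLK/F_SETLKW fail with EBADF. A userspace illustration; bad_setlk() is hypothetical and omits error handling:

#include <fcntl.h>

/* Fails with errno == EBADF: a read lock needs a readable descriptor. */
static int bad_setlk(const char *path)
{
        int fd = open(path, O_WRONLY);
        struct flock fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };

        return fcntl(fd, F_SETLK, &fl);
}
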
2566 struct file_lock *fl;
2569 fl = locks_alloc_lock();
2570 if (fl == NULL)
2577 error = flock64_to_posix_lock(filp, fl, flock);
2587 fl->fl_flags |= FL_OFDLCK;
2588 fl->fl_owner = filp;
2591 error = vfs_test_lock(filp, fl);
2595 flock->l_type = fl->fl_type;
2596 if (fl->fl_type != F_UNLCK)
2597 posix_lock_to_flock64(flock, fl);
2600 locks_free_lock(fl);
2735 struct file_lock fl;
2741 flock_make_lock(filp, LOCK_UN, &fl);
2742 fl.fl_flags |= FL_CLOSE;
2745 filp->f_op->flock(filp, F_SETLKW, &fl);
2747 flock_lock_inode(inode, &fl);
2749 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2750 fl.fl_ops->fl_release_private(&fl);
2757 struct file_lock *fl, *tmp;
2765 list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
2766 if (filp == fl->fl_file)
2767 lease_modify(fl, F_UNLCK, &dispose);
2804 * @fl: The lock to be unblocked
2808 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2811 return filp->f_op->lock(filp, F_CANCELLK, fl);
2848 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2855 fl_pid = locks_translate_pid(fl, proc_pidns);
2862 if (fl->fl_file != NULL)
2863 inode = locks_inode(fl->fl_file);
2866 if (IS_POSIX(fl)) {
2867 if (fl->fl_flags & FL_ACCESS)
2869 else if (IS_OFDLCK(fl))
2877 } else if (IS_FLOCK(fl)) {
2878 if (fl->fl_type & LOCK_MAND) {
2883 } else if (IS_LEASE(fl)) {
2884 if (fl->fl_flags & FL_DELEG)
2889 if (lease_breaking(fl))
2891 else if (fl->fl_file)
2898 if (fl->fl_type & LOCK_MAND) {
2900 (fl->fl_type & LOCK_READ)
2901 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2902 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2904 int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
2917 if (IS_POSIX(fl)) {
2918 if (fl->fl_end == OFFSET_MAX)
2919 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2921 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
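
lock_get_status() formats the records that appear in /proc/locks: an index, the lock class and flavour, the type, the namespace-translated pid (-1 for OFD locks), the maj:min:ino of the locked file, and the byte range ("EOF" standing in for OFFSET_MAX). Schematically, as an illustration of the layout rather than real output:

<id>: POSIX  ADVISORY  WRITE <pid> <maj>:<min>:<ino> <start> <end>
<id>: OFDLCK ADVISORY  READ  -1    <maj>:<min>:<ino> <start> EOF
<id>: FLOCK  ADVISORY  WRITE <pid> <maj>:<min>:<ino> 0 EOF
<id>: LEASE  ACTIVE    READ  <pid> <maj>:<min>:<ino> 0 EOF
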
2930 struct file_lock *fl, *bfl;
2933 fl = hlist_entry(v, struct file_lock, fl_link);
2935 if (locks_translate_pid(fl, proc_pidns) == 0)
2938 lock_get_status(f, fl, iter->li_pos, "");
2940 list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member)
2950 struct file_lock *fl;
2952 list_for_each_entry(fl, head, fl_list) {
2954 if (filp != fl->fl_file)
2956 if (fl->fl_owner != files &&
2957 fl->fl_owner != filp)
2962 lock_get_status(f, fl, *id, "");