Lines Matching refs:vnode
16 static void afs_next_locker(struct afs_vnode *vnode, int error);
25 static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
27 _debug("STATE %u -> %u", vnode->lock_state, state);
28 vnode->lock_state = state;
34 * if the callback is broken on this vnode, then the lock may now be available
36 void afs_lock_may_be_available(struct afs_vnode *vnode)
38 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
40 spin_lock(&vnode->lock);
41 if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
42 afs_next_locker(vnode, 0);
43 trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
44 spin_unlock(&vnode->lock);
51 static void afs_schedule_lock_extension(struct afs_vnode *vnode)
56 expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
64 queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
74 struct afs_vnode *vnode = op->file[0].vnode;
77 spin_lock(&vnode->lock);
78 trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
79 vnode->locked_at = call->issue_time;
80 afs_schedule_lock_extension(vnode);
81 spin_unlock(&vnode->lock);
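Taken together, lines 79-80 stamp the grant time when the lock RPC completes, and line 56 arms the renewal for the halfway point of the lock's server-side lifetime. A runnable model of that arithmetic, assuming AFS_LOCKWAIT is the server lock lifetime in seconds (5 minutes in kAFS, but treat the constant here as illustrative), with ktime replaced by plain millisecond integers:

    #include <stdio.h>
    #include <stdint.h>

    #define AFS_LOCKWAIT (5 * 60)  /* assumed server lock lifetime, seconds */

    /* Model of lines 56 and 79-80: the lock is stamped at grant time and
     * the extension RPC is scheduled for the halfway point of its life. */
    static int64_t extension_delay_ms(int64_t locked_at_ms, int64_t now_ms)
    {
        int64_t expires_at_ms = locked_at_ms + AFS_LOCKWAIT * 1000 / 2;
        int64_t delay_ms = expires_at_ms - now_ms;

        return delay_ms > 0 ? delay_ms : 0;  /* overdue: extend at once */
    }

    int main(void)
    {
        /* Stamped 100s ago with a 300s lifetime: renewal due in 50s. */
        printf("%lld ms\n", (long long)extension_delay_ms(0, 100 * 1000));
        return 0;
    }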
88 * - the caller must hold the vnode lock
90 static void afs_grant_locks(struct afs_vnode *vnode)
93 bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);
95 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
99 list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
101 trace_afs_flock_op(vnode, p, afs_flock_op_grant);
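The walk in afs_grant_locks() promotes waiters from pending_locks to granted_locks according to what the held server lock can cover: a write (exclusive) server lock covers every queued waiter, while a read lock covers only further readers. A small self-contained model of that compatibility test (enum and function names are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    enum req_type { REQ_READ, REQ_WRITE };

    /* Model of the test in afs_grant_locks(): with the server lock held
     * exclusively every queued waiter can be promoted to granted_locks;
     * with it held shared, writers are skipped and only readers move. */
    static bool can_grant(bool held_exclusive, enum req_type waiter)
    {
        return held_exclusive || waiter == REQ_READ;
    }

    int main(void)
    {
        printf("%d\n", can_grant(false, REQ_WRITE)); /* 0: writer waits */
        printf("%d\n", can_grant(false, REQ_READ));  /* 1: reader joins */
        printf("%d\n", can_grant(true, REQ_WRITE));  /* 1: covered */
        return 0;
    }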
111 static void afs_next_locker(struct afs_vnode *vnode, int error)
114 struct key *key = vnode->lock_key;
119 if (vnode->lock_type == AFS_LOCK_WRITE)
122 list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
138 vnode->lock_key = NULL;
142 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
144 trace_afs_flock_op(vnode, next, afs_flock_op_wake);
147 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
148 trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
155 * Kill off all waiters in the pending lock queue due to the vnode being
158 static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
162 afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);
164 while (!list_empty(&vnode->pending_locks)) {
165 p = list_entry(vnode->pending_locks.next,
172 key_put(vnode->lock_key);
173 vnode->lock_key = NULL;
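afs_kill_lockers_enoent() runs when the file has been deleted on the server: the state moves to AFS_VNODE_LOCK_DELETED, every queued waiter is completed with an error instead of being left asleep, and the lock key is dropped (lines 172-173). A minimal userspace model of the drain, with the kernel-side steps noted in comments (the struct is illustrative; the kernel walks file_lock entries and wakes each one):

    #include <stdio.h>

    struct waiter {
        struct waiter *next;
        int           state;  /* 0 = queued, negative = failed */
    };

    /* Model of the loop at line 164: pop every pending waiter and mark
     * it failed so its sleeper returns an error rather than waiting
     * forever.  The kernel also wake_up()s each one and key_put()s the
     * vnode's lock_key. */
    static void kill_lockers(struct waiter **pending, int error)
    {
        while (*pending) {
            struct waiter *p = *pending;

            *pending = p->next;
            p->next = NULL;
            p->state = error;  /* stands in for -ENOENT */
        }
    }

    int main(void)
    {
        struct waiter b = { NULL, 0 }, a = { &b, 0 }, *queue = &a;

        kill_lockers(&queue, -2);
        printf("%d %d\n", a.state, b.state);  /* -2 -2 */
        return 0;
    }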
192 static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
198 vnode->volume->name,
199 vnode->fid.vid,
200 vnode->fid.vnode,
201 vnode->fid.unique,
204 op = afs_alloc_operation(key, vnode->volume);
208 afs_op_set_vnode(op, 0, vnode);
224 static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
229 vnode->volume->name,
230 vnode->fid.vid,
231 vnode->fid.vnode,
232 vnode->fid.unique,
235 op = afs_alloc_operation(key, vnode->volume);
239 afs_op_set_vnode(op, 0, vnode);
255 static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
260 vnode->volume->name,
261 vnode->fid.vid,
262 vnode->fid.vnode,
263 vnode->fid.unique,
266 op = afs_alloc_operation(key, vnode->volume);
270 afs_op_set_vnode(op, 0, vnode);
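afs_set_lock(), afs_extend_lock() and afs_release_lock() all repeat one shape: allocate an operation against the volume under the caller's key, attach the vnode as file slot 0, select the per-call ops table, and run it synchronously. A condensed sketch of that shape (distilled from the visible pattern; the listing only shows lines mentioning "vnode", so the ops assignment and the afs_do_sync_operation() tail are assumptions about the elided lines, though they match the usual kAFS completion path):

    /* Sketch only: the shape shared by the three wrappers above.  The
     * ops argument would select the FS.SetLock / FS.ExtendLock /
     * FS.ReleaseLock call. */
    static int afs_lock_rpc_sketch(struct afs_vnode *vnode, struct key *key,
                                   const struct afs_operation_ops *ops)
    {
        struct afs_operation *op;

        op = afs_alloc_operation(key, vnode->volume);
        if (IS_ERR(op))
            return PTR_ERR(op);

        afs_op_set_vnode(op, 0, vnode); /* the lock lives on file slot 0 */
        op->ops = ops;
        return afs_do_sync_operation(op); /* issue the RPC and wait */
    }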
284 struct afs_vnode *vnode =
289 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
291 spin_lock(&vnode->lock);
294 _debug("wstate %u for %p", vnode->lock_state, vnode);
295 switch (vnode->lock_state) {
297 afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
298 trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
299 spin_unlock(&vnode->lock);
303 ret = afs_release_lock(vnode, vnode->lock_key);
304 if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
305 trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
309 vnode->fid.vid, vnode->fid.vnode, ret);
312 spin_lock(&vnode->lock);
314 afs_kill_lockers_enoent(vnode);
316 afs_next_locker(vnode, 0);
317 spin_unlock(&vnode->lock);
326 ASSERT(!list_empty(&vnode->granted_locks));
328 key = key_get(vnode->lock_key);
329 afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
330 trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
331 spin_unlock(&vnode->lock);
333 ret = afs_extend_lock(vnode, key); /* RPC */
337 trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
340 vnode->fid.vid, vnode->fid.vnode, ret);
343 spin_lock(&vnode->lock);
346 afs_kill_lockers_enoent(vnode);
347 spin_unlock(&vnode->lock);
351 if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
353 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
356 queue_delayed_work(afs_lock_manager, &vnode->lock_work,
358 spin_unlock(&vnode->lock);
370 afs_next_locker(vnode, 0);
371 spin_unlock(&vnode->lock);
375 afs_kill_lockers_enoent(vnode);
376 spin_unlock(&vnode->lock);
381 spin_unlock(&vnode->lock);
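The work function above is the lock manager's state machine: depending on vnode->lock_state it releases a server lock nobody holds any more, extends a granted one at half-life, retries acquisition after a callback break or timeout, or fails everything once the file is gone. An illustrative decision table (the enumerators mirror the AFS_VNODE_LOCK_* states named in the listing; the action strings are paraphrase, not kernel output):

    #include <stdio.h>

    enum vnode_lock_state {
        LOCK_NONE, LOCK_SETTING, LOCK_GRANTED, LOCK_EXTENDING,
        LOCK_NEED_UNLOCK, LOCK_UNLOCKING, LOCK_WAITING_FOR_CB,
        LOCK_DELETED,
    };

    /* Summary of afs_lock_work()'s per-state action. */
    static const char *lock_work_action(enum vnode_lock_state s)
    {
        switch (s) {
        case LOCK_NEED_UNLOCK:
            return "release the server lock, then wake the next waiter";
        case LOCK_GRANTED:
            return "half-life reached: extend the server lock";
        case LOCK_WAITING_FOR_CB:
            return "callback broke or timed out: retry acquisition";
        case LOCK_DELETED:
            return "file gone: fail all waiters";
        default:
            return "nothing to do";
        }
    }

    int main(void)
    {
        printf("%s\n", lock_work_action(LOCK_GRANTED));
        return 0;
    }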
388 * pass responsibility for the unlocking of a vnode on the server to the
391 * - the caller must hold the vnode lock
393 static void afs_defer_unlock(struct afs_vnode *vnode)
395 _enter("%u", vnode->lock_state);
397 if (list_empty(&vnode->granted_locks) &&
398 (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
399 vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
400 cancel_delayed_work(&vnode->lock_work);
402 afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
403 trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
404 queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
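afs_defer_unlock() shows the handoff: the last lock holder never sends the release RPC itself. If the granted list is empty and the state is GRANTED or EXTENDING, it cancels the renewal timer, flips to NEED_UNLOCK and queues the lock manager with zero delay (line 404). A runnable model of that decision (enum and helper names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum state { ST_GRANTED, ST_EXTENDING, ST_NEED_UNLOCK, ST_OTHER };

    /* Model of afs_defer_unlock(): hand the release RPC to the lock
     * manager only when the lock is held (or being extended) and no
     * granted locks remain. */
    static enum state defer_unlock(enum state s, bool granted_empty)
    {
        if (granted_empty && (s == ST_GRANTED || s == ST_EXTENDING)) {
            /* kernel: cancel_delayed_work(&vnode->lock_work);     */
            /* kernel: queue_delayed_work(afs_lock_manager, .., 0); */
            return ST_NEED_UNLOCK;
        }
        return s;
    }

    int main(void)
    {
        printf("%d\n", defer_unlock(ST_GRANTED, true) == ST_NEED_UNLOCK);
        return 0;
    }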
412 static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
421 ret = afs_validate(vnode, key);
428 ret = afs_check_permit(vnode, key, &access);
455 struct afs_vnode *vnode = AFS_FS_I(inode);
466 vnode->fid.vid, vnode->fid.vnode,
478 ret = afs_do_setlk_check(vnode, key, mode, type);
482 trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);
498 spin_lock(&vnode->lock);
499 list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
502 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
509 _debug("try %u", vnode->lock_state);
510 if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
513 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
518 if (vnode->lock_type == AFS_LOCK_WRITE) {
520 list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
526 if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
530 if (vnode->status.lock_count == -1)
533 if (vnode->status.lock_count != 0)
538 if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
542 /* We don't have a lock on this vnode and we aren't currently waiting
550 trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
551 vnode->lock_key = key_get(key);
552 vnode->lock_type = type;
553 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
554 spin_unlock(&vnode->lock);
556 ret = afs_set_lock(vnode, key, type); /* RPC */
558 spin_lock(&vnode->lock);
566 trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
568 afs_next_locker(vnode, ret);
573 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
575 afs_kill_lockers_enoent(vnode);
580 trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
582 afs_next_locker(vnode, 0);
590 ASSERT(list_empty(&vnode->granted_locks));
591 ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
595 afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
596 trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
597 afs_grant_locks(vnode);
602 spin_unlock(&vnode->lock);
609 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
611 trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
619 afs_validate(vnode, key);
626 afs_next_locker(vnode, 0);
631 afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
632 trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
633 queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);
641 spin_unlock(&vnode->lock);
643 trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
646 trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);
649 spin_lock(&vnode->lock);
661 ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
662 afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
672 spin_unlock(&vnode->lock);
688 spin_lock(&vnode->lock);
690 afs_defer_unlock(vnode);
693 spin_unlock(&vnode->lock);
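One detail of the afs_do_setlk() path above worth pulling out: when the vnode already holds a granted server lock (lines 510-520), a request may be granted locally with no RPC at all. A read request rides any held lock, and a write request rides a held write lock; anything else queues and/or goes to the server. A self-contained model of that fast-path test (illustrative names):

    #include <stdbool.h>
    #include <stdio.h>

    enum held { HELD_NONE, HELD_READ, HELD_WRITE };
    enum want { WANT_READ, WANT_WRITE };

    /* Model of the "instant grant" test in afs_do_setlk(). */
    static bool grant_locally(enum held h, enum want w)
    {
        if (h == HELD_NONE)
            return false;        /* must go to the server */
        if (w == WANT_READ)
            return true;         /* any held lock covers a read */
        return h == HELD_WRITE;  /* a write needs the write lock */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               grant_locally(HELD_READ, WANT_READ),    /* 1 */
               grant_locally(HELD_READ, WANT_WRITE),   /* 0 */
               grant_locally(HELD_WRITE, WANT_WRITE)); /* 1 */
        return 0;
    }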
704 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
707 _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
709 trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);
715 _leave(" = %d [%u]", ret, vnode->lock_state);
724 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
730 if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
739 ret = afs_fetch_status(vnode, key, false, NULL);
743 lock_count = READ_ONCE(vnode->status.lock_count);
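afs_do_getlk() answers F_GETLK from the lock_count in the fetched status rather than from local state: a positive count means that many read locks are held, a negative count (the -1 also tested at line 530) means write-locked, and zero means unlocked. A runnable model of that mapping (the strings stand in for the fl_type constants; the sign convention is an inference from the listing plus the AFS status format):

    #include <stdio.h>

    /* Model of the F_GETLK mapping from the server's lock_count. */
    static const char *classify(int lock_count)
    {
        if (lock_count > 0)
            return "F_RDLCK";  /* lock_count read locks held */
        if (lock_count < 0)
            return "F_WRLCK";  /* write-locked, typically -1 */
        return "F_UNLCK";
    }

    int main(void)
    {
        printf("%s %s %s\n", classify(3), classify(-1), classify(0));
        return 0;
    }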
766 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
771 vnode->fid.vid, vnode->fid.vnode, cmd,
779 trace_afs_flock_op(vnode, fl, afs_flock_op_lock);
792 trace_afs_flock_op(vnode, fl, op);
801 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
806 vnode->fid.vid, vnode->fid.vnode, cmd,
820 trace_afs_flock_op(vnode, fl, afs_flock_op_flock);
834 trace_afs_flock_op(vnode, fl, op);
840 * copy into its own list, so we need to add that copy to the vnode's lock
846 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->fl_file));
852 spin_lock(&vnode->lock);
853 trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
855 spin_unlock(&vnode->lock);
859 * need to remove this lock from the vnode queue when it's removed from the
864 struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->fl_file));
868 spin_lock(&vnode->lock);
870 trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
872 if (list_empty(&vnode->granted_locks))
873 afs_defer_unlock(vnode);
875 _debug("state %u for %p", vnode->lock_state, vnode);
876 spin_unlock(&vnode->lock);
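The last two functions exist because the VFS clones a file_lock into its own record when it grants one: afs_fl_copy_lock() adds the clone to the vnode's granted list, and afs_fl_release_private() removes it, deferring the server unlock once the granted list empties. They would be wired up through the VFS's struct file_lock_operations; a sketch of that hookup, reconstructed to match the listing rather than quoted from the file:

    /* Sketch: the VFS invokes these two hooks through the lock's
     * file_lock_operations table when copying or tearing down a lock. */
    static const struct file_lock_operations afs_lock_ops = {
        .fl_copy_lock       = afs_fl_copy_lock,
        .fl_release_private = afs_fl_release_private,
    };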