Lines matching refs:vnode
22 static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
72 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
81 vnode->fid.vid, vnode->fid.vnode, pos, len);
87 ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
106 trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
114 if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
127 trace_afs_folio_dirty(vnode, tracepoint_string("confl"), folio);
158 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
165 vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
181 i_size = i_size_read(&vnode->netfs.inode);
183 write_seqlock(&vnode->cb_lock);
184 i_size = i_size_read(&vnode->netfs.inode);
186 afs_set_i_size(vnode, write_end_pos);
187 write_sequnlock(&vnode->cb_lock);
188 fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
201 trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
205 trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
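The matches at 158-205 come from the write_end path: after a successful copy, the file size is extended under the vnode's callback seqlock and the cache cookie is told about the new length. A minimal sketch of that pattern, reconstructed from the matched lines only (write_end_pos, pos and copied are taken as the usual write_end variables; everything around them is assumed):

	/* Sketch only: extend i_size after a successful write; not a verbatim
	 * copy of the surrounding function. */
	write_end_pos = pos + copied;

	i_size = i_size_read(&vnode->netfs.inode);
	if (write_end_pos > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->netfs.inode);	/* re-check under the lock */
		if (write_end_pos > i_size)
			afs_set_i_size(vnode, write_end_pos);
		write_sequnlock(&vnode->cb_lock);
		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
	}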
223 struct afs_vnode *vnode = AFS_FS_I(mapping->host);
229 vnode->fid.vid, vnode->fid.vnode, len, start);
261 struct afs_vnode *vnode = AFS_FS_I(mapping->host);
267 vnode->fid.vid, vnode->fid.vnode, len, start);
290 static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
292 struct address_space *mapping = vnode->netfs.inode.i_mapping;
299 vnode->fid.vid, vnode->fid.vnode, len, start);
311 trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
318 afs_prune_wb_keys(vnode);
324 * writes on the vnode. *_wbk will contain the last writeback key used or NULL
327 static int afs_get_writeback_key(struct afs_vnode *vnode,
334 spin_lock(&vnode->wb_lock);
338 p = vnode->wb_keys.next;
340 while (p != &vnode->wb_keys) {
356 spin_unlock(&vnode->wb_lock);
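The matches at 324-356 are from the writeback-key lookup: under vnode->wb_lock the vnode->wb_keys list is walked, resuming after the previously used key if the caller passed one in. A rough sketch of that walk, assuming the usual struct afs_wb_key layout (the key_validate() check and the refcount handling are inferred, not shown in the matches):

	/* Sketch: find a usable writeback key, resuming after *_wbk if set. */
	spin_lock(&vnode->wb_lock);

	if (*_wbk)
		p = (*_wbk)->vnode_link.next;	/* resume after the last key used */
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		if (key_validate(wbk->key) == 0) {
			refcount_inc(&wbk->usage);	/* assumed: pin the usable key */
			break;
		}
		wbk = NULL;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);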
365 struct afs_vnode *vnode = op->file[0].vnode;
371 afs_pages_written_back(vnode, op->store.pos, op->store.size);
372 afs_stat_v(vnode, n_stores);
373 atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
386 static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
395 vnode->volume->name,
396 vnode->fid.vid,
397 vnode->fid.vnode,
398 vnode->fid.unique,
401 ret = afs_get_writeback_key(vnode, &wbk);
407 op = afs_alloc_operation(wbk->key, vnode->volume);
413 afs_op_set_vnode(op, 0, vnode);
426 op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
427 op->mtime = vnode->netfs.inode.i_mtime;
440 ret = afs_get_writeback_key(vnode, &wbk);
462 struct afs_vnode *vnode,
558 trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);
586 struct afs_vnode *vnode = AFS_FS_I(mapping->host);
590 loff_t i_size = i_size_read(&vnode->netfs.inode);
591 bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
592 bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
612 trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);
626 afs_extend_writeback(mapping, vnode, &count,
644 afs_write_to_cache(vnode, start, len, i_size, caching);
647 ret = afs_store_data(vnode, &iter, start, false);
653 afs_pages_written_back(vnode, start, len);
690 trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
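The matches at 586-690 trace the locked-folio writeback path: the dirty region is marked for storage, extended over adjacent dirty folios, optionally written to the local cache, and then stored to the server. A compressed sketch of that ordering (the iov_iter_xarray() setup and the WRITE direction are assumptions; the other calls appear in the matches above):

	/* Sketch of the writeback ordering for a dirty region [start, start + len). */
	struct iov_iter iter;

	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);	/* assumed setup */

	if (caching)
		afs_write_to_cache(vnode, start, len, i_size, caching);	/* copy to fscache */

	ret = afs_store_data(vnode, &iter, start, false);		/* send to the server */
	if (ret == 0)
		afs_pages_written_back(vnode, start, len);		/* clear writeback state */
	else
		trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);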
807 struct afs_vnode *vnode = AFS_FS_I(mapping->host);
818 down_read(&vnode->validate_lock);
819 else if (!down_read_trylock(&vnode->validate_lock))
847 up_read(&vnode->validate_lock);
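The matches at 807-847 are from the writepages entry point, which takes the vnode's validate_lock shared around the whole flush. A sketch of that locking, assuming the non-blocking path is chosen for non-synchronous writeback (the WB_SYNC_ALL test is an assumption; only the lock calls appear in the matches):

	/* Sketch: take validate_lock shared for the duration of the flush. */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);		/* integrity writeback must wait */
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;					/* skip rather than block */

	/* ... write back the dirty regions ... */

	up_read(&vnode->validate_lock);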
857 struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
863 vnode->fid.vid, vnode->fid.vnode, count);
865 if (IS_SWAPFILE(&vnode->netfs.inode)) {
874 result = afs_validate(vnode, af->key);
891 struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
896 vnode->fid.vid, vnode->fid.vnode, file,
899 ret = afs_validate(vnode, af->key);
915 struct afs_vnode *vnode = AFS_FS_I(inode);
920 _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
922 afs_validate(vnode, af->key);
954 trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
957 trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
968 * Prune the keys cached for writeback. This helper takes vnode->wb_lock itself (see line 976), so the caller must not hold it.
970 void afs_prune_wb_keys(struct afs_vnode *vnode)
976 spin_lock(&vnode->wb_lock);
978 if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
979 !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
980 list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
986 spin_unlock(&vnode->wb_lock);
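The matches at 968-986 cover the key-pruning helper: when the mapping has no dirty or writeback pages left, the cached writeback keys are detached under wb_lock and released. A sketch of the whole routine, filling in the parts the matcher did not show (the graveyard list and the refcount test are assumptions based on the usual pattern):

	/* Sketch of afs_prune_wb_keys(); details beyond the matched lines are guessed. */
	void afs_prune_wb_keys(struct afs_vnode *vnode)
	{
		LIST_HEAD(graveyard);
		struct afs_wb_key *wbk, *tmp;

		spin_lock(&vnode->wb_lock);

		if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
		    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
			list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
				if (refcount_read(&wbk->usage) == 1)	/* assumed: keep in-use keys */
					list_move(&wbk->vnode_link, &graveyard);
			}
		}

		spin_unlock(&vnode->wb_lock);

		/* Drop the detached keys outside the lock. */
		while (!list_empty(&graveyard)) {
			wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
			list_del(&wbk->vnode_link);
			afs_put_wb_key(wbk);
		}
	}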
1000 struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
1021 trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
1022 ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
1025 trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
1037 struct afs_vnode *vnode = priv;
1041 afs_invalidate_cache(vnode, 0);
1047 static void afs_write_to_cache(struct afs_vnode *vnode,
1051 fscache_write_to_cache(afs_vnode_cache(vnode),
1052 vnode->netfs.inode.i_mapping, start, len, i_size,
1053 afs_write_to_cache_done, vnode, caching);
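Finally, the matches at 22 and 1047-1053 describe the cache write-out wrapper and its completion handler: afs_write_to_cache() is essentially a pass-through to fscache_write_to_cache(), and afs_write_to_cache_done() (lines 1037-1041) invalidates the cache on error. A sketch of the wrapper assembled from those matches (the split of the parameter list across lines is inferred from the call at line 644):

	/* Sketch: hand the freshly written region to fscache for write-out. */
	static void afs_write_to_cache(struct afs_vnode *vnode,
				       loff_t start, size_t len, loff_t i_size,
				       bool caching)
	{
		fscache_write_to_cache(afs_vnode_cache(vnode),
				       vnode->netfs.inode.i_mapping, start, len, i_size,
				       afs_write_to_cache_done, vnode, caching);
	}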