Lines Matching defs:inode
270 pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
307 struct inode *inode;
312 inode = lo->plh_inode;
315 if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
319 i_state = inode->i_state;
320 spin_unlock(&inode->i_lock);
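The refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock) hit above comes from the layout-header put path: i_lock is taken only when the final reference is dropped, so ordinary puts stay lock-free. A minimal sketch of that dec-and-lock idiom follows; struct my_hdr, my_put_hdr() and the kfree() teardown are hypothetical stand-ins, not the pNFS structures.

#include <linux/fs.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object that is freed once its last reference goes away. */
struct my_hdr {
	refcount_t	refcount;
	struct inode	*inode;
};

static void my_put_hdr(struct my_hdr *hdr)
{
	struct inode *inode = hdr->inode;

	/* Take i_lock only if this put drops the last reference. */
	if (refcount_dec_and_lock(&hdr->refcount, &inode->i_lock)) {
		/* ...detach hdr from the inode here, still under i_lock... */
		spin_unlock(&inode->i_lock);
		kfree(hdr);
	}
}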
328 static struct inode *
331 struct inode *inode = igrab(lo->plh_inode);
332 if (inode)
333 return inode;
413 struct inode *inode)
425 spin_lock(&inode->i_lock);
426 lo = NFS_I(inode)->layout;
444 spin_unlock(&inode->i_lock);
454 * Note that caller must hold inode->i_lock.
504 struct inode *inode = lo->plh_inode;
512 spin_lock(&inode->i_lock);
515 spin_unlock(&inode->i_lock);
557 struct inode *inode = lseg->pls_layout->plh_inode;
558 NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
598 struct inode *inode;
608 inode = lo->plh_inode;
610 if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
612 spin_unlock(&inode->i_lock);
619 spin_unlock(&inode->i_lock);
812 pnfs_layout_add_bulk_destroy_list(struct inode *inode,
818 spin_lock(&inode->i_lock);
819 lo = NFS_I(inode)->layout;
825 spin_unlock(&inode->i_lock);
838 struct inode *inode;
848 inode = pnfs_grab_inode_layout_hdr(lo);
849 if (inode != NULL) {
852 if (pnfs_layout_add_bulk_destroy_list(inode,
857 iput(inode);
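The bulk-destroy hits above combine pnfs_grab_inode_layout_hdr() (which, per the igrab() hit earlier in this listing, pins the inode) with a matching iput() once the layout has been queued for destruction. A hedged sketch of that pin-work-unpin shape; struct my_obj and my_process() are invented for illustration.

#include <linux/fs.h>

/* Hypothetical object carrying a back-pointer to its inode, like lo->plh_inode. */
struct my_obj {
	struct inode *inode;
};

static void my_process(struct my_obj *obj)
{
	/* igrab() returns NULL if the inode is already being evicted. */
	struct inode *inode = igrab(obj->inode);

	if (!inode)
		return;
	/* ...safe to move obj onto a destroy list while the inode is pinned... */
	iput(inode);	/* drop the igrab() reference */
}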
875 struct inode *inode;
882 dprintk("%s freeing layout for inode %lu\n", __func__,
884 inode = lo->plh_inode;
886 pnfs_layoutcommit_inode(inode, false);
888 spin_lock(&inode->i_lock);
895 spin_unlock(&inode->i_lock);
898 nfs_commit_inode(inode, 0);
900 nfs_iput_and_deactive(inode);
1031 pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
1035 if (inode) {
1036 server = NFS_SERVER(inode);
1084 pnfs_alloc_init_layoutget_args(struct inode *ino,
1134 lgp->args.inode = ino;
1147 if (lgp->args.inode)
1148 pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
1153 static void pnfs_clear_layoutcommit(struct inode *inode,
1156 struct nfs_inode *nfsi = NFS_I(inode);
1173 struct inode *inode = lo->plh_inode;
1176 spin_lock(&inode->i_lock);
1190 spin_unlock(&inode->i_lock);
1228 struct inode *inode = lo->plh_inode;
1230 args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
1231 args->inode = inode;
1246 struct inode *ino = lo->plh_inode;
1302 struct inode *inode = lo->plh_inode;
1306 spin_lock(&inode->i_lock);
1314 spin_unlock(&inode->i_lock);
1320 spin_unlock(&inode->i_lock);
1332 _pnfs_return_layout(struct inode *ino)
1347 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
1396 pnfs_commit_and_return_layout(struct inode *inode)
1401 spin_lock(&inode->i_lock);
1402 lo = NFS_I(inode)->layout;
1404 spin_unlock(&inode->i_lock);
1410 spin_unlock(&inode->i_lock);
1411 filemap_fdatawait(inode->i_mapping);
1412 ret = pnfs_layoutcommit_inode(inode, true);
1414 ret = _pnfs_return_layout(inode);
1415 spin_lock(&inode->i_lock);
1417 spin_unlock(&inode->i_lock);
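pnfs_commit_and_return_layout() above encodes an ordering rule: flush outstanding writes with filemap_fdatawait(), run the layoutcommit synchronously, and only then return the layout, re-taking i_lock around each look at the layout pointer. A rough skeleton of that flow, assuming hypothetical my_layoutcommit()/my_return_layout() helpers in place of the real pnfs calls.

#include <linux/fs.h>
#include <linux/spinlock.h>

int my_layoutcommit(struct inode *inode, bool sync);	/* placeholder for the commit step */
int my_return_layout(struct inode *inode);		/* placeholder for the return step */

static int my_commit_and_return(struct inode *inode, void *layout)
{
	int ret;

	spin_lock(&inode->i_lock);
	if (!layout) {				/* no layout: nothing to return */
		spin_unlock(&inode->i_lock);
		return 0;
	}
	spin_unlock(&inode->i_lock);

	filemap_fdatawait(inode->i_mapping);	/* 1. wait for in-flight writes */
	ret = my_layoutcommit(inode, true);	/* 2. commit synchronously */
	if (ret == 0)
		ret = my_return_layout(inode);	/* 3. only then return the layout */
	return ret;
}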
1422 bool pnfs_roc(struct inode *ino,
1564 &arg->range, arg->inode))
1579 struct inode *inode = args->inode;
1586 spin_lock(&inode->i_lock);
1590 spin_unlock(&inode->i_lock);
1599 trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
1607 bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
1713 struct inode *inode = lo->plh_inode;
1714 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
1726 alloc_init_layout_hdr(struct inode *ino,
1747 pnfs_find_alloc_layout(struct inode *ino,
1853 struct inode *ino, int iomode)
1966 pnfs_update_layout(struct inode *ino,
2192 dprintk("%s: inode %s/%llu pNFS layout segment %s for "
2228 _pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
2258 struct inode *ino = data->dentry->d_inode;
2326 void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
2359 if (!lgp->args.inode) {
2363 lgp->args.inode = ino;
2365 lo = NFS_I(lgp->args.inode)->layout;
2378 struct inode *inode = lgp->args.inode;
2379 if (inode) {
2380 struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
2391 struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
2394 struct inode *ino = lo->plh_inode;
2435 * inode invalid, and retry the layoutget
2530 pnfs_mark_layout_for_return(struct inode *inode,
2536 spin_lock(&inode->i_lock);
2537 lo = NFS_I(inode)->layout;
2539 spin_unlock(&inode->i_lock);
2554 spin_unlock(&inode->i_lock);
2558 spin_unlock(&inode->i_lock);
2559 nfs_commit_inode(inode, 0);
2563 void pnfs_error_mark_layout_for_return(struct inode *inode,
2572 pnfs_mark_layout_for_return(inode, &range);
2642 struct inode *inode;
2649 inode = lo->plh_inode;
2650 if (!inode || !pnfs_layout_can_be_returned(lo) ||
2653 spin_lock(&inode->i_lock);
2656 spin_unlock(&inode->i_lock);
2664 spin_unlock(&inode->i_lock);
2670 spin_unlock(&inode->i_lock);
2840 nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
2851 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2853 pnfs_return_layout(hdr->inode);
2865 pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
2896 struct inode *inode = hdr->inode;
2898 struct nfs_server *nfss = NFS_SERVER(inode);
2903 inode->i_ino, hdr->args.count, hdr->args.offset, how);
2906 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
2969 nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
2977 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
2979 pnfs_return_layout(hdr->inode);
3021 struct inode *inode = hdr->inode;
3022 struct nfs_server *nfss = NFS_SERVER(inode);
3028 __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
3032 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
3048 nfs_pageio_init_read(&pgio, hdr->inode, false,
3106 static void pnfs_clear_layoutcommitting(struct inode *inode)
3108 unsigned long *bitlock = &NFS_I(inode)->flags;
3118 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
3122 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
3129 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
3139 pnfs_clear_layoutcommitting(inode);
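pnfs_clear_layoutcommitting() above releases a "layoutcommitting" bit lock kept in NFS_I(inode)->flags, paired with an acquire in pnfs_layoutcommit_inode(). A small sketch of that bit-lock handshake using the generic wait_on_bit_lock()/wake_up_bit() helpers; MY_FLAG_BUSY and the bare flags word are invented, and the sketch shows only the generic idiom, not the exact NFS wait helpers.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define MY_FLAG_BUSY	0		/* hypothetical bit number */

static unsigned long my_flags;		/* stand-in for NFS_I(inode)->flags */

/* Acquire side: sleep until the bit can be taken (killable). */
static int my_begin(void)
{
	return wait_on_bit_lock(&my_flags, MY_FLAG_BUSY, TASK_KILLABLE);
}

/* Release side: drop the bit and wake any waiter. */
static void my_end(void)
{
	clear_bit_unlock(MY_FLAG_BUSY, &my_flags);
	smp_mb__after_atomic();		/* order the unlock before the wakeup check */
	wake_up_bit(&my_flags, MY_FLAG_BUSY);
}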
3149 pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
3152 struct nfs_inode *nfsi = NFS_I(inode);
3155 spin_lock(&inode->i_lock);
3159 dprintk("%s: Set layoutcommit for inode %lu ",
3160 __func__, inode->i_ino);
3167 spin_unlock(&inode->i_lock);
3171 /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
3174 mark_inode_dirty_sync(inode);
3180 struct nfs_server *nfss = NFS_SERVER(data->args.inode);
3184 pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
3196 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
3198 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
3200 struct nfs_inode *nfsi = NFS_I(inode);
3204 if (!pnfs_layoutcommit_outstanding(inode))
3207 dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
3228 spin_lock(&inode->i_lock);
3233 pnfs_list_write_lseg(inode, &data->lseg_list);
3239 spin_unlock(&inode->i_lock);
3241 data->args.inode = inode;
3243 data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
3249 data->res.server = NFS_SERVER(inode);
3255 spin_lock(&inode->i_lock);
3267 mark_inode_dirty_sync(inode);
3271 spin_unlock(&inode->i_lock);
3274 pnfs_clear_layoutcommitting(inode);
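The pnfs_layoutcommit_inode() hits above sketch its control flow: return early when no layoutcommit is outstanding, gather the written segments and arguments under i_lock, hand the request off, and clear the committing state again on failure. A compressed, hypothetical skeleton of that shape; none of the my_* names or the payload struct mirror the real nfs4_layoutcommit_data.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical commit payload. */
struct my_commit_data {
	struct inode		*inode;
	struct list_head	lseg_list;
};

bool my_commit_outstanding(struct inode *inode);		/* placeholder flag test */
int my_send_commit(struct my_commit_data *data, bool sync);	/* placeholder RPC; takes ownership on success */

static int my_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct my_commit_data *data;
	int status;

	/* Fast path: nothing was written through a layout. */
	if (!my_commit_outstanding(inode))
		return 0;

	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		return -ENOMEM;
	INIT_LIST_HEAD(&data->lseg_list);

	/* Snapshot the segments that need committing under i_lock. */
	spin_lock(&inode->i_lock);
	/* ...move written segments onto data->lseg_list... */
	spin_unlock(&inode->i_lock);

	data->inode = inode;
	status = my_send_commit(data, sync);
	if (status)
		kfree(data);	/* on error, also clear the committing state here */
	return status;
}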
3280 pnfs_generic_sync(struct inode *inode, bool datasync)
3282 return pnfs_layoutcommit_inode(inode, true);
3300 pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
3302 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
3303 struct nfs_server *server = NFS_SERVER(inode);
3304 struct nfs_inode *nfsi = NFS_I(inode);
3312 if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
3318 spin_lock(&inode->i_lock);
3319 if (!NFS_I(inode)->layout) {
3320 spin_unlock(&inode->i_lock);
3323 hdr = NFS_I(inode)->layout;
3325 spin_unlock(&inode->i_lock);
3333 data->args.fh = NFS_FH(inode);
3334 data->args.inode = inode;
3339 status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);
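pnfs_report_layoutstat() at the end of the listing gates the report on nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS), grabs the layout pointer under i_lock, fills in the file handle and inode, and hands the data to nfs42_proc_layoutstats_generic(). A hedged skeleton of that capability-check-then-report shape; my_grab_layout(), my_send_stats() and the payload struct are placeholders.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stats payload. */
struct my_stats_data {
	struct inode	*inode;
	void		*layout;	/* opaque, referenced layout header */
};

bool my_server_supports_stats(struct inode *inode);	/* placeholder capability test */
void *my_grab_layout(struct inode *inode);		/* placeholder: returns referenced layout or NULL; caller holds i_lock */
int my_send_stats(struct my_stats_data *data);		/* placeholder RPC; takes ownership on success */

static int my_report_stats(struct inode *inode, gfp_t gfp_flags)
{
	struct my_stats_data *data;
	void *layout;

	if (!my_server_supports_stats(inode))
		return -EOPNOTSUPP;

	/* The layout pointer is only stable under i_lock. */
	spin_lock(&inode->i_lock);
	layout = my_grab_layout(inode);
	spin_unlock(&inode->i_lock);
	if (!layout)
		return 0;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data)
		return -ENOMEM;	/* a real implementation would also drop the layout reference */
	data->inode = inode;
	data->layout = layout;
	return my_send_stats(data);
}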