Lines matching references to cifs_sb (each hit is prefixed with its line number in the source file)
493 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
2760 struct cifs_sb_info *new = mnt_data->cifs_sb;
2810 struct cifs_sb_info *new = mnt_data->cifs_sb;
2833 struct cifs_sb_info *cifs_sb;
2841 cifs_sb = CIFS_SB(sb);
2844 if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
2849 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
3145 struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3205 if (cifs_sb)
3206 cifs_sb->mnt_cifs_flags |=
3214 if (cifs_sb)
3215 cifs_sb->mnt_cifs_flags |=
3251 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
3253 struct smb3_fs_context *ctx = cifs_sb->ctx;
3255 INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
3257 spin_lock_init(&cifs_sb->tlink_tree_lock);
3258 cifs_sb->tlink_tree = RB_ROOT;
3266 cifs_sb->local_nls = load_nls_default();
3268 cifs_sb->local_nls = load_nls(ctx->iocharset);
3269 if (cifs_sb->local_nls == NULL) {
3275 ctx->local_nls = cifs_sb->local_nls;
3277 smb3_update_mnt_flags(cifs_sb);
3283 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
3286 cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
3294 cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
3295 if (cifs_sb->prepath == NULL)
3297 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
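The cifs_setup_cifs_sb() hits above (3251-3297) cover the per-superblock initialization: the tlink rbtree and its spinlock, the prune_tlinks delayed work, the NLS table used for name conversion, and the optional prefix path. The sketch below is reassembled from those hits only; it assumes the CIFS client's internal types (struct cifs_sb_info, struct smb3_fs_context), the error codes are illustrative, and the RO_CACHE/mount-flag handling at 3277-3286 is reduced to the smb3_update_mnt_flags() call.

/*
 * Simplified sketch of the initialization pattern shown above.
 * Kernel-context code; not the CIFS client's exact implementation.
 */
static int example_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
{
	struct smb3_fs_context *ctx = cifs_sb->ctx;

	/* tlink tree, its lock, and the worker that prunes idle tlinks (see 4187 below) */
	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
	spin_lock_init(&cifs_sb->tlink_tree_lock);
	cifs_sb->tlink_tree = RB_ROOT;

	/* charset for on-the-wire name conversion: explicit iocharset or the default */
	cifs_sb->local_nls = ctx->iocharset ? load_nls(ctx->iocharset)
					    : load_nls_default();
	if (cifs_sb->local_nls == NULL)
		return -ELIBACC;	/* illustrative error code */
	ctx->local_nls = cifs_sb->local_nls;

	smb3_update_mnt_flags(cifs_sb);	/* translate ctx options into mnt_cifs_flags */

	/* optional prefix path below the share root */
	if (ctx->prepath) {
		cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
		if (cifs_sb->prepath == NULL)
			return -ENOMEM;
		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	}
	return 0;
}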
3314 mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
3368 struct cifs_sb_info *cifs_sb;
3374 !mnt_ctx->cifs_sb)) {
3380 cifs_sb = mnt_ctx->cifs_sb;
3392 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
3401 reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx);
3417 server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb);
3418 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
3422 else if ((cifs_sb->mnt_cifs_flags &
3434 if ((cifs_sb->ctx->wsize == 0) ||
3435 (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) {
3436 cifs_sb->ctx->wsize =
3442 if (cifs_sb->ctx->wsize == 0) {
3443 cifs_sb->ctx->wsize = PAGE_SIZE;
3447 if ((cifs_sb->ctx->rsize == 0) ||
3448 (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
3449 cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
3456 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
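The hits at 3434-3449 show the requested I/O sizes being clamped to what the server will negotiate for this tree connection, with PAGE_SIZE as a floor for the write size. A sketch of that clamping logic; the helper name is hypothetical, and in the real code these checks sit inline in the tcon setup path of cifs_mount.

/* Hypothetical helper illustrating the wsize/rsize clamping shown above. */
static void example_clamp_io_sizes(struct cifs_sb_info *cifs_sb,
				   struct cifs_tcon *tcon,
				   struct smb3_fs_context *ctx,
				   struct TCP_Server_Info *server)
{
	unsigned int max_wsize = server->ops->negotiate_wsize(tcon, ctx);
	unsigned int max_rsize = server->ops->negotiate_rsize(tcon, ctx);

	/* never write more per request than the server negotiated */
	if (cifs_sb->ctx->wsize == 0 || cifs_sb->ctx->wsize > max_wsize)
		cifs_sb->ctx->wsize = max_wsize;
	if (cifs_sb->ctx->wsize == 0)		/* degenerate negotiation result */
		cifs_sb->ctx->wsize = PAGE_SIZE;

	/* same cap for reads */
	if (cifs_sb->ctx->rsize == 0 || cifs_sb->ctx->rsize > max_rsize)
		cifs_sb->ctx->rsize = max_rsize;
}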
3464 static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
3480 cifs_sb->master_tlink = tlink;
3481 spin_lock(&cifs_sb->tlink_tree_lock);
3482 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3483 spin_unlock(&cifs_sb->tlink_tree_lock);
3485 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
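mount_setup_tlink() (3464-3485) hangs the mount's tcon off the superblock: it allocates a tcon_link, records it as cifs_sb->master_tlink, publishes it in the tlink rbtree under tlink_tree_lock, and arms the prune_tlinks delayed work. Reassembled from the hits; the session bookkeeping on the new tlink is elided, and the queue delay is only illustrative because the listing truncates that argument.

/* Simplified sketch of mount_setup_tlink() based on the hits above. */
static int example_mount_setup_tlink(struct cifs_sb_info *cifs_sb,
				     struct cifs_ses *ses,
				     struct cifs_tcon *tcon)
{
	struct tcon_link *tlink;

	tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
	if (tlink == NULL)
		return -ENOMEM;
	tlink->tl_tcon = tcon;
	/* ... the real code also records ses->linux_uid and marks this tlink as the master ... */

	cifs_sb->master_tlink = tlink;
	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);

	/* start periodic pruning of idle tlinks; 600 * HZ is an illustrative interval */
	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, 600 * HZ);
	return 0;
}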
3494 struct cifs_sb_info *cifs_sb,
3503 sep = CIFS_DIR_SEP(cifs_sb);
3506 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
3530 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3545 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3558 full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
3565 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3572 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
3575 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
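The hits at 3545-3575 are the mount-time probe of the full path, share root plus any prepath, before CIFS_MOUNT_USE_PREFIX_PATH comes into play. A sketch of that probe (function name hypothetical; the per-component accessibility walk at 3530/3572, the DFS referral handling, and error reporting are left as a comment):

/* Hypothetical helper illustrating the prefix-path probe shown above. */
static int example_check_prepath(unsigned int xid, struct cifs_tcon *tcon,
				 struct cifs_sb_info *cifs_sb,
				 struct smb3_fs_context *ctx,
				 struct TCP_Server_Info *server)
{
	char *full_path;
	int rc;

	/* build the path to the mount root (tree name included for DFS shares) */
	full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
					    tcon->Flags & SMB_SHARE_IS_IN_DFS);
	if (full_path == NULL)
		return -ENOMEM;

	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, full_path);
	/*
	 * The real code follows this with a walk over the intermediate path
	 * components (3530, 3572) and may set CIFS_MOUNT_USE_PREFIX_PATH
	 * (3575) based on the outcome of that walk.
	 */
	kfree(full_path);
	return rc;
}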
3586 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3588 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3604 cifs_autodisable_serverino(cifs_sb);
3609 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3610 kfree(cifs_sb->prepath);
3611 cifs_sb->prepath = ctx->prepath;
3616 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3629 int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
3632 struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
3648 rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
3790 struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
3792 unload_nls(cifs_sb->local_nls);
3793 smb3_cleanup_fs_context(cifs_sb->ctx);
3794 kfree(cifs_sb);
3798 cifs_umount(struct cifs_sb_info *cifs_sb)
3800 struct rb_root *root = &cifs_sb->tlink_tree;
3804 cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3806 spin_lock(&cifs_sb->tlink_tree_lock);
3813 spin_unlock(&cifs_sb->tlink_tree_lock);
3815 spin_lock(&cifs_sb->tlink_tree_lock);
3817 spin_unlock(&cifs_sb->tlink_tree_lock);
3819 kfree(cifs_sb->prepath);
3820 call_rcu(&cifs_sb->rcu, delayed_free);
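delayed_free() and cifs_umount() (3790-3820) show the teardown side: the prune worker is cancelled, each tlink is unhooked from the rbtree and released with the lock dropped around the release, the prefix path is freed, and the final kfree of the cifs_sb is deferred with call_rcu() so the structure outlives any lockless readers by a grace period. A sketch reassembled from those hits; the rb_node field name in struct tcon_link is assumed and the refcount handling is simplified.

/* RCU callback: runs after a grace period, matching the hits at 3790-3794. */
static void example_delayed_free(struct rcu_head *p)
{
	struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);

	unload_nls(cifs_sb->local_nls);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
}

/* Simplified sketch of cifs_umount() based on the hits at 3798-3820. */
static void example_umount(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct tcon_link *tlink;

	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);

	spin_lock(&cifs_sb->tlink_tree_lock);
	while ((node = rb_first(root)) != NULL) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);	/* field name assumed */
		cifs_get_tlink(tlink);		/* hold it across the unlock */
		rb_erase(node, root);

		/* releasing the tlink may sleep, so drop the spinlock around it */
		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	kfree(cifs_sb->prepath);
	call_rcu(&cifs_sb->rcu, example_delayed_free);
}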
3976 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
3979 struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
3988 ctx->local_nls = cifs_sb->local_nls;
4045 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
4047 return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
4092 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
4108 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
4114 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
4115 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
4117 spin_lock(&cifs_sb->tlink_tree_lock);
4118 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4121 spin_unlock(&cifs_sb->tlink_tree_lock);
4133 spin_lock(&cifs_sb->tlink_tree_lock);
4135 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
4138 spin_unlock(&cifs_sb->tlink_tree_lock);
4143 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
4144 spin_unlock(&cifs_sb->tlink_tree_lock);
4168 tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
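cifs_sb_tlink() (4108-4168) is the per-call lookup used on multiuser mounts. Non-multiuser mounts short-circuit to the master tlink; otherwise the rbtree is searched by fsuid under tlink_tree_lock, and on a miss a new tlink is allocated outside the lock and inserted only after a re-check, since another task may have raced in. The sketch below is reassembled from those hits; the wait for a pending session setup, uid bookkeeping on the new tlink, and error-path refcount drops are omitted.

/* Simplified sketch of the cifs_sb_tlink() lookup pattern shown above. */
static struct tcon_link *example_sb_tlink(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
	struct tcon_link *tlink, *newtlink;

	/* single-user mounts: everything goes through the master tcon */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));

	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
	if (tlink)
		cifs_get_tlink(tlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);
	if (tlink)
		return tlink;

	/* miss: allocate outside the lock, then re-check before inserting */
	newtlink = kzalloc(sizeof(*newtlink), GFP_KERNEL);
	if (newtlink == NULL)
		return ERR_PTR(-ENOMEM);
	/* ... the real code records fsuid in the new tlink here ... */

	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
	if (tlink) {
		/* lost the race: keep the existing tlink, discard ours */
		cifs_get_tlink(tlink);
		spin_unlock(&cifs_sb->tlink_tree_lock);
		kfree(newtlink);
		return tlink;
	}
	tlink_rb_insert(&cifs_sb->tlink_tree, newtlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);

	/* build the per-uid tcon (4168); error handling omitted */
	newtlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
	return newtlink;
}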
4187 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
4189 struct rb_root *root = &cifs_sb->tlink_tree;
4201 spin_lock(&cifs_sb->tlink_tree_lock);
4217 spin_unlock(&cifs_sb->tlink_tree_lock);
4219 spin_lock(&cifs_sb->tlink_tree_lock);
4221 spin_unlock(&cifs_sb->tlink_tree_lock);
4223 queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
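cifs_prune_tlinks() (4187-4223) is the delayed-work body armed in cifs_setup_cifs_sb() and mount_setup_tlink(): it recovers the cifs_sb from the work item, walks the tlink rbtree under the lock, unhooks tlinks that are no longer in use, and re-arms itself. A sketch of that pattern; the idle test is a hypothetical stub, the rb_node field name is assumed, and the re-queue interval is illustrative since the listing truncates it.

/* Stand-in for the real idleness checks (master flag, refcount, last-use time). */
static bool example_tlink_is_idle(const struct tcon_link *tlink)
{
	return false;	/* hypothetical: replace with the real per-tlink checks */
}

/* Simplified sketch of the cifs_prune_tlinks() worker shown above. */
static void example_prune_tlinks(struct work_struct *work)
{
	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
						    prune_tlinks.work);
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node, *next;
	struct tcon_link *tlink;

	/*
	 * Dropping the lock mid-walk is safe here only because the other
	 * remover of tree entries, cifs_umount(), cancels this work first
	 * (3804 above) and the delayed work itself is not re-entrant.
	 */
	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node != NULL; node = next) {
		next = rb_next(node);
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);	/* field name assumed */

		if (!example_tlink_is_idle(tlink))
			continue;

		cifs_get_tlink(tlink);		/* hold it across the unlock */
		rb_erase(node, root);

		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);		/* releasing may sleep */
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	/* re-arm; 600 * HZ is illustrative, the real interval is truncated in the listing */
	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, 600 * HZ);
}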