Lines Matching refs:ofs

191 struct ovl_fs *ofs = OVL_FS(sb);
193 if (ofs)
194 ovl_free_fs(ofs);
200 struct ovl_fs *ofs = OVL_FS(sb);
204 ret = ovl_sync_status(ofs);
229 upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
249 struct ovl_fs *ofs = OVL_FS(sb);
258 buf->f_namelen = ofs->namelen;
260 if (ovl_has_fsid(ofs))
281 static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
284 struct inode *dir = ofs->workbasedir->d_inode;
285 struct vfsmount *mnt = ovl_upper_mnt(ofs);
292 work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name));
309 err = ovl_workdir_cleanup(ofs, dir, mnt, work, 0);
318 err = ovl_mkdir_real(ofs, dir, &work, attr.ia_mode);
340 err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_DEFAULT);
344 err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_ACCESS);
350 err = ovl_do_notify_change(ofs, work, &attr);
366 ofs->config.workdir, name, -err);
371 static int ovl_check_namelen(const struct path *path, struct ovl_fs *ofs,
380 ofs->namelen = max(ofs->namelen, statfs.f_namelen);
386 struct ovl_fs *ofs, int *stack_depth)
391 err = ovl_check_namelen(path, ofs, name);
402 if ((ofs->config.nfs_export ||
403 (ofs->config.index && ofs->config.upperdir)) && !fh_type) {
404 ofs->config.index = false;
405 ofs->config.nfs_export = false;
409 ofs->nofh |= !fh_type;
414 if (ofs->config.xino == OVL_XINO_AUTO &&
415 ofs->config.upperdir && !fh_type) {
416 ofs->config.xino = OVL_XINO_OFF;
423 ofs->xino_mode = -1;
526 static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
528 if (ofs->config.index) {
539 static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
553 err = ovl_check_namelen(upperpath, ofs, ofs->config.upperdir);
587 if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) {
588 ofs->upperdir_locked = true;
590 err = ovl_report_in_use(ofs, "upperdir");
604 static int ovl_check_rename_whiteout(struct ovl_fs *ofs)
606 struct dentry *workdir = ofs->workdir;
616 temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0));
621 dest = ovl_lookup_temp(ofs, workdir);
630 err = ovl_do_rename(ofs, dir, temp, dir, dest, RENAME_WHITEOUT);
637 whiteout = ovl_lookup_upper(ofs, name.name.name, workdir, name.name.len);
646 ovl_cleanup(ofs, dir, whiteout);
650 ovl_cleanup(ofs, dir, temp);
661 static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs,
669 child = ovl_lookup_upper(ofs, name, parent, len);
671 child = ovl_create_real(ofs, parent->d_inode, child,
683 static int ovl_create_volatile_dirty(struct ovl_fs *ofs)
686 struct dentry *d = dget(ofs->workbasedir);
693 d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG);
701 static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
704 struct vfsmount *mnt = ovl_upper_mnt(ofs);
716 workdir = ovl_workdir_create(ofs, OVL_WORKDIR_NAME, false);
721 ofs->workdir = workdir;
723 err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir");
742 tmpfile = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0);
743 ofs->tmpfile = !IS_ERR(tmpfile);
744 if (ofs->tmpfile)
751 err = ovl_check_rename_whiteout(ofs);
762 err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "0", 1);
765 ofs->noxattr = true;
766 if (ovl_redirect_follow(ofs)) {
767 ofs->config.redirect_mode = OVL_REDIRECT_NOFOLLOW;
770 if (ofs->config.metacopy) {
771 ofs->config.metacopy = false;
774 if (ofs->config.index) {
775 ofs->config.index = false;
778 if (ovl_has_fsid(ofs)) {
779 ofs->config.uuid = OVL_UUID_NULL;
786 if (ofs->config.xino == OVL_XINO_AUTO) {
787 ofs->config.xino = OVL_XINO_OFF;
790 if (err == -EPERM && !ofs->config.userxattr)
794 ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
802 if (ovl_dentry_remote(ofs->workdir) &&
803 (!d_type || !rename_whiteout || ofs->noxattr)) {
813 if (ofs->config.ovl_volatile) {
814 err = ovl_create_volatile_dirty(ofs);
822 fh_type = ovl_can_decode_fh(ofs->workdir->d_sb);
823 if (ofs->config.index && !fh_type) {
824 ofs->config.index = false;
827 ofs->nofh |= !fh_type;
831 ofs->xino_mode = -1;
834 if (ofs->config.nfs_export && !ofs->config.index) {
836 ofs->config.nfs_export = false;
843 static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
859 ofs->workbasedir = dget(workpath->dentry);
861 if (ovl_inuse_trylock(ofs->workbasedir)) {
862 ofs->workdir_locked = true;
864 err = ovl_report_in_use(ofs, "workdir");
869 err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
874 return ovl_make_workdir(sb, ofs, workpath);
877 static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
880 struct vfsmount *mnt = ovl_upper_mnt(ofs);
889 err = ovl_verify_origin(ofs, upperpath->dentry,
897 iput(ofs->workdir_trap);
898 ofs->workdir_trap = NULL;
899 dput(ofs->workdir);
900 ofs->workdir = NULL;
901 indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
905 ofs->indexdir = indexdir;
906 ofs->workdir = dget(indexdir);
908 err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap,
921 if (ovl_check_origin_xattr(ofs, ofs->indexdir)) {
922 err = ovl_verify_set_fh(ofs, ofs->indexdir,
928 err = ovl_verify_upper(ofs, ofs->indexdir, upperpath->dentry,
935 err = ovl_indexdir_cleanup(ofs);
937 if (err || !ofs->indexdir)
945 static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
949 if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs))
960 if (ovl_allow_offline_changes(ofs) && uuid_is_null(uuid))
963 for (i = 0; i < ofs->numfs; i++) {
971 if (ofs->fs[i].is_lower &&
972 uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
973 ofs->fs[i].bad_uuid = true;
981 static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
990 for (i = 0; i < ofs->numfs; i++) {
991 if (ofs->fs[i].sb == sb)
995 if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
997 if (ofs->config.xino == OVL_XINO_AUTO) {
998 ofs->config.xino = OVL_XINO_OFF;
1001 if (ofs->config.index || ofs->config.nfs_export) {
1002 ofs->config.index = false;
1003 ofs->config.nfs_export = false;
1010 path->dentry, ovl_xino_mode(&ofs->config));
1020 ofs->fs[ofs->numfs].sb = sb;
1021 ofs->fs[ofs->numfs].pseudo_dev = dev;
1022 ofs->fs[ofs->numfs].bad_uuid = bad_uuid;
1024 return ofs->numfs++;
1031 static int ovl_get_data_fsid(struct ovl_fs *ofs)
1033 return ofs->numfs;
1037 static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
1044 ofs->fs = kcalloc(ctx->nr + 2, sizeof(struct ovl_sb), GFP_KERNEL);
1045 if (ofs->fs == NULL)
1052 ofs->numfs++;
1060 err = get_anon_bdev(&ofs->fs[0].pseudo_dev);
1066 if (ovl_upper_mnt(ofs)) {
1067 ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb;
1068 ofs->fs[0].is_lower = false;
1079 fsid = ovl_get_fsid(ofs, &l->path);
1081 fsid = ovl_get_data_fsid(ofs);
1097 err = ovl_report_in_use(ofs, "lowerdir");
1118 layers[ofs->numlayer].trap = trap;
1119 layers[ofs->numlayer].mnt = mnt;
1120 layers[ofs->numlayer].idx = ofs->numlayer;
1121 layers[ofs->numlayer].fsid = fsid;
1122 layers[ofs->numlayer].fs = &ofs->fs[fsid];
1124 ofs->config.lowerdirs[ofs->numlayer] = l->name;
1126 ofs->numlayer++;
1127 ofs->fs[fsid].is_lower = true;
1139 if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) {
1140 if (ofs->config.xino == OVL_XINO_ON)
1142 ofs->xino_mode = 0;
1143 } else if (ofs->config.xino == OVL_XINO_OFF) {
1144 ofs->xino_mode = -1;
1145 } else if (ofs->xino_mode < 0) {
1154 ofs->xino_mode = ilog2(ofs->numfs - 1) + 2;
1157 if (ofs->xino_mode > 0) {
1159 ofs->xino_mode);
1167 struct ovl_fs *ofs,
1178 if (!ofs->config.upperdir && ctx->nr == 1) {
1187 err = ovl_lower_dir(l->name, &l->path, ofs, &sb->s_stack_depth);
1199 err = ovl_get_layers(sb, ofs, ctx, layers);
1214 lowerstack[i].layer = &ofs->layers[i + 1];
1216 ofs->numdatalayer = ctx->nr_data;
1226 static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
1244 err = ovl_report_in_use(ofs, name);
1260 struct ovl_fs *ofs)
1264 if (ovl_upper_mnt(ofs)) {
1265 err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
1277 err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
1283 for (i = 1; i < ofs->numlayer; i++) {
1284 err = ovl_check_layer(sb, ofs,
1285 ofs->layers[i].mnt->mnt_root,
1334 struct ovl_fs *ofs = sb->s_fs_info;
1349 ofs->creator_cred = cred = prepare_creds();
1353 err = ovl_fs_params_verify(ctx, &ofs->config);
1369 ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL);
1370 if (!ofs->config.lowerdirs) {
1374 ofs->layers = layers;
1380 ofs->config.lowerdirs[0] = ctx->lowerdir_all;
1382 ofs->numlayer = 1;
1386 atomic_long_set(&ofs->last_ino, 1);
1388 if (ofs->config.xino != OVL_XINO_OFF) {
1389 ofs->xino_mode = BITS_PER_LONG - 32;
1390 if (!ofs->xino_mode) {
1392 ofs->config.xino = OVL_XINO_OFF;
1399 if (ofs->config.upperdir) {
1403 if (!ofs->config.workdir) {
1408 err = ovl_get_upper(sb, ofs, &layers[0], &ctx->upper);
1412 upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
1413 if (!ovl_should_sync(ofs)) {
1414 ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
1415 if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
1422 err = ovl_get_workdir(sb, ofs, &ctx->upper, &ctx->work);
1426 if (!ofs->workdir)
1432 oe = ovl_get_lowerstack(sb, ctx, ofs, layers);
1438 if (!ovl_upper_mnt(ofs))
1441 if (!ovl_origin_uuid(ofs) && ofs->numfs > 1) {
1443 ofs->config.uuid = OVL_UUID_NULL;
1444 } else if (ovl_has_fsid(ofs) && ovl_upper_mnt(ofs)) {
1446 ovl_init_uuid_xattr(sb, ofs, &ctx->upper);
1449 if (!ovl_force_readonly(ofs) && ofs->config.index) {
1450 err = ovl_get_indexdir(sb, ofs, oe, &ctx->upper);
1455 if (!ofs->indexdir)
1459 err = ovl_check_overlapping_layers(sb, ofs);
1464 if (!ofs->indexdir) {
1465 ofs->config.index = false;
1466 if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) {
1468 ofs->config.nfs_export = false;
1472 if (ofs->config.metacopy && ofs->config.nfs_export) {
1474 ofs->config.nfs_export = false;
1482 if (ofs->config.nfs_export)
1484 else if (!ofs->nofh)
1491 sb->s_xattr = ofs->config.userxattr ? ovl_user_xattr_handlers :
1493 sb->s_fs_info = ofs;
1509 ovl_free_fs(ofs);
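
The common thread in these matches: every super operation and setup helper recovers the per-mount overlayfs state from the VFS superblock via OVL_FS(sb), or receives it as an ofs argument, and then works on ofs->config and the layer arrays. A minimal sketch of that accessor idiom, assuming only what the matches show (sb->s_fs_info = ofs at line 1493, ofs->namelen read at line 258); the example helper is illustrative, not the actual overlayfs code:

	/* Sketch: OVL_FS() is the usual s_fs_info accessor pattern, casting
	 * the pointer stored during super setup (line 1493 above). */
	static inline struct ovl_fs *OVL_FS(struct super_block *sb)
	{
		return (struct ovl_fs *)sb->s_fs_info;
	}

	/* Illustrative statfs-style helper using the same idiom as lines
	 * 249/258 above; not the real ovl_statfs(). */
	static int example_statfs(struct dentry *dentry, struct kstatfs *buf)
	{
		struct ovl_fs *ofs = OVL_FS(dentry->d_sb);

		buf->f_namelen = ofs->namelen;
		return 0;
	}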