// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2011 Novell Inc.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/file.h>
#include <linux/xattr.h>
#include <linux/rbtree.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/ratelimit.h>
#include "overlayfs.h"

struct ovl_cache_entry {
	unsigned int len;
	unsigned int type;
	u64 real_ino;
	u64 ino;
	struct list_head l_node;
	struct rb_node node;
	struct ovl_cache_entry *next_maybe_whiteout;
	bool is_upper;
	bool is_whiteout;
	char name[];
};

struct ovl_dir_cache {
	long refcount;
	u64 version;
	struct list_head entries;
	struct rb_root root;
};

struct ovl_readdir_data {
	struct dir_context ctx;
	struct dentry *dentry;
	bool is_lowest;
	struct rb_root *root;
	struct list_head *list;
	struct list_head middle;
	struct ovl_cache_entry *first_maybe_whiteout;
	int count;
	int err;
	bool is_upper;
	bool d_type_supported;
};

struct ovl_dir_file {
	bool is_real;
	bool is_upper;
	struct ovl_dir_cache *cache;
	struct list_head *cursor;
	struct file *realfile;
	struct file *upperfile;
};

static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
{
	return rb_entry(n, struct ovl_cache_entry, node);
}

static bool ovl_cache_entry_find_link(const char *name, int len,
				      struct rb_node ***link,
				      struct rb_node **parent)
{
	bool found = false;
	struct rb_node **newp = *link;

	while (!found && *newp) {
		int cmp;
		struct ovl_cache_entry *tmp;

		*parent = *newp;
		tmp = ovl_cache_entry_from_node(*newp);
		cmp = strncmp(name, tmp->name, len);
		if (cmp > 0)
			newp = &tmp->node.rb_right;
		else if (cmp < 0 || len < tmp->len)
			newp = &tmp->node.rb_left;
		else
			found = true;
	}
	*link = newp;

	return found;
}

static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
						    const char *name, int len)
{
	struct rb_node *node = root->rb_node;
	int cmp;

	while (node) {
		struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);

		cmp = strncmp(name, p->name, len);
		if (cmp > 0)
			node = p->node.rb_right;
		else if (cmp < 0 || len < p->len)
			node = p->node.rb_left;
		else
			return p;
	}

	return NULL;
}

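/*
 * Decide whether ovl_iterate() must recalculate d_ino for an entry instead
 * of reporting the underlying real inode number as is.
 */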
static bool ovl_calc_d_ino(struct ovl_readdir_data *rdd,
			   struct ovl_cache_entry *p)
{
	/* Don't care if not doing ovl_iterate() */
	if (!rdd->dentry)
		return false;

	/* Always recalc d_ino when remapping lower inode numbers */
	if (ovl_xino_bits(rdd->dentry->d_sb))
		return true;

	/* Always recalc d_ino for parent */
	if (strcmp(p->name, "..") == 0)
		return true;

	/* If this is lower, then native d_ino will do */
	if (!rdd->is_upper)
		return false;

	/*
	 * Recalc d_ino for '.' and for all entries if dir is impure (contains
	 * copied up entries)
	 */
	if ((p->name[0] == '.' && p->len == 1) ||
	    ovl_test_flag(OVL_IMPURE, d_inode(rdd->dentry)))
		return true;

	return false;
}

static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
						   const char *name, int len,
						   u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;
	size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		return NULL;

	memcpy(p->name, name, len);
	p->name[len] = '\0';
	p->len = len;
	p->type = d_type;
	p->real_ino = ino;
	p->ino = ino;
	/* Defer setting d_ino for upper entry to ovl_iterate() */
	if (ovl_calc_d_ino(rdd, p))
		p->ino = 0;
	p->is_upper = rdd->is_upper;
	p->is_whiteout = false;

	if (d_type == DT_CHR) {
		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
		rdd->first_maybe_whiteout = p;
	}
	return p;
}

static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
				  const char *name, int len, u64 ino,
				  unsigned int d_type)
{
	struct rb_node **newp = &rdd->root->rb_node;
	struct rb_node *parent = NULL;
	struct ovl_cache_entry *p;

	if (ovl_cache_entry_find_link(name, len, &newp, &parent))
		return 0;

	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}

	list_add_tail(&p->l_node, rdd->list);
	rb_link_node(&p->node, parent, newp);
	rb_insert_color(&p->node, rdd->root);

	return 0;
}

static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
			   const char *name, int namelen,
			   loff_t offset, u64 ino, unsigned int d_type)
{
	struct ovl_cache_entry *p;

	p = ovl_cache_entry_find(rdd->root, name, namelen);
	if (p) {
		list_move_tail(&p->l_node, &rdd->middle);
	} else {
		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
		if (p == NULL)
			rdd->err = -ENOMEM;
		else
			list_add_tail(&p->l_node, &rdd->middle);
	}

	return rdd->err;
}

void ovl_cache_free(struct list_head *list)
{
	struct ovl_cache_entry *p;
	struct ovl_cache_entry *n;

	list_for_each_entry_safe(p, n, list, l_node)
		kfree(p);

	INIT_LIST_HEAD(list);
}

void ovl_dir_cache_free(struct inode *inode)
{
	struct ovl_dir_cache *cache = ovl_dir_cache(inode);

	if (cache) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
{
	struct ovl_dir_cache *cache = od->cache;

	WARN_ON(cache->refcount <= 0);
	cache->refcount--;
	if (!cache->refcount) {
		if (ovl_dir_cache(d_inode(dentry)) == cache)
			ovl_set_dir_cache(d_inode(dentry), NULL);

		ovl_cache_free(&cache->entries);
		kfree(cache);
	}
}

static int ovl_fill_merge(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	if (!rdd->is_lowest)
		return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
	else
		return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}

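/*
 * DT_CHR entries collected by ovl_cache_entry_new() are only whiteout
 * candidates; look each one up (with mounter credentials) to determine
 * whether it really is a whiteout.
 */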
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
{
	int err;
	struct ovl_cache_entry *p;
	struct dentry *dentry;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(rdd->dentry->d_sb);

	err = down_write_killable(&dir->d_inode->i_rwsem);
	if (!err) {
		while (rdd->first_maybe_whiteout) {
			p = rdd->first_maybe_whiteout;
			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
			dentry = lookup_one_len(p->name, dir, p->len);
			if (!IS_ERR(dentry)) {
				p->is_whiteout = ovl_is_whiteout(dentry);
				dput(dentry);
			}
		}
		inode_unlock(dir->d_inode);
	}
	revert_creds(old_cred);

	return err;
}

static inline int ovl_dir_read(struct path *realpath,
			       struct ovl_readdir_data *rdd)
{
	struct file *realfile;
	int err;

	realfile = ovl_path_open(realpath, O_RDONLY | O_LARGEFILE);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);

	rdd->first_maybe_whiteout = NULL;
	rdd->ctx.pos = 0;
	do {
		rdd->count = 0;
		rdd->err = 0;
		err = iterate_dir(realfile, &rdd->ctx);
		if (err >= 0)
			err = rdd->err;
	} while (!err && rdd->count);

	if (!err && rdd->first_maybe_whiteout && rdd->dentry)
		err = ovl_check_whiteouts(realpath->dentry, rdd);

	fput(realfile);

	return err;
}

static void ovl_dir_reset(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct ovl_dir_cache *cache = od->cache;
	struct dentry *dentry = file->f_path.dentry;
	bool is_real;

	if (cache && ovl_dentry_version_get(dentry) != cache->version) {
		ovl_cache_put(od, dentry);
		od->cache = NULL;
		od->cursor = NULL;
	}
	is_real = ovl_dir_is_real(dentry);
	if (od->is_real != is_real) {
		/* is_real can only become false when dir is copied up */
		if (WARN_ON(is_real))
			return;
		od->is_real = false;
	}
}

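/*
 * Read and merge the real dirs of all layers, uppermost layer first.
 * A lower layer entry is only added if no layer above it has already
 * contributed an entry with the same name.
 */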
static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = dentry,
		.list = list,
		.root = root,
		.is_lowest = false,
	};
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;

		if (next != -1) {
			err = ovl_dir_read(&realpath, &rdd);
			if (err)
				break;
		} else {
			/*
			 * Insert lowest layer entries before upper ones, this
			 * allows offsets to be reasonably constant
			 */
			list_add(&rdd.middle, rdd.list);
			rdd.is_lowest = true;
			err = ovl_dir_read(&realpath, &rdd);
			list_del(&rdd.middle);
		}
	}
	return err;
}

static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
{
	struct list_head *p;
	loff_t off = 0;

	list_for_each(p, &od->cache->entries) {
		if (off >= pos)
			break;
		off++;
	}
	/* Cursor is safe since the cache is stable */
	od->cursor = p;
}

static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
{
	int res;
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version) {
		WARN_ON(!cache->refcount);
		cache->refcount++;
		return cache;
	}
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	cache->refcount = 1;
	INIT_LIST_HEAD(&cache->entries);
	cache->root = RB_ROOT;

	res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

/*
 * Map inode number to lower fs unique range.
 *
 * E.g. with xinobits = 2 (xinoshift = 62), ino 42 from the layer with
 * fsid 1 is reported as 42 | (1 << 63): the fsid is shifted up past the
 * reserved lowest xino bit (see below).
 */
static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
			       const char *name, int namelen, bool warn)
{
	unsigned int xinoshift = 64 - xinobits;

	if (unlikely(ino >> xinoshift)) {
		if (warn) {
			pr_warn_ratelimited("d_ino too big (%.*s, ino=%llu, xinobits=%d)\n",
					    namelen, name, ino, xinobits);
		}
		return ino;
	}

	/*
	 * The lowest xinobit is reserved for mapping the non-persistent inode
	 * numbers range, but this range is only exposed via st_ino, not here.
	 */
	return ino | ((u64)fsid) << (xinoshift + 1);
}

/*
 * Set d_ino for upper entries. Non-upper entries should always report
 * the uppermost real inode ino and should not call this function.
 *
 * When not all layers are on the same fs, report the real ino also for
 * upper entries.
 *
 * When all layers are on the same fs, and upper has a reference to
 * copy up origin, call vfs_getattr() on the overlay entry to make
 * sure that d_ino will be consistent with st_ino from stat(2).
 */
static int ovl_cache_update_ino(struct path *path, struct ovl_cache_entry *p)
{
	struct dentry *dir = path->dentry;
	struct dentry *this = NULL;
	enum ovl_path_type type;
	u64 ino = p->real_ino;
	int xinobits = ovl_xino_bits(dir->d_sb);
	int err = 0;

	if (!ovl_same_dev(dir->d_sb))
		goto out;

	if (p->name[0] == '.') {
		if (p->len == 1) {
			this = dget(dir);
			goto get;
		}
		if (p->len == 2 && p->name[1] == '.') {
			/* we shall not be moved */
			this = dget(dir->d_parent);
			goto get;
		}
	}
	this = lookup_one_len(p->name, dir, p->len);
	if (IS_ERR_OR_NULL(this) || !this->d_inode) {
		if (IS_ERR(this)) {
			err = PTR_ERR(this);
			this = NULL;
			goto fail;
		}
		goto out;
	}

get:
	type = ovl_path_type(this);
	if (OVL_TYPE_ORIGIN(type)) {
		struct kstat stat;
		struct path statpath = *path;

		statpath.dentry = this;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			goto fail;

		/*
		 * Directory inode is always on overlay st_dev.
		 * Non-dir with ovl_same_dev() could be on pseudo st_dev in
		 * case of xino bits overflow.
		 */
		WARN_ON_ONCE(S_ISDIR(stat.mode) &&
			     dir->d_sb->s_dev != stat.dev);
		ino = stat.ino;
	} else if (xinobits && !OVL_TYPE_UPPER(type)) {
		ino = ovl_remap_lower_ino(ino, xinobits,
					  ovl_layer_lower(this)->fsid,
					  p->name, p->len,
					  ovl_xino_warn(dir->d_sb));
	}

out:
	p->ino = ino;
	dput(this);
	return err;

fail:
	pr_warn_ratelimited("failed to look up (%s) for ino (%i)\n",
			    p->name, err);
	goto out;
}

static int ovl_fill_plain(struct dir_context *ctx, const char *name,
			  int namelen, loff_t offset, u64 ino,
			  unsigned int d_type)
{
	struct ovl_cache_entry *p;
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	rdd->count++;
	p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
	if (p == NULL) {
		rdd->err = -ENOMEM;
		return -ENOMEM;
	}
	list_add_tail(&p->l_node, rdd->list);

	return 0;
}

static int ovl_dir_read_impure(struct path *path, struct list_head *list,
			       struct rb_root *root)
{
	int err;
	struct path realpath;
	struct ovl_cache_entry *p, *n;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_plain,
		.list = list,
		.root = root,
	};

	INIT_LIST_HEAD(list);
	*root = RB_ROOT;
	ovl_path_upper(path->dentry, &realpath);

	err = ovl_dir_read(&realpath, &rdd);
	if (err)
		return err;

	list_for_each_entry_safe(p, n, list, l_node) {
		if (strcmp(p->name, ".") != 0 &&
		    strcmp(p->name, "..") != 0) {
			err = ovl_cache_update_ino(path, p);
			if (err)
				return err;
		}
		if (p->ino == p->real_ino) {
			list_del(&p->l_node);
			kfree(p);
		} else {
			struct rb_node **newp = &root->rb_node;
			struct rb_node *parent = NULL;

			if (WARN_ON(ovl_cache_entry_find_link(p->name, p->len,
							      &newp, &parent)))
				return -EIO;

			rb_link_node(&p->node, parent, newp);
			rb_insert_color(&p->node, root);
		}
	}
	return 0;
}

static struct ovl_dir_cache *ovl_cache_get_impure(struct path *path)
{
	int res;
	struct dentry *dentry = path->dentry;
	struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
	struct ovl_dir_cache *cache;

	cache = ovl_dir_cache(d_inode(dentry));
	if (cache && ovl_dentry_version_get(dentry) == cache->version)
		return cache;

	/* Impure cache is not refcounted, free it here */
	ovl_dir_cache_free(d_inode(dentry));
	ovl_set_dir_cache(d_inode(dentry), NULL);

	cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
	if (!cache)
		return ERR_PTR(-ENOMEM);

	res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
	if (res) {
		ovl_cache_free(&cache->entries);
		kfree(cache);
		return ERR_PTR(res);
	}
	if (list_empty(&cache->entries)) {
		/*
		 * A good opportunity to get rid of an unneeded "impure" flag.
		 * Removing the "impure" xattr is best effort.
		 */
		if (!ovl_want_write(dentry)) {
			ovl_do_removexattr(ofs, ovl_dentry_upper(dentry),
					   OVL_XATTR_IMPURE);
			ovl_drop_write(dentry);
		}
		ovl_clear_flag(OVL_IMPURE, d_inode(dentry));
		kfree(cache);
		return NULL;
	}

	cache->version = ovl_dentry_version_get(dentry);
	ovl_set_dir_cache(d_inode(dentry), cache);

	return cache;
}

struct ovl_readdir_translate {
	struct dir_context *orig_ctx;
	struct ovl_dir_cache *cache;
	struct dir_context ctx;
	u64 parent_ino;
	int fsid;
	int xinobits;
	bool xinowarn;
};

static int ovl_fill_real(struct dir_context *ctx, const char *name,
			 int namelen, loff_t offset, u64 ino,
			 unsigned int d_type)
{
	struct ovl_readdir_translate *rdt =
		container_of(ctx, struct ovl_readdir_translate, ctx);
	struct dir_context *orig_ctx = rdt->orig_ctx;

	if (rdt->parent_ino && strcmp(name, "..") == 0) {
		ino = rdt->parent_ino;
	} else if (rdt->cache) {
		struct ovl_cache_entry *p;

		p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
		if (p)
			ino = p->ino;
	} else if (rdt->xinobits) {
		ino = ovl_remap_lower_ino(ino, rdt->xinobits, rdt->fsid,
					  name, namelen, rdt->xinowarn);
	}

	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
}

static bool ovl_is_impure_dir(struct file *file)
{
	struct ovl_dir_file *od = file->private_data;
	struct inode *dir = d_inode(file->f_path.dentry);

	/*
	 * Only upper dir can be impure, but if we are in the middle of
	 * iterating a lower real dir, dir could be copied up and marked
	 * impure. We only want the impure cache if we started iterating
	 * a real upper dir to begin with.
	 */
	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
}

static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
{
	int err;
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dir = file->f_path.dentry;
	const struct ovl_layer *lower_layer = ovl_layer_lower(dir);
	struct ovl_readdir_translate rdt = {
		.ctx.actor = ovl_fill_real,
		.orig_ctx = ctx,
		.xinobits = ovl_xino_bits(dir->d_sb),
		.xinowarn = ovl_xino_warn(dir->d_sb),
	};

	if (rdt.xinobits && lower_layer)
		rdt.fsid = lower_layer->fsid;

	if (OVL_TYPE_MERGE(ovl_path_type(dir->d_parent))) {
		struct kstat stat;
		struct path statpath = file->f_path;

		statpath.dentry = dir->d_parent;
		err = vfs_getattr(&statpath, &stat, STATX_INO, 0);
		if (err)
			return err;

		WARN_ON_ONCE(dir->d_sb->s_dev != stat.dev);
		rdt.parent_ino = stat.ino;
	}

	if (ovl_is_impure_dir(file)) {
		rdt.cache = ovl_cache_get_impure(&file->f_path);
		if (IS_ERR(rdt.cache))
			return PTR_ERR(rdt.cache);
	}

	err = iterate_dir(od->realfile, &rdt.ctx);
	ctx->pos = rdt.ctx.pos;

	return err;
}

static int ovl_iterate(struct file *file, struct dir_context *ctx)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct ovl_cache_entry *p;
	const struct cred *old_cred;
	int err;

	old_cred = ovl_override_creds(dentry->d_sb);
	if (!ctx->pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		/*
		 * If parent is merge, then we need to adjust d_ino for '..';
		 * if dir is impure, then we need to adjust d_ino for copied
		 * up entries.
		 */
		if (ovl_xino_bits(dentry->d_sb) ||
		    (ovl_same_fs(dentry->d_sb) &&
		     (ovl_is_impure_dir(file) ||
		      OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
			err = ovl_iterate_real(file, ctx);
		} else {
			err = iterate_dir(od->realfile, ctx);
		}
		goto out;
	}

	if (!od->cache) {
		struct ovl_dir_cache *cache;

		cache = ovl_cache_get(dentry);
		err = PTR_ERR(cache);
		if (IS_ERR(cache))
			goto out;

		od->cache = cache;
		ovl_seek_cursor(od, ctx->pos);
	}

	while (od->cursor != &od->cache->entries) {
		p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
		if (!p->is_whiteout) {
			if (!p->ino) {
				err = ovl_cache_update_ino(&file->f_path, p);
				if (err)
					goto out;
			}
			if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
				break;
		}
		od->cursor = p->l_node.next;
		ctx->pos++;
	}
	err = 0;
out:
	revert_creds(old_cred);
	return err;
}

static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t res;
	struct ovl_dir_file *od = file->private_data;

	inode_lock(file_inode(file));
	if (!file->f_pos)
		ovl_dir_reset(file);

	if (od->is_real) {
		res = vfs_llseek(od->realfile, offset, origin);
		file->f_pos = od->realfile->f_pos;
	} else {
		res = -EINVAL;

		switch (origin) {
		case SEEK_CUR:
			offset += file->f_pos;
			break;
		case SEEK_SET:
			break;
		default:
			goto out_unlock;
		}
		if (offset < 0)
			goto out_unlock;

		if (offset != file->f_pos) {
			file->f_pos = offset;
			if (od->cache)
				ovl_seek_cursor(od, offset);
		}
		res = offset;
	}
out_unlock:
	inode_unlock(file_inode(file));

	return res;
}

static struct file *ovl_dir_open_realfile(const struct file *file,
					  struct path *realpath)
{
	struct file *res;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE));
	revert_creds(old_cred);

	return res;
}

/*
 * Like ovl_real_fdget(), returns upperfile if dir was copied up since open.
 * Unlike ovl_real_fdget(), this caches upperfile in file->private_data.
 *
 * TODO: use same abstract type for file->private_data of dir and file so
 * upperfile could also be cached for files as well.
 */
struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
{
	struct ovl_dir_file *od = file->private_data;
	struct dentry *dentry = file->f_path.dentry;
	struct file *old, *realfile = od->realfile;

	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
		return want_upper ? NULL : realfile;

	/*
	 * Need to check if we started out being a lower dir, but got copied up
	 */
	if (!od->is_upper) {
		realfile = READ_ONCE(od->upperfile);
		if (!realfile) {
			struct path upperpath;

			ovl_path_upper(dentry, &upperpath);
			realfile = ovl_dir_open_realfile(file, &upperpath);
			if (IS_ERR(realfile))
				return realfile;

			old = cmpxchg_release(&od->upperfile, NULL, realfile);
			if (old) {
				fput(realfile);
				realfile = old;
			}
		}
	}

	return realfile;
}

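/*
 * fsync() on a merged dir syncs only the upper dir; if the dir has no
 * upper layer there is nothing to sync.
 */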
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct file *realfile;
	int err;

	err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
	if (err <= 0)
		return err;

	realfile = ovl_dir_real_file(file, true);
	err = PTR_ERR_OR_ZERO(realfile);

	/* Nothing to sync for lower */
	if (!realfile || err)
		return err;

	return vfs_fsync_range(realfile, start, end, datasync);
}

static int ovl_dir_release(struct inode *inode, struct file *file)
{
	struct ovl_dir_file *od = file->private_data;

	if (od->cache) {
		inode_lock(inode);
		ovl_cache_put(od, file->f_path.dentry);
		inode_unlock(inode);
	}
	fput(od->realfile);
	if (od->upperfile)
		fput(od->upperfile);
	kfree(od);

	return 0;
}

static int ovl_dir_open(struct inode *inode, struct file *file)
{
	struct path realpath;
	struct file *realfile;
	struct ovl_dir_file *od;
	enum ovl_path_type type;

	od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	type = ovl_path_real(file->f_path.dentry, &realpath);
	realfile = ovl_dir_open_realfile(file, &realpath);
	if (IS_ERR(realfile)) {
		kfree(od);
		return PTR_ERR(realfile);
	}
	od->realfile = realfile;
	od->is_real = ovl_dir_is_real(file->f_path.dentry);
	od->is_upper = OVL_TYPE_UPPER(type);
	file->private_data = od;

	return 0;
}

const struct file_operations ovl_dir_operations = {
	.read = generic_read_dir,
	.open = ovl_dir_open,
	.iterate = ovl_iterate,
	.llseek = ovl_dir_llseek,
	.fsync = ovl_dir_fsync,
	.release = ovl_dir_release,
	.unlocked_ioctl = ovl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = ovl_compat_ioctl,
#endif
};

int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
{
	int err;
	struct ovl_cache_entry *p, *n;
	struct rb_root root = RB_ROOT;
	const struct cred *old_cred;

	old_cred = ovl_override_creds(dentry->d_sb);
	err = ovl_dir_read_merged(dentry, list, &root);
	revert_creds(old_cred);
	if (err)
		return err;

	err = 0;

	list_for_each_entry_safe(p, n, list, l_node) {
		/*
		 * Select whiteouts in upperdir; they should
		 * be cleared when deleting this directory.
		 */
		if (p->is_whiteout) {
			if (p->is_upper)
				continue;
			goto del_entry;
		}

		if (p->name[0] == '.') {
			if (p->len == 1)
				goto del_entry;
			if (p->len == 2 && p->name[1] == '.')
				goto del_entry;
		}
		err = -ENOTEMPTY;
		break;

del_entry:
		list_del(&p->l_node);
		kfree(p);
	}

	return err;
}

void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
{
	struct ovl_cache_entry *p;

	inode_lock_nested(upper->d_inode, I_MUTEX_CHILD);
	list_for_each_entry(p, list, l_node) {
		struct dentry *dentry;

		if (WARN_ON(!p->is_whiteout || !p->is_upper))
			continue;

		dentry = lookup_one_len(p->name, upper, p->len);
		if (IS_ERR(dentry)) {
			pr_err("lookup '%s/%.*s' failed (%i)\n",
			       upper->d_name.name, p->len, p->name,
			       (int) PTR_ERR(dentry));
			continue;
		}
		if (dentry->d_inode)
			ovl_cleanup(upper->d_inode, dentry);
		dput(dentry);
	}
	inode_unlock(upper->d_inode);
}

static int ovl_check_d_type(struct dir_context *ctx, const char *name,
			    int namelen, loff_t offset, u64 ino,
			    unsigned int d_type)
{
	struct ovl_readdir_data *rdd =
		container_of(ctx, struct ovl_readdir_data, ctx);

	/* Even if d_type is not supported, DT_DIR is returned for . and .. */
	if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
		return 0;

	if (d_type != DT_UNKNOWN)
		rdd->d_type_supported = true;

	return 0;
}

/*
 * Returns 1 if d_type is supported, 0 if not supported/unknown, and a
 * negative value if an error is encountered.
 */
int ovl_check_d_type_supported(struct path *realpath)
{
	int err;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_check_d_type,
		.d_type_supported = false,
	};

	err = ovl_dir_read(realpath, &rdd);
	if (err)
		return err;

	return rdd.d_type_supported;
}

#define OVL_INCOMPATDIR_NAME "incompat"

static int ovl_workdir_cleanup_recurse(struct path *path, int level)
{
	int err;
	struct inode *dir = path->dentry->d_inode;
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};
	bool incompat = false;

	/*
	 * The "work/incompat" directory is treated specially - if it is not
	 * empty, instead of printing a generic error and mounting read-only,
	 * we will error about incompat features and fail the mount.
	 *
	 * When called from ovl_indexdir_cleanup(), path->dentry->d_name.name
	 * starts with '#'.
	 */
	if (level == 2 &&
	    !strcmp(path->dentry->d_name.name, OVL_INCOMPATDIR_NAME))
		incompat = true;

	err = ovl_dir_read(path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		struct dentry *dentry;

		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		} else if (incompat) {
			pr_err("overlay with incompat feature '%s' cannot be mounted\n",
			       p->name);
			err = -EINVAL;
			break;
		}
		dentry = lookup_one_len(p->name, path->dentry, p->len);
		if (IS_ERR(dentry))
			continue;
		if (dentry->d_inode)
			err = ovl_workdir_cleanup(dir, path->mnt, dentry, level);
		dput(dentry);
		if (err)
			break;
	}
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	return err;
}

int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
			struct dentry *dentry, int level)
{
	int err;

	if (!d_is_dir(dentry) || level > 1)
		return ovl_cleanup(dir, dentry);

	err = ovl_do_rmdir(dir, dentry);
	if (err) {
		struct path path = { .mnt = mnt, .dentry = dentry };

		inode_unlock(dir);
		err = ovl_workdir_cleanup_recurse(&path, level + 1);
		inode_lock_nested(dir, I_MUTEX_PARENT);
		if (!err)
			err = ovl_cleanup(dir, dentry);
	}

	return err;
}

int ovl_indexdir_cleanup(struct ovl_fs *ofs)
{
	int err;
	struct dentry *indexdir = ofs->indexdir;
	struct dentry *index = NULL;
	struct inode *dir = indexdir->d_inode;
	struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir };
	LIST_HEAD(list);
	struct rb_root root = RB_ROOT;
	struct ovl_cache_entry *p;
	struct ovl_readdir_data rdd = {
		.ctx.actor = ovl_fill_merge,
		.dentry = NULL,
		.list = &list,
		.root = &root,
		.is_lowest = false,
	};

	err = ovl_dir_read(&path, &rdd);
	if (err)
		goto out;

	inode_lock_nested(dir, I_MUTEX_PARENT);
	list_for_each_entry(p, &list, l_node) {
		if (p->name[0] == '.') {
			if (p->len == 1)
				continue;
			if (p->len == 2 && p->name[1] == '.')
				continue;
		}
		index = lookup_one_len(p->name, indexdir, p->len);
		if (IS_ERR(index)) {
			err = PTR_ERR(index);
			index = NULL;
			break;
		}
		/* Cleanup leftover from index create/cleanup attempt */
		if (index->d_name.name[0] == '#') {
			err = ovl_workdir_cleanup(dir, path.mnt, index, 1);
			if (err)
				break;
			goto next;
		}
		err = ovl_verify_index(ofs, index);
		if (!err) {
			goto next;
		} else if (err == -ESTALE) {
			/* Cleanup stale index entries */
			err = ovl_cleanup(dir, index);
		} else if (err != -ENOENT) {
			/*
			 * Abort mount to avoid corrupting the index if
			 * an incompatible index entry was found or on out
			 * of memory.
			 */
			break;
		} else if (ofs->config.nfs_export) {
			/*
			 * Whiteout orphan index to block future open by
			 * handle after overlay nlink dropped to zero.
			 */
			err = ovl_cleanup_and_whiteout(ofs, dir, index);
		} else {
			/* Cleanup orphan index entries */
			err = ovl_cleanup(dir, index);
		}

		if (err)
			break;

next:
		dput(index);
		index = NULL;
	}
	dput(index);
	inode_unlock(dir);
out:
	ovl_cache_free(&list);
	if (err)
		pr_err("failed index dir cleanup (%i)\n", err);
	return err;
}