// SPDX-License-Identifier: GPL-2.0-only
/*
 *	vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
	unsigned int epb = info->dqi_usable_bs >> 2;

	depth = info->dqi_qtree_depth - depth - 1;
	while (depth--)
		id /= epb;
	return id % epb;
}

static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
{
	qid_t id = from_kqid(&init_user_ns, qid);

	return __get_index(info, id, depth);
}

/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
	       / info->dqi_entry_size;
}

static char *getdqbuf(size_t size)
{
	char *buf = kmalloc(size, GFP_NOFS);
	if (!buf)
		printk(KERN_WARNING
		       "VFS: Not enough memory for quota buffers.\n");
	return buf;
}

static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;

	memset(buf, 0, info->dqi_usable_bs);
	return sb->s_op->quota_read(sb, info->dqi_type, buf,
	       info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}

static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;
	ssize_t ret;

	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
	      info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
	if (ret != info->dqi_usable_bs) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -EIO;
	}
	return ret;
}

static inline int do_check_range(struct super_block *sb, const char *val_name,
				 uint val, uint min_val, uint max_val)
{
	if (val < min_val || val > max_val) {
		quota_error(sb, "Getting %s %u out of range %u-%u",
			    val_name, val, min_val, max_val);
		return -EUCLEAN;
	}

	return 0;
}

static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
				    struct qt_disk_dqdbheader *dh)
{
	int err = 0;

	err = do_check_range(info->dqi_sb, "dqdh_next_free",
			     le32_to_cpu(dh->dqdh_next_free), 0,
			     info->dqi_blocks - 1);
	if (err)
		return err;
	err = do_check_range(info->dqi_sb, "dqdh_prev_free",
			     le32_to_cpu(dh->dqdh_prev_free), 0,
			     info->dqi_blocks - 1);

	return err;
}

/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		ret = check_dquot_block_header(info, dh);
		if (ret)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
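		/*
		 * Writing one zero-filled block just past the current end of
		 * the quota file both allocates the on-disk space and
		 * initializes the new block; dqi_blocks is bumped only once
		 * the write has succeeded.
		 */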
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}

/* Insert empty block into the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether the write succeeds, the block is off the list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Insert given block at the beginning of the list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
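
/*
 * Note: completely unused blocks are kept on the singly-linked free-block
 * list headed by dqi_free_blk (chained through dqdh_next_free), while data
 * blocks that still have unused entry slots are kept on the doubly-linked
 * free-entry list headed by dqi_free_entry.  The helpers above keep both
 * lists consistent.
 */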

/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
	int i;

	for (i = 0; i < info->dqi_entry_size; i++)
		if (disk[i])
			return 0;
	return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);

/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
		*err = check_dquot_block_header(info, dh);
		if (*err)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}

/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
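		/*
		 * A new subtree block (or leaf entry) was created below this
		 * level: record its block number in the parent reference and
		 * write the parent tree block back.
		 */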
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot)
{
	int tmp = QT_TREEOFF;

#ifdef __QUOTA_QT_PARANOIA
	if (info->dqi_blocks <= QT_TREEOFF) {
		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
		return -EIO;
	}
#endif
	return do_insert_tree(info, dquot, &tmp, 0);
}

/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = getdqbuf(info->dqi_entry_size);

	if (!ddquot)
		return -ENOMEM;

	/* dq_off is guarded by dqio_sem */
	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);

/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	ret = check_dquot_block_header(info, dh);
	if (ret)
		goto out_buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
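		/*
		 * The last entry in the block was just freed: take the block
		 * off the free-entry list and return it to the free-block
		 * list.
		 */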
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
				    "data block (%u) to free entry list", blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;	/* Quota is now unattached */
out_buf:
	kfree(buf);
	return ret;
}

/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    newblk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth+1);
	}
	if (ret >= 0 && !newblk) {
		int i;

		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	uint tmp = QT_TREEOFF;

	if (!dquot->dq_off)	/* Even not allocated? */
		return 0;
	return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);

/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		ret = ((loff_t)blk << info->dqi_blocksize_bits) +
		      sizeof(struct qt_disk_dqdbheader) +
		      i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    blk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth+1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}

int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
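			/*
			 * offset == 0 means the id simply has no on-disk
			 * entry (the dquot is treated as an all-zero "fake"
			 * one); offset < 0 is a real read error.
			 */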
			if (offset < 0)
				quota_error(sb, "Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = getdqbuf(info->dqi_entry_size);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

/* Check whether dquot should not be deleted. We know we are
 * the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
		return qtree_delete_dquot(info, dquot);
	return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);

static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;
	unsigned int level_inc = 1;
	int i;

	if (!buf)
		return -ENOMEM;

	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		if (ref[i] == cpu_to_le32(0)) {
			*id += level_inc;
			continue;
		}
		if (depth == info->dqi_qtree_depth - 1) {
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}

int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
{
	qid_t id = from_kqid(&init_user_ns, *qid);
	int ret;

	ret = find_next_id(info, &id, QT_TREEOFF, 0);
	if (ret < 0)
		return ret;
	*qid = make_kqid(&init_user_ns, qid->type, id);
	return 0;
}
EXPORT_SYMBOL(qtree_get_next_id);