// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/resize.c
 *
 * Support for resizing an ext4 filesystem while it is mounted.
 *
 * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
 *
 * This could probably be made into a module, because it is not often in use.
 */


#define EXT4FS_DEBUG

#include <linux/errno.h>
#include <linux/slab.h>

#include "ext4_jbd2.h"

struct ext4_rcu_ptr {
	struct rcu_head rcu;
	void *ptr;
};
static void ext4_rcu_ptr_callback(struct rcu_head *head)
{
	struct ext4_rcu_ptr *ptr;

	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
	kvfree(ptr->ptr);
	kfree(ptr);
}

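/*
 * Free a kvmalloc'ed array once an RCU grace period has elapsed.  The
 * common case defers the free via call_rcu() using the small wrapper
 * above to carry the pointer through the grace period; if the wrapper
 * cannot be allocated, fall back to blocking in synchronize_rcu() and
 * freeing synchronously.  Either way, no reader that picked up the old
 * pointer under rcu_read_lock() can still be using it when it is freed.
 */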
void ext4_kvfree_array_rcu(void *to_free)
{
	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);

	if (ptr) {
		ptr->ptr = to_free;
		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
		return;
	}
	synchronize_rcu();
	kvfree(to_free);
}

int ext4_resize_begin(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int ret = 0;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	/*
	 * If the number of reserved GDT blocks is non-zero, the
	 * resize_inode feature should always be set.
	 */
	if (EXT4_SB(sb)->s_es->s_reserved_gdt_blocks &&
	    !ext4_has_feature_resize_inode(sb)) {
		ext4_error(sb, "resize_inode disabled but reserved GDT blocks non-zero");
		return -EFSCORRUPTED;
	}

	/*
	 * If we are not using the primary superblock/GDT copy don't resize,
	 * because the user tools have no way of handling this.  Probably a
	 * bad time to do it anyways.
	 */
	if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
		ext4_warning(sb, "won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
	}

	/*
	 * We are not allowed to do online-resizing on a filesystem mounted
	 * with error, because it can destroy the filesystem easily.
	 */
	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
		ext4_warning(sb, "There are errors in the filesystem, "
			     "so online resizing is not allowed");
		return -EPERM;
	}

	if (ext4_has_feature_sparse_super2(sb)) {
		ext4_msg(sb, KERN_ERR, "Online resizing not supported with sparse_super2");
		return -EOPNOTSUPP;
	}

	if (test_and_set_bit_lock(EXT4_FLAGS_RESIZING,
				  &EXT4_SB(sb)->s_ext4_flags))
		ret = -EBUSY;

	return ret;
}

void ext4_resize_end(struct super_block *sb)
{
	clear_bit_unlock(EXT4_FLAGS_RESIZING, &EXT4_SB(sb)->s_ext4_flags);
	smp_mb__after_atomic();
}

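/*
 * With the meta_bg feature, block groups are clustered into meta groups
 * of EXT4_DESC_PER_BLOCK(sb) groups each, described by a single group
 * descriptor block kept in the first group of the cluster (with backups
 * in the second and last groups).  The helpers below round a group
 * number down to the first group of its meta group and map that group
 * to its first block number.
 */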
static ext4_group_t ext4_meta_bg_first_group(struct super_block *sb,
					     ext4_group_t group) {
	return (group >> EXT4_DESC_PER_BLOCK_BITS(sb)) <<
	       EXT4_DESC_PER_BLOCK_BITS(sb);
}

static ext4_fsblk_t ext4_meta_bg_first_block_no(struct super_block *sb,
						ext4_group_t group) {
	group = ext4_meta_bg_first_group(sb, group);
	return ext4_group_first_block_no(sb, group);
}

static ext4_grpblk_t ext4_group_overhead_blocks(struct super_block *sb,
						ext4_group_t group) {
	ext4_grpblk_t overhead;

	overhead = ext4_bg_num_gdb(sb, group);
	if (ext4_bg_has_super(sb, group))
		overhead += 1 +
			le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	return overhead;
}

#define outside(b, first, last)	((b) < (first) || (b) >= (last))
#define inside(b, first, last)	((b) >= (first) && (b) < (last))

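/*
 * Sanity-check the group descriptor data supplied by the (non-64bit)
 * resize ioctl before any of it is trusted: the new group must be
 * appended at the current end of the filesystem, and its bitmaps and
 * inode table must lie inside the new group without overlapping each
 * other or the superblock/GDT area.  The "- 2" in the free count below
 * accounts for the block bitmap and the inode bitmap, one block each.
 */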
static int verify_group_input(struct super_block *sb,
			      struct ext4_new_group_data *input)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t start = ext4_blocks_count(es);
	ext4_fsblk_t end = start + input->blocks_count;
	ext4_group_t group = input->group;
	ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
	unsigned overhead;
	ext4_fsblk_t metaend;
	struct buffer_head *bh = NULL;
	ext4_grpblk_t free_blocks_count, offset;
	int err = -EINVAL;

	if (group != sbi->s_groups_count) {
		ext4_warning(sb, "Cannot add at group %u (only %u groups)",
			     input->group, sbi->s_groups_count);
		return -EINVAL;
	}

	overhead = ext4_group_overhead_blocks(sb, group);
	metaend = start + overhead;
	input->free_clusters_count = free_blocks_count =
		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: adding %s group %u: %u blocks "
		       "(%d free, %u reserved)\n",
		       ext4_bg_has_super(sb, input->group) ? "normal" :
		       "no-super", input->group, input->blocks_count,
		       free_blocks_count, input->reserved_blocks);

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (offset != 0)
		ext4_warning(sb, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
		ext4_warning(sb, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
		ext4_warning(sb, "Bad blocks count %u",
			     input->blocks_count);
	else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
		err = PTR_ERR(bh);
		bh = NULL;
		ext4_warning(sb, "Cannot read last block (%llu)",
			     end - 1);
	} else if (outside(input->block_bitmap, start, end))
		ext4_warning(sb, "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
		ext4_warning(sb, "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
		ext4_warning(sb, "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
		ext4_warning(sb, "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Block bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
		ext4_warning(sb, "Inode bitmap (%llu) in inode table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
		ext4_warning(sb, "Block bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
		ext4_warning(sb, "Inode bitmap (%llu) in GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
		ext4_warning(sb, "Inode table (%llu-%llu) overlaps GDT table "
			     "(%llu-%llu)",
			     (unsigned long long)input->inode_table,
			     itend - 1, start, metaend - 1);
	else
		err = 0;
	brelse(bh);

	return err;
}

/*
 * struct ext4_new_flex_group_data is used by the 64bit-resize interface
 * to add a flex group each time.
 */
struct ext4_new_flex_group_data {
	struct ext4_new_group_data *groups;	/* new_group_data for groups
						   in the flex group */
	__u16 *bg_flags;			/* block group flags of groups
						   in @groups */
	ext4_group_t resize_bg;			/* number of allocated
						   new_group_data */
	ext4_group_t count;			/* number of groups in @groups
						 */
};

/*
 * Cap the number of groups added per pass, to avoid memory allocation
 * failures when too many groups are added at once.
 */
#define MAX_RESIZE_BG				16384

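/*
 * Capping at MAX_RESIZE_BG bounds the groups[] and bg_flags[] arrays
 * allocated below to MAX_RESIZE_BG entries each, however large the flex
 * group is; an oversized flex group is then added in several passes of
 * up to flex_gd->resize_bg groups each (see ext4_setup_next_flex_gd()).
 */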
/*
 * alloc_flex_gd() allocates an ext4_new_flex_group_data sized for
 * @flexbg_size.
 *
 * Returns NULL on failure, otherwise the address of the allocated structure.
 */
static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned int flexbg_size)
{
	struct ext4_new_flex_group_data *flex_gd;

	flex_gd = kmalloc(sizeof(*flex_gd), GFP_NOFS);
	if (flex_gd == NULL)
		goto out3;

	if (unlikely(flexbg_size > MAX_RESIZE_BG))
		flex_gd->resize_bg = MAX_RESIZE_BG;
	else
		flex_gd->resize_bg = flexbg_size;

	flex_gd->groups = kmalloc_array(flex_gd->resize_bg,
					sizeof(struct ext4_new_group_data),
					GFP_NOFS);
	if (flex_gd->groups == NULL)
		goto out2;

	flex_gd->bg_flags = kmalloc_array(flex_gd->resize_bg, sizeof(__u16),
					  GFP_NOFS);
	if (flex_gd->bg_flags == NULL)
		goto out1;

	return flex_gd;

out1:
	kfree(flex_gd->groups);
out2:
	kfree(flex_gd);
out3:
	return NULL;
}

static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
{
	kfree(flex_gd->bg_flags);
	kfree(flex_gd->groups);
	kfree(flex_gd);
}

/*
 * ext4_alloc_group_tables() allocates block bitmaps, inode bitmaps
 * and inode tables for a flex group.
 *
 * This function is used by 64bit-resize.  Note that this function allocates
 * group tables from the 1st group of groups contained by @flexgd, which may
 * be only part of a flex group.
 *
 * @sb: super block of the fs to which the groups belong
 *
 * Returns 0 on a successful allocation of the metadata blocks in the
 * block group.
 */
static int ext4_alloc_group_tables(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd,
				unsigned int flexbg_size)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t start_blk;
	ext4_fsblk_t last_blk;
	ext4_group_t src_group;
	ext4_group_t bb_index = 0;
	ext4_group_t ib_index = 0;
	ext4_group_t it_index = 0;
	ext4_group_t group;
	ext4_group_t last_group;
	unsigned overhead;
	__u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);

	src_group = group_data[0].group;
	last_group = src_group + flex_gd->count - 1;

	BUG_ON((flexbg_size > 1) && ((src_group & ~(flexbg_size - 1)) !=
	       (last_group & ~(flexbg_size - 1))));
next_group:
	group = group_data[0].group;
	if (src_group >= group_data[0].group + flex_gd->count)
		return -ENOSPC;
	start_blk = ext4_group_first_block_no(sb, src_group);
	last_blk = start_blk + group_data[src_group - group].blocks_count;

	overhead = ext4_group_overhead_blocks(sb, src_group);

	start_blk += overhead;

	/* Collect as many contiguous blocks as we can. */
	src_group++;
	for (; src_group <= last_group; src_group++) {
		overhead = ext4_group_overhead_blocks(sb, src_group);
		if (overhead == 0)
			last_blk += group_data[src_group - group].blocks_count;
		else
			break;
	}

	/* Allocate block bitmaps */
	for (; bb_index < flex_gd->count; bb_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[bb_index].block_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode bitmaps */
	for (; ib_index < flex_gd->count; ib_index++) {
		if (start_blk >= last_blk)
			goto next_group;
		group_data[ib_index].inode_bitmap = start_blk++;
		group = ext4_get_group_number(sb, start_blk - 1);
		group -= group_data[0].group;
		group_data[group].mdata_blocks++;
		flex_gd->bg_flags[group] &= uninit_mask;
	}

	/* Allocate inode tables */
	for (; it_index < flex_gd->count; it_index++) {
		unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
		ext4_fsblk_t next_group_start;

		if (start_blk + itb > last_blk)
			goto next_group;
		group_data[it_index].inode_table = start_blk;
		group = ext4_get_group_number(sb, start_blk);
		next_group_start = ext4_group_first_block_no(sb, group + 1);
		group -= group_data[0].group;

		if (start_blk + itb > next_group_start) {
			flex_gd->bg_flags[group + 1] &= uninit_mask;
			overhead = start_blk + itb - next_group_start;
			group_data[group + 1].mdata_blocks += overhead;
			itb -= overhead;
		}

		group_data[group].mdata_blocks += itb;
		flex_gd->bg_flags[group] &= uninit_mask;
		start_blk += EXT4_SB(sb)->s_itb_per_group;
	}

	/* Update free clusters count to exclude metadata blocks */
	for (i = 0; i < flex_gd->count; i++) {
		group_data[i].free_clusters_count -=
				EXT4_NUM_B2C(EXT4_SB(sb),
					     group_data[i].mdata_blocks);
	}

	if (test_opt(sb, DEBUG)) {
		int i;
		group = group_data[0].group;

		printk(KERN_DEBUG "EXT4-fs: adding a flex group with "
		       "%u groups, flexbg size is %u:\n", flex_gd->count,
		       flexbg_size);

		for (i = 0; i < flex_gd->count; i++) {
			ext4_debug(
			       "adding %s group %u: %u blocks (%u free, %u mdata blocks)\n",
			       ext4_bg_has_super(sb, group + i) ? "normal" :
			       "no-super", group + i,
			       group_data[i].blocks_count,
			       group_data[i].free_clusters_count,
			       group_data[i].mdata_blocks);
		}
	}
	return 0;
}

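/*
 * bclean() hands back a zero-filled, uptodate buffer for @blk that has
 * already been added to the current transaction via
 * ext4_journal_get_write_access(), so callers can fill it in and then
 * just dirty it.  Returns an ERR_PTR on failure.
 */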
static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
				  ext4_fsblk_t blk)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	BUFFER_TRACE(bh, "get_write_access");
	if ((err = ext4_journal_get_write_access(handle, bh))) {
		brelse(bh);
		bh = ERR_PTR(err);
	} else {
		memset(bh->b_data, 0, sb->s_blocksize);
		set_buffer_uptodate(bh);
	}

	return bh;
}

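/*
 * Make sure @handle has at least @credits credits left, extending or
 * restarting the transaction in batches of up to EXT4_MAX_TRANS_DATA
 * credits when it does not.  Per ext4_journal_ensure_credits_fn(), this
 * returns 0 if the handle was untouched, a positive value if the
 * transaction had to be restarted (callers must then re-obtain write
 * access to any buffers they were modifying), or a negative error.
 */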
static int ext4_resize_ensure_credits_batch(handle_t *handle, int credits)
{
	return ext4_journal_ensure_credits_fn(handle, credits,
					      EXT4_MAX_TRANS_DATA, 0, 0);
}

/*
 * set_flexbg_block_bitmap() marks clusters [@first_cluster, @last_cluster]
 * used.
 *
 * Helper function for setup_new_flex_group_blocks(), which uses it to mark
 * the blocks taken by the new group tables in the block bitmaps.
 *
 * @sb: super block
 * @handle: journal handle
 * @flex_gd: flex group data
 */
static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
			struct ext4_new_flex_group_data *flex_gd,
			ext4_fsblk_t first_cluster, ext4_fsblk_t last_cluster)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t count = last_cluster - first_cluster + 1;
	ext4_group_t count2;

	ext4_debug("mark clusters [%llu-%llu] used\n", first_cluster,
		   last_cluster);
	for (count2 = count; count > 0;
	     count -= count2, first_cluster += count2) {
		ext4_fsblk_t start;
		struct buffer_head *bh;
		ext4_group_t group;
		int err;

		group = ext4_get_group_number(sb, EXT4_C2B(sbi, first_cluster));
		start = EXT4_B2C(sbi, ext4_group_first_block_no(sb, group));
		group -= flex_gd->groups[0].group;

		count2 = EXT4_CLUSTERS_PER_GROUP(sb) - (first_cluster - start);
		if (count2 > count)
			count2 = count;

		if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
			BUG_ON(flex_gd->count > 1);
			continue;
		}

		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			return err;

		bh = sb_getblk(sb, flex_gd->groups[group].block_bitmap);
		if (unlikely(!bh))
			return -ENOMEM;

		BUFFER_TRACE(bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, bh);
		if (err) {
			brelse(bh);
			return err;
		}
		ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
			   first_cluster, first_cluster - start, count2);
		ext4_set_bits(bh->b_data, first_cluster - start, count2);

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (unlikely(err))
			return err;
	}

	return 0;
}

/*
 * Set up the block and inode bitmaps, and the inode table for the new groups.
 * This doesn't need to be part of the main transaction, since we are only
 * changing blocks outside the actual filesystem.  We still do journaling to
 * ensure the recovery is correct in case of a failure just after resize.
 * If any part of this fails, we simply abort the resize.
 *
 * setup_new_flex_group_blocks handles a flex group as follows:
 * 1. copy super block and GDT, and initialize group tables if necessary.
 *    In this step, we only set bits in block bitmaps for blocks taken by
 *    super block and GDT.
 * 2. allocate group tables in block bitmaps, that is, set bits in block
 *    bitmap for blocks taken by group tables.
 */
static int setup_new_flex_group_blocks(struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	int group_table_count[] = {1, 1, EXT4_SB(sb)->s_itb_per_group};
	ext4_fsblk_t start;
	ext4_fsblk_t block;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	__u16 *bg_flags = flex_gd->bg_flags;
	handle_t *handle;
	ext4_group_t group, count;
	struct buffer_head *bh = NULL;
	int reserved_gdb, i, j, err = 0, err2;
	int meta_bg;

	BUG_ON(!flex_gd->count || !group_data ||
	       group_data[0].group != sbi->s_groups_count);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	meta_bg = ext4_has_feature_meta_bg(sb);

	/* This transaction may be extended/restarted along the way */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	group = group_data[0].group;
	for (i = 0; i < flex_gd->count; i++, group++) {
		unsigned long gdblocks;
		ext4_grpblk_t overhead;

		gdblocks = ext4_bg_num_gdb(sb, group);
		start = ext4_group_first_block_no(sb, group);

		if (meta_bg == 0 && !ext4_bg_has_super(sb, group))
			goto handle_itb;

		if (meta_bg == 1)
			goto handle_itb;

		block = start + ext4_bg_has_super(sb, group);
		/* Copy all of the GDT blocks into the backup in this group */
		for (j = 0; j < gdblocks; j++, block++) {
			struct buffer_head *gdb;

			ext4_debug("update backup group %#04llx\n", block);
			err = ext4_resize_ensure_credits_batch(handle, 1);
			if (err < 0)
				goto out;

			gdb = sb_getblk(sb, block);
			if (unlikely(!gdb)) {
				err = -ENOMEM;
				goto out;
			}

			BUFFER_TRACE(gdb, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb);
			if (err) {
				brelse(gdb);
				goto out;
			}
			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
				s_group_desc, j)->b_data, gdb->b_size);
			set_buffer_uptodate(gdb);

			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
			if (unlikely(err)) {
				brelse(gdb);
				goto out;
			}
			brelse(gdb);
		}

		/* Zero out all of the reserved backup group descriptor
		 * table blocks
		 */
		if (ext4_bg_has_super(sb, group)) {
			err = sb_issue_zeroout(sb, gdblocks + start + 1,
					       reserved_gdb, GFP_NOFS);
			if (err)
				goto out;
		}

handle_itb:
		/* Initialize group tables of the group @group */
		if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
			goto handle_bb;

		/* Zero out all of the inode table blocks */
		block = group_data[i].inode_table;
		ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
			   block, sbi->s_itb_per_group);
		err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group,
				       GFP_NOFS);
		if (err)
			goto out;

handle_bb:
		if (bg_flags[i] & EXT4_BG_BLOCK_UNINIT)
			goto handle_ib;

		/* Initialize block bitmap of the @group */
		block = group_data[i].block_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;

		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}
		overhead = ext4_group_overhead_blocks(sb, group);
		if (overhead != 0) {
			ext4_debug("mark backup superblock %#04llx (+0)\n",
				   start);
			ext4_set_bits(bh->b_data, 0,
				      EXT4_NUM_B2C(sbi, overhead));
		}
		ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;

handle_ib:
		if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
			continue;

		/* Initialize inode bitmap of the @group */
		block = group_data[i].inode_bitmap;
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			goto out;
		/* Mark unused entries in inode bitmap used */
		bh = bclean(handle, sb, block);
		if (IS_ERR(bh)) {
			err = PTR_ERR(bh);
			goto out;
		}

		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		brelse(bh);
		if (err)
			goto out;
	}

	/* Mark group tables in block bitmap */
	for (j = 0; j < GROUP_TABLE_COUNT; j++) {
		count = group_table_count[j];
		start = (&group_data[0].block_bitmap)[j];
		block = start;
		for (i = 1; i < flex_gd->count; i++) {
			block += group_table_count[j];
			if (block == (&group_data[i].block_bitmap)[j]) {
				count += group_table_count[j];
				continue;
			}
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
			count = group_table_count[j];
			start = (&group_data[i].block_bitmap)[j];
			block = start;
		}

		if (count) {
			err = set_flexbg_block_bitmap(sb, handle,
						      flex_gd,
						      EXT4_B2C(sbi, start),
						      EXT4_B2C(sbi,
							       start + count
							       - 1));
			if (err)
				goto out;
		}
	}

out:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	return err;
}

/*
 * Iterate through the groups which hold BACKUP superblock/GDT copies in an
 * ext4 filesystem.  The counters should be initialized to 1, 5, and 7 before
 * calling this for the first time.  In a sparse filesystem it will be the
 * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
 */
static unsigned ext4_list_backups(struct super_block *sb, unsigned *three,
				  unsigned *five, unsigned *seven)
{
	unsigned *min = three;
	int mult = 3;
	unsigned ret;

	if (!ext4_has_feature_sparse_super(sb)) {
		ret = *min;
		*min += 1;
		return ret;
	}

	if (*five < *min) {
		min = five;
		mult = 5;
	}
	if (*seven < *min) {
		min = seven;
		mult = 7;
	}

	ret = *min;
	*min *= mult;

	return ret;
}

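/*
 * The backup of a reserved primary GDT block @blk in backup group @grp
 * lives at the same offset within that group, i.e. at
 * grp * EXT4_BLOCKS_PER_GROUP(sb) + blk.  That is the value each leaf
 * entry of the reserved primary GDT block is checked against below.
 */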
/*
 * Check that all of the backup GDT blocks are held in the primary GDT block.
 * It is assumed that they are stored in group order.  Returns the number of
 * groups in current filesystem that have BACKUPS, or -ve error code.
 */
static int verify_reserved_gdb(struct super_block *sb,
			       ext4_group_t end,
			       struct buffer_head *primary)
{
	const ext4_fsblk_t blk = primary->b_blocknr;
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	unsigned grp;
	__le32 *p = (__le32 *)primary->b_data;
	int gdbackups = 0;

	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk) {
			ext4_warning(sb, "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
				     grp *
				     (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
				     blk);
			return -EINVAL;
		}
		if (++gdbackups > EXT4_ADDR_PER_BLOCK(sb))
			return -EFBIG;
	}

	return gdbackups;
}

/*
 * Called when we need to bring a reserved group descriptor table block into
 * use from the resize inode.  The primary copy of the new GDT block currently
 * is an indirect block (under the double indirect block in the resize inode).
 * The new backup GDT blocks will be stored as leaf blocks in this indirect
 * block, in group order.  Even though we know all the block numbers we need,
 * we check to ensure that the resize inode has actually reserved these blocks.
 *
 * Don't need to update the block bitmaps because the blocks are still in use.
 *
 * We get all of the error cases out of the way, so that we are sure to not
 * fail once we start modifying the data on disk, because JBD has no rollback.
 */
static int add_new_gdb(handle_t *handle, struct inode *inode,
		       ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
	struct buffer_head **o_group_desc, **n_group_desc = NULL;
	struct buffer_head *dind = NULL;
	struct buffer_head *gdb_bh = NULL;
	int gdbackups;
	struct ext4_iloc iloc = { .bh = NULL };
	__le32 *data;
	int err;

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG
		       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
		       gdb_num);

	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);

	gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
	if (gdbackups < 0) {
		err = gdbackups;
		goto errout;
	}

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto errout;
	}

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
		ext4_warning(sb, "new group %u GDT block %llu not reserved",
			     group, gdblock);
		err = -EINVAL;
		goto errout;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (unlikely(err))
		goto errout;

	BUFFER_TRACE(dind, "get_write_access");
	err = ext4_journal_get_write_access(handle, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}

	/* ext4_reserve_inode_write() gets a reference on the iloc */
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (unlikely(err))
		goto errout;

	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		goto errout;
	}

	/*
	 * Finally, we have all of the possible failures behind us...
	 *
	 * Remove new GDT block from inode double-indirect block and clear out
	 * the new GDT block for use (which also "frees" the backup GDT blocks
	 * from the reserved inode).  We don't need to change the bitmaps for
	 * these blocks, because they are marked as in-use from being in the
	 * reserved inode, and will become GDT blocks (primary and backup).
	 */
	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
	err = ext4_handle_dirty_metadata(handle, NULL, dind);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		goto errout;
	}
	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
			   (9 - EXT4_SB(sb)->s_cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);
	memset(gdb_bh->b_data, 0, sb->s_blocksize);
	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
	if (unlikely(err)) {
		ext4_std_error(sb, err);
		iloc.bh = NULL;
		goto errout;
	}
	brelse(dind);

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;
	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);

	lock_buffer(EXT4_SB(sb)->s_sbh);
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	if (err)
		ext4_std_error(sb, err);
	return err;
errout:
	kvfree(n_group_desc);
	brelse(iloc.bh);
	brelse(dind);
	brelse(gdb_bh);

	ext4_debug("leaving with error %d\n", err);
	return err;
}

/*
 * add_new_gdb_meta_bg is the sister of add_new_gdb: with the meta_bg
 * feature the new group descriptor block already lives in the first
 * group of its meta group, so no resize inode is involved; we only read
 * the block and publish it in the s_group_desc array.
 */
static int add_new_gdb_meta_bg(struct super_block *sb,
			       handle_t *handle, ext4_group_t group) {
	ext4_fsblk_t gdblock;
	struct buffer_head *gdb_bh;
	struct buffer_head **o_group_desc, **n_group_desc;
	unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
	int err;

	gdblock = ext4_meta_bg_first_block_no(sb, group) +
		   ext4_bg_has_super(sb, group);
	gdb_bh = ext4_sb_bread(sb, gdblock, 0);
	if (IS_ERR(gdb_bh))
		return PTR_ERR(gdb_bh);
	n_group_desc = kvmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
				GFP_KERNEL);
	if (!n_group_desc) {
		brelse(gdb_bh);
		err = -ENOMEM;
		ext4_warning(sb, "not enough memory for %lu groups",
			     gdb_num + 1);
		return err;
	}

	rcu_read_lock();
	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
	memcpy(n_group_desc, o_group_desc,
	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
	rcu_read_unlock();
	n_group_desc[gdb_num] = gdb_bh;

	BUFFER_TRACE(gdb_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, gdb_bh);
	if (err) {
		kvfree(n_group_desc);
		brelse(gdb_bh);
		return err;
	}

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);
	return err;
}

/*
 * Called when we are adding a new group which has a backup copy of each of
 * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
 * We need to add these reserved backup GDT blocks to the resize inode, so
 * that they are kept for future resizing and not allocated to files.
 *
 * Each reserved backup GDT block will go into a different indirect block.
 * The indirect blocks are actually the primary reserved GDT blocks,
 * so we know in advance what their block numbers are.  We only get the
 * double-indirect block to verify it is pointing to the primary reserved
 * GDT blocks so we don't overwrite a data block by accident.  The reserved
 * backup GDT blocks are stored in their reserved primary GDT block.
 */
static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
			      ext4_group_t group)
{
	struct super_block *sb = inode->i_sb;
	int reserved_gdb = le16_to_cpu(EXT4_SB(sb)->s_es->s_reserved_gdt_blocks);
	int cluster_bits = EXT4_SB(sb)->s_cluster_bits;
	struct buffer_head **primary;
	struct buffer_head *dind;
	struct ext4_iloc iloc;
	ext4_fsblk_t blk;
	__le32 *data, *end;
	int gdbackups = 0;
	int res, i;
	int err;

	primary = kmalloc_array(reserved_gdb, sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

	data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
	dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
	if (IS_ERR(dind)) {
		err = PTR_ERR(dind);
		dind = NULL;
		goto exit_free;
	}

	blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
	data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
					 EXT4_ADDR_PER_BLOCK(sb));
	end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);

	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
			ext4_warning(sb, "reserved block %llu"
				     " not at offset %ld",
				     blk,
				     (long)(data - (__le32 *)dind->b_data));
			err = -EINVAL;
			goto exit_bh;
		}
		primary[res] = ext4_sb_bread(sb, blk, 0);
		if (IS_ERR(primary[res])) {
			err = PTR_ERR(primary[res]);
			primary[res] = NULL;
			goto exit_bh;
		}
		gdbackups = verify_reserved_gdb(sb, group, primary[res]);
		if (gdbackups < 0) {
			brelse(primary[res]);
			err = gdbackups;
			goto exit_bh;
		}
		if (++data >= end)
			data = (__le32 *)dind->b_data;
	}

	for (i = 0; i < reserved_gdb; i++) {
		BUFFER_TRACE(primary[i], "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, primary[i])))
			goto exit_bh;
	}

	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
		goto exit_bh;

	/*
	 * Finally we can add each of the reserved backup GDT blocks from
	 * the new group to its reserved primary GDT block.
	 */
	blk = group * EXT4_BLOCKS_PER_GROUP(sb);
	for (i = 0; i < reserved_gdb; i++) {
		int err2;
		data = (__le32 *)primary[i]->b_data;
		/* printk("reserving backup %lu[%u] = %lu\n",
		       primary[i]->b_blocknr, gdbackups,
		       blk + primary[i]->b_blocknr); */
		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
		err2 = ext4_handle_dirty_metadata(handle, NULL, primary[i]);
		if (!err)
			err = err2;
	}

	inode->i_blocks += reserved_gdb * sb->s_blocksize >> (9 - cluster_bits);
	ext4_mark_iloc_dirty(handle, inode, &iloc);

exit_bh:
	while (--res >= 0)
		brelse(primary[res]);
	brelse(dind);

exit_free:
	kfree(primary);

	return err;
}

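/*
 * Where the backups live: with sparse_super, copies of the superblock
 * and GDT sit in group 1 and the power-of-3/5/7 groups enumerated by
 * ext4_list_backups() above; with meta_bg, each group descriptor block
 * keeps two backups, in the second and last groups of its meta group,
 * which is what the meta_bg branch below walks.
 */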
/*
 * Update the backup copies of the ext4 metadata.  These don't need to be part
 * of the main resize transaction, because e2fsck will re-write them if there
 * is a problem (basically only OOM will cause a problem).  However, we
 * _should_ update the backups if possible, in case the primary gets trashed
 * for some reason and we need to run e2fsck from a backup superblock.  The
 * important part is that the new block and inode counts are in the backup
 * superblocks, and the location of the new group metadata in the GDT backups.
 *
 * We do not need to take the s_resize_lock for this, because these
 * blocks are not otherwise touched by the filesystem code when it is
 * mounted.  We don't need to worry about last changing from
 * sbi->s_groups_count, because the worst that can happen is that we
 * do not copy the full number of backups at this time.  The resize
 * which changed s_groups_count will backup again.
 */
static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
			   int size, int meta_bg)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t last;
	const int bpg = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned three = 1;
	unsigned five = 5;
	unsigned seven = 7;
	ext4_group_t group = 0;
	int rest = sb->s_blocksize - size;
	handle_t *handle;
	int err = 0, err2;

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, EXT4_MAX_TRANS_DATA);
	if (IS_ERR(handle)) {
		group = 1;
		err = PTR_ERR(handle);
		goto exit_err;
	}

	if (meta_bg == 0) {
		group = ext4_list_backups(sb, &three, &five, &seven);
		last = sbi->s_groups_count;
	} else {
		group = ext4_get_group_number(sb, blk_off) + 1;
		last = (ext4_group_t)(group + EXT4_DESC_PER_BLOCK(sb) - 2);
	}

	while (group < sbi->s_groups_count) {
		struct buffer_head *bh;
		ext4_fsblk_t backup_block;

		/* Out of journal space, and can't get more - abort - so sad */
		err = ext4_resize_ensure_credits_batch(handle, 1);
		if (err < 0)
			break;

		if (meta_bg == 0)
			backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
		else
			backup_block = (ext4_group_first_block_no(sb, group) +
					ext4_bg_has_super(sb, group));

		bh = sb_getblk(sb, backup_block);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			break;
		}
		ext4_debug("update metadata backup %llu(+%llu)\n",
			   backup_block, backup_block -
			   ext4_group_first_block_no(sb, group));
		BUFFER_TRACE(bh, "get_write_access");
		if ((err = ext4_journal_get_write_access(handle, bh))) {
			brelse(bh);
			break;
		}
		lock_buffer(bh);
		memcpy(bh->b_data, data, size);
		if (rest)
			memset(bh->b_data + size, 0, rest);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (unlikely(err))
			ext4_std_error(sb, err);
		brelse(bh);

		if (meta_bg == 0)
			group = ext4_list_backups(sb, &three, &five, &seven);
		else if (group == last)
			break;
		else
			group = last;
	}
	if ((err2 = ext4_journal_stop(handle)) && !err)
		err = err2;

	/*
	 * Ugh! Need to have e2fsck write the backup copies.  It is too
	 * late to revert the resize, we shouldn't fail just because of
	 * the backup copies (they are only needed in case of corruption).
	 *
	 * However, if we got here we have a journal problem too, so we
	 * can't really start a transaction to mark the superblock.
	 * Chicken out and just set the flag on the hope it will be written
	 * to disk, and if not - we will simply wait until next fsck.
	 */
exit_err:
	if (err) {
		ext4_warning(sb, "can't update backup for group %u (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
		sbi->s_es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
		mark_buffer_dirty(sbi->s_sbh);
	}
}

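/*
 * A group's descriptor is located by gdb_num (which GDT block holds it)
 * and gdb_off (the slot inside that block).  Only a group whose
 * descriptor is the first in its block (gdb_off == 0) brings a new GDT
 * block into use; later groups in the same descriptor block just need
 * write access to the block that already exists.
 */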
/*
 * ext4_add_new_descs() adds @count group descriptors of groups
 * starting at @group
 *
 * @handle: journal handle
 * @sb: super block
 * @group: the group no. of the first group desc to be added
 * @resize_inode: the resize inode
 * @count: number of group descriptors to be added
 */
static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
			      ext4_group_t group, struct inode *resize_inode,
			      ext4_group_t count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *gdb_bh;
	int i, gdb_off, gdb_num, err = 0;
	int meta_bg;

	meta_bg = ext4_has_feature_meta_bg(sb);
	for (i = 0; i < count; i++, group++) {
		int reserved_gdb = ext4_bg_has_super(sb, group) ?
			le16_to_cpu(es->s_reserved_gdt_blocks) : 0;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * We will only either add reserved group blocks to a backup group
		 * or remove reserved blocks for the first group in a new group block.
		 * Doing both would mean more complex code, and sane people don't
		 * use non-sparse filesystems anymore.  This is already checked above.
		 */
		if (gdb_off) {
			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			BUFFER_TRACE(gdb_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, gdb_bh);

			if (!err && reserved_gdb && ext4_bg_num_gdb(sb, group))
				err = reserve_backup_gdb(handle, resize_inode, group);
		} else if (meta_bg != 0) {
			err = add_new_gdb_meta_bg(sb, handle, group);
		} else {
			err = add_new_gdb(handle, resize_inode, group);
		}
		if (err)
			break;
	}
	return err;
}

static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
{
	struct buffer_head *bh = sb_getblk(sb, block);

	if (unlikely(!bh))
		return NULL;
	if (!bh_uptodate_or_lock(bh)) {
		if (ext4_read_bh(bh, 0, NULL) < 0) {
			brelse(bh);
			return NULL;
		}
	}

	return bh;
}

static int ext4_set_bitmap_checksums(struct super_block *sb,
				     ext4_group_t group,
				     struct ext4_group_desc *gdp,
				     struct ext4_new_group_data *group_data)
{
	struct buffer_head *bh;

	if (!ext4_has_metadata_csum(sb))
		return 0;

	bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
	if (!bh)
		return -EIO;
	ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	brelse(bh);

	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
	if (!bh)
		return -EIO;
	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
	brelse(bh);

	return 0;
}

/*
 * ext4_setup_new_descs() will set up the group descriptors of a flex bg
 */
static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
				struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_group_desc *gdp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *gdb_bh;
	ext4_group_t group;
	__u16 *bg_flags = flex_gd->bg_flags;
	int i, gdb_off, gdb_num, err = 0;

	for (i = 0; i < flex_gd->count; i++, group_data++, bg_flags++) {
		group = group_data->group;

		gdb_off = group % EXT4_DESC_PER_BLOCK(sb);
		gdb_num = group / EXT4_DESC_PER_BLOCK(sb);

		/*
		 * get_write_access() has been called on gdb_bh by
		 * ext4_add_new_descs().
		 */
		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
		/* Update group descriptor block for new group */
		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
						 gdb_off * EXT4_DESC_SIZE(sb));

		memset(gdp, 0, EXT4_DESC_SIZE(sb));
		ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
		ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
		err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
		if (err) {
			ext4_std_error(sb, err);
			break;
		}

		ext4_inode_table_set(sb, gdp, group_data->inode_table);
		ext4_free_group_clusters_set(sb, gdp,
					     group_data->free_clusters_count);
		ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
		if (ext4_has_group_desc_csum(sb))
			ext4_itable_unused_set(sb, gdp,
					       EXT4_INODES_PER_GROUP(sb));
		gdp->bg_flags = cpu_to_le16(*bg_flags);
		ext4_group_desc_csum_set(sb, group, gdp);

		err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
		if (unlikely(err)) {
			ext4_std_error(sb, err);
			break;
		}

		/*
		 * We can allocate memory for mb_alloc based on the new group
		 * descriptor
		 */
		err = ext4_mb_add_groupinfo(sb, group, gdp);
		if (err)
			break;
	}
	return err;
}

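/*
 * The new space keeps the filesystem's reserved-block ratio: below we
 * compute the percentage that s_r_blocks_count represents of the old
 * size and apply the same percentage to the blocks being added (e.g. if
 * 5% of the old filesystem was reserved, 5% of the new blocks are
 * reserved too).
 */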
/*
 * ext4_update_super() updates the super block so that the newly added
 * groups can be seen by the filesystem.
 *
 * @sb: super block
 * @flex_gd: new added groups
 */
static void ext4_update_super(struct super_block *sb,
			      struct ext4_new_flex_group_data *flex_gd)
{
	ext4_fsblk_t blocks_count = 0;
	ext4_fsblk_t free_blocks = 0;
	ext4_fsblk_t reserved_blocks = 0;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i;

	BUG_ON(flex_gd->count == 0 || group_data == NULL);
	/*
	 * Make the new blocks and inodes valid next.  We do this before
	 * increasing the group count so that once the group is enabled,
	 * all of its blocks and inodes are already valid.
	 *
	 * We always allocate group-by-group, then block-by-block or
	 * inode-by-inode within a group, so enabling these
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 */
	for (i = 0; i < flex_gd->count; i++) {
		blocks_count += group_data[i].blocks_count;
		free_blocks += EXT4_C2B(sbi, group_data[i].free_clusters_count);
	}

	reserved_blocks = ext4_r_blocks_count(es) * 100;
	reserved_blocks = div64_u64(reserved_blocks, ext4_blocks_count(es));
	reserved_blocks *= blocks_count;
	do_div(reserved_blocks, 100);

	lock_buffer(sbi->s_sbh);
	ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);
	le32_add_cpu(&es->s_free_inodes_count, EXT4_INODES_PER_GROUP(sb) *
		     flex_gd->count);

	ext4_debug("free blocks count %llu", ext4_free_blocks_count(es));
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * inconsistent state in the superblock.
	 *
	 * The precise rules we use are:
	 *
	 * * Writers must perform a smp_wmb() after updating all
	 *   dependent data and before modifying the groups count
	 *
	 * * Readers must perform an smp_rmb() after reading the groups
	 *   count and before reading any dependent data.
	 *
	 * NB. These rules can be relaxed when checking the group count
	 * while freeing data, as we can only allocate from a block
	 * group after serialising against the group count, and we can
	 * only then free after serialising in turn against that
	 * allocation.
	 */
	smp_wmb();

	/* Update the global fs size fields */
	sbi->s_groups_count += flex_gd->count;
	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));

	/* Update the reserved block counts only once the new group is
	 * active. */
	ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
				reserved_blocks);
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeclusters_counter,
			   EXT4_NUM_B2C(sbi, free_blocks));
	percpu_counter_add(&sbi->s_freeinodes_counter,
			   EXT4_INODES_PER_GROUP(sb) * flex_gd->count);

	ext4_debug("free blocks count %llu",
		   percpu_counter_read(&sbi->s_freeclusters_counter));
	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
		ext4_group_t flex_group;
		struct flex_groups *fg;

		flex_group = ext4_flex_group(sbi, group_data[0].group);
		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
			     &fg->free_clusters);
		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
			   &fg->free_inodes);
	}

	/*
	 * Update the fs overhead information
	 */
	ext4_calculate_overhead(sb);
	es->s_overhead_clusters = cpu_to_le32(sbi->s_overhead);

	if (test_opt(sb, DEBUG))
		printk(KERN_DEBUG "EXT4-fs: added group %u:"
		       "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
		       blocks_count, free_blocks, reserved_blocks);
}

/* Add a flex group to an fs.  Ensure we handle all possible error conditions
 * _before_ we start modifying the filesystem, because we cannot abort the
 * transaction and not have it write the data to disk.
 */
static int ext4_flex_group_add(struct super_block *sb,
			       struct inode *resize_inode,
			       struct ext4_new_flex_group_data *flex_gd)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_group_t group;
	handle_t *handle;
	unsigned reserved_gdb;
	int err = 0, err2 = 0, credit;

	BUG_ON(!flex_gd->count || !flex_gd->groups || !flex_gd->bg_flags);

	reserved_gdb = le16_to_cpu(es->s_reserved_gdt_blocks);
	o_blocks_count = ext4_blocks_count(es);
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);

	err = setup_new_flex_group_blocks(sb, flex_gd);
	if (err)
		goto exit;
	/*
	 * We will always be modifying at least the superblock and GDT
	 * blocks.  If we are adding a group past the last current GDT block,
	 * we will also modify the inode and the dindirect block.  If we
	 * are adding a group with superblock/GDT backups we will also
	 * modify each of the reserved GDT dindirect blocks.
	 */
	credit = 3;	/* sb, resize inode, resize inode dindirect */
	/* GDT blocks */
	credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
	credit += reserved_gdb;	/* Reserved GDT dindirect blocks */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto exit;
	}

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto exit_journal;

	group = flex_gd->groups[0].group;
	BUG_ON(group != sbi->s_groups_count);
	err = ext4_add_new_descs(handle, sb, group,
				 resize_inode, flex_gd->count);
	if (err)
		goto exit_journal;

	err = ext4_setup_new_descs(handle, sb, flex_gd);
	if (err)
		goto exit_journal;

	ext4_update_super(sb, flex_gd);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);

exit_journal:
	err2 = ext4_journal_stop(handle);
	if (!err)
		err = err2;

	if (!err) {
		int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
		int gdb_num_end = ((group + flex_gd->count - 1) /
				   EXT4_DESC_PER_BLOCK(sb));
		int meta_bg = ext4_has_feature_meta_bg(sb) &&
			      gdb_num >= le32_to_cpu(es->s_first_meta_bg);
		sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
					  ext4_group_first_block_no(sb, 0);
		sector_t old_gdb = 0;

		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
		for (; gdb_num <= gdb_num_end; gdb_num++) {
			struct buffer_head *gdb_bh;

			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
						     gdb_num);
			if (old_gdb == gdb_bh->b_blocknr)
				continue;
			update_backups(sb, gdb_bh->b_blocknr - padding_blocks,
				       gdb_bh->b_data, gdb_bh->b_size, meta_bg);
			old_gdb = gdb_bh->b_blocknr;
		}
	}
exit:
	return err;
}

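/*
 * Fill @flex_gd with the next chunk of groups needed to grow the
 * filesystem from its current size to @n_blocks_count, clamped to
 * flex_gd->resize_bg groups per pass.  Returns 1 when it has prepared
 * work for ext4_flex_group_add(), or 0 when the filesystem has already
 * reached @n_blocks_count.
 */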
static int ext4_setup_next_flex_gd(struct super_block *sb,
				   struct ext4_new_flex_group_data *flex_gd,
				   ext4_fsblk_t n_blocks_count)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_new_group_data *group_data = flex_gd->groups;
	ext4_fsblk_t o_blocks_count;
	ext4_group_t n_group;
	ext4_group_t group;
	ext4_group_t last_group;
	ext4_grpblk_t last;
	ext4_grpblk_t clusters_per_group;
	unsigned long i;

	clusters_per_group = EXT4_CLUSTERS_PER_GROUP(sb);

	o_blocks_count = ext4_blocks_count(es);

	if (o_blocks_count == n_blocks_count)
		return 0;

	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);
	BUG_ON(last);
	ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &last);

	last_group = group | (flex_gd->resize_bg - 1);
	if (last_group > n_group)
		last_group = n_group;

	flex_gd->count = last_group - group + 1;

	for (i = 0; i < flex_gd->count; i++) {
		int overhead;

		group_data[i].group = group + i;
		group_data[i].blocks_count = EXT4_BLOCKS_PER_GROUP(sb);
		overhead = ext4_group_overhead_blocks(sb, group + i);
		group_data[i].mdata_blocks = overhead;
		group_data[i].free_clusters_count = EXT4_CLUSTERS_PER_GROUP(sb);
		if (ext4_has_group_desc_csum(sb)) {
			flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
					       EXT4_BG_INODE_UNINIT;
			if (!test_opt(sb, INIT_INODE_TABLE))
				flex_gd->bg_flags[i] |= EXT4_BG_INODE_ZEROED;
		} else
			flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
	}

	if (last_group == n_group && ext4_has_group_desc_csum(sb))
		/* We need to initialize block bitmap of last group. */
		flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;

	if ((last_group == n_group) && (last != clusters_per_group - 1)) {
		group_data[i - 1].blocks_count = EXT4_C2B(sbi, last + 1);
		group_data[i - 1].free_clusters_count -= clusters_per_group -
							 last - 1;
	}

	return 1;
}

1643 /* Add group descriptor data to an existing or new group descriptor block.
1644 * Ensure we handle all possible error conditions _before_ we start modifying
1645 * the filesystem, because we cannot abort the transaction and not have it
1646 * write the data to disk.
1647 *
1648 * If we are on a GDT block boundary, we need to get the reserved GDT block.
1649 * Otherwise, we may need to add backup GDT blocks for a sparse group.
1650 *
1651 * We only need to hold the superblock lock while we are actually adding
1652 * in the new group's counts to the superblock. Prior to that we have
1653 * not really "added" the group at all. We re-check that we are still
1654 * adding in the last group in case things have changed since verifying.
1655 */
int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
{
	struct ext4_new_flex_group_data flex_gd;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int reserved_gdb = ext4_bg_has_super(sb, input->group) ?
		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
	struct inode *inode = NULL;
	int gdb_off;
	int err;
	__u16 bg_flags = 0;

	gdb_off = input->group % EXT4_DESC_PER_BLOCK(sb);

	if (gdb_off == 0 && !ext4_has_feature_sparse_super(sb)) {
		ext4_warning(sb, "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
		ext4_warning(sb, "inodes_count overflow");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!ext4_has_feature_resize_inode(sb) ||
		    !le16_to_cpu(es->s_reserved_gdt_blocks)) {
			ext4_warning(sb,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
		if (IS_ERR(inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(inode);
		}
	}

	err = verify_group_input(sb, input);
	if (err)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, input->group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, input->group + 1);
	if (err)
		goto out;

	flex_gd.count = 1;
	flex_gd.groups = input;
	flex_gd.bg_flags = &bg_flags;
	err = ext4_flex_group_add(sb, inode, &flex_gd);
out:
	iput(inode);
	return err;
} /* ext4_group_add */
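
/*
 * Illustrative userspace sketch (hypothetical values, not part of this
 * file): ext4_group_add() backs the EXT4_IOC_GROUP_ADD ioctl, so a
 * caller such as resize2fs drives it roughly like:
 *
 *	struct ext4_new_group_input in = {
 *		.group        = next_group,
 *		.block_bitmap = ...,
 *		.inode_bitmap = ...,
 *		.inode_table  = ...,
 *		.blocks_count = blocks_per_group,
 *	};
 *	ioctl(fd, EXT4_IOC_GROUP_ADD, &in);
 */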

/*
 * Extend a group without checking, assuming that the checking has
 * already been done by the caller.
 */
static int ext4_group_extend_no_check(struct super_block *sb,
				      ext4_fsblk_t o_blocks_count, ext4_grpblk_t add)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	handle_t *handle;
	int err = 0, err2;

	/* We will update the superblock, one block bitmap, and
	 * one group descriptor via ext4_group_add_blocks().
	 */
	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		ext4_warning(sb, "error %d on journal start", err);
		return err;
	}

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
	if (err) {
		ext4_warning(sb, "error %d on journal write access", err);
		goto errout;
	}

	lock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_blocks_count_set(es, o_blocks_count + add);
	ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
	ext4_superblock_csum_set(sb);
	unlock_buffer(EXT4_SB(sb)->s_sbh);
	ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
	/* We add the blocks to the bitmap and set the group's need-init bit */
	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
	if (err)
		goto errout;
	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
	ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
		   o_blocks_count + add);
errout:
	err2 = ext4_journal_stop(handle);
	if (err2 && !err)
		err = err2;

	if (!err) {
		if (test_opt(sb, DEBUG))
			printk(KERN_DEBUG "EXT4-fs: extended group to %llu "
			       "blocks\n", ext4_blocks_count(es));
		update_backups(sb, ext4_group_first_block_no(sb, 0),
			       (char *)es, sizeof(struct ext4_super_block), 0);
	}
	return err;
}

/*
 * Extend the filesystem to the new number of blocks specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).
 *
 * If we _really_ wanted, we could use default values to call ext4_group_add()
 * to allow the "remount" trick to work for arbitrary resizing, assuming enough
 * GDT blocks are reserved to grow to the desired size.
 */
int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		      ext4_fsblk_t n_blocks_count)
{
	ext4_fsblk_t o_blocks_count;
	ext4_grpblk_t last;
	ext4_grpblk_t add;
	struct buffer_head *bh;
	int err;
	ext4_group_t group;

	o_blocks_count = ext4_blocks_count(es);

	if (test_opt(sb, DEBUG))
		ext4_msg(sb, KERN_DEBUG,
			 "extending last group from %llu to %llu blocks",
			 o_blocks_count, n_blocks_count);

	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
		return 0;

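	/*
	 * The block layer addresses 512-byte sectors through sector_t,
	 * so refuse any size whose sector count would overflow it.
	 */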
	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
		ext4_msg(sb, KERN_ERR,
			 "filesystem too large to resize to %llu blocks safely",
			 n_blocks_count);
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	/* Handle the remaining blocks in the last group only. */
	ext4_get_group_no_and_offset(sb, o_blocks_count, &group, &last);

	if (last == 0) {
		ext4_warning(sb, "need to use ext2online to resize further");
		return -EPERM;
	}

	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
		ext4_warning(sb, "blocks_count overflow");
		return -EINVAL;
	}

	if (o_blocks_count + add > n_blocks_count)
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
		ext4_warning(sb, "will only finish group (%llu blocks, %u new)",
			     o_blocks_count + add, add);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, o_blocks_count + add - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	err = ext4_group_extend_no_check(sb, o_blocks_count, add);
	return err;
} /* ext4_group_extend */
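
/*
 * Illustrative userspace sketch (hypothetical values, not part of this
 * file): ext4_group_extend() backs the EXT4_IOC_GROUP_EXTEND ioctl,
 * whose argument is the desired total block count. The argument width
 * shown here is an assumption; check the uapi headers for your kernel:
 *
 *	__u32 n_blocks = current_blocks + blocks_to_add;
 *	ioctl(fd, EXT4_IOC_GROUP_EXTEND, &n_blocks);
 */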

static int num_desc_blocks(struct super_block *sb, ext4_group_t groups)
{
	return (groups + EXT4_DESC_PER_BLOCK(sb) - 1) / EXT4_DESC_PER_BLOCK(sb);
}
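
/*
 * Worked example (illustrative): with 4 KiB blocks and 64-byte group
 * descriptors (the 64bit layout), EXT4_DESC_PER_BLOCK(sb) is 64, so
 * 100 groups need DIV_ROUND_UP(100, 64) = 2 descriptor blocks.
 */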

/*
 * Release the resize inode and drop the resize_inode feature if there
 * are no more reserved GDT blocks, and then convert the filesystem
 * to enable meta_bg.
 */
static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
{
	handle_t *handle;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t nr;
	int i, ret, err = 0;
	int credits = 1;

	ext4_msg(sb, KERN_INFO, "Converting file system to meta_bg");
	if (inode) {
		if (es->s_reserved_gdt_blocks) {
			ext4_error(sb, "Unexpected non-zero "
				   "s_reserved_gdt_blocks");
			return -EPERM;
		}

		/* Do a quick sanity check of the resize inode */
		if (inode->i_blocks != 1 << (inode->i_blkbits -
					     (9 - sbi->s_cluster_bits)))
			goto invalid_resize_inode;
		for (i = 0; i < EXT4_N_BLOCKS; i++) {
			if (i == EXT4_DIND_BLOCK) {
				if (ei->i_data[i])
					continue;
				else
					goto invalid_resize_inode;
			}
			if (ei->i_data[i])
				goto invalid_resize_inode;
		}
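		/*
		 * The checks above require the resize inode to own
		 * exactly one cluster, referenced only through its
		 * double-indirect slot (the block that carried the
		 * reserved GDT blocks); anything else is corruption.
		 */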
		credits += 3;	/* block bitmap, bg descriptor, resize inode */
	}

	handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	BUFFER_TRACE(sbi->s_sbh, "get_write_access");
	err = ext4_journal_get_write_access(handle, sbi->s_sbh);
	if (err)
		goto errout;

	lock_buffer(sbi->s_sbh);
	ext4_clear_feature_resize_inode(sb);
	ext4_set_feature_meta_bg(sb);
	sbi->s_es->s_first_meta_bg =
		cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
	ext4_superblock_csum_set(sb);
	unlock_buffer(sbi->s_sbh);

	err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
	if (err) {
		ext4_std_error(sb, err);
		goto errout;
	}

	if (inode) {
		nr = le32_to_cpu(ei->i_data[EXT4_DIND_BLOCK]);
		ext4_free_blocks(handle, inode, NULL, nr, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
		ei->i_data[EXT4_DIND_BLOCK] = 0;
		inode->i_blocks = 0;

		err = ext4_mark_inode_dirty(handle, inode);
		if (err)
			ext4_std_error(sb, err);
	}

errout:
	ret = ext4_journal_stop(handle);
	return err ? err : ret;

invalid_resize_inode:
	ext4_error(sb, "corrupted/inconsistent resize inode");
	return -EINVAL;
}
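
/*
 * After this conversion, group descriptors beyond s_first_meta_bg are
 * laid out per meta block group: each meta_bg keeps its single
 * descriptor block in its first group, with backups in the second and
 * last groups, so the filesystem can keep growing without any reserved
 * GDT blocks.
 */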

/*
 * ext4_resize_fs() resizes a fs to new size specified by @n_blocks_count
 *
 * @sb: super block of the fs to be resized
 * @n_blocks_count: the number of blocks the resized fs should contain
 */
int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
{
	struct ext4_new_flex_group_data *flex_gd = NULL;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	struct buffer_head *bh;
	struct inode *resize_inode = NULL;
	ext4_grpblk_t add, offset;
	unsigned long n_desc_blocks;
	unsigned long o_desc_blocks;
	ext4_group_t o_group;
	ext4_group_t n_group;
	ext4_fsblk_t o_blocks_count;
	ext4_fsblk_t n_blocks_count_retry = 0;
	unsigned long last_update_time = 0;
	int err = 0;
	int meta_bg;
	unsigned int flexbg_size = ext4_flex_bg_size(sbi);

	/* See if the device is actually as big as what was requested */
	bh = ext4_sb_bread(sb, n_blocks_count - 1, 0);
	if (IS_ERR(bh)) {
		ext4_warning(sb, "can't read last block, resize aborted");
		return -ENOSPC;
	}
	brelse(bh);

	/*
	 * For bigalloc, trim the requested size to the nearest cluster
	 * boundary to avoid creating an unusable filesystem. We do this
	 * silently, instead of returning an error, to avoid breaking
	 * callers that blindly resize the filesystem to the full size of
	 * the underlying block device.
	 */
	if (ext4_has_feature_bigalloc(sb))
		n_blocks_count &= ~((1 << EXT4_CLUSTER_BITS(sb)) - 1);
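
	/*
	 * Illustrative example of the trim above: with 16-block clusters
	 * (EXT4_CLUSTER_BITS(sb) == 4), a request for 1000003 blocks is
	 * silently trimmed to 1000000, the largest cluster-aligned size.
	 */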

retry:
	o_blocks_count = ext4_blocks_count(es);

	ext4_msg(sb, KERN_INFO, "resizing filesystem from %llu "
		 "to %llu blocks", o_blocks_count, n_blocks_count);

	if (n_blocks_count < o_blocks_count) {
		/* On-line shrinking not supported */
		ext4_warning(sb, "can't shrink FS - resize aborted");
		return -EINVAL;
	}

	if (n_blocks_count == o_blocks_count)
		/* Nothing to do */
		return 0;

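	/* s_inodes_count is a 32-bit field, so cap the group count so
	 * that (n_group + 1) * EXT4_INODES_PER_GROUP(sb) still fits.
	 */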
	n_group = ext4_get_group_number(sb, n_blocks_count - 1);
	if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
		ext4_warning(sb, "resize would cause inodes_count overflow");
		return -EINVAL;
	}
	ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

	n_desc_blocks = num_desc_blocks(sb, n_group + 1);
	o_desc_blocks = num_desc_blocks(sb, sbi->s_groups_count);

	meta_bg = ext4_has_feature_meta_bg(sb);

	if (ext4_has_feature_resize_inode(sb)) {
		if (meta_bg) {
			ext4_error(sb, "resize_inode and meta_bg enabled "
				   "simultaneously");
			return -EINVAL;
		}
		if (n_desc_blocks > o_desc_blocks +
		    le16_to_cpu(es->s_reserved_gdt_blocks)) {
			n_blocks_count_retry = n_blocks_count;
			n_desc_blocks = o_desc_blocks +
				le16_to_cpu(es->s_reserved_gdt_blocks);
			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
			n_blocks_count = (ext4_fsblk_t)n_group *
				EXT4_BLOCKS_PER_GROUP(sb) +
				le32_to_cpu(es->s_first_data_block);
			n_group--; /* set to last group number */
		}

		if (!resize_inode)
			resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
						 EXT4_IGET_SPECIAL);
		if (IS_ERR(resize_inode)) {
			ext4_warning(sb, "Error opening resize inode");
			return PTR_ERR(resize_inode);
		}
	}

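	/*
	 * Convert to meta_bg if there is no growth mechanism left (no
	 * resize inode and no meta_bg), or if the clamping above left
	 * nothing to grow because the reserved GDT blocks are already
	 * exhausted; the saved n_blocks_count_retry target then restarts
	 * the resize using meta_bg-style descriptors.
	 */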
	if ((!resize_inode && !meta_bg) || n_blocks_count == o_blocks_count) {
		err = ext4_convert_meta_bg(sb, resize_inode);
		if (err)
			goto out;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		if (n_blocks_count_retry) {
			n_blocks_count = n_blocks_count_retry;
			n_blocks_count_retry = 0;
			goto retry;
		}
	}

	/*
	 * Make sure the last group has enough space so that it's
	 * guaranteed to have enough space for all metadata blocks
	 * that it might need to hold. (We might not need to store
	 * the inode table blocks in the last block group, but there
	 * will be cases where this might be needed.)
	 */
	if ((ext4_group_first_block_no(sb, n_group) +
	     ext4_group_overhead_blocks(sb, n_group) + 2 +
	     sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
		n_blocks_count = ext4_group_first_block_no(sb, n_group);
		n_group--;
		n_blocks_count_retry = 0;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

	/* extend the last group */
	if (n_group == o_group)
		add = n_blocks_count - o_blocks_count;
	else
		add = EXT4_C2B(sbi, EXT4_CLUSTERS_PER_GROUP(sb) - (offset + 1));
	if (add > 0) {
		err = ext4_group_extend_no_check(sb, o_blocks_count, add);
		if (err)
			goto out;
	}

	if (ext4_blocks_count(es) == n_blocks_count && n_blocks_count_retry == 0)
		goto out;

	err = ext4_alloc_flex_bg_array(sb, n_group + 1);
	if (err)
		goto out;

	err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
	if (err)
		goto out;

	flex_gd = alloc_flex_gd(flexbg_size);
	if (flex_gd == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Add flex groups. Note that a regular block group is just a
	 * flex group containing a single group.
	 */
	while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count)) {
		if (jiffies - last_update_time > HZ * 10) {
			if (last_update_time)
				ext4_msg(sb, KERN_INFO,
					 "resized to %llu blocks",
					 ext4_blocks_count(es));
			last_update_time = jiffies;
		}
		if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
			break;
		err = ext4_flex_group_add(sb, resize_inode, flex_gd);
		if (unlikely(err))
			break;
	}

	if (!err && n_blocks_count_retry) {
		n_blocks_count = n_blocks_count_retry;
		n_blocks_count_retry = 0;
		free_flex_gd(flex_gd);
		flex_gd = NULL;
		if (resize_inode) {
			iput(resize_inode);
			resize_inode = NULL;
		}
		goto retry;
	}

out:
	if (flex_gd)
		free_flex_gd(flex_gd);
	if (resize_inode != NULL)
		iput(resize_inode);
	if (err)
		ext4_warning(sb, "error (%d) occurred during "
			     "file system resize", err);
	ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
		 ext4_blocks_count(es));
	return err;
}
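
/*
 * Illustrative userspace sketch (hypothetical values, not part of this
 * file): ext4_resize_fs() backs the EXT4_IOC_RESIZE_FS ioctl, which is
 * how a modern resize2fs performs an online grow in a single call:
 *
 *	__u64 n_blocks = new_total_blocks;
 *	ioctl(fd, EXT4_IOC_RESIZE_FS, &n_blocks);
 */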