1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
4  * Written by Alex Tomas <alex@clusterfs.com>
5  */
6 
7 
8 /*
9  * mballoc.c contains the multiblocks allocation routines
10  */
11 
12 #include "ext4_jbd2.h"
13 #include "mballoc.h"
14 #include <linux/log2.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/nospec.h>
18 #include <linux/backing-dev.h>
19 #include <linux/freezer.h>
20 #include <trace/events/ext4.h>
21 
22 /*
23  * MUSTDO:
24  *   - test ext4_ext_search_left() and ext4_ext_search_right()
25  *   - search for metadata in few groups
26  *
27  * TODO v4:
28  *   - normalization should take into account whether file is still open
29  *   - discard preallocations if no free space left (policy?)
30  *   - don't normalize tails
31  *   - quota
32  *   - reservation for superuser
33  *
34  * TODO v3:
35  *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
36  *   - track min/max extents in each group for better group selection
37  *   - mb_mark_used() may allocate chunk right after splitting buddy
38  *   - tree of groups sorted by number of free blocks
39  *   - error handling
40  */
41 
42 /*
43  * An allocation request asks for multiple blocks near the specified
44  * goal block.
45  *
46  * During initialization phase of the allocator we decide to use the
47  * group preallocation or inode preallocation depending on the size of
48  * the file. The size of the file could be the resulting file size we
49  * would have after allocation, or the current file size, whichever
50  * is larger. If the size is less than sbi->s_mb_stream_request we
51  * select to use the group preallocation. The default value of
52  * s_mb_stream_request is 16 blocks. This can also be tuned via
53  * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
54  * terms of number of blocks.
55  *
56  * The main motivation for having small file use group preallocation is to
57  * ensure that we have small files closer together on the disk.
58  *
59  * In the first stage, the allocator looks at the inode prealloc list,
60  * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
61  * spaces for this particular inode. The inode prealloc space is
62  * represented as:
63  *
64  * pa_lstart -> the logical start block for this prealloc space
65  * pa_pstart -> the physical start block for this prealloc space
66  * pa_len    -> length for this prealloc space (in clusters)
67  * pa_free   ->  free space available in this prealloc space (in clusters)
68  *
69  * The inode preallocation space is used based on the _logical_ start
70  * block. Only if the logical file block falls within the range of a prealloc
71  * space do we consume that particular prealloc space. This makes sure that
72  * we have contiguous physical blocks representing the file blocks.
73  *
74  * The important thing to be noted in case of inode prealloc space is that
75  * we don't modify the values associated with the inode prealloc space except
76  * pa_free.
77  *
78  * If we are not able to find blocks in the inode prealloc space and if we
79  * have the group allocation flag set then we look at the locality group
80  * prealloc space. These are per-CPU prealloc lists, represented as
81  *
82  * ext4_sb_info.s_locality_groups[smp_processor_id()]
83  *
84  * The reason for having a per cpu locality group is to reduce the contention
85  * between CPUs. It is possible to get scheduled at this point.
86  *
87  * The locality group prealloc space is used looking at whether we have
88  * enough free space (pa_free) within the prealloc space.
89  *
90  * If we can't allocate blocks via inode prealloc or/and locality group
91  * prealloc then we look at the buddy cache. The buddy cache is represented
92  * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
93  * mapped to the buddy and bitmap information regarding different
94  * groups. The buddy information is attached to buddy cache inode so that
95  * we can access them through the page cache. The information regarding
96  * each group is loaded via ext4_mb_load_buddy.  The information involves
97  * the block bitmap and buddy information, and is stored in the
98  * inode as:
99  *
100  *  {                        page                        }
101  *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
102  *
103  *
104  * one block each for bitmap and buddy information.  So for each group we
105  * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
106  * blocksize) blocks.  So it can have information regarding groups_per_page
107  * which is blocks_per_page/2
108  *
109  * The buddy cache inode is not stored on disk. The inode is thrown
110  * away when the filesystem is unmounted.
111  *
112  * We look for the requested number of blocks in the buddy cache. If we were
113  * able to locate that many free blocks we return with additional information
114  * regarding the rest of the contiguous physical blocks available.
115  *
116  * Before allocating blocks via buddy cache we normalize the request
117  * blocks. This ensures we ask for more blocks than we need. The extra
118  * blocks that we get after allocation are added to the respective prealloc
119  * list. In case of inode preallocation we follow a list of heuristics
120  * based on file size. This can be found in ext4_mb_normalize_request. If
121  * we are doing a group prealloc we try to normalize the request to
122  * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
123  * dependent on the cluster size; for non-bigalloc file systems, it is
124  * 512 blocks. This can be tuned via
125  * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
126  * terms of number of blocks. If we have mounted the file system with -O
127  * stripe=<value> option the group prealloc request is normalized to the
128  * smallest multiple of the stripe value (sbi->s_stripe) which is
129  * greater than the default mb_group_prealloc.
130  *
131  * The regular allocator (using the buddy cache) supports a few tunables.
132  *
133  * /sys/fs/ext4/<partition>/mb_min_to_scan
134  * /sys/fs/ext4/<partition>/mb_max_to_scan
135  * /sys/fs/ext4/<partition>/mb_order2_req
136  *
137  * The regular allocator uses buddy scan only if the request len is a power of
138  * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
139  * value of s_mb_order2_reqs can be tuned via
140  * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
141  * stripe size (sbi->s_stripe), we try to search for contiguous block in
142  * stripe size. This should result in better allocation on RAID setups. If
143  * not, we search in the specific group using bitmap for best extents. The
144  * tunable min_to_scan and max_to_scan control the behaviour here.
145  * min_to_scan indicates how long mballoc __must__ look for a best
146  * extent and max_to_scan indicates how long mballoc __can__ look for a
147  * best extent in the found extents. Searching for the blocks starts with
148  * the group specified as the goal value in allocation context via
149  * ac_g_ex. Each group is first checked based on the criteria whether it
150  * can be used for allocation. ext4_mb_good_group explains how the groups are
151  * checked.
152  *
153  * Both prealloc spaces are populated as described above. So for the first
154  * request we will hit the buddy cache which will result in this prealloc
155  * space getting filled. The prealloc space is then later used for the
156  * subsequent request.
157  */
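
/*
 * A rough sketch of the group-vs-inode preallocation decision described
 * above (simplified; the real logic lives in ext4_mb_group_or_file() and
 * also honours other allocation hints):
 *
 *	size = max(size we'd have after allocation, current file size);
 *	if (size < sbi->s_mb_stream_request)	// default: 16 blocks
 *		use the per-cpu locality group preallocation;
 *	else
 *		use the per-inode preallocation (stream allocation);
 *
 * so small files share a locality-group PA and end up close together on
 * disk, while larger files get their own inode PAs.
 */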
158 
159 /*
160  * mballoc operates on the following data:
161  *  - on-disk bitmap
162  *  - in-core buddy (actually includes buddy and bitmap)
163  *  - preallocation descriptors (PAs)
164  *
165  * there are two types of preallocations:
166  *  - inode
167  *    assigned to a specific inode and can be used for this inode only.
168  *    it describes part of the inode's space preallocated to specific
169  *    physical blocks. any block from that preallocation can be used
170  *    independently. the descriptor just tracks the number of blocks left
171  *    unused. so, before taking some block from descriptor, one must
172  *    make sure corresponded logical block isn't allocated yet. this
173  *    also means that freeing any block within descriptor's range
174  *    must discard all preallocated blocks.
175  *  - locality group
176  *    assigned to specific locality group which does not translate to
177  *    permanent set of inodes: inode can join and leave group. space
178  *    from this type of preallocation can be used for any inode. thus
179  *    it's consumed from the beginning to the end.
180  *
181  * relation between them can be expressed as:
182  *    in-core buddy = on-disk bitmap + preallocation descriptors
183  *
184  * this means blocks mballoc considers used are:
185  *  - allocated blocks (persistent)
186  *  - preallocated blocks (non-persistent)
187  *
188  * consistency in mballoc world means that at any time a block is either
189  * free or used in ALL structures. notice: "any time" should not be read
190  * literally -- time is discrete and delimited by locks.
191  *
192  *  to keep it simple, we don't use block numbers, instead we count number of
193  *  blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
194  *
195  * all operations can be expressed as:
196  *  - init buddy:			buddy = on-disk + PAs
197  *  - new PA:				buddy += N; PA = N
198  *  - use inode PA:			on-disk += N; PA -= N
199  *  - discard inode PA			buddy -= on-disk - PA; PA = 0
200  *  - use locality group PA		on-disk += N; PA -= N
201  *  - discard locality group PA		buddy -= PA; PA = 0
202  *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
203  *        is used in real operation because we can't know actual used
204  *        bits from PA, only from on-disk bitmap
205  *
206  * if we follow this strict logic, then all operations above should be atomic.
207  * given some of them can block, we'd have to use something like semaphores
208  * killing performance on high-end SMP hardware. let's try to relax it using
209  * the following knowledge:
210  *  1) if buddy is referenced, it's already initialized
211  *  2) while block is used in buddy and the buddy is referenced,
212  *     nobody can re-allocate that block
213  *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
214  *     bit set and PA claims same block, it's OK. IOW, one can set bit in
215  *     on-disk bitmap if buddy has the same bit set and/or PA covers the corresponding
216  *     block
217  *
218  * so, now we're building a concurrency table:
219  *  - init buddy vs.
220  *    - new PA
221  *      blocks for PA are allocated in the buddy, buddy must be referenced
222  *      until PA is linked to allocation group to avoid concurrent buddy init
223  *    - use inode PA
224  *      we need to make sure that either on-disk bitmap or PA has uptodate data
225  *      given (3) we care that PA-=N operation doesn't interfere with init
226  *    - discard inode PA
227  *      the simplest way would be to have buddy initialized by the discard
228  *    - use locality group PA
229  *      again PA-=N must be serialized with init
230  *    - discard locality group PA
231  *      the simplest way would be to have buddy initialized by the discard
232  *  - new PA vs.
233  *    - use inode PA
234  *      i_data_sem serializes them
235  *    - discard inode PA
236  *      discard process must wait until PA isn't used by another process
237  *    - use locality group PA
238  *      some mutex should serialize them
239  *    - discard locality group PA
240  *      discard process must wait until PA isn't used by another process
241  *  - use inode PA
242  *    - use inode PA
243  *      i_data_sem or another mutex should serialize them
244  *    - discard inode PA
245  *      discard process must wait until PA isn't used by another process
246  *    - use locality group PA
247  *      nothing wrong here -- they're different PAs covering different blocks
248  *    - discard locality group PA
249  *      discard process must wait until PA isn't used by another process
250  *
251  * now we're ready to draw a few conclusions:
252  *  - while a PA is referenced, no discard of it is possible
253  *  - a PA stays referenced until its blocks are marked in the on-disk bitmap
254  *  - PA changes only after on-disk bitmap
255  *  - discard must not compete with init. either init is done before
256  *    any discard or they're serialized somehow
257  *  - buddy init as sum of on-disk bitmap and PAs is done atomically
258  *
259  * a special case is when we've used a PA down to empty. no need to modify the
260  * buddy in this case, but we should care about concurrent init
261  *
262  */
263 
264  /*
265  * Logic in few words:
266  *
267  *  - allocation:
268  *    load group
269  *    find blocks
270  *    mark bits in on-disk bitmap
271  *    release group
272  *
273  *  - use preallocation:
274  *    find proper PA (per-inode or group)
275  *    load group
276  *    mark bits in on-disk bitmap
277  *    release group
278  *    release PA
279  *
280  *  - free:
281  *    load group
282  *    mark bits in on-disk bitmap
283  *    release group
284  *
285  *  - discard preallocations in group:
286  *    mark PAs deleted
287  *    move them onto local list
288  *    load on-disk bitmap
289  *    load group
290  *    remove PA from object (inode or locality group)
291  *    mark free blocks in-core
292  *
293  *  - discard inode's preallocations:
294  */
295 
296 /*
297  * Locking rules
298  *
299  * Locks:
300  *  - bitlock on a group	(group)
301  *  - object (inode/locality)	(object)
302  *  - per-pa lock		(pa)
303  *
304  * Paths:
305  *  - new pa
306  *    object
307  *    group
308  *
309  *  - find and use pa:
310  *    pa
311  *
312  *  - release consumed pa:
313  *    pa
314  *    group
315  *    object
316  *
317  *  - generate in-core bitmap:
318  *    group
319  *        pa
320  *
321  *  - discard all for given object (inode, locality group):
322  *    object
323  *        pa
324  *    group
325  *
326  *  - discard all for given group:
327  *    group
328  *        pa
329  *    group
330  *        object
331  *
332  */
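
/*
 * The "group" bitlock above is the spinlock returned by
 * ext4_group_lock_ptr(); the pattern used throughout this file for the
 * in-core state is simply
 *
 *	ext4_lock_group(sb, group);
 *	... update in-core bitmap / buddy / bb_counters ...
 *	ext4_unlock_group(sb, group);
 *
 * (see e.g. ext4_mb_init_cache() and mb_free_blocks() below), while the
 * object and pa locks nest around it as listed in the paths above.
 */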
333 static struct kmem_cache *ext4_pspace_cachep;
334 static struct kmem_cache *ext4_ac_cachep;
335 static struct kmem_cache *ext4_free_data_cachep;
336 
337 /* We create slab caches for groupinfo data structures based on the
338  * superblock block size.  There will be one per mounted filesystem for
339  * each unique s_blocksize_bits */
340 #define NR_GRPINFO_CACHES 8
341 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
342 
343 static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
344 	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
345 	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
346 	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
347 };
348 
349 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
350 					ext4_group_t group);
351 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
352 						ext4_group_t group);
353 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
354 
355 /*
356  * The algorithm using this percpu seq counter goes below:
357  * 1. We sample the percpu discard_pa_seq counter before trying for block
358  *    allocation in ext4_mb_new_blocks().
359  * 2. We increment this percpu discard_pa_seq counter when we either allocate
360  *    or free these blocks i.e. while marking those blocks as used/free in
361  *    mb_mark_used()/mb_free_blocks().
362  * 3. We also increment this percpu seq counter when we successfully identify
363  *    that the bb_prealloc_list is not empty and hence proceed for discarding
364  *    of those PAs inside ext4_mb_discard_group_preallocations().
365  *
366  * Now, to make sure that the regular fast path of block allocation is not
367  * affected, as a small optimization we only sample the percpu seq counter
368  * on the local cpu. Only when the block allocation fails and no freed blocks
369  * were found do we sample the percpu seq counter for all cpus using the
370  * function ext4_get_discard_pa_seq_sum() below. This happens after making
371  * sure that all the PAs on grp->bb_prealloc_list got freed, or that the list is empty.
372  */
373 static DEFINE_PER_CPU(u64, discard_pa_seq);
374 static inline u64 ext4_get_discard_pa_seq_sum(void)
375 {
376 	int __cpu;
377 	u64 __seq = 0;
378 
379 	for_each_possible_cpu(__cpu)
380 		__seq += per_cpu(discard_pa_seq, __cpu);
381 	return __seq;
382 }
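
/*
 * A simplified sketch of how the counter and the sum above are used
 * (the real flow is in ext4_mb_new_blocks() and its retry logic):
 *
 *	seq = this_cpu_read(discard_pa_seq);	// cheap, local cpu only
 *	... try to allocate ...
 *	if (allocation failed && nothing was freed by discarding PAs)
 *		retry only if ext4_get_discard_pa_seq_sum() != seq,
 *		i.e. some cpu allocated/freed/discarded in the meantime
 *		and the free-space picture may have changed.
 */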
383 
384 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
385 {
386 #if BITS_PER_LONG == 64
387 	*bit += ((unsigned long) addr & 7UL) << 3;
388 	addr = (void *) ((unsigned long) addr & ~7UL);
389 #elif BITS_PER_LONG == 32
390 	*bit += ((unsigned long) addr & 3UL) << 3;
391 	addr = (void *) ((unsigned long) addr & ~3UL);
392 #else
393 #error "how many bits you are?!"
394 #endif
395 	return addr;
396 }
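
/*
 * Worked example of the correction above on a 64-bit machine: for
 * addr = p + 5 (with p aligned to 8 bytes) and bit = 3, the low three
 * address bits are folded into the bit number:
 *
 *	bit  = 3 + (5 << 3) = 43;
 *	addr = p;
 *
 * i.e. the same bit, now addressed from an unsigned-long-aligned base as
 * ext4_test_bit() and friends require.
 */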
397 
398 static inline int mb_test_bit(int bit, void *addr)
399 {
400 	/*
401 	 * ext4_test_bit on architectures like powerpc
402 	 * needs an unsigned long aligned address
403 	 */
404 	addr = mb_correct_addr_and_bit(&bit, addr);
405 	return ext4_test_bit(bit, addr);
406 }
407 
408 static inline void mb_set_bit(int bit, void *addr)
409 {
410 	addr = mb_correct_addr_and_bit(&bit, addr);
411 	ext4_set_bit(bit, addr);
412 }
413 
414 static inline void mb_clear_bit(int bit, void *addr)
415 {
416 	addr = mb_correct_addr_and_bit(&bit, addr);
417 	ext4_clear_bit(bit, addr);
418 }
419 
420 static inline int mb_test_and_clear_bit(int bit, void *addr)
421 {
422 	addr = mb_correct_addr_and_bit(&bit, addr);
423 	return ext4_test_and_clear_bit(bit, addr);
424 }
425 
426 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
427 {
428 	int fix = 0, ret, tmpmax;
429 	addr = mb_correct_addr_and_bit(&fix, addr);
430 	tmpmax = max + fix;
431 	start += fix;
432 
433 	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
434 	if (ret > max)
435 		return max;
436 	return ret;
437 }
438 
439 static inline int mb_find_next_bit(void *addr, int max, int start)
440 {
441 	int fix = 0, ret, tmpmax;
442 	addr = mb_correct_addr_and_bit(&fix, addr);
443 	tmpmax = max + fix;
444 	start += fix;
445 
446 	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
447 	if (ret > max)
448 		return max;
449 	return ret;
450 }
451 
452 static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
453 {
454 	char *bb;
455 
456 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
457 	BUG_ON(max == NULL);
458 
459 	if (order > e4b->bd_blkbits + 1) {
460 		*max = 0;
461 		return NULL;
462 	}
463 
464 	/* at order 0 we see each particular block */
465 	if (order == 0) {
466 		*max = 1 << (e4b->bd_blkbits + 3);
467 		return e4b->bd_bitmap;
468 	}
469 
470 	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
471 	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
472 
473 	return bb;
474 }
475 
476 #ifdef DOUBLE_CHECK
477 static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
478 			   int first, int count)
479 {
480 	int i;
481 	struct super_block *sb = e4b->bd_sb;
482 
483 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
484 		return;
485 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
486 	for (i = 0; i < count; i++) {
487 		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
488 			ext4_fsblk_t blocknr;
489 
490 			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
491 			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
492 			ext4_grp_locked_error(sb, e4b->bd_group,
493 					      inode ? inode->i_ino : 0,
494 					      blocknr,
495 					      "freeing block already freed "
496 					      "(bit %u)",
497 					      first + i);
498 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
499 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
500 		}
501 		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
502 	}
503 }
504 
505 static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
506 {
507 	int i;
508 
509 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
510 		return;
511 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
512 	for (i = 0; i < count; i++) {
513 		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
514 		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
515 	}
516 }
517 
518 static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
519 {
520 	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
521 		return;
522 	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
523 		unsigned char *b1, *b2;
524 		int i;
525 		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
526 		b2 = (unsigned char *) bitmap;
527 		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
528 			if (b1[i] != b2[i]) {
529 				ext4_msg(e4b->bd_sb, KERN_ERR,
530 					 "corruption in group %u "
531 					 "at byte %u(%u): %x in copy != %x "
532 					 "on disk/prealloc",
533 					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
534 				BUG();
535 			}
536 		}
537 	}
538 }
539 
540 static void mb_group_bb_bitmap_alloc(struct super_block *sb,
541 			struct ext4_group_info *grp, ext4_group_t group)
542 {
543 	struct buffer_head *bh;
544 
545 	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
546 	if (!grp->bb_bitmap)
547 		return;
548 
549 	bh = ext4_read_block_bitmap(sb, group);
550 	if (IS_ERR_OR_NULL(bh)) {
551 		kfree(grp->bb_bitmap);
552 		grp->bb_bitmap = NULL;
553 		return;
554 	}
555 
556 	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
557 	put_bh(bh);
558 }
559 
560 static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
561 {
562 	kfree(grp->bb_bitmap);
563 }
564 
565 #else
566 static inline void mb_free_blocks_double(struct inode *inode,
567 				struct ext4_buddy *e4b, int first, int count)
568 {
569 	return;
570 }
571 static inline void mb_mark_used_double(struct ext4_buddy *e4b,
572 						int first, int count)
573 {
574 	return;
575 }
576 static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
577 {
578 	return;
579 }
580 
581 static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
582 			struct ext4_group_info *grp, ext4_group_t group)
583 {
584 	return;
585 }
586 
587 static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
588 {
589 	return;
590 }
591 #endif
592 
593 #ifdef AGGRESSIVE_CHECK
594 
595 #define MB_CHECK_ASSERT(assert)						\
596 do {									\
597 	if (!(assert)) {						\
598 		printk(KERN_EMERG					\
599 			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
600 			function, file, line, # assert);		\
601 		BUG();							\
602 	}								\
603 } while (0)
604 
605 static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
606 				const char *function, int line)
607 {
608 	struct super_block *sb = e4b->bd_sb;
609 	int order = e4b->bd_blkbits + 1;
610 	int max;
611 	int max2;
612 	int i;
613 	int j;
614 	int k;
615 	int count;
616 	struct ext4_group_info *grp;
617 	int fragments = 0;
618 	int fstart;
619 	struct list_head *cur;
620 	void *buddy;
621 	void *buddy2;
622 
623 	if (e4b->bd_info->bb_check_counter++ % 10)
624 		return 0;
625 
626 	while (order > 1) {
627 		buddy = mb_find_buddy(e4b, order, &max);
628 		MB_CHECK_ASSERT(buddy);
629 		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
630 		MB_CHECK_ASSERT(buddy2);
631 		MB_CHECK_ASSERT(buddy != buddy2);
632 		MB_CHECK_ASSERT(max * 2 == max2);
633 
634 		count = 0;
635 		for (i = 0; i < max; i++) {
636 
637 			if (mb_test_bit(i, buddy)) {
638 				/* only single bit in buddy2 may be 1 */
639 				if (!mb_test_bit(i << 1, buddy2)) {
640 					MB_CHECK_ASSERT(
641 						mb_test_bit((i<<1)+1, buddy2));
642 				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
643 					MB_CHECK_ASSERT(
644 						mb_test_bit(i << 1, buddy2));
645 				}
646 				continue;
647 			}
648 
649 			/* both bits in buddy2 must be 1 */
650 			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
651 			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
652 
653 			for (j = 0; j < (1 << order); j++) {
654 				k = (i * (1 << order)) + j;
655 				MB_CHECK_ASSERT(
656 					!mb_test_bit(k, e4b->bd_bitmap));
657 			}
658 			count++;
659 		}
660 		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
661 		order--;
662 	}
663 
664 	fstart = -1;
665 	buddy = mb_find_buddy(e4b, 0, &max);
666 	for (i = 0; i < max; i++) {
667 		if (!mb_test_bit(i, buddy)) {
668 			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
669 			if (fstart == -1) {
670 				fragments++;
671 				fstart = i;
672 			}
673 			continue;
674 		}
675 		fstart = -1;
676 		/* check used bits only */
677 		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
678 			buddy2 = mb_find_buddy(e4b, j, &max2);
679 			k = i >> j;
680 			MB_CHECK_ASSERT(k < max2);
681 			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
682 		}
683 	}
684 	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
685 	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
686 
687 	grp = ext4_get_group_info(sb, e4b->bd_group);
688 	if (!grp)
689 		return 0;
690 	list_for_each(cur, &grp->bb_prealloc_list) {
691 		ext4_group_t groupnr;
692 		struct ext4_prealloc_space *pa;
693 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
694 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
695 		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
696 		for (i = 0; i < pa->pa_len; i++)
697 			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
698 	}
699 	return 0;
700 }
701 #undef MB_CHECK_ASSERT
702 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
703 					__FILE__, __func__, __LINE__)
704 #else
705 #define mb_check_buddy(e4b)
706 #endif
707 
708 /*
709  * Divide blocks started from @first with length @len into
710  * smaller chunks with power of 2 blocks.
711  * Clear the bits in bitmap which the blocks of the chunk(s) covered,
712  * then increase bb_counters[] for the corresponding chunk size.
713  */
714 static void ext4_mb_mark_free_simple(struct super_block *sb,
715 				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
716 					struct ext4_group_info *grp)
717 {
718 	struct ext4_sb_info *sbi = EXT4_SB(sb);
719 	ext4_grpblk_t min;
720 	ext4_grpblk_t max;
721 	ext4_grpblk_t chunk;
722 	unsigned int border;
723 
724 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
725 
726 	border = 2 << sb->s_blocksize_bits;
727 
728 	while (len > 0) {
729 		/* find how many blocks can be covered since this position */
730 		max = ffs(first | border) - 1;
731 
732 		/* find how many blocks of power 2 we need to mark */
733 		min = fls(len) - 1;
734 
735 		if (max < min)
736 			min = max;
737 		chunk = 1 << min;
738 
739 		/* mark multiblock chunks only */
740 		grp->bb_counters[min]++;
741 		if (min > 0)
742 			mb_clear_bit(first >> min,
743 				     buddy + sbi->s_mb_offsets[min]);
744 
745 		len -= chunk;
746 		first += chunk;
747 	}
748 }
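
/*
 * Worked example for ext4_mb_mark_free_simple() above (numbers are
 * illustrative): a free run starting at cluster 6 with len 6 splits as
 *
 *	first = 6, len = 6: max = ffs(6) - 1 = 1, min = fls(6) - 1 = 2,
 *			    min is capped to 1 -> order-1 chunk {6,7}
 *	first = 8, len = 4: max = 3, min = 2    -> order-2 chunk {8..11}
 *
 * i.e. each chunk is the largest power-of-two block that is aligned at
 * its size and still fits in the remaining length; bb_counters[1] and
 * bb_counters[2] each gain one, and the matching buddy bits are cleared.
 */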
749 
750 /*
751  * Cache the order of the largest free extent we have available in this block
752  * group.
753  */
754 static void
755 mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
756 {
757 	int i;
758 	int bits;
759 
760 	grp->bb_largest_free_order = -1; /* uninit */
761 
762 	bits = sb->s_blocksize_bits + 1;
763 	for (i = bits; i >= 0; i--) {
764 		if (grp->bb_counters[i] > 0) {
765 			grp->bb_largest_free_order = i;
766 			break;
767 		}
768 	}
769 }
770 
771 static noinline_for_stack
772 void ext4_mb_generate_buddy(struct super_block *sb,
773 			    void *buddy, void *bitmap, ext4_group_t group,
774 			    struct ext4_group_info *grp)
775 {
776 	struct ext4_sb_info *sbi = EXT4_SB(sb);
777 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
778 	ext4_grpblk_t i = 0;
779 	ext4_grpblk_t first;
780 	ext4_grpblk_t len;
781 	unsigned free = 0;
782 	unsigned fragments = 0;
783 	unsigned long long period = get_cycles();
784 
785 	/* initialize buddy from bitmap which is aggregation
786 	 * of on-disk bitmap and preallocations */
787 	i = mb_find_next_zero_bit(bitmap, max, 0);
788 	grp->bb_first_free = i;
789 	while (i < max) {
790 		fragments++;
791 		first = i;
792 		i = mb_find_next_bit(bitmap, max, i);
793 		len = i - first;
794 		free += len;
795 		if (len > 1)
796 			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
797 		else
798 			grp->bb_counters[0]++;
799 		if (i < max)
800 			i = mb_find_next_zero_bit(bitmap, max, i);
801 	}
802 	grp->bb_fragments = fragments;
803 
804 	if (free != grp->bb_free) {
805 		ext4_grp_locked_error(sb, group, 0, 0,
806 				      "block bitmap and bg descriptor "
807 				      "inconsistent: %u vs %u free clusters",
808 				      free, grp->bb_free);
809 		/*
810 		 * If we intend to continue, we consider group descriptor
811 		 * corrupt and update bb_free using bitmap value
812 		 */
813 		grp->bb_free = free;
814 		ext4_mark_group_bitmap_corrupted(sb, group,
815 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
816 	}
817 	mb_set_largest_free_order(sb, grp);
818 
819 	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
820 
821 	period = get_cycles() - period;
822 	atomic_inc(&sbi->s_mb_buddies_generated);
823 	atomic64_add(period, &sbi->s_mb_generation_time);
824 }
825 
826 static void mb_regenerate_buddy(struct ext4_buddy *e4b)
827 {
828 	int count;
829 	int order = 1;
830 	void *buddy;
831 
832 	while ((buddy = mb_find_buddy(e4b, order++, &count)))
833 		ext4_set_bits(buddy, 0, count);
834 
835 	e4b->bd_info->bb_fragments = 0;
836 	memset(e4b->bd_info->bb_counters, 0,
837 		sizeof(*e4b->bd_info->bb_counters) *
838 		(e4b->bd_sb->s_blocksize_bits + 2));
839 
840 	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
841 		e4b->bd_bitmap, e4b->bd_group, e4b->bd_info);
842 }
843 
844 /* The buddy information is attached to the buddy cache inode
845  * for convenience. The information regarding each group
846  * is loaded via ext4_mb_load_buddy. The information involves
847  * the block bitmap and buddy information, and is
848  * stored in the inode as
849  *
850  * {                        page                        }
851  * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
852  *
853  *
854  * one block each for bitmap and buddy information.
855  * So for each group we take up 2 blocks. A page can
856  * contain blocks_per_page (PAGE_SIZE / blocksize)  blocks.
857  * So it can have information regarding groups_per_page which
858  * is blocks_per_page/2
859  *
860  * Locking note:  This routine takes the block group lock of all groups
861  * for this page; do not hold this lock when calling this routine!
862  */
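
/*
 * Illustrative mapping (the same computation is done in
 * ext4_mb_get_buddy_page_lock() and ext4_mb_load_buddy_gfp() below):
 *
 *	int block = 2 * group;			// bitmap block of this group
 *	int pnum  = block / blocks_per_page;	// page in the buddy cache
 *	int poff  = block % blocks_per_page;	// block within that page
 *
 * e.g. with 4k pages and 1k blocks, blocks_per_page = 4, so group 3's
 * bitmap is block 6, i.e. page 1 at offset 2, and its buddy is block 7.
 */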
863 
864 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
865 {
866 	ext4_group_t ngroups;
867 	int blocksize;
868 	int blocks_per_page;
869 	int groups_per_page;
870 	int err = 0;
871 	int i;
872 	ext4_group_t first_group, group;
873 	int first_block;
874 	struct super_block *sb;
875 	struct buffer_head *bhs;
876 	struct buffer_head **bh = NULL;
877 	struct inode *inode;
878 	char *data;
879 	char *bitmap;
880 	struct ext4_group_info *grinfo;
881 
882 	inode = page->mapping->host;
883 	sb = inode->i_sb;
884 	ngroups = ext4_get_groups_count(sb);
885 	blocksize = i_blocksize(inode);
886 	blocks_per_page = PAGE_SIZE / blocksize;
887 
888 	mb_debug(sb, "init page %lu\n", page->index);
889 
890 	groups_per_page = blocks_per_page >> 1;
891 	if (groups_per_page == 0)
892 		groups_per_page = 1;
893 
894 	/* allocate buffer_heads to read bitmaps */
895 	if (groups_per_page > 1) {
896 		i = sizeof(struct buffer_head *) * groups_per_page;
897 		bh = kzalloc(i, gfp);
898 		if (bh == NULL) {
899 			err = -ENOMEM;
900 			goto out;
901 		}
902 	} else
903 		bh = &bhs;
904 
905 	first_group = page->index * blocks_per_page / 2;
906 
907 	/* read all groups the page covers into the cache */
908 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
909 		if (group >= ngroups)
910 			break;
911 
912 		grinfo = ext4_get_group_info(sb, group);
913 		if (!grinfo)
914 			continue;
915 		/*
916 		 * If page is uptodate then we came here after online resize
917 		 * which added some new uninitialized group info structs, so
918 		 * we must skip all initialized uptodate buddies on the page,
919 		 * which may be currently in use by an allocating task.
920 		 */
921 		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
922 			bh[i] = NULL;
923 			continue;
924 		}
925 		bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
926 		if (IS_ERR(bh[i])) {
927 			err = PTR_ERR(bh[i]);
928 			bh[i] = NULL;
929 			goto out;
930 		}
931 		mb_debug(sb, "read bitmap for group %u\n", group);
932 	}
933 
934 	/* wait for I/O completion */
935 	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
936 		int err2;
937 
938 		if (!bh[i])
939 			continue;
940 		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
941 		if (!err)
942 			err = err2;
943 	}
944 
945 	first_block = page->index * blocks_per_page;
946 	for (i = 0; i < blocks_per_page; i++) {
947 		group = (first_block + i) >> 1;
948 		if (group >= ngroups)
949 			break;
950 
951 		if (!bh[group - first_group])
952 			/* skip initialized uptodate buddy */
953 			continue;
954 
955 		if (!buffer_verified(bh[group - first_group]))
956 			/* Skip faulty bitmaps */
957 			continue;
958 		err = 0;
959 
960 		/*
961 		 * data carries information regarding this
962 		 * particular group in the format specified
963 		 * above
964 		 *
965 		 */
966 		data = page_address(page) + (i * blocksize);
967 		bitmap = bh[group - first_group]->b_data;
968 
969 		/*
970 		 * We place the buddy block and bitmap block
971 		 * close together
972 		 */
973 		if ((first_block + i) & 1) {
974 			/* this is block of buddy */
975 			BUG_ON(incore == NULL);
976 			mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
977 				group, page->index, i * blocksize);
978 			trace_ext4_mb_buddy_bitmap_load(sb, group);
979 			grinfo = ext4_get_group_info(sb, group);
980 			if (!grinfo) {
981 				err = -EFSCORRUPTED;
982 				goto out;
983 			}
984 			grinfo->bb_fragments = 0;
985 			memset(grinfo->bb_counters, 0,
986 			       sizeof(*grinfo->bb_counters) *
987 				(sb->s_blocksize_bits+2));
988 			/*
989 			 * incore got set to the group block bitmap below
990 			 */
991 			ext4_lock_group(sb, group);
992 			/* init the buddy */
993 			memset(data, 0xff, blocksize);
994 			ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
995 			ext4_unlock_group(sb, group);
996 			incore = NULL;
997 		} else {
998 			/* this is block of bitmap */
999 			BUG_ON(incore != NULL);
1000 			mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1001 				group, page->index, i * blocksize);
1002 			trace_ext4_mb_bitmap_load(sb, group);
1003 
1004 			/* see comments in ext4_mb_put_pa() */
1005 			ext4_lock_group(sb, group);
1006 			memcpy(data, bitmap, blocksize);
1007 
1008 			/* mark all preallocated blks used in in-core bitmap */
1009 			ext4_mb_generate_from_pa(sb, data, group);
1010 			ext4_mb_generate_from_freelist(sb, data, group);
1011 			ext4_unlock_group(sb, group);
1012 
1013 			/* set incore so that the buddy information can be
1014 			 * generated using this
1015 			 */
1016 			incore = data;
1017 		}
1018 	}
1019 	SetPageUptodate(page);
1020 
1021 out:
1022 	if (bh) {
1023 		for (i = 0; i < groups_per_page; i++)
1024 			brelse(bh[i]);
1025 		if (bh != &bhs)
1026 			kfree(bh);
1027 	}
1028 	return err;
1029 }
1030 
1031 /*
1032  * Lock the buddy and bitmap pages. This makes sure another parallel init_group
1033  * on the same buddy page doesn't happen while we hold the buddy page lock.
1034  * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
1035  * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1036  */
1037 static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1038 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1039 {
1040 	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
1041 	int block, pnum, poff;
1042 	int blocks_per_page;
1043 	struct page *page;
1044 
1045 	e4b->bd_buddy_page = NULL;
1046 	e4b->bd_bitmap_page = NULL;
1047 
1048 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1049 	/*
1050 	 * the buddy cache inode stores the block bitmap
1051 	 * and buddy information in consecutive blocks.
1052 	 * So for each group we need two blocks.
1053 	 */
1054 	block = group * 2;
1055 	pnum = block / blocks_per_page;
1056 	poff = block % blocks_per_page;
1057 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1058 	if (!page)
1059 		return -ENOMEM;
1060 	BUG_ON(page->mapping != inode->i_mapping);
1061 	e4b->bd_bitmap_page = page;
1062 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1063 
1064 	if (blocks_per_page >= 2) {
1065 		/* buddy and bitmap are on the same page */
1066 		return 0;
1067 	}
1068 
1069 	block++;
1070 	pnum = block / blocks_per_page;
1071 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
1072 	if (!page)
1073 		return -ENOMEM;
1074 	BUG_ON(page->mapping != inode->i_mapping);
1075 	e4b->bd_buddy_page = page;
1076 	return 0;
1077 }
1078 
1079 static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
1080 {
1081 	if (e4b->bd_bitmap_page) {
1082 		unlock_page(e4b->bd_bitmap_page);
1083 		put_page(e4b->bd_bitmap_page);
1084 	}
1085 	if (e4b->bd_buddy_page) {
1086 		unlock_page(e4b->bd_buddy_page);
1087 		put_page(e4b->bd_buddy_page);
1088 	}
1089 }
1090 
1091 /*
1092  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1093  * block group lock of all groups for this page; do not hold the BG lock when
1094  * calling this routine!
1095  */
1096 static noinline_for_stack
1097 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1098 {
1099 
1100 	struct ext4_group_info *this_grp;
1101 	struct ext4_buddy e4b;
1102 	struct page *page;
1103 	int ret = 0;
1104 
1105 	might_sleep();
1106 	mb_debug(sb, "init group %u\n", group);
1107 	this_grp = ext4_get_group_info(sb, group);
1108 	if (!this_grp)
1109 		return -EFSCORRUPTED;
1110 
1111 	/*
1112 	 * This ensures that we don't reinit the buddy cache
1113 	 * page which map to the group from which we are already
1114 	 * allocating. If we are looking at the buddy cache we would
1115 	 * have taken a reference using ext4_mb_load_buddy and that
1116 	 * would have pinned buddy page to page cache.
1117 	 * The call to ext4_mb_get_buddy_page_lock will mark the
1118 	 * page accessed.
1119 	 */
1120 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
1121 	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
1122 		/*
1123 		 * somebody initialized the group
1124 		 * return without doing anything
1125 		 */
1126 		goto err;
1127 	}
1128 
1129 	page = e4b.bd_bitmap_page;
1130 	ret = ext4_mb_init_cache(page, NULL, gfp);
1131 	if (ret)
1132 		goto err;
1133 	if (!PageUptodate(page)) {
1134 		ret = -EIO;
1135 		goto err;
1136 	}
1137 
1138 	if (e4b.bd_buddy_page == NULL) {
1139 		/*
1140 		 * If both the bitmap and buddy are in
1141 		 * the same page we don't need to force
1142 		 * init the buddy
1143 		 */
1144 		ret = 0;
1145 		goto err;
1146 	}
1147 	/* init buddy cache */
1148 	page = e4b.bd_buddy_page;
1149 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
1150 	if (ret)
1151 		goto err;
1152 	if (!PageUptodate(page)) {
1153 		ret = -EIO;
1154 		goto err;
1155 	}
1156 err:
1157 	ext4_mb_put_buddy_page_lock(&e4b);
1158 	return ret;
1159 }
1160 
1161 /*
1162  * Locking note:  This routine calls ext4_mb_init_cache(), which takes the
1163  * block group lock of all groups for this page; do not hold the BG lock when
1164  * calling this routine!
1165  */
1166 static noinline_for_stack int
1167 ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1168 		       struct ext4_buddy *e4b, gfp_t gfp)
1169 {
1170 	int blocks_per_page;
1171 	int block;
1172 	int pnum;
1173 	int poff;
1174 	struct page *page;
1175 	int ret;
1176 	struct ext4_group_info *grp;
1177 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1178 	struct inode *inode = sbi->s_buddy_cache;
1179 
1180 	might_sleep();
1181 	mb_debug(sb, "load group %u\n", group);
1182 
1183 	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1184 	grp = ext4_get_group_info(sb, group);
1185 	if (!grp)
1186 		return -EFSCORRUPTED;
1187 
1188 	e4b->bd_blkbits = sb->s_blocksize_bits;
1189 	e4b->bd_info = grp;
1190 	e4b->bd_sb = sb;
1191 	e4b->bd_group = group;
1192 	e4b->bd_buddy_page = NULL;
1193 	e4b->bd_bitmap_page = NULL;
1194 
1195 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1196 		/*
1197 		 * we need full data about the group
1198 		 * to make a good selection
1199 		 */
1200 		ret = ext4_mb_init_group(sb, group, gfp);
1201 		if (ret)
1202 			return ret;
1203 	}
1204 
1205 	/*
1206 	 * the buddy cache inode stores the block bitmap
1207 	 * and buddy information in consecutive blocks.
1208 	 * So for each group we need two blocks.
1209 	 */
1210 	block = group * 2;
1211 	pnum = block / blocks_per_page;
1212 	poff = block % blocks_per_page;
1213 
1214 	/* we could use find_or_create_page(), but it locks the page,
1215 	 * which we'd like to avoid in the fast path ... */
1216 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1217 	if (page == NULL || !PageUptodate(page)) {
1218 		if (page)
1219 			/*
1220 			 * drop the page reference and try
1221 			 * to get the page with lock. If we
1222 			 * are not uptodate that implies
1223 			 * somebody just created the page but
1224 			 * is yet to initialize the same. So
1225 			 * wait for it to initialize.
1226 			 */
1227 			put_page(page);
1228 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1229 		if (page) {
1230 			BUG_ON(page->mapping != inode->i_mapping);
1231 			if (!PageUptodate(page)) {
1232 				ret = ext4_mb_init_cache(page, NULL, gfp);
1233 				if (ret) {
1234 					unlock_page(page);
1235 					goto err;
1236 				}
1237 				mb_cmp_bitmaps(e4b, page_address(page) +
1238 					       (poff * sb->s_blocksize));
1239 			}
1240 			unlock_page(page);
1241 		}
1242 	}
1243 	if (page == NULL) {
1244 		ret = -ENOMEM;
1245 		goto err;
1246 	}
1247 	if (!PageUptodate(page)) {
1248 		ret = -EIO;
1249 		goto err;
1250 	}
1251 
1252 	/* Pages marked accessed already */
1253 	e4b->bd_bitmap_page = page;
1254 	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1255 
1256 	block++;
1257 	pnum = block / blocks_per_page;
1258 	poff = block % blocks_per_page;
1259 
1260 	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1261 	if (page == NULL || !PageUptodate(page)) {
1262 		if (page)
1263 			put_page(page);
1264 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
1265 		if (page) {
1266 			BUG_ON(page->mapping != inode->i_mapping);
1267 			if (!PageUptodate(page)) {
1268 				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1269 							 gfp);
1270 				if (ret) {
1271 					unlock_page(page);
1272 					goto err;
1273 				}
1274 			}
1275 			unlock_page(page);
1276 		}
1277 	}
1278 	if (page == NULL) {
1279 		ret = -ENOMEM;
1280 		goto err;
1281 	}
1282 	if (!PageUptodate(page)) {
1283 		ret = -EIO;
1284 		goto err;
1285 	}
1286 
1287 	/* Pages marked accessed already */
1288 	e4b->bd_buddy_page = page;
1289 	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
1290 
1291 	return 0;
1292 
1293 err:
1294 	if (page)
1295 		put_page(page);
1296 	if (e4b->bd_bitmap_page)
1297 		put_page(e4b->bd_bitmap_page);
1298 	if (e4b->bd_buddy_page)
1299 		put_page(e4b->bd_buddy_page);
1300 	e4b->bd_buddy = NULL;
1301 	e4b->bd_bitmap = NULL;
1302 	return ret;
1303 }
1304 
1305 static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1306 			      struct ext4_buddy *e4b)
1307 {
1308 	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1309 }
1310 
1311 static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
1312 {
1313 	if (e4b->bd_bitmap_page)
1314 		put_page(e4b->bd_bitmap_page);
1315 	if (e4b->bd_buddy_page)
1316 		put_page(e4b->bd_buddy_page);
1317 }
1318 
1319 
1320 static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1321 {
1322 	int order = 1;
1323 	int bb_incr = 1 << (e4b->bd_blkbits - 1);
1324 	void *bb;
1325 
1326 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
1327 	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1328 
1329 	bb = e4b->bd_buddy;
1330 	while (order <= e4b->bd_blkbits + 1) {
1331 		block = block >> 1;
1332 		if (!mb_test_bit(block, bb)) {
1333 			/* this block is part of buddy of order 'order' */
1334 			return order;
1335 		}
1336 		bb += bb_incr;
1337 		bb_incr >>= 1;
1338 		order++;
1339 	}
1340 	return 0;
1341 }
1342 
1343 static void mb_clear_bits(void *bm, int cur, int len)
1344 {
1345 	__u32 *addr;
1346 
1347 	len = cur + len;
1348 	while (cur < len) {
1349 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1350 			/* fast path: clear whole word at once */
1351 			addr = bm + (cur >> 3);
1352 			*addr = 0;
1353 			cur += 32;
1354 			continue;
1355 		}
1356 		mb_clear_bit(cur, bm);
1357 		cur++;
1358 	}
1359 }
1360 
1361 /* clear bits in given range
1362  * will return first found zero bit if any, -1 otherwise
1363  */
1364 static int mb_test_and_clear_bits(void *bm, int cur, int len)
1365 {
1366 	__u32 *addr;
1367 	int zero_bit = -1;
1368 
1369 	len = cur + len;
1370 	while (cur < len) {
1371 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1372 			/* fast path: clear whole word at once */
1373 			addr = bm + (cur >> 3);
1374 			if (*addr != (__u32)(-1) && zero_bit == -1)
1375 				zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1376 			*addr = 0;
1377 			cur += 32;
1378 			continue;
1379 		}
1380 		if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1381 			zero_bit = cur;
1382 		cur++;
1383 	}
1384 
1385 	return zero_bit;
1386 }
1387 
1388 void ext4_set_bits(void *bm, int cur, int len)
1389 {
1390 	__u32 *addr;
1391 
1392 	len = cur + len;
1393 	while (cur < len) {
1394 		if ((cur & 31) == 0 && (len - cur) >= 32) {
1395 			/* fast path: set whole word at once */
1396 			addr = bm + (cur >> 3);
1397 			*addr = 0xffffffff;
1398 			cur += 32;
1399 			continue;
1400 		}
1401 		mb_set_bit(cur, bm);
1402 		cur++;
1403 	}
1404 }
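
/*
 * The three range helpers above share the same pattern: bits are handled
 * one at a time until cur reaches a 32-bit boundary, then whole words are
 * written at once (cur >> 3 is the byte offset, which is 4-byte aligned
 * whenever cur is a multiple of 32).  For example, clearing 70 bits
 * starting at bit 30 touches bits 30-31 individually, then the words at
 * byte offsets 4 and 8, then bits 96-99 individually.
 */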
1405 
1406 static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
1407 {
1408 	if (mb_test_bit(*bit + side, bitmap)) {
1409 		mb_clear_bit(*bit, bitmap);
1410 		(*bit) -= side;
1411 		return 1;
1412 	}
1413 	else {
1414 		(*bit) += side;
1415 		mb_set_bit(*bit, bitmap);
1416 		return -1;
1417 	}
1418 }
1419 
1420 static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1421 {
1422 	int max;
1423 	int order = 1;
1424 	void *buddy = mb_find_buddy(e4b, order, &max);
1425 
1426 	while (buddy) {
1427 		void *buddy2;
1428 
1429 		/* Bits in range [first; last] are known to be set since
1430 		 * corresponding blocks were allocated. Bits in range
1431 		 * (first; last) will stay set because they form buddies on
1432 		 * upper layer. We just deal with borders if they don't
1433 		 * align with upper layer and then go up.
1434 		 * Releasing entire group is all about clearing
1435 		 * single bit of highest order buddy.
1436 		 */
1437 
1438 		/* Example:
1439 		 * ---------------------------------
1440 		 * |   1   |   1   |   1   |   1   |
1441 		 * ---------------------------------
1442 		 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1443 		 * ---------------------------------
1444 		 *   0   1   2   3   4   5   6   7
1445 		 *      \_____________________/
1446 		 *
1447 		 * Neither [1] nor [6] is aligned to above layer.
1448 		 * Left neighbour [0] is free, so mark it busy,
1449 		 * decrease bb_counters and extend range to
1450 		 * [0; 6]
1451 		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1452 		 * mark [6] free, increase bb_counters and shrink range to
1453 		 * [0; 5].
1454 		 * Then shift range to [0; 2], go up and do the same.
1455 		 */
1456 
1457 
1458 		if (first & 1)
1459 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1460 		if (!(last & 1))
1461 			e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1462 		if (first > last)
1463 			break;
1464 		order++;
1465 
1466 		if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1467 			mb_clear_bits(buddy, first, last - first + 1);
1468 			e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1469 			break;
1470 		}
1471 		first >>= 1;
1472 		last >>= 1;
1473 		buddy = buddy2;
1474 	}
1475 }
1476 
1477 static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1478 			   int first, int count)
1479 {
1480 	int left_is_free = 0;
1481 	int right_is_free = 0;
1482 	int block;
1483 	int last = first + count - 1;
1484 	struct super_block *sb = e4b->bd_sb;
1485 
1486 	if (WARN_ON(count == 0))
1487 		return;
1488 	BUG_ON(last >= (sb->s_blocksize << 3));
1489 	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1490 	/* Don't bother if the block group is corrupt. */
1491 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1492 		return;
1493 
1494 	mb_check_buddy(e4b);
1495 	mb_free_blocks_double(inode, e4b, first, count);
1496 
1497 	this_cpu_inc(discard_pa_seq);
1498 	e4b->bd_info->bb_free += count;
1499 	if (first < e4b->bd_info->bb_first_free)
1500 		e4b->bd_info->bb_first_free = first;
1501 
1502 	/* access memory sequentially: check left neighbour,
1503 	 * clear range and then check right neighbour
1504 	 */
1505 	if (first != 0)
1506 		left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1507 	block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1508 	if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1509 		right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1510 
1511 	if (unlikely(block != -1)) {
1512 		struct ext4_sb_info *sbi = EXT4_SB(sb);
1513 		ext4_fsblk_t blocknr;
1514 
1515 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1516 		blocknr += EXT4_C2B(sbi, block);
1517 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1518 			ext4_grp_locked_error(sb, e4b->bd_group,
1519 					      inode ? inode->i_ino : 0,
1520 					      blocknr,
1521 					      "freeing already freed block (bit %u); block bitmap corrupt.",
1522 					      block);
1523 			ext4_mark_group_bitmap_corrupted(
1524 				sb, e4b->bd_group,
1525 				EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1526 		} else {
1527 			mb_regenerate_buddy(e4b);
1528 		}
1529 		goto done;
1530 	}
1531 
1532 	/* let's maintain fragments counter */
1533 	if (left_is_free && right_is_free)
1534 		e4b->bd_info->bb_fragments--;
1535 	else if (!left_is_free && !right_is_free)
1536 		e4b->bd_info->bb_fragments++;
1537 
1538 	/* buddy[0] == bd_bitmap is a special case, so handle
1539 	 * it right away and let mb_buddy_mark_free stay free of
1540 	 * zero order checks.
1541 	 * Check if neighbours are to be coalesced,
1542 	 * adjust bitmap bb_counters and borders appropriately.
1543 	 */
1544 	if (first & 1) {
1545 		first += !left_is_free;
1546 		e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1547 	}
1548 	if (!(last & 1)) {
1549 		last -= !right_is_free;
1550 		e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1551 	}
1552 
1553 	if (first <= last)
1554 		mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1555 
1556 done:
1557 	mb_set_largest_free_order(sb, e4b->bd_info);
1558 	mb_check_buddy(e4b);
1559 }
1560 
1561 static int mb_find_extent(struct ext4_buddy *e4b, int block,
1562 				int needed, struct ext4_free_extent *ex)
1563 {
1564 	int next = block;
1565 	int max, order;
1566 	void *buddy;
1567 
1568 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1569 	BUG_ON(ex == NULL);
1570 
1571 	buddy = mb_find_buddy(e4b, 0, &max);
1572 	BUG_ON(buddy == NULL);
1573 	BUG_ON(block >= max);
1574 	if (mb_test_bit(block, buddy)) {
1575 		ex->fe_len = 0;
1576 		ex->fe_start = 0;
1577 		ex->fe_group = 0;
1578 		return 0;
1579 	}
1580 
1581 	/* find actual order */
1582 	order = mb_find_order_for_block(e4b, block);
1583 	block = block >> order;
1584 
1585 	ex->fe_len = 1 << order;
1586 	ex->fe_start = block << order;
1587 	ex->fe_group = e4b->bd_group;
1588 
1589 	/* calc difference from given start */
1590 	next = next - ex->fe_start;
1591 	ex->fe_len -= next;
1592 	ex->fe_start += next;
1593 
1594 	while (needed > ex->fe_len &&
1595 	       mb_find_buddy(e4b, order, &max)) {
1596 
1597 		if (block + 1 >= max)
1598 			break;
1599 
1600 		next = (block + 1) * (1 << order);
1601 		if (mb_test_bit(next, e4b->bd_bitmap))
1602 			break;
1603 
1604 		order = mb_find_order_for_block(e4b, next);
1605 
1606 		block = next >> order;
1607 		ex->fe_len += 1 << order;
1608 	}
1609 
1610 	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
1611 		/* Should never happen! (but apparently sometimes does?!?) */
1612 		WARN_ON(1);
1613 		ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0,
1614 			"corruption or bug in mb_find_extent "
1615 			"block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1616 			block, order, needed, ex->fe_group, ex->fe_start,
1617 			ex->fe_len, ex->fe_logical);
1618 		ex->fe_len = 0;
1619 		ex->fe_start = 0;
1620 		ex->fe_group = 0;
1621 	}
1622 	return ex->fe_len;
1623 }
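
/*
 * Example for mb_find_extent() above (the layout is hypothetical):
 * suppose cluster 10 sits in a free order-1 buddy {10,11}, {12..15} is a
 * free order-2 buddy, and the caller needs 6 clusters starting at 10.
 * The first pass yields fe_start = 10, fe_len = 2; since needed > fe_len
 * and the next chunk at 12 is free, the loop absorbs the order-2 buddy
 * and the function returns fe_start = 10, fe_len = 6.
 */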
1624 
1625 static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1626 {
1627 	int ord;
1628 	int mlen = 0;
1629 	int max = 0;
1630 	int cur;
1631 	int start = ex->fe_start;
1632 	int len = ex->fe_len;
1633 	unsigned ret = 0;
1634 	int len0 = len;
1635 	void *buddy;
1636 
1637 	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1638 	BUG_ON(e4b->bd_group != ex->fe_group);
1639 	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
1640 	mb_check_buddy(e4b);
1641 	mb_mark_used_double(e4b, start, len);
1642 
1643 	this_cpu_inc(discard_pa_seq);
1644 	e4b->bd_info->bb_free -= len;
1645 	if (e4b->bd_info->bb_first_free == start)
1646 		e4b->bd_info->bb_first_free += len;
1647 
1648 	/* let's maintain fragments counter */
1649 	if (start != 0)
1650 		mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
1651 	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
1652 		max = !mb_test_bit(start + len, e4b->bd_bitmap);
1653 	if (mlen && max)
1654 		e4b->bd_info->bb_fragments++;
1655 	else if (!mlen && !max)
1656 		e4b->bd_info->bb_fragments--;
1657 
1658 	/* let's maintain buddy itself */
1659 	while (len) {
1660 		ord = mb_find_order_for_block(e4b, start);
1661 
1662 		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1663 			/* the whole chunk may be allocated at once! */
1664 			mlen = 1 << ord;
1665 			buddy = mb_find_buddy(e4b, ord, &max);
1666 			BUG_ON((start >> ord) >= max);
1667 			mb_set_bit(start >> ord, buddy);
1668 			e4b->bd_info->bb_counters[ord]--;
1669 			start += mlen;
1670 			len -= mlen;
1671 			BUG_ON(len < 0);
1672 			continue;
1673 		}
1674 
1675 		/* store for history */
1676 		if (ret == 0)
1677 			ret = len | (ord << 16);
1678 
1679 		/* we have to split large buddy */
1680 		BUG_ON(ord <= 0);
1681 		buddy = mb_find_buddy(e4b, ord, &max);
1682 		mb_set_bit(start >> ord, buddy);
1683 		e4b->bd_info->bb_counters[ord]--;
1684 
1685 		ord--;
1686 		cur = (start >> ord) & ~1U;
1687 		buddy = mb_find_buddy(e4b, ord, &max);
1688 		mb_clear_bit(cur, buddy);
1689 		mb_clear_bit(cur + 1, buddy);
1690 		e4b->bd_info->bb_counters[ord]++;
1691 		e4b->bd_info->bb_counters[ord]++;
1692 	}
1693 	mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1694 
1695 	ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
1696 	mb_check_buddy(e4b);
1697 
1698 	return ret;
1699 }
1700 
1701 /*
1702  * Must be called under group lock!
1703  */
1704 static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1705 					struct ext4_buddy *e4b)
1706 {
1707 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1708 	int ret;
1709 
1710 	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1711 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1712 
1713 	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1714 	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1715 	ret = mb_mark_used(e4b, &ac->ac_b_ex);
1716 
1717 	/* preallocation can change ac_b_ex, thus we store actually
1718 	 * allocated blocks for history */
1719 	ac->ac_f_ex = ac->ac_b_ex;
1720 
1721 	ac->ac_status = AC_STATUS_FOUND;
1722 	ac->ac_tail = ret & 0xffff;
1723 	ac->ac_buddy = ret >> 16;
1724 
1725 	/*
1726 	 * take the page reference. We want the page to be pinned
1727 	 * so that we don't get an ext4_mb_init_cache call for this
1728 	 * group until we update the bitmap. That would mean we
1729 	 * double allocate blocks. The reference is dropped
1730 	 * in ext4_mb_release_context
1731 	 */
1732 	ac->ac_bitmap_page = e4b->bd_bitmap_page;
1733 	get_page(ac->ac_bitmap_page);
1734 	ac->ac_buddy_page = e4b->bd_buddy_page;
1735 	get_page(ac->ac_buddy_page);
1736 	/* store last allocated for subsequent stream allocation */
1737 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
1738 		spin_lock(&sbi->s_md_lock);
1739 		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1740 		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1741 		spin_unlock(&sbi->s_md_lock);
1742 	}
1743 	/*
1744 	 * As we've just preallocated more space than
1745 	 * user requested originally, we store allocated
1746 	 * space in a special descriptor.
1747 	 */
1748 	if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
1749 		ext4_mb_new_preallocation(ac);
1750 
1751 }
1752 
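/*
 * Check the scanning limits.  If more than s_mb_max_to_scan extents have
 * been examined, set AC_STATUS_BREAK so the caller falls back to the best
 * extent found so far.  If the best extent already satisfies the goal
 * length and either @finish_group is set or s_mb_min_to_scan extents have
 * been examined, re-verify its availability and use it.
 * Must be called with the group lock held.
 */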
1753 static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1754 					struct ext4_buddy *e4b,
1755 					int finish_group)
1756 {
1757 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1758 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1759 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1760 	struct ext4_free_extent ex;
1761 	int max;
1762 
1763 	if (ac->ac_status == AC_STATUS_FOUND)
1764 		return;
1765 	/*
1766 	 * We don't want to scan for a whole year
1767 	 */
1768 	if (ac->ac_found > sbi->s_mb_max_to_scan &&
1769 			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1770 		ac->ac_status = AC_STATUS_BREAK;
1771 		return;
1772 	}
1773 
1774 	/*
1775 	 * Haven't found good chunk so far, let's continue
1776 	 */
1777 	if (bex->fe_len < gex->fe_len)
1778 		return;
1779 
1780 	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1781 			&& bex->fe_group == e4b->bd_group) {
1782 		/* recheck chunk's availability - we don't know
1783 		 * when it was found (within this lock-unlock
1784 		 * period or not) */
1785 		max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
1786 		if (max >= gex->fe_len) {
1787 			ext4_mb_use_best_found(ac, e4b);
1788 			return;
1789 		}
1790 	}
1791 }
1792 
1793 /*
1794  * The routine checks whether the found extent is good enough. If it is,
1795  * the extent gets marked used and a flag is set in the context to stop
1796  * scanning. Otherwise, the extent is compared with the previously found
1797  * extent and, if the new one is better, it is stored in the context.
1798  * Later, the best found extent will be used if mballoc can't find a
1799  * good enough extent.
1800  *
1801  * FIXME: real allocation policy is to be designed yet!
1802  */
1803 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1804 					struct ext4_free_extent *ex,
1805 					struct ext4_buddy *e4b)
1806 {
1807 	struct ext4_free_extent *bex = &ac->ac_b_ex;
1808 	struct ext4_free_extent *gex = &ac->ac_g_ex;
1809 
1810 	BUG_ON(ex->fe_len <= 0);
1811 	BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1812 	BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1813 	BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1814 
1815 	ac->ac_found++;
1816 
1817 	/*
1818 	 * The special case - take what you catch first
1819 	 */
1820 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1821 		*bex = *ex;
1822 		ext4_mb_use_best_found(ac, e4b);
1823 		return;
1824 	}
1825 
1826 	/*
1827 	 * Let's check whether the chunk is good enough
1828 	 */
1829 	if (ex->fe_len == gex->fe_len) {
1830 		*bex = *ex;
1831 		ext4_mb_use_best_found(ac, e4b);
1832 		return;
1833 	}
1834 
1835 	/*
1836 	 * If this is first found extent, just store it in the context
1837 	 */
1838 	if (bex->fe_len == 0) {
1839 		*bex = *ex;
1840 		return;
1841 	}
1842 
1843 	/*
1844 	 * If new found extent is better, store it in the context
1845 	 */
1846 	if (bex->fe_len < gex->fe_len) {
1847 		/* if the request isn't satisfied, any found extent
1848 		 * larger than previous best one is better */
1849 		if (ex->fe_len > bex->fe_len)
1850 			*bex = *ex;
1851 	} else if (ex->fe_len > gex->fe_len) {
1852 		/* if the request is satisfied, then we try to find
1853 		 * an extent that still satisfies the request, but is
1854 		 * smaller than previous one */
1855 		if (ex->fe_len < bex->fe_len)
1856 			*bex = *ex;
1857 	}
1858 
1859 	ext4_mb_check_limits(ac, e4b, 0);
1860 }
1861 
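/*
 * Fall back to the best extent recorded in ac->ac_b_ex: reload its group,
 * re-check under the group lock that the extent is still free, and use
 * whatever part of it can still be allocated.
 */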
1862 static noinline_for_stack
1863 int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1864 					struct ext4_buddy *e4b)
1865 {
1866 	struct ext4_free_extent ex = ac->ac_b_ex;
1867 	ext4_group_t group = ex.fe_group;
1868 	int max;
1869 	int err;
1870 
1871 	BUG_ON(ex.fe_len <= 0);
1872 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1873 	if (err)
1874 		return err;
1875 
1876 	ext4_lock_group(ac->ac_sb, group);
1877 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1878 		goto out;
1879 
1880 	max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
1881 
1882 	if (max > 0) {
1883 		ac->ac_b_ex = ex;
1884 		ext4_mb_use_best_found(ac, e4b);
1885 	}
1886 
1887 out:
1888 	ext4_unlock_group(ac->ac_sb, group);
1889 	ext4_mb_unload_buddy(e4b);
1890 
1891 	return 0;
1892 }
1893 
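/*
 * Try to allocate at the exact goal block (ac->ac_g_ex).  The goal is
 * used only when EXT4_MB_HINT_TRY_GOAL or EXT4_MB_HINT_GOAL_ONLY is set.
 * For stripe-sized requests the found extent must also be stripe aligned;
 * with EXT4_MB_HINT_MERGE even a short extent at the goal is accepted.
 */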
1894 static noinline_for_stack
1895 int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1896 				struct ext4_buddy *e4b)
1897 {
1898 	ext4_group_t group = ac->ac_g_ex.fe_group;
1899 	int max;
1900 	int err;
1901 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1902 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1903 	struct ext4_free_extent ex;
1904 
1905 	if (!grp)
1906 		return -EFSCORRUPTED;
1907 	if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
1908 		return 0;
1909 	if (grp->bb_free == 0)
1910 		return 0;
1911 
1912 	err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1913 	if (err)
1914 		return err;
1915 
1916 	ext4_lock_group(ac->ac_sb, group);
1917 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1918 		goto out;
1919 
1920 	max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
1921 			     ac->ac_g_ex.fe_len, &ex);
1922 	ex.fe_logical = 0xDEADFA11; /* debug value */
1923 
1924 	if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1925 		ext4_fsblk_t start;
1926 
1927 		start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1928 			ex.fe_start;
1929 		/* use do_div to get remainder (would be 64-bit modulo) */
1930 		if (do_div(start, sbi->s_stripe) == 0) {
1931 			ac->ac_found++;
1932 			ac->ac_b_ex = ex;
1933 			ext4_mb_use_best_found(ac, e4b);
1934 		}
1935 	} else if (max >= ac->ac_g_ex.fe_len) {
1936 		BUG_ON(ex.fe_len <= 0);
1937 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1938 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1939 		ac->ac_found++;
1940 		ac->ac_b_ex = ex;
1941 		ext4_mb_use_best_found(ac, e4b);
1942 	} else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1943 		/* Sometimes, caller may want to merge even small
1944 		 * number of blocks to an existing extent */
1945 		BUG_ON(ex.fe_len <= 0);
1946 		BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1947 		BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1948 		ac->ac_found++;
1949 		ac->ac_b_ex = ex;
1950 		ext4_mb_use_best_found(ac, e4b);
1951 	}
1952 out:
1953 	ext4_unlock_group(ac->ac_sb, group);
1954 	ext4_mb_unload_buddy(e4b);
1955 
1956 	return 0;
1957 }
1958 
1959 /*
1960  * The routine scans buddy structures (not the bitmap!) from the given
1961  * order up to the max order to find a chunk big enough to satisfy the request
1962  */
1963 static noinline_for_stack
1964 void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1965 					struct ext4_buddy *e4b)
1966 {
1967 	struct super_block *sb = ac->ac_sb;
1968 	struct ext4_group_info *grp = e4b->bd_info;
1969 	void *buddy;
1970 	int i;
1971 	int k;
1972 	int max;
1973 
1974 	BUG_ON(ac->ac_2order <= 0);
1975 	for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1976 		if (grp->bb_counters[i] == 0)
1977 			continue;
1978 
1979 		buddy = mb_find_buddy(e4b, i, &max);
1980 		BUG_ON(buddy == NULL);
1981 
1982 		k = mb_find_next_zero_bit(buddy, max, 0);
1983 		if (k >= max) {
1984 			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
1985 				"%d free clusters of order %d. But found 0",
1986 				grp->bb_counters[i], i);
1987 			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
1988 					 e4b->bd_group,
1989 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1990 			break;
1991 		}
1992 		ac->ac_found++;
1993 
1994 		ac->ac_b_ex.fe_len = 1 << i;
1995 		ac->ac_b_ex.fe_start = k << i;
1996 		ac->ac_b_ex.fe_group = e4b->bd_group;
1997 
1998 		ext4_mb_use_best_found(ac, e4b);
1999 
2000 		BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
2001 
2002 		if (EXT4_SB(sb)->s_mb_stats)
2003 			atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
2004 
2005 		break;
2006 	}
2007 }
2008 
2009 /*
2010  * The routine scans the group and measures all found extents.
2011  * To optimize scanning, the number of free blocks in the group
2012  * (bb_free) is used as an upper limit on how far to scan.
2013  */
2014 static noinline_for_stack
2015 void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
2016 					struct ext4_buddy *e4b)
2017 {
2018 	struct super_block *sb = ac->ac_sb;
2019 	void *bitmap = e4b->bd_bitmap;
2020 	struct ext4_free_extent ex;
2021 	int i;
2022 	int free;
2023 
2024 	free = e4b->bd_info->bb_free;
2025 	if (WARN_ON(free <= 0))
2026 		return;
2027 
2028 	i = e4b->bd_info->bb_first_free;
2029 
2030 	while (free && ac->ac_status == AC_STATUS_CONTINUE) {
2031 		i = mb_find_next_zero_bit(bitmap,
2032 						EXT4_CLUSTERS_PER_GROUP(sb), i);
2033 		if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
2034 			/*
2035 			 * If we have a corrupt bitmap, we won't find any
2036 			 * free blocks even though the group info says we
2037 			 * have free blocks
2038 			 */
2039 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2040 					"%d free clusters as per "
2041 					"group info. But bitmap says 0",
2042 					free);
2043 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2044 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2045 			break;
2046 		}
2047 
2048 		mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
2049 		if (WARN_ON(ex.fe_len <= 0))
2050 			break;
2051 		if (free < ex.fe_len) {
2052 			ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
2053 					"%d free clusters as per "
2054 					"group info. But got %d blocks",
2055 					free, ex.fe_len);
2056 			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
2057 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
2058 			/*
2059 			 * The number of free blocks differs. This mostly
2060 			 * indicates that the bitmap is corrupt. So exit
2061 			 * without claiming the space.
2062 			 */
2063 			break;
2064 		}
2065 		ex.fe_logical = 0xDEADC0DE; /* debug value */
2066 		ext4_mb_measure_extent(ac, &ex, e4b);
2067 
2068 		i += ex.fe_len;
2069 		free -= ex.fe_len;
2070 	}
2071 
2072 	ext4_mb_check_limits(ac, e4b, 1);
2073 }
2074 
2075 /*
2076  * This is a special case for storage like RAID5:
2077  * we try to find stripe-aligned chunks for stripe-size-multiple requests
2078  */
2079 static noinline_for_stack
2080 void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
2081 				 struct ext4_buddy *e4b)
2082 {
2083 	struct super_block *sb = ac->ac_sb;
2084 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2085 	void *bitmap = e4b->bd_bitmap;
2086 	struct ext4_free_extent ex;
2087 	ext4_fsblk_t first_group_block;
2088 	ext4_fsblk_t a;
2089 	ext4_grpblk_t i;
2090 	int max;
2091 
2092 	BUG_ON(sbi->s_stripe == 0);
2093 
2094 	/* find first stripe-aligned block in group */
2095 	first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2096 
2097 	a = first_group_block + sbi->s_stripe - 1;
2098 	do_div(a, sbi->s_stripe);
2099 	i = (a * sbi->s_stripe) - first_group_block;
2100 
2101 	while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
2102 		if (!mb_test_bit(i, bitmap)) {
2103 			max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
2104 			if (max >= sbi->s_stripe) {
2105 				ac->ac_found++;
2106 				ex.fe_logical = 0xDEADF00D; /* debug value */
2107 				ac->ac_b_ex = ex;
2108 				ext4_mb_use_best_found(ac, e4b);
2109 				break;
2110 			}
2111 		}
2112 		i += sbi->s_stripe;
2113 	}
2114 }
2115 
2116 /*
2117  * This is also called BEFORE we load the buddy bitmap.
2118  * Returns true if the group is suitable for the allocation,
2119  * false otherwise.
2120  */
2121 static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
2122 				ext4_group_t group, int cr)
2123 {
2124 	ext4_grpblk_t free, fragments;
2125 	int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
2126 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2127 
2128 	BUG_ON(cr < 0 || cr >= 4);
2129 
2130 	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2131 		return false;
2132 
2133 	free = grp->bb_free;
2134 	if (free == 0)
2135 		return false;
2136 
2137 	fragments = grp->bb_fragments;
2138 	if (fragments == 0)
2139 		return false;
2140 
2141 	switch (cr) {
2142 	case 0:
2143 		BUG_ON(ac->ac_2order == 0);
2144 
2145 		/* Avoid using the first bg of a flexgroup for data files */
2146 		if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2147 		    (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2148 		    ((group % flex_size) == 0))
2149 			return false;
2150 
2151 		if (free < ac->ac_g_ex.fe_len)
2152 			return false;
2153 
2154 		if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1)
2155 			return true;
2156 
2157 		if (grp->bb_largest_free_order < ac->ac_2order)
2158 			return false;
2159 
2160 		return true;
2161 	case 1:
2162 		if ((free / fragments) >= ac->ac_g_ex.fe_len)
2163 			return true;
2164 		break;
2165 	case 2:
2166 		if (free >= ac->ac_g_ex.fe_len)
2167 			return true;
2168 		break;
2169 	case 3:
2170 		return true;
2171 	default:
2172 		BUG();
2173 	}
2174 
2175 	return false;
2176 }
2177 
2178 /*
2179  * This could return negative error code if something goes wrong
2180  * during ext4_mb_init_group(). This should not be called with
2181  * ext4_lock_group() held.
2182  */
2183 static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
2184 				     ext4_group_t group, int cr)
2185 {
2186 	struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2187 	struct super_block *sb = ac->ac_sb;
2188 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2189 	bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
2190 	ext4_grpblk_t free;
2191 	int ret = 0;
2192 
2193 	if (!grp)
2194 		return -EFSCORRUPTED;
2195 	if (sbi->s_mb_stats)
2196 		atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
2197 	if (should_lock)
2198 		ext4_lock_group(sb, group);
2199 	free = grp->bb_free;
2200 	if (free == 0)
2201 		goto out;
2202 	if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2203 		goto out;
2204 	if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2205 		goto out;
2206 	if (should_lock)
2207 		ext4_unlock_group(sb, group);
2208 
2209 	/* We only do this if the grp has never been initialized */
2210 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
2211 		struct ext4_group_desc *gdp =
2212 			ext4_get_group_desc(sb, group, NULL);
2213 		int ret;
2214 
2215 		/* cr=0/1 is a very optimistic search to find large
2216 		 * good chunks almost for free.  If buddy data is not
2217 		 * ready, then this optimization makes no sense.  But
2218 		 * we never skip the first block group in a flex_bg,
2219 		 * since this gets used for metadata block allocation,
2220 		 * and we want to make sure we locate metadata blocks
2221 		 * in the first block group in the flex_bg if possible.
2222 		 */
2223 		if (cr < 2 &&
2224 		    (!sbi->s_log_groups_per_flex ||
2225 		     ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) &&
2226 		    !(ext4_has_group_desc_csum(sb) &&
2227 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))))
2228 			return 0;
2229 		ret = ext4_mb_init_group(sb, group, GFP_NOFS);
2230 		if (ret)
2231 			return ret;
2232 	}
2233 
2234 	if (should_lock)
2235 		ext4_lock_group(sb, group);
2236 	ret = ext4_mb_good_group(ac, group, cr);
2237 out:
2238 	if (should_lock)
2239 		ext4_unlock_group(sb, group);
2240 	return ret;
2241 }
2242 
2243 /*
2244  * Start prefetching @nr block bitmaps starting at @group.
2245  * Return the next group which needs to be prefetched.
2246  */
2247 ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
2248 			      unsigned int nr, int *cnt)
2249 {
2250 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2251 	struct buffer_head *bh;
2252 	struct blk_plug plug;
2253 
2254 	blk_start_plug(&plug);
2255 	while (nr-- > 0) {
2256 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group,
2257 								  NULL);
2258 		struct ext4_group_info *grp = ext4_get_group_info(sb, group);
2259 
2260 		/*
2261 		 * Prefetch block groups with free blocks; but don't
2262 		 * bother if it is marked uninitialized on disk, since
2263 		 * it won't require I/O to read.  Also only try to
2264 		 * prefetch once, so we avoid a getblk() call, which can
2265 		 * be expensive.
2266 		 */
2267 		if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
2268 		    EXT4_MB_GRP_NEED_INIT(grp) &&
2269 		    ext4_free_group_clusters(sb, gdp) > 0 &&
2270 		    !(ext4_has_group_desc_csum(sb) &&
2271 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2272 			bh = ext4_read_block_bitmap_nowait(sb, group, true);
2273 			if (bh && !IS_ERR(bh)) {
2274 				if (!buffer_uptodate(bh) && cnt)
2275 					(*cnt)++;
2276 				brelse(bh);
2277 			}
2278 		}
2279 		if (++group >= ngroups)
2280 			group = 0;
2281 	}
2282 	blk_finish_plug(&plug);
2283 	return group;
2284 }
2285 
2286 /*
2287  * Prefetching reads the block bitmap into the buffer cache; but we
2288  * need to make sure that the buddy bitmap in the page cache has been
2289  * initialized.  Note that ext4_mb_init_group() will block if the I/O
2290  * is not yet completed, or indeed if ext4_mb_prefetch did not
2291  * initiate the I/O in the first place.
2292  *
2293  * TODO: We should actually kick off the buddy bitmap setup in a work
2294  * queue when the buffer I/O is completed, so that we don't block
2295  * waiting for the block allocation bitmap read to finish when
2296  * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator().
2297  */
2298 void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
2299 			   unsigned int nr)
2300 {
2301 	while (nr-- > 0) {
2302 		struct ext4_group_desc *gdp;
2303 		struct ext4_group_info *grp;
2304 
2305 		if (!group)
2306 			group = ext4_get_groups_count(sb);
2307 		group--;
2308 		gdp = ext4_get_group_desc(sb, group, NULL);
2309 		grp = ext4_get_group_info(sb, group);
2310 
2311 		if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
2312 		    ext4_free_group_clusters(sb, gdp) > 0 &&
2313 		    !(ext4_has_group_desc_csum(sb) &&
2314 		      (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
2315 			if (ext4_mb_init_group(sb, group, GFP_NOFS))
2316 				break;
2317 		}
2318 	}
2319 }
2320 
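/*
 * Main scanning loop of the allocator.  Try the goal block first, then
 * walk all block groups for each criterion cr=0..3 (from cheap buddy
 * order scans to an exhaustive bitmap scan), prefetching block bitmaps
 * along the way, until an extent satisfying the request is found or we
 * settle for the best extent seen so far.
 */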
2321 static noinline_for_stack int
2322 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
2323 {
2324 	ext4_group_t prefetch_grp = 0, ngroups, group, i;
2325 	int cr = -1;
2326 	int err = 0, first_err = 0;
2327 	unsigned int nr = 0, prefetch_ios = 0;
2328 	struct ext4_sb_info *sbi;
2329 	struct super_block *sb;
2330 	struct ext4_buddy e4b;
2331 	int lost;
2332 
2333 	sb = ac->ac_sb;
2334 	sbi = EXT4_SB(sb);
2335 	ngroups = ext4_get_groups_count(sb);
2336 	/* non-extent files are limited to low blocks/groups */
2337 	if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
2338 		ngroups = sbi->s_blockfile_groups;
2339 
2340 	BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2341 
2342 	/* first, try the goal */
2343 	err = ext4_mb_find_by_goal(ac, &e4b);
2344 	if (err || ac->ac_status == AC_STATUS_FOUND)
2345 		goto out;
2346 
2347 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2348 		goto out;
2349 
2350 	/*
2351 	 * ac->ac_2order is set only if the fe_len is a power of 2.
2352 	 * If ac->ac_2order is set, we also set the criteria to 0 so that
2353 	 * we try exact allocation using the buddy.
2354 	 */
2355 	i = fls(ac->ac_g_ex.fe_len);
2356 	ac->ac_2order = 0;
2357 	/*
2358 	 * We search using buddy data only if the order of the request
2359 	 * is greater than or equal to sbi->s_mb_order2_reqs.
2360 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
2361 	 * We also support searching for power-of-two requests only for
2362 	 * requests up to the maximum buddy size we have constructed.
2363 	 */
2364 	if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
2365 		/*
2366 		 * This should tell if fe_len is exactly a power of 2
2367 		 */
2368 		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2369 			ac->ac_2order = array_index_nospec(i - 1,
2370 							   sb->s_blocksize_bits + 2);
2371 	}
2372 
2373 	/* if stream allocation is enabled, use global goal */
2374 	if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2375 		/* TBD: may be hot point */
2376 		spin_lock(&sbi->s_md_lock);
2377 		ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2378 		ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2379 		spin_unlock(&sbi->s_md_lock);
2380 	}
2381 
2382 	/* Let's just scan groups to find more or less suitable blocks */
2383 	cr = ac->ac_2order ? 0 : 1;
2384 	/*
2385 	 * cr == 0 try to get exact allocation,
2386 	 * cr == 3  try to get anything
2387 	 */
2388 repeat:
2389 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2390 		ac->ac_criteria = cr;
2391 		/*
2392 		 * searching for the right group start
2393 		 * from the goal value specified
2394 		 */
2395 		group = ac->ac_g_ex.fe_group;
2396 		prefetch_grp = group;
2397 
2398 		for (i = 0; i < ngroups; group++, i++) {
2399 			int ret = 0;
2400 			cond_resched();
2401 			/*
2402 			 * Artificially restricted ngroups for non-extent
2403 			 * files makes group > ngroups possible on first loop.
2404 			 */
2405 			if (group >= ngroups)
2406 				group = 0;
2407 
2408 			/*
2409 			 * Batch reads of the block allocation bitmaps
2410 			 * to get multiple READs in flight; limit
2411 			 * prefetching at cr=0/1, otherwise mballoc can
2412 			 * spend a lot of time loading imperfect groups
2413 			 */
2414 			if ((prefetch_grp == group) &&
2415 			    (cr > 1 ||
2416 			     prefetch_ios < sbi->s_mb_prefetch_limit)) {
2417 				unsigned int curr_ios = prefetch_ios;
2418 
2419 				nr = sbi->s_mb_prefetch;
2420 				if (ext4_has_feature_flex_bg(sb)) {
2421 					nr = 1 << sbi->s_log_groups_per_flex;
2422 					nr -= group & (nr - 1);
2423 					nr = min(nr, sbi->s_mb_prefetch);
2424 				}
2425 				prefetch_grp = ext4_mb_prefetch(sb, group,
2426 							nr, &prefetch_ios);
2427 				if (prefetch_ios == curr_ios)
2428 					nr = 0;
2429 			}
2430 
2431 			/* This now checks without needing the buddy page */
2432 			ret = ext4_mb_good_group_nolock(ac, group, cr);
2433 			if (ret <= 0) {
2434 				if (!first_err)
2435 					first_err = ret;
2436 				continue;
2437 			}
2438 
2439 			err = ext4_mb_load_buddy(sb, group, &e4b);
2440 			if (err)
2441 				goto out;
2442 
2443 			ext4_lock_group(sb, group);
2444 
2445 			/*
2446 			 * We need to check again after locking the
2447 			 * block group
2448 			 */
2449 			ret = ext4_mb_good_group(ac, group, cr);
2450 			if (ret == 0) {
2451 				ext4_unlock_group(sb, group);
2452 				ext4_mb_unload_buddy(&e4b);
2453 				continue;
2454 			}
2455 
2456 			ac->ac_groups_scanned++;
2457 			if (cr == 0)
2458 				ext4_mb_simple_scan_group(ac, &e4b);
2459 			else if (cr == 1 && sbi->s_stripe &&
2460 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
2461 				ext4_mb_scan_aligned(ac, &e4b);
2462 			else
2463 				ext4_mb_complex_scan_group(ac, &e4b);
2464 
2465 			ext4_unlock_group(sb, group);
2466 			ext4_mb_unload_buddy(&e4b);
2467 
2468 			if (ac->ac_status != AC_STATUS_CONTINUE)
2469 				break;
2470 		}
2471 		/* Processed all groups and haven't found blocks */
2472 		if (sbi->s_mb_stats && i == ngroups)
2473 			atomic64_inc(&sbi->s_bal_cX_failed[cr]);
2474 	}
2475 
2476 	if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2477 	    !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2478 		/*
2479 		 * We've been searching too long. Let's try to allocate
2480 		 * the best chunk we've found so far
2481 		 */
2482 		ext4_mb_try_best_found(ac, &e4b);
2483 		if (ac->ac_status != AC_STATUS_FOUND) {
2484 			/*
2485 			 * Someone more lucky has already allocated it.
2486 			 * The only thing we can do is just take first
2487 			 * found block(s)
2488 			 */
2489 			lost = atomic_inc_return(&sbi->s_mb_lost_chunks);
2490 			mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n",
2491 				 ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start,
2492 				 ac->ac_b_ex.fe_len, lost);
2493 
2494 			ac->ac_b_ex.fe_group = 0;
2495 			ac->ac_b_ex.fe_start = 0;
2496 			ac->ac_b_ex.fe_len = 0;
2497 			ac->ac_status = AC_STATUS_CONTINUE;
2498 			ac->ac_flags |= EXT4_MB_HINT_FIRST;
2499 			cr = 3;
2500 			goto repeat;
2501 		}
2502 	}
2503 
2504 	if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND)
2505 		atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]);
2506 out:
2507 	if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2508 		err = first_err;
2509 
2510 	mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
2511 		 ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
2512 		 ac->ac_flags, cr, err);
2513 
2514 	if (nr)
2515 		ext4_mb_prefetch_fini(sb, prefetch_grp, nr);
2516 
2517 	return err;
2518 }
2519 
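/*
 * seq_file iterators for the mb_groups procfs file: each position
 * corresponds to one block group, stored as group number + 1 so that
 * a NULL return can terminate the iteration.
 */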
2520 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2521 {
2522 	struct super_block *sb = PDE_DATA(file_inode(seq->file));
2523 	ext4_group_t group;
2524 
2525 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2526 		return NULL;
2527 	group = *pos + 1;
2528 	return (void *) ((unsigned long) group);
2529 }
2530 
2531 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2532 {
2533 	struct super_block *sb = PDE_DATA(file_inode(seq->file));
2534 	ext4_group_t group;
2535 
2536 	++*pos;
2537 	if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
2538 		return NULL;
2539 	group = *pos + 1;
2540 	return (void *) ((unsigned long) group);
2541 }
2542 
2543 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2544 {
2545 	struct super_block *sb = PDE_DATA(file_inode(seq->file));
2546 	ext4_group_t group = (ext4_group_t) ((unsigned long) v);
2547 	int i;
2548 	int err, buddy_loaded = 0;
2549 	struct ext4_buddy e4b;
2550 	struct ext4_group_info *grinfo;
2551 	unsigned char blocksize_bits = min_t(unsigned char,
2552 					     sb->s_blocksize_bits,
2553 					     EXT4_MAX_BLOCK_LOG_SIZE);
2554 	struct sg {
2555 		struct ext4_group_info info;
2556 		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
2557 	} sg;
2558 
2559 	group--;
2560 	if (group == 0)
2561 		seq_puts(seq, "#group: free  frags first ["
2562 			      " 2^0   2^1   2^2   2^3   2^4   2^5   2^6  "
2563 			      " 2^7   2^8   2^9   2^10  2^11  2^12  2^13  ]\n");
2564 
2565 	i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2566 		sizeof(struct ext4_group_info);
2567 
2568 	grinfo = ext4_get_group_info(sb, group);
2569 	if (!grinfo)
2570 		return 0;
2571 	/* Load the group info in memory only if not already loaded. */
2572 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2573 		err = ext4_mb_load_buddy(sb, group, &e4b);
2574 		if (err) {
2575 			seq_printf(seq, "#%-5u: I/O error\n", group);
2576 			return 0;
2577 		}
2578 		buddy_loaded = 1;
2579 	}
2580 
2581 	memcpy(&sg, grinfo, i);
2582 
2583 	if (buddy_loaded)
2584 		ext4_mb_unload_buddy(&e4b);
2585 
2586 	seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
2587 			sg.info.bb_fragments, sg.info.bb_first_free);
2588 	for (i = 0; i <= 13; i++)
2589 		seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
2590 				sg.info.bb_counters[i] : 0);
2591 	seq_puts(seq, " ]\n");
2592 
2593 	return 0;
2594 }
2595 
2596 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2597 {
2598 }
2599 
2600 const struct seq_operations ext4_mb_seq_groups_ops = {
2601 	.start  = ext4_mb_seq_groups_start,
2602 	.next   = ext4_mb_seq_groups_next,
2603 	.stop   = ext4_mb_seq_groups_stop,
2604 	.show   = ext4_mb_seq_groups_show,
2605 };
2606 
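/*
 * Dump the mballoc statistics counters (requests, per-criterion hits,
 * groups considered, useless loops, etc.) via seq_file.
 */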
2607 int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
2608 {
2609 	struct super_block *sb = (struct super_block *)seq->private;
2610 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2611 
2612 	seq_puts(seq, "mballoc:\n");
2613 	if (!sbi->s_mb_stats) {
2614 		seq_puts(seq, "\tmb stats collection turned off.\n");
2615 		seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n");
2616 		return 0;
2617 	}
2618 	seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs));
2619 	seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success));
2620 
2621 	seq_printf(seq, "\tgroups_scanned: %u\n",  atomic_read(&sbi->s_bal_groups_scanned));
2622 
2623 	seq_puts(seq, "\tcr0_stats:\n");
2624 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0]));
2625 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2626 		   atomic64_read(&sbi->s_bal_cX_groups_considered[0]));
2627 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2628 		   atomic64_read(&sbi->s_bal_cX_failed[0]));
2629 
2630 	seq_puts(seq, "\tcr1_stats:\n");
2631 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1]));
2632 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2633 		   atomic64_read(&sbi->s_bal_cX_groups_considered[1]));
2634 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2635 		   atomic64_read(&sbi->s_bal_cX_failed[1]));
2636 
2637 	seq_puts(seq, "\tcr2_stats:\n");
2638 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2]));
2639 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2640 		   atomic64_read(&sbi->s_bal_cX_groups_considered[2]));
2641 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2642 		   atomic64_read(&sbi->s_bal_cX_failed[2]));
2643 
2644 	seq_puts(seq, "\tcr3_stats:\n");
2645 	seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3]));
2646 	seq_printf(seq, "\t\tgroups_considered: %llu\n",
2647 		   atomic64_read(&sbi->s_bal_cX_groups_considered[3]));
2648 	seq_printf(seq, "\t\tuseless_loops: %llu\n",
2649 		   atomic64_read(&sbi->s_bal_cX_failed[3]));
2650 	seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned));
2651 	seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals));
2652 	seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders));
2653 	seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks));
2654 	seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks));
2655 
2656 	seq_printf(seq, "\tbuddies_generated: %u/%u\n",
2657 		   atomic_read(&sbi->s_mb_buddies_generated),
2658 		   ext4_get_groups_count(sb));
2659 	seq_printf(seq, "\tbuddies_time_used: %llu\n",
2660 		   atomic64_read(&sbi->s_mb_generation_time));
2661 	seq_printf(seq, "\tpreallocated: %u\n",
2662 		   atomic_read(&sbi->s_mb_preallocated));
2663 	seq_printf(seq, "\tdiscarded: %u\n",
2664 		   atomic_read(&sbi->s_mb_discarded));
2665 	return 0;
2666 }
2667 
2668 static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2669 {
2670 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2671 	struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2672 
2673 	BUG_ON(!cachep);
2674 	return cachep;
2675 }
2676 
2677 /*
2678  * Allocate the top-level s_group_info array for the specified number
2679  * of groups
2680  */
2681 int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2682 {
2683 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2684 	unsigned size;
2685 	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
2686 
2687 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2688 		EXT4_DESC_PER_BLOCK_BITS(sb);
2689 	if (size <= sbi->s_group_info_size)
2690 		return 0;
2691 
2692 	size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2693 	new_groupinfo = kvzalloc(size, GFP_KERNEL);
2694 	if (!new_groupinfo) {
2695 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2696 		return -ENOMEM;
2697 	}
2698 	rcu_read_lock();
2699 	old_groupinfo = rcu_dereference(sbi->s_group_info);
2700 	if (old_groupinfo)
2701 		memcpy(new_groupinfo, old_groupinfo,
2702 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2703 	rcu_read_unlock();
2704 	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
2705 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2706 	if (old_groupinfo)
2707 		ext4_kvfree_array_rcu(old_groupinfo);
2708 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2709 		   sbi->s_group_info_size);
2710 	return 0;
2711 }
2712 
2713 /* Create and initialize ext4_group_info data for the given group. */
2714 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2715 			  struct ext4_group_desc *desc)
2716 {
2717 	int i;
2718 	int metalen = 0;
2719 	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2720 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2721 	struct ext4_group_info **meta_group_info;
2722 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2723 
2724 	/*
2725 	 * First check if this group is the first of a reserved block.
2726 	 * If it's true, we have to allocate a new table of pointers
2727 	 * to ext4_group_info structures
2728 	 */
2729 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2730 		metalen = sizeof(*meta_group_info) <<
2731 			EXT4_DESC_PER_BLOCK_BITS(sb);
2732 		meta_group_info = kmalloc(metalen, GFP_NOFS);
2733 		if (meta_group_info == NULL) {
2734 			ext4_msg(sb, KERN_ERR, "can't allocate mem "
2735 				 "for a buddy group");
2736 			goto exit_meta_group_info;
2737 		}
2738 		rcu_read_lock();
2739 		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
2740 		rcu_read_unlock();
2741 	}
2742 
2743 	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
2744 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2745 
2746 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2747 	if (meta_group_info[i] == NULL) {
2748 		ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2749 		goto exit_group_info;
2750 	}
2751 	set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2752 		&(meta_group_info[i]->bb_state));
2753 
2754 	/*
2755 	 * initialize bb_free to be able to skip
2756 	 * empty groups without initialization
2757 	 */
2758 	if (ext4_has_group_desc_csum(sb) &&
2759 	    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
2760 		meta_group_info[i]->bb_free =
2761 			ext4_free_clusters_after_init(sb, group, desc);
2762 	} else {
2763 		meta_group_info[i]->bb_free =
2764 			ext4_free_group_clusters(sb, desc);
2765 	}
2766 
2767 	INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2768 	init_rwsem(&meta_group_info[i]->alloc_sem);
2769 	meta_group_info[i]->bb_free_root = RB_ROOT;
2770 	meta_group_info[i]->bb_largest_free_order = -1;  /* uninit */
2771 
2772 	mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
2773 	return 0;
2774 
2775 exit_group_info:
2776 	/* If a meta_group_info table has been allocated, release it now */
2777 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2778 		struct ext4_group_info ***group_info;
2779 
2780 		rcu_read_lock();
2781 		group_info = rcu_dereference(sbi->s_group_info);
2782 		kfree(group_info[idx]);
2783 		group_info[idx] = NULL;
2784 		rcu_read_unlock();
2785 	}
2786 exit_meta_group_info:
2787 	return -ENOMEM;
2788 } /* ext4_mb_add_groupinfo */
2789 
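/*
 * Set up the per-group mballoc state at mount time: allocate the
 * s_group_info arrays, create the buddy cache inode, add group info for
 * every group, and compute the bitmap prefetch window (s_mb_prefetch /
 * s_mb_prefetch_limit).
 */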
2790 static int ext4_mb_init_backend(struct super_block *sb)
2791 {
2792 	ext4_group_t ngroups = ext4_get_groups_count(sb);
2793 	ext4_group_t i;
2794 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2795 	int err;
2796 	struct ext4_group_desc *desc;
2797 	struct ext4_group_info ***group_info;
2798 	struct kmem_cache *cachep;
2799 
2800 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
2801 	if (err)
2802 		return err;
2803 
2804 	sbi->s_buddy_cache = new_inode(sb);
2805 	if (sbi->s_buddy_cache == NULL) {
2806 		ext4_msg(sb, KERN_ERR, "can't get new inode");
2807 		goto err_freesgi;
2808 	}
2809 	/* To avoid potentially colliding with a valid on-disk inode number,
2810 	 * use EXT4_BAD_INO for the buddy cache inode number.  This inode is
2811 	 * not in the inode hash, so it should never be found by iget(), but
2812 	 * this will avoid confusion if it ever shows up during debugging. */
2813 	sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
2814 	EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2815 	for (i = 0; i < ngroups; i++) {
2816 		cond_resched();
2817 		desc = ext4_get_group_desc(sb, i, NULL);
2818 		if (desc == NULL) {
2819 			ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
2820 			goto err_freebuddy;
2821 		}
2822 		if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2823 			goto err_freebuddy;
2824 	}
2825 
2826 	if (ext4_has_feature_flex_bg(sb)) {
2827 		/* a single flex group is supposed to be read by a single IO.
2828 		 * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is
2829 		 * an unsigned integer, so the maximum shift is 32.
2830 		 */
2831 		if (sbi->s_es->s_log_groups_per_flex >= 32) {
2832 			ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
2833 			goto err_freebuddy;
2834 		}
2835 		sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
2836 			BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
2837 		sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
2838 	} else {
2839 		sbi->s_mb_prefetch = 32;
2840 	}
2841 	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
2842 		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
2843 	/* How many real IOs to prefetch within a single allocation at cr=0.
2844 	 * Given cr=0 is a CPU-related optimization we shouldn't try to
2845 	 * load too many groups; at some point we should start to use what
2846 	 * we've got in memory.
2847 	 * With an average random access time of 5ms, it'd take a second to
2848 	 * get 200 groups (* N with flex_bg), so let's make this limit 4
2849 	 */
2850 	sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
2851 	if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
2852 		sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
2853 
2854 	return 0;
2855 
2856 err_freebuddy:
2857 	cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2858 	while (i-- > 0) {
2859 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
2860 
2861 		if (grp)
2862 			kmem_cache_free(cachep, grp);
2863 	}
2864 	i = sbi->s_group_info_size;
2865 	rcu_read_lock();
2866 	group_info = rcu_dereference(sbi->s_group_info);
2867 	while (i-- > 0)
2868 		kfree(group_info[i]);
2869 	rcu_read_unlock();
2870 	iput(sbi->s_buddy_cache);
2871 err_freesgi:
2872 	rcu_read_lock();
2873 	kvfree(rcu_dereference(sbi->s_group_info));
2874 	rcu_read_unlock();
2875 	return -ENOMEM;
2876 }
2877 
2878 static void ext4_groupinfo_destroy_slabs(void)
2879 {
2880 	int i;
2881 
2882 	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2883 		kmem_cache_destroy(ext4_groupinfo_caches[i]);
2884 		ext4_groupinfo_caches[i] = NULL;
2885 	}
2886 }
2887 
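/*
 * Create (once per block size) the slab cache used for ext4_group_info
 * structures; the object size depends on the number of buddy orders and
 * therefore on the block size.
 */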
2888 static int ext4_groupinfo_create_slab(size_t size)
2889 {
2890 	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2891 	int slab_size;
2892 	int blocksize_bits = order_base_2(size);
2893 	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2894 	struct kmem_cache *cachep;
2895 
2896 	if (cache_index >= NR_GRPINFO_CACHES)
2897 		return -EINVAL;
2898 
2899 	if (unlikely(cache_index < 0))
2900 		cache_index = 0;
2901 
2902 	mutex_lock(&ext4_grpinfo_slab_create_mutex);
2903 	if (ext4_groupinfo_caches[cache_index]) {
2904 		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2905 		return 0;	/* Already created */
2906 	}
2907 
2908 	slab_size = offsetof(struct ext4_group_info,
2909 				bb_counters[blocksize_bits + 2]);
2910 
2911 	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2912 					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2913 					NULL);
2914 
2915 	ext4_groupinfo_caches[cache_index] = cachep;
2916 
2917 	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2918 	if (!cachep) {
2919 		printk(KERN_EMERG
2920 		       "EXT4-fs: no memory for groupinfo slab cache\n");
2921 		return -ENOMEM;
2922 	}
2923 
2924 	return 0;
2925 }
2926 
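/*
 * Per-filesystem mballoc initialization, called at mount time: compute
 * the per-order buddy offsets and sizes, set the allocator tunables to
 * their defaults, allocate the per-CPU locality groups and initialize
 * the buddy cache backend.
 */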
2927 int ext4_mb_init(struct super_block *sb)
2928 {
2929 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2930 	unsigned i, j;
2931 	unsigned offset, offset_incr;
2932 	unsigned max;
2933 	int ret;
2934 
2935 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
2936 
2937 	sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2938 	if (sbi->s_mb_offsets == NULL) {
2939 		ret = -ENOMEM;
2940 		goto out;
2941 	}
2942 
2943 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
2944 	sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2945 	if (sbi->s_mb_maxs == NULL) {
2946 		ret = -ENOMEM;
2947 		goto out;
2948 	}
2949 
2950 	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2951 	if (ret < 0)
2952 		goto out;
2953 
2954 	/* order 0 is regular bitmap */
2955 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2956 	sbi->s_mb_offsets[0] = 0;
2957 
2958 	i = 1;
2959 	offset = 0;
2960 	offset_incr = 1 << (sb->s_blocksize_bits - 1);
2961 	max = sb->s_blocksize << 2;
2962 	do {
2963 		sbi->s_mb_offsets[i] = offset;
2964 		sbi->s_mb_maxs[i] = max;
2965 		offset += offset_incr;
2966 		offset_incr = offset_incr >> 1;
2967 		max = max >> 1;
2968 		i++;
2969 	} while (i <= sb->s_blocksize_bits + 1);
2970 
2971 	spin_lock_init(&sbi->s_md_lock);
2972 	sbi->s_mb_free_pending = 0;
2973 	INIT_LIST_HEAD(&sbi->s_freed_data_list);
2974 
2975 	sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2976 	sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2977 	sbi->s_mb_stats = MB_DEFAULT_STATS;
2978 	sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2979 	sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2980 	sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
2981 	/*
2982 	 * The default group preallocation is 512, which for 4k block
2983 	 * sizes translates to 2 megabytes.  However for bigalloc file
2984 	 * systems, this is probably too big (i.e., if the cluster size
2985 	 * is 1 megabyte, then group preallocation size becomes half a
2986 	 * gigabyte!).  As a default, we will keep a two megabyte
2987 	 * group prealloc size for cluster sizes up to 64k, and after
2988 	 * that, we will force a minimum group preallocation size of
2989 	 * 32 clusters.  This translates to 8 megs when the cluster
2990 	 * size is 256k, and 32 megs when the cluster size is 1 meg,
2991 	 * which seems reasonable as a default.
2992 	 */
2993 	sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2994 				       sbi->s_cluster_bits, 32);
2995 	/*
2996 	 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2997 	 * to the lowest multiple of s_stripe which is bigger than
2998 	 * the s_mb_group_prealloc as determined above. We want
2999 	 * the preallocation size to be an exact multiple of the
3000 	 * RAID stripe size so that preallocations don't fragment
3001 	 * the stripes.
3002 	 */
3003 	if (sbi->s_stripe > 1) {
3004 		sbi->s_mb_group_prealloc = roundup(
3005 			sbi->s_mb_group_prealloc, sbi->s_stripe);
3006 	}
3007 
3008 	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3009 	if (sbi->s_locality_groups == NULL) {
3010 		ret = -ENOMEM;
3011 		goto out;
3012 	}
3013 	for_each_possible_cpu(i) {
3014 		struct ext4_locality_group *lg;
3015 		lg = per_cpu_ptr(sbi->s_locality_groups, i);
3016 		mutex_init(&lg->lg_mutex);
3017 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
3018 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3019 		spin_lock_init(&lg->lg_prealloc_lock);
3020 	}
3021 
3022 	/* init file for buddy data */
3023 	ret = ext4_mb_init_backend(sb);
3024 	if (ret != 0)
3025 		goto out_free_locality_groups;
3026 
3027 	return 0;
3028 
3029 out_free_locality_groups:
3030 	free_percpu(sbi->s_locality_groups);
3031 	sbi->s_locality_groups = NULL;
3032 out:
3033 	kfree(sbi->s_mb_offsets);
3034 	sbi->s_mb_offsets = NULL;
3035 	kfree(sbi->s_mb_maxs);
3036 	sbi->s_mb_maxs = NULL;
3037 	return ret;
3038 }
3039 
3040 /* needs to be called with the ext4 group lock held */
3041 static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3042 {
3043 	struct ext4_prealloc_space *pa;
3044 	struct list_head *cur, *tmp;
3045 	int count = 0;
3046 
3047 	list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3048 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3049 		list_del(&pa->pa_group_list);
3050 		count++;
3051 		kmem_cache_free(ext4_pspace_cachep, pa);
3052 	}
3053 	return count;
3054 }
3055 
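/*
 * Tear down mballoc state at unmount: discard any remaining group
 * preallocations, free the group info structures and the buddy cache
 * inode, and optionally log the allocator statistics.
 */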
3056 int ext4_mb_release(struct super_block *sb)
3057 {
3058 	ext4_group_t ngroups = ext4_get_groups_count(sb);
3059 	ext4_group_t i;
3060 	int num_meta_group_infos;
3061 	struct ext4_group_info *grinfo, ***group_info;
3062 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3063 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3064 	int count;
3065 
3066 	if (sbi->s_group_info) {
3067 		for (i = 0; i < ngroups; i++) {
3068 			cond_resched();
3069 			grinfo = ext4_get_group_info(sb, i);
3070 			if (!grinfo)
3071 				continue;
3072 			mb_group_bb_bitmap_free(grinfo);
3073 			ext4_lock_group(sb, i);
3074 			count = ext4_mb_cleanup_pa(grinfo);
3075 			if (count)
3076 				mb_debug(sb, "mballoc: %d PAs left\n",
3077 					 count);
3078 			ext4_unlock_group(sb, i);
3079 			kmem_cache_free(cachep, grinfo);
3080 		}
3081 		num_meta_group_infos = (ngroups +
3082 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
3083 			EXT4_DESC_PER_BLOCK_BITS(sb);
3084 		rcu_read_lock();
3085 		group_info = rcu_dereference(sbi->s_group_info);
3086 		for (i = 0; i < num_meta_group_infos; i++)
3087 			kfree(group_info[i]);
3088 		kvfree(group_info);
3089 		rcu_read_unlock();
3090 	}
3091 	kfree(sbi->s_mb_offsets);
3092 	kfree(sbi->s_mb_maxs);
3093 	iput(sbi->s_buddy_cache);
3094 	if (sbi->s_mb_stats) {
3095 		ext4_msg(sb, KERN_INFO,
3096 		       "mballoc: %u blocks %u reqs (%u success)",
3097 				atomic_read(&sbi->s_bal_allocated),
3098 				atomic_read(&sbi->s_bal_reqs),
3099 				atomic_read(&sbi->s_bal_success));
3100 		ext4_msg(sb, KERN_INFO,
3101 		      "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
3102 				"%u 2^N hits, %u breaks, %u lost",
3103 				atomic_read(&sbi->s_bal_ex_scanned),
3104 				atomic_read(&sbi->s_bal_groups_scanned),
3105 				atomic_read(&sbi->s_bal_goals),
3106 				atomic_read(&sbi->s_bal_2orders),
3107 				atomic_read(&sbi->s_bal_breaks),
3108 				atomic_read(&sbi->s_mb_lost_chunks));
3109 		ext4_msg(sb, KERN_INFO,
3110 		       "mballoc: %u generated and it took %llu",
3111 				atomic_read(&sbi->s_mb_buddies_generated),
3112 				atomic64_read(&sbi->s_mb_generation_time));
3113 		ext4_msg(sb, KERN_INFO,
3114 		       "mballoc: %u preallocated, %u discarded",
3115 				atomic_read(&sbi->s_mb_preallocated),
3116 				atomic_read(&sbi->s_mb_discarded));
3117 	}
3118 
3119 	free_percpu(sbi->s_locality_groups);
3120 
3121 	return 0;
3122 }
3123 
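/*
 * Convert a cluster range within @block_group to a block range and issue
 * a discard for it, either asynchronously chained onto *biop or
 * synchronously via sb_issue_discard().
 */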
3124 static inline int ext4_issue_discard(struct super_block *sb,
3125 		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
3126 		struct bio **biop)
3127 {
3128 	ext4_fsblk_t discard_block;
3129 
3130 	discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
3131 			 ext4_group_first_block_no(sb, block_group));
3132 	count = EXT4_C2B(EXT4_SB(sb), count);
3133 	trace_ext4_discard_blocks(sb,
3134 			(unsigned long long) discard_block, count);
3135 	if (biop) {
3136 		return __blkdev_issue_discard(sb->s_bdev,
3137 			(sector_t)discard_block << (sb->s_blocksize_bits - 9),
3138 			(sector_t)count << (sb->s_blocksize_bits - 9),
3139 			GFP_NOFS, 0, biop);
3140 	} else
3141 		return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
3142 }
3143 
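/*
 * Return one committed free-extent record to the buddy: load the group's
 * buddy, mark the clusters free, remove the entry from the group's rb
 * tree and, once the tree is empty, drop the page references taken by
 * ext4_mb_free_metadata().
 */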
3144 static void ext4_free_data_in_buddy(struct super_block *sb,
3145 				    struct ext4_free_data *entry)
3146 {
3147 	struct ext4_buddy e4b;
3148 	struct ext4_group_info *db;
3149 	int err, count = 0, count2 = 0;
3150 
3151 	mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
3152 		 entry->efd_count, entry->efd_group, entry);
3153 
3154 	err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
3155 	/* we expect to find an existing buddy because it's pinned */
3156 	BUG_ON(err != 0);
3157 
3158 	spin_lock(&EXT4_SB(sb)->s_md_lock);
3159 	EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
3160 	spin_unlock(&EXT4_SB(sb)->s_md_lock);
3161 
3162 	db = e4b.bd_info;
3163 	/* there are blocks to put in buddy to make them really free */
3164 	count += entry->efd_count;
3165 	count2++;
3166 	ext4_lock_group(sb, entry->efd_group);
3167 	/* Take it out of per group rb tree */
3168 	rb_erase(&entry->efd_node, &(db->bb_free_root));
3169 	mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
3170 
3171 	/*
3172 	 * Clear the trimmed flag for the group so that the next
3173 	 * ext4_trim_fs can trim it.
3174 	 * If the volume is mounted with -o discard, online discard
3175 	 * is supported and the free blocks will be trimmed online.
3176 	 */
3177 	if (!test_opt(sb, DISCARD))
3178 		EXT4_MB_GRP_CLEAR_TRIMMED(db);
3179 
3180 	if (!db->bb_free_root.rb_node) {
3181 		/* No more items in the per group rb tree;
3182 		 * balance refcounts from ext4_mb_free_metadata()
3183 		 */
3184 		put_page(e4b.bd_buddy_page);
3185 		put_page(e4b.bd_bitmap_page);
3186 	}
3187 	ext4_unlock_group(sb, entry->efd_group);
3188 	kmem_cache_free(ext4_free_data_cachep, entry);
3189 	ext4_mb_unload_buddy(&e4b);
3190 
3191 	mb_debug(sb, "freed %d blocks in %d structures\n", count,
3192 		 count2);
3193 }
3194 
3195 /*
3196  * This function is called by the jbd2 layer once the commit has finished,
3197  * so we know we can free the blocks that were released with that commit.
3198  */
3199 void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
3200 {
3201 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3202 	struct ext4_free_data *entry, *tmp;
3203 	struct bio *discard_bio = NULL;
3204 	struct list_head freed_data_list;
3205 	struct list_head *cut_pos = NULL;
3206 	int err;
3207 
3208 	INIT_LIST_HEAD(&freed_data_list);
3209 
3210 	spin_lock(&sbi->s_md_lock);
3211 	list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
3212 		if (entry->efd_tid != commit_tid)
3213 			break;
3214 		cut_pos = &entry->efd_list;
3215 	}
3216 	if (cut_pos)
3217 		list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
3218 				  cut_pos);
3219 	spin_unlock(&sbi->s_md_lock);
3220 
3221 	if (test_opt(sb, DISCARD)) {
3222 		list_for_each_entry(entry, &freed_data_list, efd_list) {
3223 			err = ext4_issue_discard(sb, entry->efd_group,
3224 						 entry->efd_start_cluster,
3225 						 entry->efd_count,
3226 						 &discard_bio);
3227 			if (err && err != -EOPNOTSUPP) {
3228 				ext4_msg(sb, KERN_WARNING, "discard request in"
3229 					 " group:%d block:%d count:%d failed"
3230 					 " with %d", entry->efd_group,
3231 					 entry->efd_start_cluster,
3232 					 entry->efd_count, err);
3233 			} else if (err == -EOPNOTSUPP)
3234 				break;
3235 		}
3236 
3237 		if (discard_bio) {
3238 			submit_bio_wait(discard_bio);
3239 			bio_put(discard_bio);
3240 		}
3241 	}
3242 
3243 	list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
3244 		ext4_free_data_in_buddy(sb, entry);
3245 }
3246 
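/*
 * Module init: create the slab caches for preallocation spaces,
 * allocation contexts and deferred-free records used by mballoc.
 */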
3247 int __init ext4_init_mballoc(void)
3248 {
3249 	ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
3250 					SLAB_RECLAIM_ACCOUNT);
3251 	if (ext4_pspace_cachep == NULL)
3252 		goto out;
3253 
3254 	ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
3255 				    SLAB_RECLAIM_ACCOUNT);
3256 	if (ext4_ac_cachep == NULL)
3257 		goto out_pa_free;
3258 
3259 	ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
3260 					   SLAB_RECLAIM_ACCOUNT);
3261 	if (ext4_free_data_cachep == NULL)
3262 		goto out_ac_free;
3263 
3264 	return 0;
3265 
3266 out_ac_free:
3267 	kmem_cache_destroy(ext4_ac_cachep);
3268 out_pa_free:
3269 	kmem_cache_destroy(ext4_pspace_cachep);
3270 out:
3271 	return -ENOMEM;
3272 }
3273 
3274 void ext4_exit_mballoc(void)
3275 {
3276 	/*
3277 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
3278 	 * before destroying the slab cache.
3279 	 */
3280 	rcu_barrier();
3281 	kmem_cache_destroy(ext4_pspace_cachep);
3282 	kmem_cache_destroy(ext4_ac_cachep);
3283 	kmem_cache_destroy(ext4_free_data_cachep);
3284 	ext4_groupinfo_destroy_slabs();
3285 }
3286 
3287 
3288 /*
3289  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps.
3290  * Returns 0 on success or an error code
3291  */
3292 static noinline_for_stack int
3293 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
3294 				handle_t *handle, unsigned int reserv_clstrs)
3295 {
3296 	struct buffer_head *bitmap_bh = NULL;
3297 	struct ext4_group_desc *gdp;
3298 	struct buffer_head *gdp_bh;
3299 	struct ext4_sb_info *sbi;
3300 	struct super_block *sb;
3301 	ext4_fsblk_t block;
3302 	int err, len;
3303 
3304 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3305 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
3306 
3307 	sb = ac->ac_sb;
3308 	sbi = EXT4_SB(sb);
3309 
3310 	bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
3311 	if (IS_ERR(bitmap_bh)) {
3312 		err = PTR_ERR(bitmap_bh);
3313 		bitmap_bh = NULL;
3314 		goto out_err;
3315 	}
3316 
3317 	BUFFER_TRACE(bitmap_bh, "getting write access");
3318 	err = ext4_journal_get_write_access(handle, bitmap_bh);
3319 	if (err)
3320 		goto out_err;
3321 
3322 	err = -EIO;
3323 	gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
3324 	if (!gdp)
3325 		goto out_err;
3326 
3327 	ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
3328 			ext4_free_group_clusters(sb, gdp));
3329 
3330 	BUFFER_TRACE(gdp_bh, "get_write_access");
3331 	err = ext4_journal_get_write_access(handle, gdp_bh);
3332 	if (err)
3333 		goto out_err;
3334 
3335 	block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3336 
3337 	len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
3338 	if (!ext4_inode_block_valid(ac->ac_inode, block, len)) {
3339 		ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
3340 			   "fs metadata", block, block+len);
3341 		/* The filesystem is mounted not to panic on errors, so
3342 		 * fix the bitmap and return EFSCORRUPTED.
3343 		 * We leak some of the blocks here.
3344 		 */
3345 		ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3346 		ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3347 			      ac->ac_b_ex.fe_len);
3348 		ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3349 		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3350 		if (!err)
3351 			err = -EFSCORRUPTED;
3352 		goto out_err;
3353 	}
3354 
3355 	ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3356 #ifdef AGGRESSIVE_CHECK
3357 	{
3358 		int i;
3359 		for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3360 			BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3361 						bitmap_bh->b_data));
3362 		}
3363 	}
3364 #endif
3365 	ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3366 		      ac->ac_b_ex.fe_len);
3367 	if (ext4_has_group_desc_csum(sb) &&
3368 	    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3369 		gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3370 		ext4_free_group_clusters_set(sb, gdp,
3371 					     ext4_free_clusters_after_init(sb,
3372 						ac->ac_b_ex.fe_group, gdp));
3373 	}
3374 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3375 	ext4_free_group_clusters_set(sb, gdp, len);
3376 	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
3377 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3378 
3379 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3380 	percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3381 	/*
3382 	 * Now reduce the dirty block count also. Should not go negative
3383 	 */
3384 	if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3385 		/* release all the reserved blocks if non delalloc */
3386 		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3387 				   reserv_clstrs);
3388 
3389 	if (sbi->s_log_groups_per_flex) {
3390 		ext4_group_t flex_group = ext4_flex_group(sbi,
3391 							  ac->ac_b_ex.fe_group);
3392 		atomic64_sub(ac->ac_b_ex.fe_len,
3393 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
3394 						  flex_group)->free_clusters);
3395 	}
3396 
3397 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3398 	if (err)
3399 		goto out_err;
3400 	err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3401 
3402 out_err:
3403 	brelse(bitmap_bh);
3404 	return err;
3405 }
3406 
3407 /*
3408  * Idempotent helper for Ext4 fast commit replay path to set the state of
3409  * blocks in bitmaps and update counters.
3410  */
3411 void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
3412 			int len, int state)
3413 {
3414 	struct buffer_head *bitmap_bh = NULL;
3415 	struct ext4_group_desc *gdp;
3416 	struct buffer_head *gdp_bh;
3417 	struct ext4_sb_info *sbi = EXT4_SB(sb);
3418 	ext4_group_t group;
3419 	ext4_grpblk_t blkoff;
3420 	int i, err;
3421 	int already;
3422 	unsigned int clen, clen_changed, thisgrp_len;
3423 
3424 	while (len > 0) {
3425 		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3426 
3427 		/*
3428 		 * Check to see if we are freeing blocks across a group
3429 		 * boundary.
3430 		 * In case of flex_bg, (block, len) may span across more
3431 		 * than one group. In that case we need to get the
3432 		 * corresponding group metadata to work with, which is why
3433 		 * we iterate over one group at a time in this loop.
3434 		 */
3435 		thisgrp_len = min_t(unsigned int, (unsigned int)len,
3436 			EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff));
3437 		clen = EXT4_NUM_B2C(sbi, thisgrp_len);
3438 
3439 		bitmap_bh = ext4_read_block_bitmap(sb, group);
3440 		if (IS_ERR(bitmap_bh)) {
3441 			err = PTR_ERR(bitmap_bh);
3442 			bitmap_bh = NULL;
3443 			break;
3444 		}
3445 
3446 		err = -EIO;
3447 		gdp = ext4_get_group_desc(sb, group, &gdp_bh);
3448 		if (!gdp)
3449 			break;
3450 
3451 		ext4_lock_group(sb, group);
3452 		already = 0;
3453 		for (i = 0; i < clen; i++)
3454 			if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) ==
3455 					 !state)
3456 				already++;
3457 
3458 		clen_changed = clen - already;
3459 		if (state)
3460 			ext4_set_bits(bitmap_bh->b_data, blkoff, clen);
3461 		else
3462 			mb_test_and_clear_bits(bitmap_bh->b_data, blkoff, clen);
3463 		if (ext4_has_group_desc_csum(sb) &&
3464 		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
3465 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
3466 			ext4_free_group_clusters_set(sb, gdp,
3467 			     ext4_free_clusters_after_init(sb, group, gdp));
3468 		}
3469 		if (state)
3470 			clen = ext4_free_group_clusters(sb, gdp) - clen_changed;
3471 		else
3472 			clen = ext4_free_group_clusters(sb, gdp) + clen_changed;
3473 
3474 		ext4_free_group_clusters_set(sb, gdp, clen);
3475 		ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
3476 		ext4_group_desc_csum_set(sb, group, gdp);
3477 
3478 		ext4_unlock_group(sb, group);
3479 
3480 		if (sbi->s_log_groups_per_flex) {
3481 			ext4_group_t flex_group = ext4_flex_group(sbi, group);
3482 			struct flex_groups *fg = sbi_array_rcu_deref(sbi,
3483 						   s_flex_groups, flex_group);
3484 
3485 			if (state)
3486 				atomic64_sub(clen_changed, &fg->free_clusters);
3487 			else
3488 				atomic64_add(clen_changed, &fg->free_clusters);
3489 
3490 		}
3491 
3492 		err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
3493 		if (err)
3494 			break;
3495 		sync_dirty_buffer(bitmap_bh);
3496 		err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
3497 		sync_dirty_buffer(gdp_bh);
3498 		if (err)
3499 			break;
3500 
3501 		block += thisgrp_len;
3502 		len -= thisgrp_len;
3503 		brelse(bitmap_bh);
3504 		BUG_ON(len < 0);
3505 	}
3506 
3507 	if (err)
3508 		brelse(bitmap_bh);
3509 }
3510 
3511 /*
3512  * here we normalize the request for a locality group
3513  * Group requests are normalized to s_mb_group_prealloc, which goes to
3514  * s_stripe if we set the same via mount option.
3515  * s_mb_group_prealloc can be configured via
3516  * /sys/fs/ext4/<partition>/mb_group_prealloc
3517  *
3518  * XXX: should we try to preallocate more than the group has now?
3519  */
3520 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3521 {
3522 	struct super_block *sb = ac->ac_sb;
3523 	struct ext4_locality_group *lg = ac->ac_lg;
3524 
3525 	BUG_ON(lg == NULL);
3526 	ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
3527 	mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
3528 }
3529 
3530 /*
3531  * Normalization means making request better in terms of
3532  * size and alignment
3533  */
3534 static noinline_for_stack void
3535 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3536 				struct ext4_allocation_request *ar)
3537 {
3538 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3539 	struct ext4_super_block *es = sbi->s_es;
3540 	int bsbits, max;
3541 	loff_t size, start_off, end;
3542 	loff_t orig_size __maybe_unused;
3543 	ext4_lblk_t start;
3544 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3545 	struct ext4_prealloc_space *pa;
3546 
3547 	/* only normalize data requests; metadata requests
3548 	   do not need preallocation */
3549 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3550 		return;
3551 
3552 	/* sometimes the caller may want exact blocks */
3553 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3554 		return;
3555 
3556 	/* caller may indicate that preallocation isn't
3557 	 * required (it's a tail, for example) */
3558 	if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3559 		return;
3560 
3561 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3562 		ext4_mb_normalize_group_request(ac);
3563 		return;
3564 	}
3565 
3566 	bsbits = ac->ac_sb->s_blocksize_bits;
3567 
3568 	/* first, let's learn the actual file size
3569 	 * assuming the current request is allocated */
3570 	size = extent_logical_end(sbi, &ac->ac_o_ex);
3571 	size = size << bsbits;
3572 	if (size < i_size_read(ac->ac_inode))
3573 		size = i_size_read(ac->ac_inode);
3574 	orig_size = size;
3575 
3576 	/* max size of free chunks */
3577 	max = 2 << bsbits;
3578 
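	/*
	 * NRL_CHECK_SIZE(req, size, max, chunk_size) is true when the
	 * request already fits within @size, or when the largest free
	 * chunk we can hope for (@max) is no bigger than @chunk_size;
	 * the predicted-filesize table below uses it to pick a rounding
	 * step.
	 */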
3579 #define NRL_CHECK_SIZE(req, size, max, chunk_size)	\
3580 		(req <= (size) || max <= (chunk_size))
3581 
3582 	/* first, try to predict filesize */
3583 	/* XXX: should this table be tunable? */
3584 	start_off = 0;
3585 	if (size <= 16 * 1024) {
3586 		size = 16 * 1024;
3587 	} else if (size <= 32 * 1024) {
3588 		size = 32 * 1024;
3589 	} else if (size <= 64 * 1024) {
3590 		size = 64 * 1024;
3591 	} else if (size <= 128 * 1024) {
3592 		size = 128 * 1024;
3593 	} else if (size <= 256 * 1024) {
3594 		size = 256 * 1024;
3595 	} else if (size <= 512 * 1024) {
3596 		size = 512 * 1024;
3597 	} else if (size <= 1024 * 1024) {
3598 		size = 1024 * 1024;
3599 	} else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3600 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3601 						(21 - bsbits)) << 21;
3602 		size = 2 * 1024 * 1024;
3603 	} else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3604 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3605 							(22 - bsbits)) << 22;
3606 		size = 4 * 1024 * 1024;
3607 	} else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3608 					(8<<20)>>bsbits, max, 8 * 1024)) {
3609 		start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3610 							(23 - bsbits)) << 23;
3611 		size = 8 * 1024 * 1024;
3612 	} else {
3613 		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3614 		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3615 					      ac->ac_o_ex.fe_len) << bsbits;
3616 	}
3617 	size = size >> bsbits;
3618 	start = start_off >> bsbits;
3619 
3620 	/*
3621 	 * For tiny groups (smaller than 8MB) the chosen allocation
3622 	 * alignment may be larger than group size. Make sure the
3623 	 * alignment does not move allocation to a different group which
3624 	 * makes mballoc fail assertions later.
3625 	 */
3626 	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
3627 			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));
3628 
3629 	/* avoid unnecessary preallocation that may trigger assertions */
3630 	if (start + size > EXT_MAX_BLOCKS)
3631 		size = EXT_MAX_BLOCKS - start;
3632 
3633 	/* don't cover already allocated blocks in selected range */
3634 	if (ar->pleft && start <= ar->lleft) {
3635 		size -= ar->lleft + 1 - start;
3636 		start = ar->lleft + 1;
3637 	}
3638 	if (ar->pright && start + size - 1 >= ar->lright)
3639 		size -= start + size - ar->lright;
3640 
3641 	/*
3642 	 * Trim allocation request for filesystems with artificially small
3643 	 * groups.
3644 	 */
3645 	if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
3646 		size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
3647 
3648 	end = start + size;
3649 
3650 	/* check we don't cross already preallocated blocks */
3651 	rcu_read_lock();
3652 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3653 		loff_t pa_end;
3654 
3655 		if (pa->pa_deleted)
3656 			continue;
3657 		spin_lock(&pa->pa_lock);
3658 		if (pa->pa_deleted) {
3659 			spin_unlock(&pa->pa_lock);
3660 			continue;
3661 		}
3662 
3663 		pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
3664 
3665 		/* PA must not overlap original request */
3666 		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3667 			ac->ac_o_ex.fe_logical < pa->pa_lstart));
3668 
3669 		/* skip PAs this normalized request doesn't overlap with */
3670 		if (pa->pa_lstart >= end || pa_end <= start) {
3671 			spin_unlock(&pa->pa_lock);
3672 			continue;
3673 		}
3674 		BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3675 
3676 		/* adjust start or end to be adjacent to this pa */
3677 		if (pa_end <= ac->ac_o_ex.fe_logical) {
3678 			BUG_ON(pa_end < start);
3679 			start = pa_end;
3680 		} else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3681 			BUG_ON(pa->pa_lstart > end);
3682 			end = pa->pa_lstart;
3683 		}
3684 		spin_unlock(&pa->pa_lock);
3685 	}
3686 	rcu_read_unlock();
3687 	size = end - start;
3688 
3689 	/* XXX: extra loop to check we really don't overlap preallocations */
3690 	rcu_read_lock();
3691 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3692 		loff_t pa_end;
3693 
3694 		spin_lock(&pa->pa_lock);
3695 		if (pa->pa_deleted == 0) {
3696 			pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
3697 			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3698 		}
3699 		spin_unlock(&pa->pa_lock);
3700 	}
3701 	rcu_read_unlock();
3702 
3703 	if (start + size <= ac->ac_o_ex.fe_logical &&
3704 			start > ac->ac_o_ex.fe_logical) {
3705 		ext4_msg(ac->ac_sb, KERN_ERR,
3706 			 "start %lu, size %lu, fe_logical %lu",
3707 			 (unsigned long) start, (unsigned long) size,
3708 			 (unsigned long) ac->ac_o_ex.fe_logical);
3709 		BUG();
3710 	}
3711 	BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3712 
3713 	/* now prepare goal request */
3714 
3715 	/* XXX: is it better to align blocks with respect to logical
3716 	 * placement or to satisfy a big request as is */
3717 	ac->ac_g_ex.fe_logical = start;
3718 	ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
3719 
3720 	/* define goal start in order to merge */
3721 	if (ar->pright && (ar->lright == (start + size)) &&
3722 	    ar->pright >= size &&
3723 	    ar->pright - size >= le32_to_cpu(es->s_first_data_block)) {
3724 		/* merge to the right */
3725 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3726 						&ac->ac_g_ex.fe_group,
3727 						&ac->ac_g_ex.fe_start);
3728 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3729 	}
3730 	if (ar->pleft && (ar->lleft + 1 == start) &&
3731 	    ar->pleft + 1 < ext4_blocks_count(es)) {
3732 		/* merge to the left */
3733 		ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3734 						&ac->ac_g_ex.fe_group,
3735 						&ac->ac_g_ex.fe_start);
3736 		ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3737 	}
3738 
3739 	mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
3740 		 orig_size, start);
3741 }
3742 
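/*
 * Update the per-sb mballoc statistics for a completed allocation attempt
 * (when mb_stats is enabled) and emit the matching tracepoint.
 */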
3743 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3744 {
3745 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3746 
3747 	if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) {
3748 		atomic_inc(&sbi->s_bal_reqs);
3749 		atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3750 		if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
3751 			atomic_inc(&sbi->s_bal_success);
3752 		atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3753 		atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned);
3754 		if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3755 				ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3756 			atomic_inc(&sbi->s_bal_goals);
3757 		if (ac->ac_found > sbi->s_mb_max_to_scan)
3758 			atomic_inc(&sbi->s_bal_breaks);
3759 	}
3760 
3761 	if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3762 		trace_ext4_mballoc_alloc(ac);
3763 	else
3764 		trace_ext4_mballoc_prealloc(ac);
3765 }
3766 
3767 /*
3768  * Called on failure; free up any blocks from the inode PA for this
3769  * context.  We don't need this for MB_GROUP_PA because we only change
3770  * pa_free in ext4_mb_release_context(), but on failure, we've already
3771  * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3772  */
3773 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3774 {
3775 	struct ext4_prealloc_space *pa = ac->ac_pa;
3776 	struct ext4_buddy e4b;
3777 	int err;
3778 
3779 	if (pa == NULL) {
3780 		if (ac->ac_f_ex.fe_len == 0)
3781 			return;
3782 		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3783 		if (err) {
3784 			/*
3785 			 * This should never happen since we pin the
3786 			 * pages in the ext4_allocation_context so
3787 			 * ext4_mb_load_buddy() should never fail.
3788 			 */
3789 			WARN(1, "mb_load_buddy failed (%d)", err);
3790 			return;
3791 		}
3792 		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3793 		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3794 			       ac->ac_f_ex.fe_len);
3795 		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3796 		ext4_mb_unload_buddy(&e4b);
3797 		return;
3798 	}
3799 	if (pa->pa_type == MB_INODE_PA)
3800 		pa->pa_free += ac->ac_b_ex.fe_len;
3801 }
3802 
3803 /*
3804  * use blocks preallocated to inode
3805  */
3806 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3807 				struct ext4_prealloc_space *pa)
3808 {
3809 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3810 	ext4_fsblk_t start;
3811 	ext4_fsblk_t end;
3812 	int len;
3813 
3814 	/* found preallocated blocks, use them */
3815 	start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3816 	end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3817 		  start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3818 	len = EXT4_NUM_B2C(sbi, end - start);
3819 	ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3820 					&ac->ac_b_ex.fe_start);
3821 	ac->ac_b_ex.fe_len = len;
3822 	ac->ac_status = AC_STATUS_FOUND;
3823 	ac->ac_pa = pa;
3824 
3825 	BUG_ON(start < pa->pa_pstart);
3826 	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
3827 	BUG_ON(pa->pa_free < len);
3828 	BUG_ON(ac->ac_b_ex.fe_len <= 0);
3829 	pa->pa_free -= len;
3830 
3831 	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
3832 }
3833 
3834 /*
3835  * use blocks preallocated to locality group
3836  */
3837 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3838 				struct ext4_prealloc_space *pa)
3839 {
3840 	unsigned int len = ac->ac_o_ex.fe_len;
3841 
3842 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3843 					&ac->ac_b_ex.fe_group,
3844 					&ac->ac_b_ex.fe_start);
3845 	ac->ac_b_ex.fe_len = len;
3846 	ac->ac_status = AC_STATUS_FOUND;
3847 	ac->ac_pa = pa;
3848 
3849 	/* we don't correct pa_pstart or pa_len here to avoid a
3850 	 * possible race when the group is being loaded concurrently;
3851 	 * instead we correct the pa later, after blocks are marked
3852 	 * in the on-disk bitmap -- see ext4_mb_release_context().
3853 	 * Other CPUs are prevented from allocating from this pa by lg_mutex.
3854 	 */
3855 	mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
3856 		 pa->pa_lstart-len, len, pa);
3857 }
3858 
3859 /*
3860  * Return the prealloc space that has the minimal distance
3861  * from the goal block. @cpa is the prealloc
3862  * space with the currently known minimal distance
3863  * from the goal block.
3864  */
3865 static struct ext4_prealloc_space *
3866 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3867 			struct ext4_prealloc_space *pa,
3868 			struct ext4_prealloc_space *cpa)
3869 {
3870 	ext4_fsblk_t cur_distance, new_distance;
3871 
3872 	if (cpa == NULL) {
3873 		atomic_inc(&pa->pa_count);
3874 		return pa;
3875 	}
3876 	cur_distance = abs(goal_block - cpa->pa_pstart);
3877 	new_distance = abs(goal_block - pa->pa_pstart);
3878 
3879 	if (cur_distance <= new_distance)
3880 		return cpa;
3881 
3882 	/* drop the previous reference */
3883 	atomic_dec(&cpa->pa_count);
3884 	atomic_inc(&pa->pa_count);
3885 	return pa;
3886 }
3887 
3888 /*
3889  * search goal blocks in preallocated space
3890  */
3891 static noinline_for_stack bool
3892 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3893 {
3894 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3895 	int order, i;
3896 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3897 	struct ext4_locality_group *lg;
3898 	struct ext4_prealloc_space *pa, *cpa = NULL;
3899 	ext4_fsblk_t goal_block;
3900 
3901 	/* only data can be preallocated */
3902 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3903 		return false;
3904 
3905 	/* first, try per-file preallocation */
3906 	rcu_read_lock();
3907 	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3908 
3909 		/* all fields in this condition don't change,
3910 		 * so we can skip locking for them */
3911 		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3912 		    ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
3913 			continue;
3914 
3915 		/* non-extent files can't have physical blocks past 2^32 */
3916 		if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
3917 		    (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3918 		     EXT4_MAX_BLOCK_FILE_PHYS))
3919 			continue;
3920 
3921 		/* found preallocated blocks, use them */
3922 		spin_lock(&pa->pa_lock);
3923 		if (pa->pa_deleted == 0 && pa->pa_free) {
3924 			atomic_inc(&pa->pa_count);
3925 			ext4_mb_use_inode_pa(ac, pa);
3926 			spin_unlock(&pa->pa_lock);
3927 			ac->ac_criteria = 10;
3928 			rcu_read_unlock();
3929 			return true;
3930 		}
3931 		spin_unlock(&pa->pa_lock);
3932 	}
3933 	rcu_read_unlock();
3934 
3935 	/* can we use group allocation? */
3936 	if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3937 		return false;
3938 
3939 	/* inode may have no locality group for some reason */
3940 	lg = ac->ac_lg;
3941 	if (lg == NULL)
3942 		return false;
3943 	order  = fls(ac->ac_o_ex.fe_len) - 1;
3944 	if (order > PREALLOC_TB_SIZE - 1)
3945 		/* The max size of hash table is PREALLOC_TB_SIZE */
3946 		order = PREALLOC_TB_SIZE - 1;
3947 
3948 	goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
3949 	/*
3950 	 * search for the prealloc space that has the
3951 	 * minimal distance from the goal block.
3952 	 */
3953 	for (i = order; i < PREALLOC_TB_SIZE; i++) {
3954 		rcu_read_lock();
3955 		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3956 					pa_inode_list) {
3957 			spin_lock(&pa->pa_lock);
3958 			if (pa->pa_deleted == 0 &&
3959 					pa->pa_free >= ac->ac_o_ex.fe_len) {
3960 
3961 				cpa = ext4_mb_check_group_pa(goal_block,
3962 								pa, cpa);
3963 			}
3964 			spin_unlock(&pa->pa_lock);
3965 		}
3966 		rcu_read_unlock();
3967 	}
3968 	if (cpa) {
3969 		ext4_mb_use_group_pa(ac, cpa);
3970 		ac->ac_criteria = 20;
3971 		return true;
3972 	}
3973 	return false;
3974 }
3975 
3976 /*
3977  * the function goes through all blocks freed in the group
3978  * but not yet committed and marks them used in the in-core bitmap.
3979  * buddy must be generated from this bitmap
3980  * Needs to be called with the ext4 group lock held
3981  */
3982 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3983 						ext4_group_t group)
3984 {
3985 	struct rb_node *n;
3986 	struct ext4_group_info *grp;
3987 	struct ext4_free_data *entry;
3988 
3989 	grp = ext4_get_group_info(sb, group);
3990 	if (!grp)
3991 		return;
3992 	n = rb_first(&(grp->bb_free_root));
3993 
3994 	while (n) {
3995 		entry = rb_entry(n, struct ext4_free_data, efd_node);
3996 		ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
3997 		n = rb_next(n);
3998 	}
3999 	return;
4000 }
4001 
4002 /*
4003  * the function goes through all preallocations in this group and marks them
4004  * used in the in-core bitmap. buddy must be generated from this bitmap
4005  * Needs to be called with the ext4 group lock held
4006  */
4007 static noinline_for_stack
4008 void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
4009 					ext4_group_t group)
4010 {
4011 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4012 	struct ext4_prealloc_space *pa;
4013 	struct list_head *cur;
4014 	ext4_group_t groupnr;
4015 	ext4_grpblk_t start;
4016 	int preallocated = 0;
4017 	int len;
4018 
4019 	if (!grp)
4020 		return;
4021 
4022 	/* all forms of preallocation discard first load the group,
4023 	 * so the only competing code is preallocation use.
4024 	 * we don't need any locking here.
4025 	 * notice that we do NOT ignore preallocations with pa_deleted set;
4026 	 * otherwise we could leave used blocks available for
4027 	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
4028 	 * is dropping the preallocation
4029 	 */
4030 	list_for_each(cur, &grp->bb_prealloc_list) {
4031 		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
4032 		spin_lock(&pa->pa_lock);
4033 		ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4034 					     &groupnr, &start);
4035 		len = pa->pa_len;
4036 		spin_unlock(&pa->pa_lock);
4037 		if (unlikely(len == 0))
4038 			continue;
4039 		BUG_ON(groupnr != group);
4040 		ext4_set_bits(bitmap, start, len);
4041 		preallocated += len;
4042 	}
4043 	mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
4044 }
4045 
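/*
 * Mark a preallocation as deleted, warning if it was already marked.
 * For inode PAs this also drops the inode's count of active preallocations.
 */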
4046 static void ext4_mb_mark_pa_deleted(struct super_block *sb,
4047 				    struct ext4_prealloc_space *pa)
4048 {
4049 	struct ext4_inode_info *ei;
4050 
4051 	if (pa->pa_deleted) {
4052 		ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n",
4053 			     pa->pa_type, pa->pa_pstart, pa->pa_lstart,
4054 			     pa->pa_len);
4055 		return;
4056 	}
4057 
4058 	pa->pa_deleted = 1;
4059 
4060 	if (pa->pa_type == MB_INODE_PA) {
4061 		ei = EXT4_I(pa->pa_inode);
4062 		atomic_dec(&ei->i_prealloc_active);
4063 	}
4064 }
4065 
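/*
 * RCU callback that frees a preallocation descriptor once all readers are
 * done with it; the pa must already be deleted and have no references left.
 */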
4066 static void ext4_mb_pa_callback(struct rcu_head *head)
4067 {
4068 	struct ext4_prealloc_space *pa;
4069 	pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
4070 
4071 	BUG_ON(atomic_read(&pa->pa_count));
4072 	BUG_ON(pa->pa_deleted == 0);
4073 	kmem_cache_free(ext4_pspace_cachep, pa);
4074 }
4075 
4076 /*
4077  * drops a reference to preallocated space descriptor
4078  * if this was the last reference and the space is consumed
4079  */
4080 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
4081 			struct super_block *sb, struct ext4_prealloc_space *pa)
4082 {
4083 	ext4_group_t grp;
4084 	ext4_fsblk_t grp_blk;
4085 
4086 	/* in this short window concurrent discard can set pa_deleted */
4087 	spin_lock(&pa->pa_lock);
4088 	if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
4089 		spin_unlock(&pa->pa_lock);
4090 		return;
4091 	}
4092 
4093 	if (pa->pa_deleted == 1) {
4094 		spin_unlock(&pa->pa_lock);
4095 		return;
4096 	}
4097 
4098 	ext4_mb_mark_pa_deleted(sb, pa);
4099 	spin_unlock(&pa->pa_lock);
4100 
4101 	grp_blk = pa->pa_pstart;
4102 	/*
4103 	 * If doing group-based preallocation, pa_pstart may be in the
4104 	 * next group when pa is used up
4105 	 */
4106 	if (pa->pa_type == MB_GROUP_PA)
4107 		grp_blk--;
4108 
4109 	grp = ext4_get_group_number(sb, grp_blk);
4110 
4111 	/*
4112 	 * possible race:
4113 	 *
4114 	 *  P1 (buddy init)			P2 (regular allocation)
4115 	 *					find block B in PA
4116 	 *  copy on-disk bitmap to buddy
4117 	 *  					mark B in on-disk bitmap
4118 	 *					drop PA from group
4119 	 *  mark all PAs in buddy
4120 	 *
4121 	 * thus, P1 initializes buddy with B available. to prevent this
4122 	 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
4123 	 * against that pair
4124 	 */
4125 	ext4_lock_group(sb, grp);
4126 	list_del(&pa->pa_group_list);
4127 	ext4_unlock_group(sb, grp);
4128 
4129 	spin_lock(pa->pa_obj_lock);
4130 	list_del_rcu(&pa->pa_inode_list);
4131 	spin_unlock(pa->pa_obj_lock);
4132 
4133 	call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4134 }
4135 
4136 /*
4137  * creates new preallocated space for given inode
4138  */
4139 static noinline_for_stack void
4140 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
4141 {
4142 	struct super_block *sb = ac->ac_sb;
4143 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4144 	struct ext4_prealloc_space *pa;
4145 	struct ext4_group_info *grp;
4146 	struct ext4_inode_info *ei;
4147 
4148 	/* preallocate only when found space is larger than requested */
4149 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4150 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4151 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4152 	BUG_ON(ac->ac_pa == NULL);
4153 
4154 	pa = ac->ac_pa;
4155 
4156 	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
4157 		struct ext4_free_extent ex = {
4158 			.fe_logical = ac->ac_g_ex.fe_logical,
4159 			.fe_len = ac->ac_g_ex.fe_len,
4160 		};
4161 		loff_t orig_goal_end = extent_logical_end(sbi, &ex);
4162 
4163 		/* we can't allocate as much as the normalizer wants, so
4164 		 * the found space must get a proper lstart
4165 		 * to cover the original request */
4166 		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
4167 		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
4168 
4169 		/*
4170 		 * Use the below logic for adjusting best extent as it keeps
4171 		 * fragmentation in check while ensuring logical range of best
4172 		 * extent doesn't overflow out of goal extent:
4173 		 *
4174 		 * 1. Check if best ex can be kept at end of goal and still
4175 		 *    cover original start
4176 		 * 2. Else, check if best ex can be kept at start of goal and
4177 		 *    still cover original start
4178 		 * 3. Else, keep the best ex at start of original request.
4179 		 */
4180 		ex.fe_len = ac->ac_b_ex.fe_len;
4181 
4182 		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
4183 		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
4184 			goto adjust_bex;
4185 
4186 		ex.fe_logical = ac->ac_g_ex.fe_logical;
4187 		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
4188 			goto adjust_bex;
4189 
4190 		ex.fe_logical = ac->ac_o_ex.fe_logical;
4191 adjust_bex:
4192 		ac->ac_b_ex.fe_logical = ex.fe_logical;
4193 
4194 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
4195 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
4196 		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
4197 	}
4198 
4199 	/* preallocation can change ac_b_ex, thus we store actually
4200 	 * allocated blocks for history */
4201 	ac->ac_f_ex = ac->ac_b_ex;
4202 
4203 	pa->pa_lstart = ac->ac_b_ex.fe_logical;
4204 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4205 	pa->pa_len = ac->ac_b_ex.fe_len;
4206 	pa->pa_free = pa->pa_len;
4207 	spin_lock_init(&pa->pa_lock);
4208 	INIT_LIST_HEAD(&pa->pa_inode_list);
4209 	INIT_LIST_HEAD(&pa->pa_group_list);
4210 	pa->pa_deleted = 0;
4211 	pa->pa_type = MB_INODE_PA;
4212 
4213 	mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4214 		 pa->pa_len, pa->pa_lstart);
4215 	trace_ext4_mb_new_inode_pa(ac, pa);
4216 
4217 	ext4_mb_use_inode_pa(ac, pa);
4218 	atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
4219 
4220 	ei = EXT4_I(ac->ac_inode);
4221 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4222 	if (!grp)
4223 		return;
4224 
4225 	pa->pa_obj_lock = &ei->i_prealloc_lock;
4226 	pa->pa_inode = ac->ac_inode;
4227 
4228 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4229 
4230 	spin_lock(pa->pa_obj_lock);
4231 	list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
4232 	spin_unlock(pa->pa_obj_lock);
4233 	atomic_inc(&ei->i_prealloc_active);
4234 }
4235 
4236 /*
4237  * creates new preallocated space for the locality group the inode belongs to
4238  */
4239 static noinline_for_stack void
4240 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
4241 {
4242 	struct super_block *sb = ac->ac_sb;
4243 	struct ext4_locality_group *lg;
4244 	struct ext4_prealloc_space *pa;
4245 	struct ext4_group_info *grp;
4246 
4247 	/* preallocate only when found space is larger than requested */
4248 	BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
4249 	BUG_ON(ac->ac_status != AC_STATUS_FOUND);
4250 	BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
4251 	BUG_ON(ac->ac_pa == NULL);
4252 
4253 	pa = ac->ac_pa;
4254 
4255 	/* preallocation can change ac_b_ex, thus we store actually
4256 	 * allocated blocks for history */
4257 	ac->ac_f_ex = ac->ac_b_ex;
4258 
4259 	pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4260 	pa->pa_lstart = pa->pa_pstart;
4261 	pa->pa_len = ac->ac_b_ex.fe_len;
4262 	pa->pa_free = pa->pa_len;
4263 	spin_lock_init(&pa->pa_lock);
4264 	INIT_LIST_HEAD(&pa->pa_inode_list);
4265 	INIT_LIST_HEAD(&pa->pa_group_list);
4266 	pa->pa_deleted = 0;
4267 	pa->pa_type = MB_GROUP_PA;
4268 
4269 	mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
4270 		 pa->pa_len, pa->pa_lstart);
4271 	trace_ext4_mb_new_group_pa(ac, pa);
4272 
4273 	ext4_mb_use_group_pa(ac, pa);
4274 	atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
4275 
4276 	grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
4277 	if (!grp)
4278 		return;
4279 	lg = ac->ac_lg;
4280 	BUG_ON(lg == NULL);
4281 
4282 	pa->pa_obj_lock = &lg->lg_prealloc_lock;
4283 	pa->pa_inode = NULL;
4284 
4285 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
4286 
4287 	/*
4288 	 * We will later add the new pa to the right bucket
4289 	 * after updating the pa_free in ext4_mb_release_context
4290 	 */
4291 }
4292 
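/*
 * Create a new preallocation from the best extent we just found, using a
 * locality group PA or an inode PA depending on the allocation hints.
 */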
4293 static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
4294 {
4295 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4296 		ext4_mb_new_group_pa(ac);
4297 	else
4298 		ext4_mb_new_inode_pa(ac);
4299 }
4300 
4301 /*
4302  * finds all unused blocks in on-disk bitmap, frees them in
4303  * in-core bitmap and buddy.
4304  * @pa must be unlinked from inode and group lists, so that
4305  * nobody else can find/use it.
4306  * the caller MUST hold group/inode locks.
4307  * TODO: optimize the case when there are no in-core structures yet
4308  */
4309 static noinline_for_stack int
4310 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
4311 			struct ext4_prealloc_space *pa)
4312 {
4313 	struct super_block *sb = e4b->bd_sb;
4314 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4315 	unsigned int end;
4316 	unsigned int next;
4317 	ext4_group_t group;
4318 	ext4_grpblk_t bit;
4319 	unsigned long long grp_blk_start;
4320 	int free = 0;
4321 
4322 	BUG_ON(pa->pa_deleted == 0);
4323 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4324 	grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
4325 	BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
4326 	end = bit + pa->pa_len;
4327 
4328 	while (bit < end) {
4329 		bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
4330 		if (bit >= end)
4331 			break;
4332 		next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
4333 		mb_debug(sb, "free preallocated %u/%u in group %u\n",
4334 			 (unsigned) ext4_group_first_block_no(sb, group) + bit,
4335 			 (unsigned) next - bit, (unsigned) group);
4336 		free += next - bit;
4337 
4338 		trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
4339 		trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
4340 						    EXT4_C2B(sbi, bit)),
4341 					       next - bit);
4342 		mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
4343 		bit = next + 1;
4344 	}
4345 	if (free != pa->pa_free) {
4346 		ext4_msg(e4b->bd_sb, KERN_CRIT,
4347 			 "pa %p: logic %lu, phys. %lu, len %d",
4348 			 pa, (unsigned long) pa->pa_lstart,
4349 			 (unsigned long) pa->pa_pstart,
4350 			 pa->pa_len);
4351 		ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
4352 					free, pa->pa_free);
4353 		/*
4354 		 * pa is already deleted so we use the value obtained
4355 		 * from the bitmap and continue.
4356 		 */
4357 	}
4358 	atomic_add(free, &sbi->s_mb_discarded);
4359 
4360 	return 0;
4361 }
4362 
4363 static noinline_for_stack int
4364 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
4365 				struct ext4_prealloc_space *pa)
4366 {
4367 	struct super_block *sb = e4b->bd_sb;
4368 	ext4_group_t group;
4369 	ext4_grpblk_t bit;
4370 
4371 	trace_ext4_mb_release_group_pa(sb, pa);
4372 	BUG_ON(pa->pa_deleted == 0);
4373 	ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
4374 	if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) {
4375 		ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu",
4376 			     e4b->bd_group, group, pa->pa_pstart);
4377 		return 0;
4378 	}
4379 	mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
4380 	atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
4381 	trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
4382 
4383 	return 0;
4384 }
4385 
4386 /*
4387  * releases all preallocations in given group
4388  *
4389  * first, we need to decide discard policy:
4390  * - when do we discard
4391  *   1) ENOSPC
4392  * - how many do we discard
4393  *   1) how many requested
4394  */
4395 static noinline_for_stack int
4396 ext4_mb_discard_group_preallocations(struct super_block *sb,
4397 				     ext4_group_t group, int *busy)
4398 {
4399 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
4400 	struct buffer_head *bitmap_bh = NULL;
4401 	struct ext4_prealloc_space *pa, *tmp;
4402 	struct list_head list;
4403 	struct ext4_buddy e4b;
4404 	int err;
4405 	int free = 0;
4406 
4407 	if (!grp)
4408 		return 0;
4409 	mb_debug(sb, "discard preallocation for group %u\n", group);
4410 	if (list_empty(&grp->bb_prealloc_list))
4411 		goto out_dbg;
4412 
4413 	bitmap_bh = ext4_read_block_bitmap(sb, group);
4414 	if (IS_ERR(bitmap_bh)) {
4415 		err = PTR_ERR(bitmap_bh);
4416 		ext4_error_err(sb, -err,
4417 			       "Error %d reading block bitmap for %u",
4418 			       err, group);
4419 		goto out_dbg;
4420 	}
4421 
4422 	err = ext4_mb_load_buddy(sb, group, &e4b);
4423 	if (err) {
4424 		ext4_warning(sb, "Error %d loading buddy information for %u",
4425 			     err, group);
4426 		put_bh(bitmap_bh);
4427 		goto out_dbg;
4428 	}
4429 
4430 	INIT_LIST_HEAD(&list);
4431 	ext4_lock_group(sb, group);
4432 	list_for_each_entry_safe(pa, tmp,
4433 				&grp->bb_prealloc_list, pa_group_list) {
4434 		spin_lock(&pa->pa_lock);
4435 		if (atomic_read(&pa->pa_count)) {
4436 			spin_unlock(&pa->pa_lock);
4437 			*busy = 1;
4438 			continue;
4439 		}
4440 		if (pa->pa_deleted) {
4441 			spin_unlock(&pa->pa_lock);
4442 			continue;
4443 		}
4444 
4445 		/* seems this one can be freed ... */
4446 		ext4_mb_mark_pa_deleted(sb, pa);
4447 
4448 		if (!free)
4449 			this_cpu_inc(discard_pa_seq);
4450 
4451 		/* we can trust pa_free ... */
4452 		free += pa->pa_free;
4453 
4454 		spin_unlock(&pa->pa_lock);
4455 
4456 		list_del(&pa->pa_group_list);
4457 		list_add(&pa->u.pa_tmp_list, &list);
4458 	}
4459 
4460 	/* now free all selected PAs */
4461 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4462 
4463 		/* remove from object (inode or locality group) */
4464 		spin_lock(pa->pa_obj_lock);
4465 		list_del_rcu(&pa->pa_inode_list);
4466 		spin_unlock(pa->pa_obj_lock);
4467 
4468 		if (pa->pa_type == MB_GROUP_PA)
4469 			ext4_mb_release_group_pa(&e4b, pa);
4470 		else
4471 			ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4472 
4473 		list_del(&pa->u.pa_tmp_list);
4474 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4475 	}
4476 
4477 	ext4_unlock_group(sb, group);
4478 	ext4_mb_unload_buddy(&e4b);
4479 	put_bh(bitmap_bh);
4480 out_dbg:
4481 	mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
4482 		 free, group, grp->bb_free);
4483 	return free;
4484 }
4485 
4486 /*
4487  * releases all unused preallocated blocks for the given inode
4488  *
4489  * It's important to discard preallocations under i_data_sem.
4490  * We don't want another block to be served from the prealloc
4491  * space when we are discarding the inode prealloc space.
4492  *
4493  * FIXME!! Make sure it is valid at all the call sites
4494  */
ext4_discard_preallocations(struct inode *inode, unsigned int needed)4495 void ext4_discard_preallocations(struct inode *inode, unsigned int needed)
4496 {
4497 	struct ext4_inode_info *ei = EXT4_I(inode);
4498 	struct super_block *sb = inode->i_sb;
4499 	struct buffer_head *bitmap_bh = NULL;
4500 	struct ext4_prealloc_space *pa, *tmp;
4501 	ext4_group_t group = 0;
4502 	struct list_head list;
4503 	struct ext4_buddy e4b;
4504 	int err;
4505 
4506 	if (!S_ISREG(inode->i_mode)) {
4507 		/*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4508 		return;
4509 	}
4510 
4511 	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
4512 		return;
4513 
4514 	mb_debug(sb, "discard preallocation for inode %lu\n",
4515 		 inode->i_ino);
4516 	trace_ext4_discard_preallocations(inode,
4517 			atomic_read(&ei->i_prealloc_active), needed);
4518 
4519 	INIT_LIST_HEAD(&list);
4520 
4521 	if (needed == 0)
4522 		needed = UINT_MAX;
4523 
4524 repeat:
4525 	/* first, collect all pa's in the inode */
4526 	spin_lock(&ei->i_prealloc_lock);
4527 	while (!list_empty(&ei->i_prealloc_list) && needed) {
4528 		pa = list_entry(ei->i_prealloc_list.prev,
4529 				struct ext4_prealloc_space, pa_inode_list);
4530 		BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4531 		spin_lock(&pa->pa_lock);
4532 		if (atomic_read(&pa->pa_count)) {
4533 			/* this shouldn't happen often - nobody should
4534 			 * use preallocation while we're discarding it */
4535 			spin_unlock(&pa->pa_lock);
4536 			spin_unlock(&ei->i_prealloc_lock);
4537 			ext4_msg(sb, KERN_ERR,
4538 				 "uh-oh! used pa while discarding");
4539 			WARN_ON(1);
4540 			schedule_timeout_uninterruptible(HZ);
4541 			goto repeat;
4542 
4543 		}
4544 		if (pa->pa_deleted == 0) {
4545 			ext4_mb_mark_pa_deleted(sb, pa);
4546 			spin_unlock(&pa->pa_lock);
4547 			list_del_rcu(&pa->pa_inode_list);
4548 			list_add(&pa->u.pa_tmp_list, &list);
4549 			needed--;
4550 			continue;
4551 		}
4552 
4553 		/* someone is deleting pa right now */
4554 		spin_unlock(&pa->pa_lock);
4555 		spin_unlock(&ei->i_prealloc_lock);
4556 
4557 		/* we have to wait here because pa_deleted
4558 		 * doesn't mean the pa is already unlinked from
4559 		 * the list. As we might be called from
4560 		 * ->clear_inode(), the inode will get freed
4561 		 * and a concurrent thread which is unlinking the
4562 		 * pa from the inode's list may access already
4563 		 * freed memory, which is bad-bad-bad */
4564 
4565 		/* XXX: if this happens too often, we can
4566 		 * add a flag to force wait only in case
4567 		 * of ->clear_inode(), but not in case of
4568 		 * regular truncate */
4569 		schedule_timeout_uninterruptible(HZ);
4570 		goto repeat;
4571 	}
4572 	spin_unlock(&ei->i_prealloc_lock);
4573 
4574 	list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
4575 		BUG_ON(pa->pa_type != MB_INODE_PA);
4576 		group = ext4_get_group_number(sb, pa->pa_pstart);
4577 
4578 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4579 					     GFP_NOFS|__GFP_NOFAIL);
4580 		if (err) {
4581 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4582 				       err, group);
4583 			continue;
4584 		}
4585 
4586 		bitmap_bh = ext4_read_block_bitmap(sb, group);
4587 		if (IS_ERR(bitmap_bh)) {
4588 			err = PTR_ERR(bitmap_bh);
4589 			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
4590 				       err, group);
4591 			ext4_mb_unload_buddy(&e4b);
4592 			continue;
4593 		}
4594 
4595 		ext4_lock_group(sb, group);
4596 		list_del(&pa->pa_group_list);
4597 		ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
4598 		ext4_unlock_group(sb, group);
4599 
4600 		ext4_mb_unload_buddy(&e4b);
4601 		put_bh(bitmap_bh);
4602 
4603 		list_del(&pa->u.pa_tmp_list);
4604 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4605 	}
4606 }
4607 
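/*
 * Allocate a zeroed preallocation descriptor for this allocation context
 * and take the initial reference on it.
 */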
4608 static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
4609 {
4610 	struct ext4_prealloc_space *pa;
4611 
4612 	BUG_ON(ext4_pspace_cachep == NULL);
4613 	pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
4614 	if (!pa)
4615 		return -ENOMEM;
4616 	atomic_set(&pa->pa_count, 1);
4617 	ac->ac_pa = pa;
4618 	return 0;
4619 }
4620 
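/*
 * Release the context's (expected to be the last) reference to its
 * preallocation descriptor and free it back to the slab cache.
 */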
4621 static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
4622 {
4623 	struct ext4_prealloc_space *pa = ac->ac_pa;
4624 
4625 	BUG_ON(!pa);
4626 	ac->ac_pa = NULL;
4627 	WARN_ON(!atomic_dec_and_test(&pa->pa_count));
4628 	kmem_cache_free(ext4_pspace_cachep, pa);
4629 }
4630 
4631 #ifdef CONFIG_EXT4_DEBUG
4632 static inline void ext4_mb_show_pa(struct super_block *sb)
4633 {
4634 	ext4_group_t i, ngroups;
4635 
4636 	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4637 		return;
4638 
4639 	ngroups = ext4_get_groups_count(sb);
4640 	mb_debug(sb, "groups: ");
4641 	for (i = 0; i < ngroups; i++) {
4642 		struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4643 		struct ext4_prealloc_space *pa;
4644 		ext4_grpblk_t start;
4645 		struct list_head *cur;
4646 
4647 		if (!grp)
4648 			continue;
4649 		ext4_lock_group(sb, i);
4650 		list_for_each(cur, &grp->bb_prealloc_list) {
4651 			pa = list_entry(cur, struct ext4_prealloc_space,
4652 					pa_group_list);
4653 			spin_lock(&pa->pa_lock);
4654 			ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4655 						     NULL, &start);
4656 			spin_unlock(&pa->pa_lock);
4657 			mb_debug(sb, "PA:%u:%d:%d\n", i, start,
4658 				 pa->pa_len);
4659 		}
4660 		ext4_unlock_group(sb, i);
4661 		mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
4662 			 grp->bb_fragments);
4663 	}
4664 }
4665 
4666 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4667 {
4668 	struct super_block *sb = ac->ac_sb;
4669 
4670 	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
4671 		return;
4672 
4673 	mb_debug(sb, "Can't allocate:"
4674 			" Allocation context details:");
4675 	mb_debug(sb, "status %u flags 0x%x",
4676 			ac->ac_status, ac->ac_flags);
4677 	mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
4678 			"goal %lu/%lu/%lu@%lu, "
4679 			"best %lu/%lu/%lu@%lu cr %d",
4680 			(unsigned long)ac->ac_o_ex.fe_group,
4681 			(unsigned long)ac->ac_o_ex.fe_start,
4682 			(unsigned long)ac->ac_o_ex.fe_len,
4683 			(unsigned long)ac->ac_o_ex.fe_logical,
4684 			(unsigned long)ac->ac_g_ex.fe_group,
4685 			(unsigned long)ac->ac_g_ex.fe_start,
4686 			(unsigned long)ac->ac_g_ex.fe_len,
4687 			(unsigned long)ac->ac_g_ex.fe_logical,
4688 			(unsigned long)ac->ac_b_ex.fe_group,
4689 			(unsigned long)ac->ac_b_ex.fe_start,
4690 			(unsigned long)ac->ac_b_ex.fe_len,
4691 			(unsigned long)ac->ac_b_ex.fe_logical,
4692 			(int)ac->ac_criteria);
4693 	mb_debug(sb, "%u found", ac->ac_found);
4694 	ext4_mb_show_pa(sb);
4695 }
4696 #else
4697 static inline void ext4_mb_show_pa(struct super_block *sb)
4698 {
4699 	return;
4700 }
4701 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4702 {
4703 	ext4_mb_show_pa(ac->ac_sb);
4704 	return;
4705 }
4706 #endif
4707 
4708 /*
4709  * We use locality group preallocation for small files. The size of the
4710  * file is determined by the current size or the resulting size after
4711  * allocation, whichever is larger
4712  *
4713  * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
4714  */
4715 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4716 {
4717 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4718 	int bsbits = ac->ac_sb->s_blocksize_bits;
4719 	loff_t size, isize;
4720 
4721 	if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4722 		return;
4723 
4724 	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4725 		return;
4726 
4727 	size = extent_logical_end(sbi, &ac->ac_o_ex);
4728 	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4729 		>> bsbits;
4730 
4731 	if ((size == isize) && !ext4_fs_is_busy(sbi) &&
4732 	    !inode_is_open_for_write(ac->ac_inode)) {
4733 		ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4734 		return;
4735 	}
4736 
4737 	if (sbi->s_mb_group_prealloc <= 0) {
4738 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4739 		return;
4740 	}
4741 
4742 	/* don't use group allocation for large files */
4743 	size = max(size, isize);
4744 	if (size > sbi->s_mb_stream_request) {
4745 		ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4746 		return;
4747 	}
4748 
4749 	BUG_ON(ac->ac_lg != NULL);
4750 	/*
4751 	 * locality group prealloc space is per cpu. The reason for having
4752 	 * per cpu locality groups is to reduce the contention between block
4753 	 * requests from multiple CPUs.
4754 	 */
4755 	ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
4756 
4757 	/* we're going to use group allocation */
4758 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4759 
4760 	/* serialize all allocations in the group */
4761 	mutex_lock(&ac->ac_lg->lg_mutex);
4762 }
4763 
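/*
 * Fill in the allocation context from the caller's request: clamp the
 * length to the group size, sanitize the goal block, set up the original
 * and goal extents, and choose between file and locality-group policy.
 */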
4764 static noinline_for_stack int
4765 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4766 				struct ext4_allocation_request *ar)
4767 {
4768 	struct super_block *sb = ar->inode->i_sb;
4769 	struct ext4_sb_info *sbi = EXT4_SB(sb);
4770 	struct ext4_super_block *es = sbi->s_es;
4771 	ext4_group_t group;
4772 	unsigned int len;
4773 	ext4_fsblk_t goal;
4774 	ext4_grpblk_t block;
4775 
4776 	/* we can't allocate > group size */
4777 	len = ar->len;
4778 
4779 	/* just a dirty hack to filter too-big requests */
4780 	if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4781 		len = EXT4_CLUSTERS_PER_GROUP(sb);
4782 
4783 	/* start searching from the goal */
4784 	goal = ar->goal;
4785 	if (goal < le32_to_cpu(es->s_first_data_block) ||
4786 			goal >= ext4_blocks_count(es))
4787 		goal = le32_to_cpu(es->s_first_data_block);
4788 	ext4_get_group_no_and_offset(sb, goal, &group, &block);
4789 
4790 	/* set up allocation goals */
4791 	ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
4792 	ac->ac_status = AC_STATUS_CONTINUE;
4793 	ac->ac_sb = sb;
4794 	ac->ac_inode = ar->inode;
4795 	ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
4796 	ac->ac_o_ex.fe_group = group;
4797 	ac->ac_o_ex.fe_start = block;
4798 	ac->ac_o_ex.fe_len = len;
4799 	ac->ac_g_ex = ac->ac_o_ex;
4800 	ac->ac_flags = ar->flags;
4801 
4802 	/* we have to define context: we'll work with a file or
4803 	 * locality group. this is a policy, actually */
4804 	ext4_mb_group_or_file(ac);
4805 
4806 	mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
4807 			"left: %u/%u, right %u/%u to %swritable\n",
4808 			(unsigned) ar->len, (unsigned) ar->logical,
4809 			(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4810 			(unsigned) ar->lleft, (unsigned) ar->pleft,
4811 			(unsigned) ar->lright, (unsigned) ar->pright,
4812 			inode_is_open_for_write(ar->inode) ? "" : "non-");
4813 	return 0;
4814 
4815 }
4816 
4817 static noinline_for_stack void
4818 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4819 					struct ext4_locality_group *lg,
4820 					int order, int total_entries)
4821 {
4822 	ext4_group_t group = 0;
4823 	struct ext4_buddy e4b;
4824 	struct list_head discard_list;
4825 	struct ext4_prealloc_space *pa, *tmp;
4826 
4827 	mb_debug(sb, "discard locality group preallocation\n");
4828 
4829 	INIT_LIST_HEAD(&discard_list);
4830 
4831 	spin_lock(&lg->lg_prealloc_lock);
4832 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4833 				pa_inode_list,
4834 				lockdep_is_held(&lg->lg_prealloc_lock)) {
4835 		spin_lock(&pa->pa_lock);
4836 		if (atomic_read(&pa->pa_count)) {
4837 			/*
4838 			 * This is the pa that we just used
4839 			 * for block allocation. So don't
4840 			 * free it
4841 			 */
4842 			spin_unlock(&pa->pa_lock);
4843 			continue;
4844 		}
4845 		if (pa->pa_deleted) {
4846 			spin_unlock(&pa->pa_lock);
4847 			continue;
4848 		}
4849 		/* only lg prealloc space */
4850 		BUG_ON(pa->pa_type != MB_GROUP_PA);
4851 
4852 		/* seems this one can be freed ... */
4853 		ext4_mb_mark_pa_deleted(sb, pa);
4854 		spin_unlock(&pa->pa_lock);
4855 
4856 		list_del_rcu(&pa->pa_inode_list);
4857 		list_add(&pa->u.pa_tmp_list, &discard_list);
4858 
4859 		total_entries--;
4860 		if (total_entries <= 5) {
4861 			/*
4862 			 * we want to keep only 5 entries,
4863 			 * allowing it to grow to 8. This
4864 			 * makes sure we don't call discard
4865 			 * again soon for this list.
4866 			 */
4867 			break;
4868 		}
4869 	}
4870 	spin_unlock(&lg->lg_prealloc_lock);
4871 
4872 	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4873 		int err;
4874 
4875 		group = ext4_get_group_number(sb, pa->pa_pstart);
4876 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4877 					     GFP_NOFS|__GFP_NOFAIL);
4878 		if (err) {
4879 			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
4880 				       err, group);
4881 			continue;
4882 		}
4883 		ext4_lock_group(sb, group);
4884 		list_del(&pa->pa_group_list);
4885 		ext4_mb_release_group_pa(&e4b, pa);
4886 		ext4_unlock_group(sb, group);
4887 
4888 		ext4_mb_unload_buddy(&e4b);
4889 		list_del(&pa->u.pa_tmp_list);
4890 		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4891 	}
4892 }
4893 
4894 /*
4895  * We have incremented pa_count. So it cannot be freed at this
4896  * point. Also we hold lg_mutex. So no parallel allocation is
4897  * possible from this lg. That means pa_free cannot be updated.
4898  *
4899  * A parallel ext4_mb_discard_group_preallocations is possible,
4900  * which can cause the lg_prealloc_list to be updated.
4901  */
4902 
4903 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4904 {
4905 	int order, added = 0, lg_prealloc_count = 1;
4906 	struct super_block *sb = ac->ac_sb;
4907 	struct ext4_locality_group *lg = ac->ac_lg;
4908 	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4909 
4910 	order = fls(pa->pa_free) - 1;
4911 	if (order > PREALLOC_TB_SIZE - 1)
4912 		/* The max size of hash table is PREALLOC_TB_SIZE */
4913 		order = PREALLOC_TB_SIZE - 1;
4914 	/* Add the prealloc space to lg */
4915 	spin_lock(&lg->lg_prealloc_lock);
4916 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4917 				pa_inode_list,
4918 				lockdep_is_held(&lg->lg_prealloc_lock)) {
4919 		spin_lock(&tmp_pa->pa_lock);
4920 		if (tmp_pa->pa_deleted) {
4921 			spin_unlock(&tmp_pa->pa_lock);
4922 			continue;
4923 		}
4924 		if (!added && pa->pa_free < tmp_pa->pa_free) {
4925 			/* Add to the tail of the previous entry */
4926 			list_add_tail_rcu(&pa->pa_inode_list,
4927 						&tmp_pa->pa_inode_list);
4928 			added = 1;
4929 			/*
4930 			 * we want to count the total
4931 			 * number of entries in the list
4932 			 */
4933 		}
4934 		spin_unlock(&tmp_pa->pa_lock);
4935 		lg_prealloc_count++;
4936 	}
4937 	if (!added)
4938 		list_add_tail_rcu(&pa->pa_inode_list,
4939 					&lg->lg_prealloc_list[order]);
4940 	spin_unlock(&lg->lg_prealloc_lock);
4941 
4942 	/* Now trim the list to be not more than 8 elements */
4943 	if (lg_prealloc_count > 8)
4944 		ext4_mb_discard_lg_preallocations(sb, lg,
4945 						  order, lg_prealloc_count);
4949 }
4950 
4951 /*
4952  * if per-inode prealloc list is too long, trim some PA
4953  */
4954 static void ext4_mb_trim_inode_pa(struct inode *inode)
4955 {
4956 	struct ext4_inode_info *ei = EXT4_I(inode);
4957 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4958 	int count, delta;
4959 
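	/*
	 * Let the per-inode PA list overshoot s_mb_max_inode_prealloc by
	 * roughly a quarter before trimming it back down to the limit, so
	 * that we do not discard preallocations on every allocation.
	 */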
4960 	count = atomic_read(&ei->i_prealloc_active);
4961 	delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1;
4962 	if (count > sbi->s_mb_max_inode_prealloc + delta) {
4963 		count -= sbi->s_mb_max_inode_prealloc;
4964 		ext4_discard_preallocations(inode, count);
4965 	}
4966 }
4967 
4968 /*
4969  * release all resources used in allocation
4970  */
4971 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4972 {
4973 	struct inode *inode = ac->ac_inode;
4974 	struct ext4_inode_info *ei = EXT4_I(inode);
4975 	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4976 	struct ext4_prealloc_space *pa = ac->ac_pa;
4977 	if (pa) {
4978 		if (pa->pa_type == MB_GROUP_PA) {
4979 			/* see comment in ext4_mb_use_group_pa() */
4980 			spin_lock(&pa->pa_lock);
4981 			pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4982 			pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4983 			pa->pa_free -= ac->ac_b_ex.fe_len;
4984 			pa->pa_len -= ac->ac_b_ex.fe_len;
4985 			spin_unlock(&pa->pa_lock);
4986 
4987 			/*
4988 			 * We want to add the pa to the right bucket.
4989 			 * Remove it from the list and while adding
4990 			 * make sure the list to which we are adding
4991 			 * doesn't grow big.
4992 			 */
4993 			if (likely(pa->pa_free)) {
4994 				spin_lock(pa->pa_obj_lock);
4995 				list_del_rcu(&pa->pa_inode_list);
4996 				spin_unlock(pa->pa_obj_lock);
4997 				ext4_mb_add_n_trim(ac);
4998 			}
4999 		}
5000 
5001 		if (pa->pa_type == MB_INODE_PA) {
5002 			/*
5003 			 * treat per-inode prealloc list as a lru list, then try
5004 			 * to trim the least recently used PA.
5005 			 */
5006 			spin_lock(pa->pa_obj_lock);
5007 			list_move(&pa->pa_inode_list, &ei->i_prealloc_list);
5008 			spin_unlock(pa->pa_obj_lock);
5009 		}
5010 
5011 		ext4_mb_put_pa(ac, ac->ac_sb, pa);
5012 	}
5013 	if (ac->ac_bitmap_page)
5014 		put_page(ac->ac_bitmap_page);
5015 	if (ac->ac_buddy_page)
5016 		put_page(ac->ac_buddy_page);
5017 	if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
5018 		mutex_unlock(&ac->ac_lg->lg_mutex);
5019 	ext4_mb_collect_stats(ac);
5020 	ext4_mb_trim_inode_pa(inode);
5021 	return 0;
5022 }
5023 
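/*
 * Discard inode preallocations in all block groups, trying to free at least
 * @needed clusters (or up to a whole group's worth when @needed is zero).
 * If some groups were busy, the scan is retried a few times.
 */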
5024 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
5025 {
5026 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
5027 	int ret;
5028 	int freed = 0, busy = 0;
5029 	int retry = 0;
5030 
5031 	trace_ext4_mb_discard_preallocations(sb, needed);
5032 
5033 	if (needed == 0)
5034 		needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
5035  repeat:
5036 	for (i = 0; i < ngroups && needed > 0; i++) {
5037 		ret = ext4_mb_discard_group_preallocations(sb, i, &busy);
5038 		freed += ret;
5039 		needed -= ret;
5040 		cond_resched();
5041 	}
5042 
5043 	if (needed > 0 && busy && ++retry < 3) {
5044 		busy = 0;
5045 		goto repeat;
5046 	}
5047 
5048 	return freed;
5049 }
5050 
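/*
 * Called after an allocation attempt has failed: discard preallocations and
 * tell the caller whether a retry is worthwhile, i.e. either we actually
 * freed something, or we can still switch to strict checking, or the global
 * discard sequence has advanced since the last attempt.
 */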
5051 static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
5052 			struct ext4_allocation_context *ac, u64 *seq)
5053 {
5054 	int freed;
5055 	u64 seq_retry = 0;
5056 	bool ret = false;
5057 
5058 	freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
5059 	if (freed) {
5060 		ret = true;
5061 		goto out_dbg;
5062 	}
5063 	seq_retry = ext4_get_discard_pa_seq_sum();
5064 	if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
5065 		ac->ac_flags |= EXT4_MB_STRICT_CHECK;
5066 		*seq = seq_retry;
5067 		ret = true;
5068 	}
5069 
5070 out_dbg:
5071 	mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
5072 	return ret;
5073 }
5074 
5075 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5076 				struct ext4_allocation_request *ar, int *errp);
5077 
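/*
 * Illustrative sketch of a typical call into ext4_mb_new_blocks() (the
 * identifiers handle, inode, lblk, goal and err below are placeholders, not
 * taken from this file): callers describe the request in an
 * ext4_allocation_request and get back the first allocated physical block;
 * ar.len is rewritten to the number of blocks actually allocated.
 *
 *	struct ext4_allocation_request ar = {
 *		.inode   = inode,
 *		.logical = lblk,
 *		.goal    = goal,
 *		.len     = 1,
 *		.flags   = EXT4_MB_HINT_DATA,
 *	};
 *	ext4_fsblk_t newblock = ext4_mb_new_blocks(handle, &ar, &err);
 */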
5078 /*
5079  * Main entry point into mballoc to allocate blocks;
5080  * it tries to use preallocation first, then falls back
5081  * to the regular allocation path.
5082  */
5083 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
5084 				struct ext4_allocation_request *ar, int *errp)
5085 {
5086 	struct ext4_allocation_context *ac = NULL;
5087 	struct ext4_sb_info *sbi;
5088 	struct super_block *sb;
5089 	ext4_fsblk_t block = 0;
5090 	unsigned int inquota = 0;
5091 	unsigned int reserv_clstrs = 0;
5092 	int retries = 0;
5093 	u64 seq;
5094 
5095 	might_sleep();
5096 	sb = ar->inode->i_sb;
5097 	sbi = EXT4_SB(sb);
5098 
5099 	trace_ext4_request_blocks(ar);
5100 	if (sbi->s_mount_state & EXT4_FC_REPLAY)
5101 		return ext4_mb_new_blocks_simple(handle, ar, errp);
5102 
5103 	/* Allow use of superuser reservation for quota files */
5104 	if (ext4_is_quota_file(ar->inode))
5105 		ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
5106 
5107 	if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
5108 		/* Without delayed allocation we need to verify
5109 		 * there are enough free blocks to do block allocation
5110 		 * and that the allocation doesn't exceed the quota limits.
5111 		 */
5112 		while (ar->len &&
5113 			ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
5114 
5115 			/* let others free the space */
5116 			cond_resched();
5117 			ar->len = ar->len >> 1;
5118 		}
5119 		if (!ar->len) {
5120 			ext4_mb_show_pa(sb);
5121 			*errp = -ENOSPC;
5122 			return 0;
5123 		}
5124 		reserv_clstrs = ar->len;
5125 		if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
5126 			dquot_alloc_block_nofail(ar->inode,
5127 						 EXT4_C2B(sbi, ar->len));
5128 		} else {
5129 			while (ar->len &&
5130 				dquot_alloc_block(ar->inode,
5131 						  EXT4_C2B(sbi, ar->len))) {
5132 
5133 				ar->flags |= EXT4_MB_HINT_NOPREALLOC;
5134 				ar->len--;
5135 			}
5136 		}
5137 		inquota = ar->len;
5138 		if (ar->len == 0) {
5139 			*errp = -EDQUOT;
5140 			goto out;
5141 		}
5142 	}
5143 
5144 	ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
5145 	if (!ac) {
5146 		ar->len = 0;
5147 		*errp = -ENOMEM;
5148 		goto out;
5149 	}
5150 
5151 	*errp = ext4_mb_initialize_context(ac, ar);
5152 	if (*errp) {
5153 		ar->len = 0;
5154 		goto out;
5155 	}
5156 
5157 	ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
5158 	seq = this_cpu_read(discard_pa_seq);
5159 	if (!ext4_mb_use_preallocated(ac)) {
5160 		ac->ac_op = EXT4_MB_HISTORY_ALLOC;
5161 		ext4_mb_normalize_request(ac, ar);
5162 
5163 		*errp = ext4_mb_pa_alloc(ac);
5164 		if (*errp)
5165 			goto errout;
5166 repeat:
5167 		/* allocate space in core */
5168 		*errp = ext4_mb_regular_allocator(ac);
5169 		/*
5170 		 * pa allocated above is added to grp->bb_prealloc_list only
5171 		 * when we were able to allocate some blocks, i.e. when
5172 		 * ac->ac_status == AC_STATUS_FOUND.
5173 		 * An error from above means ac->ac_status != AC_STATUS_FOUND,
5174 		 * so we have to free the pa here.
5175 		 */
5176 		if (*errp) {
5177 			ext4_mb_pa_free(ac);
5178 			ext4_discard_allocated_blocks(ac);
5179 			goto errout;
5180 		}
5181 		if (ac->ac_status == AC_STATUS_FOUND &&
5182 			ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
5183 			ext4_mb_pa_free(ac);
5184 	}
5185 	if (likely(ac->ac_status == AC_STATUS_FOUND)) {
5186 		*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
5187 		if (*errp) {
5188 			ext4_discard_allocated_blocks(ac);
5189 			goto errout;
5190 		} else {
5191 			block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
5192 			ar->len = ac->ac_b_ex.fe_len;
5193 		}
5194 	} else {
5195 		if (++retries < 3 &&
5196 		    ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
5197 			goto repeat;
5198 		/*
5199 		 * If block allocation fails then the pa allocated above
5200 		 * needs to be freed here as well.
5201 		 */
5202 		ext4_mb_pa_free(ac);
5203 		*errp = -ENOSPC;
5204 	}
5205 
5206 errout:
5207 	if (*errp) {
5208 		ac->ac_b_ex.fe_len = 0;
5209 		ar->len = 0;
5210 		ext4_mb_show_ac(ac);
5211 	}
5212 	ext4_mb_release_context(ac);
5213 out:
5214 	if (ac)
5215 		kmem_cache_free(ext4_ac_cachep, ac);
5216 	if (inquota && ar->len < inquota)
5217 		dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
5218 	if (!ar->len) {
5219 		if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
5220 			/* release all the reserved blocks if not delalloc */
5221 			percpu_counter_sub(&sbi->s_dirtyclusters_counter,
5222 						reserv_clstrs);
5223 	}
5224 
5225 	trace_ext4_allocate_blocks(ar, (unsigned long long)block);
5226 
5227 	return block;
5228 }
5229 
5230 /*
5231  * We can merge two free data extents only if the physical blocks
5232  * are contiguous, AND the extents were freed by the same transaction,
5233  * AND the blocks are associated with the same group.
5234  */
5235 static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
5236 					struct ext4_free_data *entry,
5237 					struct ext4_free_data *new_entry,
5238 					struct rb_root *entry_rb_root)
5239 {
5240 	if ((entry->efd_tid != new_entry->efd_tid) ||
5241 	    (entry->efd_group != new_entry->efd_group))
5242 		return;
5243 	if (entry->efd_start_cluster + entry->efd_count ==
5244 	    new_entry->efd_start_cluster) {
5245 		new_entry->efd_start_cluster = entry->efd_start_cluster;
5246 		new_entry->efd_count += entry->efd_count;
5247 	} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
5248 		   entry->efd_start_cluster) {
5249 		new_entry->efd_count += entry->efd_count;
5250 	} else
5251 		return;
5252 	spin_lock(&sbi->s_md_lock);
5253 	list_del(&entry->efd_list);
5254 	spin_unlock(&sbi->s_md_lock);
5255 	rb_erase(&entry->efd_node, entry_rb_root);
5256 	kmem_cache_free(ext4_free_data_cachep, entry);
5257 }
5258 
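/*
 * Record a freed extent in the group's red-black tree of extents that must
 * not be reused until the committing transaction finishes; neighbouring
 * extents freed in the same transaction and group are merged.
 */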
5259 static noinline_for_stack int
5260 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
5261 		      struct ext4_free_data *new_entry)
5262 {
5263 	ext4_group_t group = e4b->bd_group;
5264 	ext4_grpblk_t cluster;
5265 	ext4_grpblk_t clusters = new_entry->efd_count;
5266 	struct ext4_free_data *entry;
5267 	struct ext4_group_info *db = e4b->bd_info;
5268 	struct super_block *sb = e4b->bd_sb;
5269 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5270 	struct rb_node **n = &db->bb_free_root.rb_node, *node;
5271 	struct rb_node *parent = NULL, *new_node;
5272 
5273 	BUG_ON(!ext4_handle_valid(handle));
5274 	BUG_ON(e4b->bd_bitmap_page == NULL);
5275 	BUG_ON(e4b->bd_buddy_page == NULL);
5276 
5277 	new_node = &new_entry->efd_node;
5278 	cluster = new_entry->efd_start_cluster;
5279 
5280 	if (!*n) {
5281 		/* first free block extent. We need to
5282 		 * protect the buddy cache from being freed,
5283 		 * otherwise we'll refresh it from the
5284 		 * on-disk bitmap and lose not-yet-available
5285 		 * blocks */
5286 		get_page(e4b->bd_buddy_page);
5287 		get_page(e4b->bd_bitmap_page);
5288 	}
5289 	while (*n) {
5290 		parent = *n;
5291 		entry = rb_entry(parent, struct ext4_free_data, efd_node);
5292 		if (cluster < entry->efd_start_cluster)
5293 			n = &(*n)->rb_left;
5294 		else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
5295 			n = &(*n)->rb_right;
5296 		else {
5297 			ext4_grp_locked_error(sb, group, 0,
5298 				ext4_group_first_block_no(sb, group) +
5299 				EXT4_C2B(sbi, cluster),
5300 				"Block already on to-be-freed list");
5301 			kmem_cache_free(ext4_free_data_cachep, new_entry);
5302 			return 0;
5303 		}
5304 	}
5305 
5306 	rb_link_node(new_node, parent, n);
5307 	rb_insert_color(new_node, &db->bb_free_root);
5308 
5309 	/* Now try to see if the extent can be merged to the left and right */
5310 	node = rb_prev(new_node);
5311 	if (node) {
5312 		entry = rb_entry(node, struct ext4_free_data, efd_node);
5313 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5314 					    &(db->bb_free_root));
5315 	}
5316 
5317 	node = rb_next(new_node);
5318 	if (node) {
5319 		entry = rb_entry(node, struct ext4_free_data, efd_node);
5320 		ext4_try_merge_freed_extent(sbi, entry, new_entry,
5321 					    &(db->bb_free_root));
5322 	}
5323 
5324 	spin_lock(&sbi->s_md_lock);
5325 	list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
5326 	sbi->s_mb_free_pending += clusters;
5327 	spin_unlock(&sbi->s_md_lock);
5328 	return 0;
5329 }
5330 
5331 /*
5332  * Simple allocator for Ext4 fast commit replay path. It searches for blocks
5333  * linearly starting at the goal block and also excludes the blocks which
5334  * are going to be in use after fast commit replay.
5335  */
5336 static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
5337 				struct ext4_allocation_request *ar, int *errp)
5338 {
5339 	struct buffer_head *bitmap_bh;
5340 	struct super_block *sb = ar->inode->i_sb;
5341 	ext4_group_t group;
5342 	ext4_grpblk_t blkoff;
5343 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
5344 	ext4_grpblk_t i = 0;
5345 	ext4_fsblk_t goal, block;
5346 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
5347 
5348 	goal = ar->goal;
5349 	if (goal < le32_to_cpu(es->s_first_data_block) ||
5350 			goal >= ext4_blocks_count(es))
5351 		goal = le32_to_cpu(es->s_first_data_block);
5352 
5353 	ar->len = 0;
5354 	ext4_get_group_no_and_offset(sb, goal, &group, &blkoff);
5355 	for (; group < ext4_get_groups_count(sb); group++) {
5356 		bitmap_bh = ext4_read_block_bitmap(sb, group);
5357 		if (IS_ERR(bitmap_bh)) {
5358 			*errp = PTR_ERR(bitmap_bh);
5359 			pr_warn("Failed to read block bitmap\n");
5360 			return 0;
5361 		}
5362 
5363 		ext4_get_group_no_and_offset(sb,
5364 			max(ext4_group_first_block_no(sb, group), goal),
5365 			NULL, &blkoff);
5366 		while (1) {
5367 			i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
5368 						blkoff);
5369 			if (i >= max)
5370 				break;
5371 			if (ext4_fc_replay_check_excluded(sb,
5372 				ext4_group_first_block_no(sb, group) + i)) {
5373 				blkoff = i + 1;
5374 			} else
5375 				break;
5376 		}
5377 		brelse(bitmap_bh);
5378 		if (i < max)
5379 			break;
5380 	}
5381 
5382 	if (group >= ext4_get_groups_count(sb) || i >= max) {
5383 		*errp = -ENOSPC;
5384 		return 0;
5385 	}
5386 
5387 	block = ext4_group_first_block_no(sb, group) + i;
5388 	ext4_mb_mark_bb(sb, block, 1, 1);
5389 	ar->len = 1;
5390 
5391 	return block;
5392 }
5393 
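/*
 * Counterpart of ext4_mb_new_blocks_simple() for the fast commit replay
 * path: clear the bits in the on-disk block bitmap and update the group
 * descriptor directly, without going through the journal.
 */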
5394 static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
5395 					unsigned long count)
5396 {
5397 	struct buffer_head *bitmap_bh;
5398 	struct super_block *sb = inode->i_sb;
5399 	struct ext4_group_desc *gdp;
5400 	struct buffer_head *gdp_bh;
5401 	ext4_group_t group;
5402 	ext4_grpblk_t blkoff;
5403 	int already_freed = 0, err, i;
5404 
5405 	ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
5406 	bitmap_bh = ext4_read_block_bitmap(sb, group);
5407 	if (IS_ERR(bitmap_bh)) {
5408 		err = PTR_ERR(bitmap_bh);
5409 		pr_warn("Failed to read block bitmap\n");
5410 		return;
5411 	}
5412 	gdp = ext4_get_group_desc(sb, group, &gdp_bh);
5413 	if (!gdp)
5414 		goto err_out;
5415 
5416 	for (i = 0; i < count; i++) {
5417 		if (!mb_test_bit(blkoff + i, bitmap_bh->b_data))
5418 			already_freed++;
5419 	}
5420 	mb_clear_bits(bitmap_bh->b_data, blkoff, count);
5421 	err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
5422 	if (err)
5423 		goto err_out;
5424 	ext4_free_group_clusters_set(
5425 		sb, gdp, ext4_free_group_clusters(sb, gdp) +
5426 		count - already_freed);
5427 	ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh);
5428 	ext4_group_desc_csum_set(sb, group, gdp);
5429 	ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
5430 	sync_dirty_buffer(bitmap_bh);
5431 	sync_dirty_buffer(gdp_bh);
err_out:
5432 	brelse(bitmap_bh);
5433 }
5434 
5435 /**
5436  * ext4_mb_clear_bb() -- helper function for freeing blocks.
5437  *			Used by ext4_free_blocks()
5438  * @handle:		handle for this transaction
5439  * @inode:		inode
5441  * @block:		starting physical block to be freed
5442  * @count:		number of blocks to be freed
5443  * @flags:		flags used by ext4_free_blocks
5444  */
5445 static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
5446 			       ext4_fsblk_t block, unsigned long count,
5447 			       int flags)
5448 {
5449 	struct buffer_head *bitmap_bh = NULL;
5450 	struct super_block *sb = inode->i_sb;
5451 	struct ext4_group_desc *gdp;
5452 	struct ext4_group_info *grp;
5453 	unsigned int overflow;
5454 	ext4_grpblk_t bit;
5455 	struct buffer_head *gd_bh;
5456 	ext4_group_t block_group;
5457 	struct ext4_sb_info *sbi;
5458 	struct ext4_buddy e4b;
5459 	unsigned int count_clusters;
5460 	int err = 0;
5461 	int ret;
5462 
5463 	sbi = EXT4_SB(sb);
5464 
5465 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5466 	    !ext4_inode_block_valid(inode, block, count)) {
5467 		ext4_error(sb, "Freeing blocks in system zone - "
5468 			   "Block = %llu, count = %lu", block, count);
5469 		/* err = 0. ext4_std_error should be a no op */
5470 		goto error_return;
5471 	}
5472 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
5473 
5474 do_more:
5475 	overflow = 0;
5476 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5477 
5478 	grp = ext4_get_group_info(sb, block_group);
5479 	if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
5480 		return;
5481 
5482 	/*
5483 	 * Check to see if we are freeing blocks across a group
5484 	 * boundary.
5485 	 */
5486 	if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
5487 		overflow = EXT4_C2B(sbi, bit) + count -
5488 			EXT4_BLOCKS_PER_GROUP(sb);
5489 		count -= overflow;
5490 		/* The range changed so it's no longer validated */
5491 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5492 	}
5493 	count_clusters = EXT4_NUM_B2C(sbi, count);
5494 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5495 	if (IS_ERR(bitmap_bh)) {
5496 		err = PTR_ERR(bitmap_bh);
5497 		bitmap_bh = NULL;
5498 		goto error_return;
5499 	}
5500 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
5501 	if (!gdp) {
5502 		err = -EIO;
5503 		goto error_return;
5504 	}
5505 
5506 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5507 	    !ext4_inode_block_valid(inode, block, count)) {
5508 		ext4_error(sb, "Freeing blocks in system zone - "
5509 			   "Block = %llu, count = %lu", block, count);
5510 		/* err = 0. ext4_std_error should be a no op */
5511 		goto error_return;
5512 	}
5513 
5514 	BUFFER_TRACE(bitmap_bh, "getting write access");
5515 	err = ext4_journal_get_write_access(handle, bitmap_bh);
5516 	if (err)
5517 		goto error_return;
5518 
5519 	/*
5520 	 * We are about to modify some metadata.  Call the journal APIs
5521 	 * to unshare ->b_data if a currently-committing transaction is
5522 	 * using it
5523 	 */
5524 	BUFFER_TRACE(gd_bh, "get_write_access");
5525 	err = ext4_journal_get_write_access(handle, gd_bh);
5526 	if (err)
5527 		goto error_return;
5528 #ifdef AGGRESSIVE_CHECK
5529 	{
5530 		int i;
5531 		for (i = 0; i < count_clusters; i++)
5532 			BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
5533 	}
5534 #endif
5535 	trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
5536 
5537 	/* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
5538 	err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
5539 				     GFP_NOFS|__GFP_NOFAIL);
5540 	if (err)
5541 		goto error_return;
5542 
5543 	/*
5544 	 * We need to make sure we don't reuse the freed block until after the
5545 	 * transaction is committed. We make an exception if the inode is to be
5546 	 * written in writeback mode since writeback mode has weak data
5547 	 * consistency guarantees.
5548 	 */
5549 	if (ext4_handle_valid(handle) &&
5550 	    ((flags & EXT4_FREE_BLOCKS_METADATA) ||
5551 	     !ext4_should_writeback_data(inode))) {
5552 		struct ext4_free_data *new_entry;
5553 		/*
5554 		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
5555 		 * to fail.
5556 		 */
5557 		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
5558 				GFP_NOFS|__GFP_NOFAIL);
5559 		new_entry->efd_start_cluster = bit;
5560 		new_entry->efd_group = block_group;
5561 		new_entry->efd_count = count_clusters;
5562 		new_entry->efd_tid = handle->h_transaction->t_tid;
5563 
5564 		ext4_lock_group(sb, block_group);
5565 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5566 		ext4_mb_free_metadata(handle, &e4b, new_entry);
5567 	} else {
5568 		/* need to update group_info->bb_free and the bitmap
5569 		 * with the group lock held. generate_buddy looks at
5570 		 * them with the group lock held
5571 		 */
5572 		if (test_opt(sb, DISCARD)) {
5573 			err = ext4_issue_discard(sb, block_group, bit,
5574 						 count_clusters, NULL);
5575 			if (err && err != -EOPNOTSUPP)
5576 				ext4_msg(sb, KERN_WARNING, "discard request in"
5577 					 " group:%u block:%d count:%lu failed"
5578 					 " with %d", block_group, bit, count,
5579 					 err);
5580 		} else
5581 			EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
5582 
5583 		ext4_lock_group(sb, block_group);
5584 		mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
5585 		mb_free_blocks(inode, &e4b, bit, count_clusters);
5586 	}
5587 
5588 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
5589 	ext4_free_group_clusters_set(sb, gdp, ret);
5590 	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
5591 	ext4_group_desc_csum_set(sb, block_group, gdp);
5592 	ext4_unlock_group(sb, block_group);
5593 
5594 	if (sbi->s_log_groups_per_flex) {
5595 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5596 		atomic64_add(count_clusters,
5597 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
5598 						  flex_group)->free_clusters);
5599 	}
5600 
5601 	/*
5602 	 * on a bigalloc file system, defer the s_freeclusters_counter
5603 	 * update to the caller (ext4_remove_space and friends) so they
5604 	 * can determine if a cluster freed here should be rereserved
5605 	 */
5606 	if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) {
5607 		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
5608 			dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
5609 		percpu_counter_add(&sbi->s_freeclusters_counter,
5610 				   count_clusters);
5611 	}
5612 
5613 	ext4_mb_unload_buddy(&e4b);
5614 
5615 	/* We dirtied the bitmap block */
5616 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5617 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5618 
5619 	/* And the group descriptor block */
5620 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5621 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5622 	if (!err)
5623 		err = ret;
5624 
5625 	if (overflow && !err) {
5626 		block += count;
5627 		count = overflow;
5628 		put_bh(bitmap_bh);
5629 		/* The range changed so it's no longer validated */
5630 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5631 		goto do_more;
5632 	}
5633 error_return:
5634 	brelse(bitmap_bh);
5635 	ext4_std_error(sb, err);
5636 	return;
5637 }
5638 
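/*
 * Illustrative call (a sketch, not taken from this file): freeing a single
 * metadata block through its buffer head while revoking it in the journal:
 *
 *	ext4_free_blocks(handle, inode, bh, 0, 1,
 *			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 */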
5639 /**
5640  * ext4_free_blocks() -- Free given blocks and update quota
5641  * @handle:		handle for this transaction
5642  * @inode:		inode
5643  * @bh:			optional buffer of the block to be freed
5644  * @block:		starting physical block to be freed
5645  * @count:		number of blocks to be freed
5646  * @flags:		flags used by ext4_free_blocks
5647  */
5648 void ext4_free_blocks(handle_t *handle, struct inode *inode,
5649 		      struct buffer_head *bh, ext4_fsblk_t block,
5650 		      unsigned long count, int flags)
5651 {
5652 	struct super_block *sb = inode->i_sb;
5653 	unsigned int overflow;
5654 	struct ext4_sb_info *sbi;
5655 
5656 	sbi = EXT4_SB(sb);
5657 
5658 	if (bh) {
5659 		if (block)
5660 			BUG_ON(block != bh->b_blocknr);
5661 		else
5662 			block = bh->b_blocknr;
5663 	}
5664 
5665 	if (sbi->s_mount_state & EXT4_FC_REPLAY) {
5666 		ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count));
5667 		return;
5668 	}
5669 
5670 	might_sleep();
5671 
5672 	if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
5673 	    !ext4_inode_block_valid(inode, block, count)) {
5674 		ext4_error(sb, "Freeing blocks not in datazone - "
5675 			   "block = %llu, count = %lu", block, count);
5676 		return;
5677 	}
5678 	flags |= EXT4_FREE_BLOCKS_VALIDATED;
5679 
5680 	ext4_debug("freeing block %llu\n", block);
5681 	trace_ext4_free_blocks(inode, block, count, flags);
5682 
5683 	if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5684 		BUG_ON(count > 1);
5685 
5686 		ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
5687 			    inode, bh, block);
5688 	}
5689 
5690 	/*
5691 	 * If the extent to be freed does not begin on a cluster
5692 	 * boundary, we need to deal with partial clusters at the
5693 	 * beginning and end of the extent.  Normally we will free
5694 	 * blocks at the beginning or the end unless we are explicitly
5695 	 * requested to avoid doing so.
5696 	 */
5697 	overflow = EXT4_PBLK_COFF(sbi, block);
5698 	if (overflow) {
5699 		if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
5700 			overflow = sbi->s_cluster_ratio - overflow;
5701 			block += overflow;
5702 			if (count > overflow)
5703 				count -= overflow;
5704 			else
5705 				return;
5706 		} else {
5707 			block -= overflow;
5708 			count += overflow;
5709 		}
5710 		/* The range changed so it's no longer validated */
5711 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5712 	}
5713 	overflow = EXT4_LBLK_COFF(sbi, count);
5714 	if (overflow) {
5715 		if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
5716 			if (count > overflow)
5717 				count -= overflow;
5718 			else
5719 				return;
5720 		} else
5721 			count += sbi->s_cluster_ratio - overflow;
5722 		/* The range changed so it's no longer validated */
5723 		flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
5724 	}
5725 
5726 	if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
5727 		int i;
5728 		int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
5729 
5730 		for (i = 0; i < count; i++) {
5731 			cond_resched();
5732 			if (is_metadata)
5733 				bh = sb_find_get_block(inode->i_sb, block + i);
5734 			ext4_forget(handle, is_metadata, inode, bh, block + i);
5735 		}
5736 	}
5737 
5738 	ext4_mb_clear_bb(handle, inode, block, count, flags);
5739 	return;
5740 }
5741 
5742 /**
5743  * ext4_group_add_blocks() -- Add given blocks to an existing group
5744  * @handle:			handle to this transaction
5745  * @sb:				super block
5746  * @block:			start physical block to add to the block group
5747  * @count:			number of blocks to free
5748  *
5749  * This marks the blocks as free in the bitmap and buddy.
5750  */
5751 int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
5752 			 ext4_fsblk_t block, unsigned long count)
5753 {
5754 	struct buffer_head *bitmap_bh = NULL;
5755 	struct buffer_head *gd_bh;
5756 	ext4_group_t block_group;
5757 	ext4_grpblk_t bit;
5758 	unsigned int i;
5759 	struct ext4_group_desc *desc;
5760 	struct ext4_sb_info *sbi = EXT4_SB(sb);
5761 	struct ext4_buddy e4b;
5762 	int err = 0, ret, free_clusters_count;
5763 	ext4_grpblk_t clusters_freed;
5764 	ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block);
5765 	ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1);
5766 	unsigned long cluster_count = last_cluster - first_cluster + 1;
5767 
5768 	ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
5769 
5770 	if (count == 0)
5771 		return 0;
5772 
5773 	ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
5774 	/*
5775 	 * Check to see if we are adding blocks across a group
5776 	 * boundary.
5777 	 */
5778 	if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) {
5779 		ext4_warning(sb, "too many blocks added to group %u",
5780 			     block_group);
5781 		err = -EINVAL;
5782 		goto error_return;
5783 	}
5784 
5785 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
5786 	if (IS_ERR(bitmap_bh)) {
5787 		err = PTR_ERR(bitmap_bh);
5788 		bitmap_bh = NULL;
5789 		goto error_return;
5790 	}
5791 
5792 	desc = ext4_get_group_desc(sb, block_group, &gd_bh);
5793 	if (!desc) {
5794 		err = -EIO;
5795 		goto error_return;
5796 	}
5797 
5798 	if (!ext4_sb_block_valid(sb, NULL, block, count)) {
5799 		ext4_error(sb, "Adding blocks in system zones - "
5800 			   "Block = %llu, count = %lu",
5801 			   block, count);
5802 		err = -EINVAL;
5803 		goto error_return;
5804 	}
5805 
5806 	BUFFER_TRACE(bitmap_bh, "getting write access");
5807 	err = ext4_journal_get_write_access(handle, bitmap_bh);
5808 	if (err)
5809 		goto error_return;
5810 
5811 	/*
5812 	 * We are about to modify some metadata.  Call the journal APIs
5813 	 * to unshare ->b_data if a currently-committing transaction is
5814 	 * using it
5815 	 */
5816 	BUFFER_TRACE(gd_bh, "get_write_access");
5817 	err = ext4_journal_get_write_access(handle, gd_bh);
5818 	if (err)
5819 		goto error_return;
5820 
5821 	for (i = 0, clusters_freed = 0; i < cluster_count; i++) {
5822 		BUFFER_TRACE(bitmap_bh, "clear bit");
5823 		if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
5824 			ext4_error(sb, "bit already cleared for block %llu",
5825 				   (ext4_fsblk_t)(block + i));
5826 			BUFFER_TRACE(bitmap_bh, "bit already cleared");
5827 		} else {
5828 			clusters_freed++;
5829 		}
5830 	}
5831 
5832 	err = ext4_mb_load_buddy(sb, block_group, &e4b);
5833 	if (err)
5834 		goto error_return;
5835 
5836 	/*
5837 	 * need to update group_info->bb_free and the bitmap
5838 	 * with the group lock held. generate_buddy looks at
5839 	 * them with the group lock held
5840 	 */
5841 	ext4_lock_group(sb, block_group);
5842 	mb_clear_bits(bitmap_bh->b_data, bit, cluster_count);
5843 	mb_free_blocks(NULL, &e4b, bit, cluster_count);
5844 	free_clusters_count = clusters_freed +
5845 		ext4_free_group_clusters(sb, desc);
5846 	ext4_free_group_clusters_set(sb, desc, free_clusters_count);
5847 	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
5848 	ext4_group_desc_csum_set(sb, block_group, desc);
5849 	ext4_unlock_group(sb, block_group);
5850 	percpu_counter_add(&sbi->s_freeclusters_counter,
5851 			   clusters_freed);
5852 
5853 	if (sbi->s_log_groups_per_flex) {
5854 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
5855 		atomic64_add(clusters_freed,
5856 			     &sbi_array_rcu_deref(sbi, s_flex_groups,
5857 						  flex_group)->free_clusters);
5858 	}
5859 
5860 	ext4_mb_unload_buddy(&e4b);
5861 
5862 	/* We dirtied the bitmap block */
5863 	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5864 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5865 
5866 	/* And the group descriptor block */
5867 	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5868 	ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5869 	if (!err)
5870 		err = ret;
5871 
5872 error_return:
5873 	brelse(bitmap_bh);
5874 	ext4_std_error(sb, err);
5875 	return err;
5876 }
5877 
5878 /**
5879  * ext4_trim_extent -- function to TRIM one single free extent in the group
5880  * @sb:		super block for the file system
5881  * @start:	starting block of the free extent in the alloc. group
5882  * @count:	number of blocks to TRIM
5883  * @e4b:	ext4 buddy for the group
5884  *
5885  * Trim "count" blocks starting at "start" in the "group". To ensure that no
5886  * one will allocate those blocks, mark them as used in the buddy bitmap.
5887  * This must be called under the group lock.
5888  */
5889 static int ext4_trim_extent(struct super_block *sb,
5890 		int start, int count, struct ext4_buddy *e4b)
5891 __releases(bitlock)
5892 __acquires(bitlock)
5893 {
5894 	struct ext4_free_extent ex;
5895 	ext4_group_t group = e4b->bd_group;
5896 	int ret = 0;
5897 
5898 	trace_ext4_trim_extent(sb, group, start, count);
5899 
5900 	assert_spin_locked(ext4_group_lock_ptr(sb, group));
5901 
5902 	ex.fe_start = start;
5903 	ex.fe_group = group;
5904 	ex.fe_len = count;
5905 
5906 	/*
5907 	 * Mark blocks used, so no one can reuse them while
5908 	 * being trimmed.
5909 	 */
5910 	mb_mark_used(e4b, &ex);
5911 	ext4_unlock_group(sb, group);
5912 	ret = ext4_issue_discard(sb, group, start, count, NULL);
5913 	ext4_lock_group(sb, group);
5914 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
5915 	return ret;
5916 }
5917 
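/*
 * Return the index of the last cluster in group @grp; only the last group
 * in the filesystem can contain fewer than EXT4_CLUSTERS_PER_GROUP(sb)
 * clusters.
 */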
5918 static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb,
5919 					   ext4_group_t grp)
5920 {
5921 	unsigned long nr_clusters_in_group;
5922 
5923 	if (grp < (ext4_get_groups_count(sb) - 1))
5924 		nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb);
5925 	else
5926 		nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) -
5927 					ext4_group_first_block_no(sb, grp))
5928 				       >> EXT4_CLUSTER_BITS(sb);
5929 
5930 	return nr_clusters_in_group - 1;
5931 }
5932 
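/*
 * A trim pass should bail out early if the task received a fatal signal or
 * the system is being frozen.
 */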
5933 static bool ext4_trim_interrupted(void)
5934 {
5935 	return fatal_signal_pending(current) || freezing(current);
5936 }
5937 
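/*
 * Scan the group's bitmap between @start and @max and discard every free run
 * of at least @minblocks clusters. Called, and returns, with the group lock
 * held; the lock is dropped around the discard itself and when rescheduling.
 * Returns the number of clusters in the extents it tried to discard.
 */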
5938 static int ext4_try_to_trim_range(struct super_block *sb,
5939 		struct ext4_buddy *e4b, ext4_grpblk_t start,
5940 		ext4_grpblk_t max, ext4_grpblk_t minblocks)
5941 {
5942 	ext4_grpblk_t next, count, free_count, last, origin_start;
5943 	bool set_trimmed = false;
5944 	void *bitmap;
5945 
5946 	last = ext4_last_grp_cluster(sb, e4b->bd_group);
5947 	bitmap = e4b->bd_bitmap;
5948 	if (start == 0 && max >= last)
5949 		set_trimmed = true;
5950 	origin_start = start;
5951 	start = max(e4b->bd_info->bb_first_free, start);
5952 	count = 0;
5953 	free_count = 0;
5954 
5955 	while (start <= max) {
5956 		start = mb_find_next_zero_bit(bitmap, max + 1, start);
5957 		if (start > max)
5958 			break;
5959 
5960 		next = mb_find_next_bit(bitmap, last + 1, start);
5961 		if (origin_start == 0 && next >= last)
5962 			set_trimmed = true;
5963 
5964 		if ((next - start) >= minblocks) {
5965 			int ret = ext4_trim_extent(sb, start, next - start, e4b);
5966 
5967 			if (ret && ret != -EOPNOTSUPP)
5968 				return count;
5969 			count += next - start;
5970 		}
5971 		free_count += next - start;
5972 		start = next + 1;
5973 
5974 		if (ext4_trim_interrupted())
5975 			return count;
5976 
5977 		if (need_resched()) {
5978 			ext4_unlock_group(sb, e4b->bd_group);
5979 			cond_resched();
5980 			ext4_lock_group(sb, e4b->bd_group);
5981 		}
5982 
5983 		if ((e4b->bd_info->bb_free - free_count) < minblocks)
5984 			break;
5985 	}
5986 
5987 	if (set_trimmed)
5988 		EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info);
5989 
5990 	return count;
5991 }
5992 
5993 /**
5994  * ext4_trim_all_free -- function to trim all free space in alloc. group
5995  * @sb:			super block for file system
5996  * @group:		group to be trimmed
5997  * @start:		first group block to examine
5998  * @max:		last group block to examine
5999  * @minblocks:		minimum extent block count
6000  *
6001  * ext4_trim_all_free walks through the group's block bitmap searching for
6002  * free extents. When a free extent is found, it is marked as used in the
6003  * group buddy bitmap, ext4_trim_extent is called to issue a TRIM command on
6004  * the extent, and the extent is then freed again in the group buddy bitmap.
6005  * This is repeated until the whole group has been scanned.
6010  */
6011 static ext4_grpblk_t
6012 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
6013 		   ext4_grpblk_t start, ext4_grpblk_t max,
6014 		   ext4_grpblk_t minblocks)
6015 {
6016 	struct ext4_buddy e4b;
6017 	int ret;
6018 
6019 	trace_ext4_trim_all_free(sb, group, start, max);
6020 
6021 	ret = ext4_mb_load_buddy(sb, group, &e4b);
6022 	if (ret) {
6023 		ext4_warning(sb, "Error %d loading buddy information for %u",
6024 			     ret, group);
6025 		return ret;
6026 	}
6027 
6028 	ext4_lock_group(sb, group);
6029 
6030 	if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) ||
6031 	    minblocks < EXT4_SB(sb)->s_last_trim_minblks)
6032 		ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks);
6033 	else
6034 		ret = 0;
6035 
6036 	ext4_unlock_group(sb, group);
6037 	ext4_mb_unload_buddy(&e4b);
6038 
6039 	ext4_debug("trimmed %d blocks in the group %d\n",
6040 		ret, group);
6041 
6042 	return ret;
6043 }
6044 
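/*
 * Illustrative sketch of how the FITRIM ioctl ends up here (the field values
 * are examples only): the caller fills a struct fstrim_range and passes it
 * down, e.g.
 *
 *	struct fstrim_range range = {
 *		.start  = 0,
 *		.len    = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *	ret = ext4_trim_fs(sb, &range);
 *
 * On return, range.len is rewritten to the number of bytes actually trimmed.
 */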
6045 /**
6046  * ext4_trim_fs() -- trim ioctl handle function
6047  * @sb:			superblock for filesystem
6048  * @range:		fstrim_range structure
6049  *
6050  * start:	first byte to trim
6051  * len:		number of bytes to trim from start
6052  * minlen:	minimum extent length in bytes
6053  * ext4_trim_fs goes through all allocation groups containing bytes from
6054  * start to start+len. For each such group the ext4_trim_all_free function
6055  * is invoked to trim all free space.
6056  */
6057 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
6058 {
6059 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
6060 	struct ext4_group_info *grp;
6061 	ext4_group_t group, first_group, last_group;
6062 	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
6063 	uint64_t start, end, minlen, trimmed = 0;
6064 	ext4_fsblk_t first_data_blk =
6065 			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
6066 	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
6067 	int ret = 0;
6068 
6069 	start = range->start >> sb->s_blocksize_bits;
6070 	end = start + (range->len >> sb->s_blocksize_bits) - 1;
6071 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6072 			      range->minlen >> sb->s_blocksize_bits);
6073 
6074 	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
6075 	    start >= max_blks ||
6076 	    range->len < sb->s_blocksize)
6077 		return -EINVAL;
6078 	/* No point to try to trim less than discard granularity */
6079 	if (range->minlen < q->limits.discard_granularity) {
6080 		minlen = EXT4_NUM_B2C(EXT4_SB(sb),
6081 			q->limits.discard_granularity >> sb->s_blocksize_bits);
6082 		if (minlen > EXT4_CLUSTERS_PER_GROUP(sb))
6083 			goto out;
6084 	}
6085 	if (end >= max_blks - 1)
6086 		end = max_blks - 1;
6087 	if (end <= first_data_blk)
6088 		goto out;
6089 	if (start < first_data_blk)
6090 		start = first_data_blk;
6091 
6092 	/* Determine first and last group to examine based on start and end */
6093 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
6094 				     &first_group, &first_cluster);
6095 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
6096 				     &last_group, &last_cluster);
6097 
6098 	/* end now represents the last cluster to discard in this group */
6099 	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6100 
6101 	for (group = first_group; group <= last_group; group++) {
6102 		if (ext4_trim_interrupted())
6103 			break;
6104 		grp = ext4_get_group_info(sb, group);
6105 		if (!grp)
6106 			continue;
6107 		/* We only do this if the grp has never been initialized */
6108 		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
6109 			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
6110 			if (ret)
6111 				break;
6112 		}
6113 
6114 		/*
6115 		 * For all the groups except the last one, last cluster will
6116 		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
6117 		 * change it for the last group, note that last_cluster is
6118 		 * already computed earlier by ext4_get_group_no_and_offset()
6119 		 */
6120 		if (group == last_group)
6121 			end = last_cluster;
6122 		if (grp->bb_free >= minlen) {
6123 			cnt = ext4_trim_all_free(sb, group, first_cluster,
6124 						 end, minlen);
6125 			if (cnt < 0) {
6126 				ret = cnt;
6127 				break;
6128 			}
6129 			trimmed += cnt;
6130 		}
6131 
6132 		/*
6133 		 * For every group except the first one, we are sure
6134 		 * that the first cluster to discard will be cluster #0.
6135 		 */
6136 		first_cluster = 0;
6137 	}
6138 
6139 	if (!ret)
6140 		EXT4_SB(sb)->s_last_trim_minblks = minlen;
6141 
6142 out:
6143 	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
6144 	return ret;
6145 }
6146 
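/*
 * Illustrative formatter for ext4_mballoc_query_range() (a sketch, not part
 * of this file): add up the lengths of all free extents reported for a group.
 *
 *	static int count_free_fn(struct super_block *sb, ext4_group_t group,
 *				 ext4_grpblk_t start, ext4_grpblk_t len,
 *				 void *priv)
 *	{
 *		*(ext4_grpblk_t *)priv += len;
 *		return 0;
 *	}
 */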
6147 /* Iterate all the free extents in the group. */
6148 int
6149 ext4_mballoc_query_range(
6150 	struct super_block		*sb,
6151 	ext4_group_t			group,
6152 	ext4_grpblk_t			start,
6153 	ext4_grpblk_t			end,
6154 	ext4_mballoc_query_range_fn	formatter,
6155 	void				*priv)
6156 {
6157 	void				*bitmap;
6158 	ext4_grpblk_t			next;
6159 	struct ext4_buddy		e4b;
6160 	int				error;
6161 
6162 	error = ext4_mb_load_buddy(sb, group, &e4b);
6163 	if (error)
6164 		return error;
6165 	bitmap = e4b.bd_bitmap;
6166 
6167 	ext4_lock_group(sb, group);
6168 
6169 	start = max(e4b.bd_info->bb_first_free, start);
6170 	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
6171 		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
6172 
6173 	while (start <= end) {
6174 		start = mb_find_next_zero_bit(bitmap, end + 1, start);
6175 		if (start > end)
6176 			break;
6177 		next = mb_find_next_bit(bitmap, end + 1, start);
6178 
6179 		ext4_unlock_group(sb, group);
6180 		error = formatter(sb, group, start, next - start, priv);
6181 		if (error)
6182 			goto out_unload;
6183 		ext4_lock_group(sb, group);
6184 
6185 		start = next + 1;
6186 	}
6187 
6188 	ext4_unlock_group(sb, group);
6189 out_unload:
6190 	ext4_mb_unload_buddy(&e4b);
6191 
6192 	return error;
6193 }
6194