// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)

/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

/* When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to overwrite to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

/* for managing resync I/O pages */
struct resync_pages {
	void *raid_bio;			/* back-pointer to the owning r1bio/r10bio */
	struct page *pages[RESYNC_PAGES];
};

/* mempool 'free' callback for the r1bio/r10bio pools */
static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}

static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}

static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}

static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;
	return rp->pages[idx];
}

/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}

/* generally called after bio_reset() for resetting bvec */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
				      int size)
{
	int idx = 0;

	/* initialize bvec table again */
	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		/*
		 * won't fail because the vec table is big
		 * enough to hold all these pages
		 */
		bio_add_page(bio, page, len, 0);
		size -= len;
	} while (idx++ < RESYNC_PAGES && size > 0);
}
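
/*
 * Usage sketch (not part of the original file): the intended pairing of
 * get_resync_pages() with md_bio_reset_resync_pages() when a resync bio is
 * recycled.  The helper name example_recycle_resync_bio() is hypothetical
 * and only illustrates the calling pattern; it assumes the single-argument
 * bio_reset() of this kernel generation.
 */
static inline void example_recycle_resync_bio(struct bio *bio, int size)
{
	/* the descriptor was stored in ->bi_private when the bio was first built */
	struct resync_pages *rp = get_resync_pages(bio);

	/* bio_reset() wipes bi_private and the bvec table ... */
	bio_reset(bio);

	/* ... so restore the back-pointer and repopulate the bvecs */
	bio->bi_private = rp;
	md_bio_reset_resync_pages(bio, rp, size);
}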