Lines Matching defs:stripe
25 * When we discover that we will need to write to any block in a stripe
29 * we plug the array and queue the stripe for later.
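The two comment lines above (25-29) describe the deferral policy: a write that does not yet cover a full stripe is parked until an unplug, giving later bios a chance to complete the stripe. A hedged, standalone sketch of that idea; the names (queue_or_handle, delayed, plugged) are illustrative assumptions, not the kernel's:

#include <stdbool.h>
#include <stddef.h>

struct stripe { struct stripe *next; bool full_write; };
struct conf   { struct stripe *delayed; bool plugged; };

static void queue_or_handle(struct conf *c, struct stripe *sh,
                            void (*handle)(struct stripe *))
{
        if (c->plugged && !sh->full_write) {
                sh->next = c->delayed;          /* defer: queue for later */
                c->delayed = sh;
                return;
        }
        handle(sh);                             /* full stripe: write now */
}

static void unplug(struct conf *c, void (*handle)(struct stripe *))
{
        c->plugged = false;
        while (c->delayed) {                    /* release deferred stripes */
                struct stripe *sh = c->delayed;
                c->delayed = sh->next;
                handle(sh);
        }
}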
118 /* Find first data disk in a raid6 stripe */
235 * In the following cases, the stripe cannot be released to cached
236 * lists. Therefore, we make the stripe write out and set
239 * 2. when resync is requested for the stripe.
287 /* full stripe */
425 pr_debug("remove_hash(), stripe %llu\n",
435 pr_debug("insert_hash(), stripe %llu\n",
441 /* find an idle stripe, make sure it is unhashed, and return it. */
572 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
585 pr_debug("init_stripe called, stripe %llu\n",
644 * Slow path. The reference count is zero which means the stripe must
645 * be on a list (sh->lru). Must remove the stripe from the list that
777 * bitmap to track stripe sectors that have been added to stripes
789 * and there is an inactive stripe available.
820 * hold a reference to a stripe and raid5_quiesce()
901 /* Only a freshly created full-stripe normal-write stripe can be added to a batch list */
922 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
966 * We must assign batch_head of this stripe within the
968 * stripe could clear BATCH_READY bit of this stripe and
969 * this stripe->batch_head doesn't get assigned, which
970 * could confuse clear_batch_ready for this stripe
976 * can still add the stripe to batch list
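Lines 966-976 state an ordering invariant: batch_head must be assigned inside the same critical section that tests BATCH_READY, or a concurrent clear of the bit could observe the stripe half-linked. A hedged, standalone illustration of the same pattern (pthreads; the field and function names are assumptions, not the kernel's):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct stripe {
        pthread_mutex_t lock;
        bool batch_ready;               /* stands in for BATCH_READY */
        struct stripe *batch_head;      /* NULL while not in a batch */
};

/* Test the ready bit and link the stripe in one critical section, so a
 * concurrent "clear ready" cannot run between the check and the link. */
static bool add_to_batch(struct stripe *sh, struct stripe *head)
{
        bool linked = false;
        pthread_mutex_lock(&sh->lock);
        if (sh->batch_ready && !sh->batch_head) {
                sh->batch_head = head;
                linked = true;
        }
        pthread_mutex_unlock(&sh->lock);
        return linked;
}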
1024 /* We are in a reshape, and this is a new-generation stripe,
1454 pr_debug("%s: stripe %llu\n", __func__,
1494 pr_debug("%s: stripe %llu\n", __func__,
1537 pr_debug("%s: stripe %llu\n", __func__,
1590 pr_debug("%s: stripe %llu block: %d\n",
1620 * Populates srcs in proper layout order for the stripe and returns the
1695 pr_debug("%s: stripe %llu block: %d\n",
1751 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1782 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1858 pr_debug("%s: stripe %llu\n", __func__,
1884 pr_debug("%s: stripe %llu\n", __func__,
1920 pr_debug("%s: stripe %llu\n", __func__,
1941 pr_debug("%s: stripe %llu\n", __func__,
2014 pr_debug("%s: stripe %llu\n", __func__,
2069 pr_debug("%s: stripe %llu\n", __func__,
2163 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
2217 pr_debug("%s: stripe %llu\n", __func__,
2239 pr_debug("%s: stripe %llu\n", __func__,
2273 pr_debug("%s: stripe %llu checkp: %d\n", __func__,
2419 /* we just created an active stripe so... */
2535 * New slots in each stripe get 'page' set to a new page.
2543 * no IO will be possible. Old stripe heads are freed once their
3004 sector_t stripe, stripe2;
3027 * Compute the stripe number
3029 stripe = chunk_number;
3030 *dd_idx = sector_div(stripe, data_disks);
3031 stripe2 = stripe;
3128 /* Same as left_asymmetric, but first stripe is
3198 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
3212 sector_t stripe;
3220 stripe = new_sector;
3308 chunk_number = stripe * data_disks + i;
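Lines 3027-3031, 3198 and 3308 together give the forward and inverse sector mapping. A standalone sketch of just the arithmetic (sector_div replaced by plain division; the layout-dependent parity placement handled by the surrounding code is deliberately omitted; data_disks = disks - max_degraded, as at line 3653 below):

#include <stdint.h>
#include <stdio.h>

/* Forward map: logical array sector -> (stripe, dd_idx, on-disk sector). */
static uint64_t compute_sector(uint64_t r_sector, unsigned sectors_per_chunk,
                               unsigned data_disks, unsigned *dd_idx,
                               uint64_t *stripe_out)
{
        uint64_t chunk_number = r_sector / sectors_per_chunk;
        unsigned chunk_offset = r_sector % sectors_per_chunk;
        uint64_t stripe = chunk_number / data_disks;

        *dd_idx = chunk_number % data_disks;    /* which data disk */
        *stripe_out = stripe;
        return stripe * sectors_per_chunk + chunk_offset;
}

/* Inverse map: chunk_number = stripe * data_disks + i, as at line 3308. */
static uint64_t compute_blocknr(uint64_t stripe, unsigned i,
                                unsigned sectors_per_chunk,
                                unsigned data_disks, unsigned chunk_offset)
{
        uint64_t chunk_number = stripe * data_disks + i;
        return chunk_number * sectors_per_chunk + chunk_offset;
}

int main(void)
{
        unsigned dd_idx;
        uint64_t stripe;
        /* 4 data disks, 128-sector (64 KiB) chunks, logical sector 1000:
         * chunk 7, offset 104 -> stripe 1, dd_idx 3, on-disk sector 232 */
        uint64_t s = compute_sector(1000, 128, 4, &dd_idx, &stripe);
        printf("stripe %llu dd_idx %u new_sector %llu round-trip %llu\n",
               (unsigned long long)stripe, dd_idx, (unsigned long long)s,
               (unsigned long long)compute_blocknr(stripe, dd_idx, 128, 4,
                                                   1000 % 128));
        return 0;
}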
3324 * schedule_reconstruction() to delay towrite to some dev of a stripe.
3329 * 1. degraded stripe has a non-overwrite to the missing dev, AND this
3330 * stripe has data in journal (for other devices).
3343 * stripe, we need to reserve (conf->raid_disk + 1) pages per stripe
3345 * operation, we only need (conf->max_degraded + 1) pages per stripe.
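As a concrete instance of the page counts above (a hedged example; the 8-device figure is assumed, not from the source): on an 8-device RAID6 array, conf->max_degraded is 2, so a full reconstruct-write reserves 8 + 1 = 9 pages per stripe while a read-modify-write needs only 2 + 1 = 3, which is why delaying toward the cheaper operation saves stripe-cache memory.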
3349 * Note: To make sure the stripe makes progress, we only delay
3356 * based on data in stripe cache. The array is read-only to upper
3411 * stripe cache
3482 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
3493 pr_debug("checking bi b#%llu to stripe s#%llu\n",
3517 * stripe are allowed because for a single stripe_head we can
3590 pr_debug("added bi b#%llu to stripe s#%llu, disk %d, logical %llu\n",
3603 * STRIPE_BIT_DELAY. This is important as once a stripe
3621 * Each stripe/dev can have one or more bios attached.
3643 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3649 int chunk_offset = sector_div(stripe, sectors_per_chunk);
3653 stripe * (disks - conf->max_degraded)
3975 pr_debug("Computing stripe %llu block %d\n",
4004 pr_debug("Computing stripe %llu blocks %d,%d\n",
4038 * is already in flight, or if the stripe contents are in the
4045 * For a degraded stripe with data in journal, do not handle
4046 * read requests yet; instead, flush the stripe to raid
4142 * SCSI discard will change some bio fields and the stripe has
4143 * no updated data, so remove it from hash list and the stripe
4377 /* check that a write has not made the stripe insync */
4398 * STRIPE_INSYNC not set and let the stripe be handled again
4506 /* check that a write has not made the stripe insync */
4625 /* We have read all the blocks in this stripe and now we need to
4626 * copy some of them into a target stripe for expand.
4643 /* so far only the early blocks of this stripe
4681 * handle_stripe - do things to a stripe.
4683 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
4890 * Return '1' if this is a member of a batch, or '0' if it is a lone stripe or
4905 * this stripe could be added to a batch list before we check
4948 "stripe state: %lx\n", sh->state);
4951 "head stripe state: %lx\n", head_sh->state);
5000 * handle_stripe should not continue handling a batched stripe; only
5001 * the head of the batch list or a lone stripe can continue. Otherwise we
5003 * is set for the batched stripe.
5036 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
5078 * the stripe if there is data that needs writing to the raid disks
5170 * When the stripe finishes a full journal write cycle (write to journal
5199 * stripe under reclaim: !caching && injournal
5204 /* stripe under reclaim: !caching && injournal */
5215 /* maybe we need to check and possibly fix the parity for this stripe
5606 /* __get_priority_stripe - get the next stripe to process
5608 * Full stripe writes are allowed to pass preread active stripes up until
5612 * stripe with in flight i/o. The bypass_count will be reset when the
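Lines 5606-5612 describe the scheduling policy. A standalone sketch of one plausible reading (the reset rule here is an assumption; the kernel's bookkeeping differs in detail):

#include <stdbool.h>

struct sched {
        int bypass_count;
        int bypass_threshold;   /* tunable, as in the comment above */
};

/* Let a full-stripe write jump ahead of preread-active stripes until
 * the threshold is reached. */
static bool full_stripe_may_bypass(struct sched *s)
{
        if (s->bypass_count < s->bypass_threshold) {
                s->bypass_count++;
                return true;
        }
        return false;
}

/* Assumed reset point: once the held (preread-active) stripes are
 * serviced, bypassing may begin again. */
static void serviced_hold_list(struct sched *s)
{
        s->bypass_count = 0;
}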
5732 * STRIPE_ON_UNPLUG_LIST clear but the stripe
5996 * to the stripe that we think it is, we will have
6025 /* cannot get stripe, just give up */
6033 * Expansion moved on while waiting for a stripe.
6038 * won't proceed until we finish with the stripe.
6071 /* we only need a flush for one stripe */
6203 * Let's start with the stripe with the lowest chunk offset in the first
6227 * otherwise the batch_last stripe head could prevent
6270 * into the destination stripe and release that stripe.
6428 /* If any of this stripe is beyond the end of the old
6459 /* Ok, those stripes are ready. We can start scheduling
6567 * stripe, and as resync_max will always be on a chunk boundary,
6599 /* make sure we don't swamp the stripe cache if someone else
6634 * So we do one stripe head at a time and record in
6658 /* already done this stripe */
6664 /* failed to get a stripe - must wait */
7699 pr_info("md/raid:%s: force stripe size %d for reshape\n",
7713 * Losing a stripe head costs more than the time to refill it,
7827 * Difficulties arise if the stripe we would write to
7828 * next is at or after the stripe we would read from next.
7856 /* reshape_position must be on a new-stripe boundary, and one
7867 pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
7872 /* here_new is the stripe we will write to */
7875 /* here_old is the first stripe that we might need to read
7898 /* Reading from the same stripe as writing to - bad */
8059 int stripe = data_disks *
8067 * We can only discard a whole stripe. It doesn't make sense to
8070 stripe = stripe * PAGE_SIZE;
8071 stripe = roundup_pow_of_two(stripe);
8072 mddev->queue->limits.discard_granularity = stripe;
8085 * could be lost. Consider a scenario: discard a stripe
8086 * (the stripe could be inconsistent if
8088 * stripe (the stripe could be inconsistent again
8090 * parity); the disk is broken; the stripe data of this
8099 mddev->queue->limits.max_discard_sectors < (stripe >> 9) ||
8100 mddev->queue->limits.discard_granularity < stripe)
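The arithmetic at lines 8059-8072 and the checks at 8099-8100, redone standalone (PAGE_SIZE and the device limits are stand-ins; chunk_pages is an assumed parameter for the pages-per-chunk factor truncated at line 8059):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Round v up to the next power of two (v > 0). */
static uint64_t roundup_pow_of_two64(uint64_t v)
{
        uint64_t p = 1;
        while (p < v)
                p <<= 1;
        return p;
}

/* Full-stripe size in bytes, rounded up so the granularity is a power
 * of two: e.g. 4 data disks * 128 pages * 4 KiB = 2 MiB. */
static uint64_t discard_granularity(unsigned data_disks,
                                    unsigned chunk_pages)
{
        uint64_t stripe = (uint64_t)data_disks * chunk_pages * PAGE_SIZE;
        return roundup_pow_of_two64(stripe);
}

/* Mirror of the checks at lines 8099-8100: the device must accept at
 * least one full stripe (stripe >> 9 converts bytes to 512-byte
 * sectors) at no coarser than stripe granularity. */
static bool discard_safe(uint64_t stripe, uint64_t max_discard_sectors,
                         uint64_t dev_granularity)
{
        return max_discard_sectors >= (stripe >> 9) &&
               dev_granularity >= stripe;
}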
8104 * Requests require having a bitmap for each stripe.
8435 * We need a minimum of one full stripe, and for sensible progress