Lines Matching refs:stripe
25 * When we discover that we will need to write to any block in a stripe
29 * we plug the array and queue the stripe for later.
111 /* Find first data disk in a raid6 stripe */
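The comment at 111 heads a small helper. As an illustrative sketch only (mirroring the shape of raid5's raid6_d0(), with the DDF special case omitted and a made-up name), the first data disk simply follows Q, wrapping to disk 0 when Q sits on the last device:

    /* Hypothetical helper: in rotating-parity RAID-6 layouts, data
     * starts on the disk right after Q; wrap when Q is the last disk. */
    static int first_data_disk(int qd_idx, int disks)
    {
            return (qd_idx == disks - 1) ? 0 : qd_idx + 1;
    }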
226 * In the following cases, the stripe cannot be released to cached
227 * lists. Therefore, we write the stripe out and set
230 * 2. when resync is requested for the stripe.
278 /* full stripe */
415 pr_debug("remove_hash(), stripe %llu\n",
425 pr_debug("insert_hash(), stripe %llu\n",
431 /* find an idle stripe, make sure it is unhashed, and return it. */
562 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
575 pr_debug("init_stripe called, stripe %llu\n",
803 /* Only a fresh full-stripe normal write stripe can be added to a batch list */
824 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
884 * We must assign batch_head of this stripe within the
886 * stripe could clear BATCH_READY bit of this stripe and
887 * this stripe->batch_head doesn't get assigned, which
888 * could confuse clear_batch_ready for this stripe
894 * can still add the stripe to batch list
942 /* We are in a reshape, and this is a new-generation stripe,
1376 pr_debug("%s: stripe %llu\n", __func__,
1416 pr_debug("%s: stripe %llu\n", __func__,
1459 pr_debug("%s: stripe %llu\n", __func__,
1512 pr_debug("%s: stripe %llu block: %d\n",
1542 * Populates srcs in proper layout order for the stripe and returns the
1617 pr_debug("%s: stripe %llu block: %d\n",
1673 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
1704 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
1780 pr_debug("%s: stripe %llu\n", __func__,
1806 pr_debug("%s: stripe %llu\n", __func__,
1842 pr_debug("%s: stripe %llu\n", __func__,
1863 pr_debug("%s: stripe %llu\n", __func__,
1936 pr_debug("%s: stripe %llu\n", __func__,
1991 pr_debug("%s: stripe %llu\n", __func__,
2085 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
2139 pr_debug("%s: stripe %llu\n", __func__,
2161 pr_debug("%s: stripe %llu\n", __func__,
2195 pr_debug("%s: stripe %llu checkp: %d\n", __func__,
2348 /* we just created an active stripe so... */
2464 * New slots in each stripe get 'page' set to a new page.
2472 * no IO will be possible. Old stripe heads are freed once their
2916 sector_t stripe, stripe2;
2939 * Compute the stripe number
2941 stripe = chunk_number;
2942 *dd_idx = sector_div(stripe, data_disks);
2943 stripe2 = stripe;
3040 /* Same as left_asymmetric, but first stripe is
3110 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
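The arithmetic walked through at 2939-3110 is the forward mapping from a logical array sector to a device sector. A self-contained sketch, with illustrative names, parity rotation (pd_idx/qd_idx) omitted, and plain / and % standing in for sector_div():

    #include <stdint.h>

    /* Map a logical sector to (stripe, data-disk index, device sector).
     * map_sector() and its signature are assumptions, not the kernel's. */
    static uint64_t map_sector(uint64_t logical, unsigned sectors_per_chunk,
                               unsigned data_disks, unsigned *dd_idx)
    {
            uint64_t chunk_number = logical / sectors_per_chunk;
            unsigned chunk_offset = (unsigned)(logical % sectors_per_chunk);
            uint64_t stripe = chunk_number / data_disks;   /* cf. 2941 */

            *dd_idx = (unsigned)(chunk_number % data_disks); /* cf. 2942 */
            return stripe * sectors_per_chunk + chunk_offset; /* cf. 3110 */
    }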
3124 sector_t stripe;
3132 stripe = new_sector;
3220 chunk_number = stripe * data_disks + i;
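Line 3220 is the inverse step, recovering the logical position from a device-relative one. Continuing the sketch above, same caveats:

    /* Rebuild the logical chunk number from the stripe number and the
     * data-disk index i, then add the offset within the chunk. */
    static uint64_t unmap_sector(uint64_t stripe, unsigned i,
                                 unsigned data_disks,
                                 unsigned sectors_per_chunk,
                                 unsigned chunk_offset)
    {
            uint64_t chunk_number = stripe * data_disks + i;  /* cf. 3220 */
            return chunk_number * sectors_per_chunk + chunk_offset;
    }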
3236 * schedule_reconstruction() to delay towrite to some dev of a stripe.
3241 * 1. a degraded stripe has a non-overwrite write to the missing dev, AND this
3242 * stripe has data in journal (for other devices).
3255 * stripe, we need to reserve (conf->raid_disks + 1) pages per stripe
3257 * operation, we only need (conf->max_degraded + 1) pages per stripe.
3261 * Note: To make sure the stripe makes progress, we only delay
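A worked example of the reservation arithmetic at 3255-3257, with the array size assumed for illustration: on an 8-disk RAID-6 (max_degraded = 2), a reconstruct-write must reserve (8 + 1) = 9 pages per stripe, while the cheaper path gets by with (2 + 1) = 3, which is why writes that would force reconstruction are the ones worth delaying.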
3268 * based on data in stripe cache. The array is read-only to upper
3323 * stripe cache
3394 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
3400 * Each stripe/dev can have one or more bios attached.
3411 pr_debug("adding bi b#%llu to stripe s#%llu\n",
3437 * stripe are allowed because for a single stripe_head we can
3489 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
3502 * STRIPE_BIT_DELAY. This is important as once a stripe
3531 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3537 int chunk_offset = sector_div(stripe, sectors_per_chunk);
3541 stripe * (disks - conf->max_degraded)
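In sketch form, what stripe_set_idx() does with these lines: it synthesizes the array sector of the stripe's first data block and feeds it back through the forward mapping, so the parity indices on the stripe_head fall out as side effects (simplified; the 'previous' geometry selection is omitted):

    /* disks - max_degraded data disks per stripe; dd_idx is computed
     * but unused, the call is made for its effect on sh->pd_idx/qd_idx. */
    sector_t synth = stripe * (disks - conf->max_degraded)
                     * sectors_per_chunk + chunk_offset;
    raid5_compute_sector(conf, synth, previous, &dd_idx, sh);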
3863 pr_debug("Computing stripe %llu block %d\n",
3892 pr_debug("Computing stripe %llu blocks %d,%d\n",
3926 * is already in flight, or if the stripe contents are in the
3933 * For a degraded stripe with data in journal, do not handle
3934 * read requests yet; instead, flush the stripe to raid
4030 * SCSI discard will change some bio fields and the stripe has
4031 * no updated data, so remove it from hash list and the stripe
4265 /* check that a write has not made the stripe insync */
4286 * STRIPE_INSYNC not set and let the stripe be handled again
4394 /* check that a write has not made the stripe insync */
4513 /* We have read all the blocks in this stripe and now we need to
4514 * copy some of them into a target stripe for expand.
4530 /* so far only the early blocks of this stripe
4568 * handle_stripe - do things to a stripe.
4570 * We lock the stripe by setting STRIPE_ACTIVE and then examine the
4777 * Return '1' if this is a member of a batch, or '0' if it is a lone stripe or
4792 * this stripe could be added to a batch list before we check
4835 "stripe state: %lx\n", sh->state);
4838 "head stripe state: %lx\n", head_sh->state);
4887 * handle_stripe should not continue handling the batched stripe; only
4888 * the head of the batch list or a lone stripe can continue. Otherwise we
4890 * is set for the batched stripe.
4923 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
4965 * the stripe if there is data that needs to be written to the raid disks
5057 * When the stripe finishes full journal write cycle (write to journal
5086 * stripe under reclaim: !caching && injournal
5091 /* stripe under reclaim: !caching && injournal */
5102 /* maybe we need to check and possibly fix the parity for this stripe
5502 /* __get_priority_stripe - get the next stripe to process
5504 * Full stripe writes are allowed to pass preread active stripes up until
5508 * stripe with in-flight i/o. The bypass_count will be reset when the
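A minimal sketch of the bypass policy this block describes, assuming a trimmed-down state (the real function also walks handle_list/hold_list, which is omitted; names follow the comment):

    /* Full-stripe writes may be serviced ahead of preread-active
     * stripes until the threshold trips; servicing the hold_list
     * resets the count. Both helpers are illustrative. */
    struct bypass_state {
            int count;      /* cf. bypass_count */
            int threshold;  /* cf. bypass_threshold */
    };

    static int may_bypass(struct bypass_state *s)
    {
            if (s->count < s->threshold) {
                    s->count++;
                    return 1;   /* take the full-stripe write first */
            }
            return 0;           /* fall back to the hold_list */
    }

    static void hold_list_serviced(struct bypass_state *s)
    {
            s->count = 0;       /* "reset when the hold_list is processed" */
    }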
5627 * STRIPE_ON_UNPLUG_LIST clear but the stripe
5835 * to the stripe that we think it is, we will have
5868 * stripe, so we must do the range check again.
5873 * won't proceed until we finish with the stripe.
5912 /* we only need flush for one stripe */
5924 /* cannot get a stripe for read-ahead, just give up */
5948 * into the destination stripe and release that stripe.
6105 /* If any of this stripe is beyond the end of the old
6136 /* OK, those stripes are ready. We can start scheduling
6243 * stripe, and as resync_max will always be on a chunk boundary,
6274 /* make sure we don't swamp the stripe cache if someone else
6309 * So we do one stripe head at a time and record in
6333 /* already done this stripe */
6339 /* failed to get a stripe - must wait */
7357 pr_info("md/raid:%s: force stripe size %d for reshape\n",
7370 * Losing a stripe head costs more than the time to refill it,
7484 * Difficulties arise if the stripe we would write to
7485 * next is at or after the stripe we would read from next.
7513 /* reshape_position must be on a new-stripe boundary, and one
7524 pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
7529 /* here_new is the stripe we will write to */
7532 /* here_old is the first stripe that we might need to read
7555 /* Reading from the same stripe as writing to - bad */
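Reduced to a sketch, the rule these comments state at 7484-7485 and 7555 (the helper name is made up; positions are stripe numbers in the respective geometries):

    /* The next stripe to be written (new geometry) must lie strictly
     * before the next stripe to be read (old geometry); "at or after"
     * means reshape could clobber data it has not read yet. */
    static int reshape_overlaps(sector_t write_next, sector_t read_next)
    {
            return write_next >= read_next;
    }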
7717 int stripe = data_disks *
7725 * We can only discard a whole stripe. It doesn't make sense to
7728 stripe = stripe * PAGE_SIZE;
7731 while ((stripe-1) & stripe)
7732 stripe = (stripe | (stripe-1)) + 1;
7733 mddev->queue->limits.discard_alignment = stripe;
7734 mddev->queue->limits.discard_granularity = stripe;
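The loop at 7731-7732 rounds the stripe size in bytes (after 7728) up to the next power of two, so that the discard alignment and granularity set at 7733-7734 divide evenly. Standalone, with a worked value:

    /* Same bit trick as above: v | (v-1) sets every bit below the
     * lowest set bit, and +1 then carries through, clearing it; the
     * loop stops once a single bit remains ((v-1) & v == 0). */
    static uint64_t roundup_pow2(uint64_t v)
    {
            while ((v - 1) & v)
                    v = (v | (v - 1)) + 1;
            return v;
    }

    /* e.g. 3 data disks * 4096-byte pages = 12288, rounds up to 16384 */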
7748 * could be lost. Consider a scenario: discard a stripe
7749 * (the stripe could be inconsistent if
7751 * stripe (the stripe could be inconsistent again
7753 * parity); the disk is broken; the stripe data of this
7762 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
7763 mddev->queue->limits.discard_granularity >= stripe)
8081 * We need a minimum of one full stripe, and for sensible progress