Lines matching defs:dev in drivers/md/raid5-cache.c (Linux md RAID4/5/6 journal
and write-back cache). Each entry below is a line number in that file followed
by the matched source line.

292 r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
296 wbi = dev->written;
297 dev->written = NULL;
299 dev->sector + RAID5_STRIPE_SECTORS(conf)) {
300 wbi2 = r5_next_bio(conf, wbi, dev->sector);
313 if (sh->dev[i].written) {
314 set_bit(R5_UPTODATE, &sh->dev[i].flags);
315 r5c_return_dev_pending_writes(conf, &sh->dev[i]);
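
These first matches are from the journal-write completion path:
r5c_return_dev_pending_writes() (292-300) hands back the bios parked on
dev->written once their data is safe in the log, and its caller (313-315)
first marks each journaled page R5_UPTODATE. A condensed reconstruction of
the helper, following upstream raid5-cache.c; the md_write_end()/bio_endio()
calls in the loop body come from the surrounding upstream code, not from the
matches above:

    static void r5c_return_dev_pending_writes(struct r5conf *conf,
                                              struct r5dev *dev)
    {
            struct bio *wbi, *wbi2;

            /* detach the list of bios waiting on this dev */
            wbi = dev->written;
            dev->written = NULL;
            /* complete every bio that falls inside this dev's chunk */
            while (wbi && wbi->bi_iter.bi_sector <
                   dev->sector + RAID5_STRIPE_SECTORS(conf)) {
                    wbi2 = r5_next_bio(conf, wbi, dev->sector);
                    md_write_end(conf->mddev);
                    bio_endio(wbi);
                    wbi = wbi2;
            }
    }
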
468 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
469 set_bit(R5_InJournal, &sh->dev[i].flags);
470 clear_bit(R5_LOCKED, &sh->dev[i].flags);
484 if (test_bit(R5_InJournal, &sh->dev[i].flags))
485 set_bit(R5_Wantwrite, &sh->dev[i].flags);
499 * Set R5_InJournal for parity dev[pd_idx]. This means
501 * NOT necessary to set the flag for dev[qd_idx], as the
504 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
509 set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
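
Lines 468-509 all manage R5_InJournal, the per-dev flag meaning "the latest
copy of this block lives on the journal device". Caching a data page converts
R5_Wantwrite into R5_InJournal and unlocks the dev (468-470); when the stripe
is later drained, every InJournal dev regains R5_Wantwrite so it reaches the
array (484-485); and once parity is journaled, dev[pd_idx] is flagged too,
with dev[qd_idx] intentionally skipped because both parities are logged
together (499-509). A sketch of the two helpers, which in upstream are
r5c_handle_data_cached() and r5c_handle_parity_cached():

    static void r5c_handle_data_cached(struct stripe_head *sh)
    {
            int i;

            for (i = sh->disks; i--; )
                    if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
                            set_bit(R5_InJournal, &sh->dev[i].flags);
                            clear_bit(R5_LOCKED, &sh->dev[i].flags);
                    }
            clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
    }

    static void r5c_handle_parity_cached(struct stripe_head *sh)
    {
            int i;

            /* parity is journaled: queue all cached data for the array */
            for (i = sh->disks; i--; )
                    if (test_bit(R5_InJournal, &sh->dev[i].flags))
                            set_bit(R5_Wantwrite, &sh->dev[i].flags);
    }
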
925 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
926 test_bit(R5_InJournal, &sh->dev[i].flags))
930 if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
941 sh->dev[i].log_checksum, 0, false);
942 r5l_append_payload_page(log, sh->dev[i].page);
947 sh->sector, sh->dev[sh->pd_idx].log_checksum,
948 sh->dev[sh->qd_idx].log_checksum, true);
949 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
950 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
953 sh->sector, sh->dev[sh->pd_idx].log_checksum,
955 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
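
Lines 925-955 appear to be from r5l_log_stripe(), which serializes a stripe
into the journal: a metadata record plus the page for each dirty data dev,
then the parity page(s). A condensed sketch of that sequence, assuming (as in
upstream) that parity_pages is 2 for RAID6 and 1 for RAID4/5:

    /* append each dirty data block: meta record first, then the page */
    for (i = 0; i < sh->disks; i++) {
            if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
                test_bit(R5_InJournal, &sh->dev[i].flags))
                    continue;
            if (i == sh->pd_idx || i == sh->qd_idx)
                    continue;
            if (test_bit(R5_WantFUA, &sh->dev[i].flags) &&
                log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
                    io->has_fua = 1;
                    /* FUA data must stay reachable by recovery */
                    io->has_flush = 1;
            }
            r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
                                    raid5_compute_blocknr(sh, i, 0),
                                    sh->dev[i].log_checksum, 0, false);
            r5l_append_payload_page(log, sh->dev[i].page);
    }

    /* parity: one record carries both checksums for RAID6 */
    if (parity_pages == 2) {
            r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                    sh->sector,
                                    sh->dev[sh->pd_idx].log_checksum,
                                    sh->dev[sh->qd_idx].log_checksum, true);
            r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
            r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
    } else if (parity_pages == 1) {
            r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                    sh->sector,
                                    sh->dev[sh->pd_idx].log_checksum,
                                    0, false);
            r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
    }
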
1004 if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
1016 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
1017 test_bit(R5_InJournal, &sh->dev[i].flags))
1024 addr = kmap_atomic(sh->dev[i].page);
1025 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
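
Lines 1004-1025 are from r5l_write_stripe(): the early bail-out (1004) skips
stripes already in flight or without a dirty parity, and the loop (1016-1025)
checksums each page headed for the journal, seeding CRC32C with the log's
UUID checksum so that, per the upstream design, records from a journal
written for a different array fail verification. Sketch of the loop:

    for (i = 0; i < sh->disks; i++) {
            void *addr;

            if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) ||
                test_bit(R5_InJournal, &sh->dev[i].flags))
                    continue;

            write_disks++;
            /* checksum was already computed on a previous pass */
            if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
                    continue;
            addr = kmap_atomic(sh->dev[i].page);
            sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
                                                addr, PAGE_SIZE);
            kunmap_atomic(addr);
    }
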
1816 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1817 sh->dev[dd_idx].log_checksum =
1821 set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags);
1835 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1836 sh->dev[sh->pd_idx].log_checksum =
1838 set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
1842 log, ctx, sh->dev[sh->qd_idx].page,
1844 sh->dev[sh->qd_idx].log_checksum =
1846 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
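
Lines 1816-1846 are the recovery readers: r5l_recovery_load_data() pulls a
logged data block into the in-memory stripe and records its expected checksum
(1816-1821), and r5l_recovery_load_parity() does the same for P, and for Q
when qd_idx >= 0 (1835-1846). A condensed reconstruction of the parity side,
following upstream:

    r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
    sh->dev[sh->pd_idx].log_checksum =
            le32_to_cpu(payload->checksum[0]);
    set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);

    if (sh->qd_idx >= 0) {
            /* Q parity follows P in the log ring */
            r5l_recovery_read_page(
                    log, ctx, sh->dev[sh->qd_idx].page,
                    r5l_ring_add(log, log_offset, BLOCK_SECTORS));
            sh->dev[sh->qd_idx].log_checksum =
                    le32_to_cpu(payload->checksum[1]);
            set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
    }
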
1858 sh->dev[i].flags = 0;
1871 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1887 if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
1897 sh->dev[disk_index].page, REQ_OP_WRITE,
1907 sh->dev[disk_index].page, REQ_OP_WRITE,
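
Lines 1858-1907 cover replay: r5l_recovery_reset_stripe() zeroes every dev's
flags (1858), and r5l_recovery_replay_one_stripe() synchronously writes each
R5_Wantwrite page to the member disk and, if present, its replacement
(1871-1907). A condensed sketch of the write-out loop; the RCU/nr_pending
handling follows upstream of roughly the v6.0 era, which is what the
kmap_atomic and sync_page_io signatures in the matches suggest:

    for (disk_index = 0; disk_index < sh->disks; disk_index++) {
            struct md_rdev *rdev, *rrdev;

            if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
                    continue;

            /* in case a device disappears mid-recovery */
            rcu_read_lock();
            rdev = rcu_dereference(conf->disks[disk_index].rdev);
            if (rdev) {
                    atomic_inc(&rdev->nr_pending);
                    rcu_read_unlock();
                    sync_page_io(rdev, sh->sector, PAGE_SIZE,
                                 sh->dev[disk_index].page, REQ_OP_WRITE,
                                 false);
                    rdev_dec_pending(rdev, rdev->mddev);
                    rcu_read_lock();
            }
            rrdev = rcu_dereference(conf->disks[disk_index].replacement);
            if (rrdev) {
                    atomic_inc(&rrdev->nr_pending);
                    rcu_read_unlock();
                    sync_page_io(rrdev, sh->sector, PAGE_SIZE,
                                 sh->dev[disk_index].page, REQ_OP_WRITE,
                                 false);
                    rdev_dec_pending(rrdev, rrdev->mddev);
                    rcu_read_lock();
            }
            rcu_read_unlock();
    }
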
2177 test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
2207 struct r5dev *dev;
2211 dev = sh->dev + i;
2212 if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
2213 set_bit(R5_InJournal, &dev->flags);
2214 set_bit(R5_UPTODATE, &dev->flags);
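
Lines 2177-2214: during log analysis, line 2177 tests whether a stripe
already has journaled parity pending (R5_Wantwrite on pd_idx), which in
upstream gates replaying it to the array before reuse; data-only stripes are
instead kept cached, and r5c_recovery_load_one_stripe() converts their
R5_Wantwrite marks into R5_InJournal + R5_UPTODATE so they behave like
ordinary write-back cached pages. The helper is short enough to reconstruct
whole:

    static void
    r5c_recovery_load_one_stripe(struct r5l_log *log,
                                 struct stripe_head *sh)
    {
            struct r5dev *dev;
            int i;

            for (i = sh->disks; i--; ) {
                    dev = sh->dev + i;
                    if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) {
                            set_bit(R5_InJournal, &dev->flags);
                            set_bit(R5_UPTODATE, &dev->flags);
                    }
            }
    }
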
2377 struct r5dev *dev = &sh->dev[i];
2381 if (test_bit(R5_InJournal, &dev->flags)) {
2388 addr = kmap_atomic(dev->page);
2394 dev->page, REQ_OP_WRITE, false);
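
Lines 2377-2394 are from r5c_recovery_rewrite_data_only_stripes():
cached-but-unflushed stripes are rewritten at the log tail with freshly
computed checksums so the journal is self-consistent before the array goes
live. Sketch of the per-dev body; the payload/offset bookkeeping is
paraphrased from upstream and the mb/offset/write_pos names are taken from
the surrounding upstream function, not from the matches above:

    struct r5dev *dev = &sh->dev[i];
    struct r5l_payload_data_parity *payload;
    void *addr;

    if (test_bit(R5_InJournal, &dev->flags)) {
            payload = (void *)mb + offset;
            payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_DATA);
            payload->size = cpu_to_le32(BLOCK_SECTORS);
            payload->location =
                    cpu_to_le64(raid5_compute_blocknr(sh, i, 0));
            /* recompute the checksum over the cached page */
            addr = kmap_atomic(dev->page);
            payload->checksum[0] = cpu_to_le32(
                    crc32c_le(log->uuid_checksum, addr, PAGE_SIZE));
            kunmap_atomic(addr);
            sync_page_io(log->rdev, write_pos, PAGE_SIZE,
                         dev->page, REQ_OP_WRITE, false);
            write_pos = r5l_ring_add(log, write_pos, BLOCK_SECTORS);
            offset += sizeof(__le32) +
                      sizeof(struct r5l_payload_data_parity);
    }
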
2640 struct r5dev *dev;
2684 dev = &sh->dev[i];
2686 if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
2687 !test_bit(R5_InJournal, &dev->flags)) {
2732 dev = &sh->dev[i];
2733 if (dev->towrite) {
2734 set_bit(R5_Wantwrite, &dev->flags);
2735 set_bit(R5_Wantdrain, &dev->flags);
2736 set_bit(R5_LOCKED, &dev->flags);
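
Lines 2640-2736 are from r5c_try_caching_write(), which decides whether a
write can stay in the cache. The first loop rejects the stripe when any dev
has a partial (non-overwrite) write that is not already journaled, forcing
the normal write-out path; the second loop marks every to-be-cached dev
Wantwrite + Wantdrain + LOCKED. Sketch of both loops, per upstream:

    /* a partial write that isn't already journaled can't be cached */
    for (i = disks; i--; ) {
            dev = &sh->dev[i];
            if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) &&
                !test_bit(R5_InJournal, &dev->flags)) {
                    r5c_make_stripe_write_out(sh);
                    return -EAGAIN;
            }
    }

    /* everything else: queue for draining into the journal */
    for (i = disks; i--; ) {
            dev = &sh->dev[i];
            if (dev->towrite) {
                    set_bit(R5_Wantwrite, &dev->flags);
                    set_bit(R5_Wantdrain, &dev->flags);
                    set_bit(R5_LOCKED, &dev->flags);
                    to_cache++;
            }
    }
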
2764 sh->dev[0].orig_page == conf->disks[0].extra_page;
2767 if (sh->dev[i].page != sh->dev[i].orig_page) {
2768 struct page *p = sh->dev[i].orig_page;
2770 sh->dev[i].orig_page = sh->dev[i].page;
2771 clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags);
2787 struct r5dev *dev;
2790 dev = &sh->dev[i];
2791 if (dev->orig_page != dev->page)
2792 put_page(dev->orig_page);
2793 dev->orig_page = conf->disks[i].extra_page;
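
Lines 2764-2793 are the extra-page pair: during write-out of a cached stripe
a dev can need a second page, so r5c_use_extra_page() installs the per-disk
conf->disks[i].extra_page as dev->orig_page (2787-2793) and
r5c_release_extra_page() later swaps back and releases (2764-2771). Note that
R5_OrigPageUPTDODATE on line 2771 is the flag's actual spelling in raid5.h,
not a transcription error in this listing. Reconstruction of the shorter
helper:

    void r5c_use_extra_page(struct stripe_head *sh)
    {
            struct r5conf *conf = sh->raid_conf;
            int i;
            struct r5dev *dev;

            for (i = sh->disks; i--; ) {
                    dev = &sh->dev[i];
                    /* drop the old orig_page if it was a private copy */
                    if (dev->orig_page != dev->page)
                            put_page(dev->orig_page);
                    dev->orig_page = conf->disks[i].extra_page;
            }
    }
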
2798 * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the
2812 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2816 clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
2822 clear_bit(R5_InJournal, &sh->dev[i].flags);
2823 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
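
Lines 2798-2823 are from r5c_finish_stripe_write_out(), the cleanup that runs
once a stripe has been committed to the RAID disks: the early test (2812)
uses R5_InJournal on dev[pd_idx] as "this stripe went through the journal",
then, in write-back mode, every dev drops R5_InJournal and anyone blocked on
R5_Overlap is woken. Sketch, per upstream:

    if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
            return;

    WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
    clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);

    if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
            return;

    for (i = sh->disks; i--; ) {
            clear_bit(R5_InJournal, &sh->dev[i].flags);
            if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                    do_wakeup = 1;
    }
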
2899 if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
2901 addr = kmap_atomic(sh->dev[i].page);
2902 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
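
Lines 2899-2902 close the loop in r5c_cache_data(): the same UUID-seeded
CRC32C as in r5l_write_stripe() (1024-1025), but without the R5_InJournal
exclusion, since caching is exactly what puts pages into the journal. Sketch:

    for (i = 0; i < sh->disks; i++) {
            void *addr;

            if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                    continue;
            addr = kmap_atomic(sh->dev[i].page);
            sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
                                                addr, PAGE_SIZE);
            kunmap_atomic(addr);
            pages++;
    }
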