Lines matching defs:page in drivers/md/raid5.c
455 struct page *p;
457 /* Have not allocated page pool */
472 struct page *p;
475 /* The page has been allocated. */
501 sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
517 struct page *p;
519 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page);
520 p = sh->dev[i].page;
523 sh->dev[i].page = NULL;
528 sh->dev[i].page = NULL;
540 struct page *page;
542 if (!(page = alloc_page(gfp))) {
545 sh->dev[i].page = page;
546 sh->dev[i].orig_page = page;
554 sh->dev[i].page = raid5_get_dev_page(sh, i);
555 sh->dev[i].orig_page = sh->dev[i].page;
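The allocation lines above (source lines 501-555) show the two ways a device's data page is populated: each r5dev either gets its own page from alloc_page(), or borrows a slice of the stripe's shared page pool via raid5_get_dev_page(); either way orig_page starts out equal to page. Below is a minimal userspace sketch of that invariant, with hypothetical names (stripe_model, dev_slot, grow_buffers_model) and malloc() standing in for alloc_page():

#include <stdio.h>
#include <stdlib.h>

#define NR_DEVS  4
#define PAGE_SZ  4096

/* Hypothetical stand-ins for struct r5dev / struct stripe_head. */
struct dev_slot {
        void *page;      /* buffer used for I/O and parity math */
        void *orig_page; /* diverges from ->page only for prexor */
};

struct stripe_model {
        struct dev_slot dev[NR_DEVS];
};

/* Mirrors the per-device allocation loop in the listing (source
 * lines 540-546): give every device a buffer and let orig_page
 * start out equal to page. */
static int grow_buffers_model(struct stripe_model *sh)
{
        int i;

        for (i = 0; i < NR_DEVS; i++) {
                void *p = malloc(PAGE_SZ);

                if (!p)
                        return -1;
                sh->dev[i].page = p;
                sh->dev[i].orig_page = p;
        }
        return 0;
}

int main(void)
{
        struct stripe_model sh = { { { 0 } } };
        int i;

        if (grow_buffers_model(&sh))
                return 1;
        printf("page == orig_page for dev 0: %d\n",
               sh.dev[0].page == sh.dev[0].orig_page);
        for (i = 0; i < NR_DEVS; i++)
                free(sh.dev[i].page);
        return 0;
}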
1202 * issuing read for a page in journal, this
1208 sh->dev[i].vec.bv_page = sh->dev[i].page;
1262 sh->dev[i].rvec.bv_page = sh->dev[i].page;
1306 async_copy_data(int frombio, struct bio *bio, struct page **page,
1312 struct page *bio_page;
1351 *page = bio_page;
1353 tx = async_memcpy(*page, bio_page, page_offset + poff,
1356 tx = async_memcpy(bio_page, *page, b_offset,
1362 if (clen < len) /* hit end of page */
1429 tx = async_copy_data(0, rbi, &dev->page,
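async_copy_data() (source lines 1306-1362, called at 1429) moves data between a bio and a stripe device page, with the frombio argument picking the direction and async_memcpy() doing the actual copy. The following plain-C model keeps only that direction choice; it uses flat buffers and memcpy() instead of pages, bio vectors and the async engine, so it is a sketch rather than the kernel routine:

#include <stdio.h>
#include <string.h>

#define CHUNK 16

/* Direction model: frombio != 0 drains the bio's data into the
 * stripe buffer (servicing a write), frombio == 0 fills the bio
 * from the stripe buffer (completing a read).  memcpy() stands in
 * for async_memcpy(); the real code also walks bio vectors and
 * handles copies that straddle a page boundary. */
static void copy_data_model(int frombio, char *bio_buf, char *stripe_buf,
                            size_t len)
{
        if (frombio)
                memcpy(stripe_buf, bio_buf, len);
        else
                memcpy(bio_buf, stripe_buf, len);
}

int main(void)
{
        char bio_buf[CHUNK] = "written by user";
        char stripe_buf[CHUNK] = "";

        copy_data_model(1, bio_buf, stripe_buf, CHUNK); /* biodrain direction */
        printf("stripe now holds: %s\n", stripe_buf);

        copy_data_model(0, bio_buf, stripe_buf, CHUNK); /* biofill direction */
        return 0;
}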
1474 static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
1499 struct page **xor_srcs = to_addr_page(percpu, 0);
1503 struct page *xor_dest = tgt->page;
1519 xor_srcs[count++] = sh->dev[i].page;
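The xor_srcs[]/xor_dest pattern above (source lines 1474-1519) is the heart of single-parity compute: every other device's page is XOR-ed into the target page to regenerate the missing block. A tiny self-contained model of that pass over flat buffers, with the kernel's async XOR helper replaced by a byte loop:

#include <stdio.h>
#include <string.h>

#define NDISKS   4       /* data + parity members feeding the XOR */
#define PAGE_SZ  8       /* tiny "page" so the demo stays short */

/* Model of the xor_srcs[]/xor_dest pattern: XOR every source buffer
 * into the destination.  In the kernel the async XOR helper does
 * this on real pages at real offsets, possibly offloaded to a DMA
 * engine. */
static void xor_blocks_model(unsigned char *dest, unsigned char **srcs,
                             int count)
{
        int i;
        size_t b;

        memset(dest, 0, PAGE_SZ);
        for (i = 0; i < count; i++)
                for (b = 0; b < PAGE_SZ; b++)
                        dest[b] ^= srcs[i][b];
}

int main(void)
{
        unsigned char d0[PAGE_SZ] = "dataAAA";
        unsigned char d1[PAGE_SZ] = "dataBBB";
        unsigned char d2[PAGE_SZ] = "dataCCC";
        unsigned char parity[PAGE_SZ];
        unsigned char rebuilt[PAGE_SZ];
        unsigned char *gen_srcs[] = { d0, d1, d2 };
        unsigned char *fix_srcs[] = { d1, d2, parity };

        xor_blocks_model(parity, gen_srcs, 3);   /* generate parity */
        xor_blocks_model(rebuilt, fix_srcs, 3);  /* recover a lost d0 */
        printf("recovered: %.7s\n", rebuilt);
        return 0;
}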
1538 * @srcs - (struct page *) array of size sh->disks
1539 * @offs - (unsigned int) array of offset for each page
1547 static int set_syndrome_sources(struct page **srcs,
1578 srcs[slot] = sh->dev[i].page;
1581 * not shared page. In that case, dev[i].offset
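set_syndrome_sources(), documented by the fragments at 1538-1581, fills two parallel arrays for the async P/Q routines: srcs[] holds one struct page pointer per disk slot and offs[] the corresponding per-device page offset. The sketch below models only that gather step; the names are invented, and the RAID-6 slot mapping and the source substitutions the real function performs are deliberately left out:

#include <stdio.h>

#define DISKS 6

/* Hypothetical stand-in for struct r5dev: just the fields the
 * gather step cares about. */
struct r5dev_model {
        void *page;
        unsigned int offset;   /* offset into a possibly shared page */
};

/* Copy each device's page pointer and offset into parallel arrays,
 * the shape handed to the async P/Q code. */
static int gather_sources(void **srcs, unsigned int *offs,
                          const struct r5dev_model *dev, int disks)
{
        int i, count = 0;

        for (i = 0; i < disks; i++) {
                srcs[count] = dev[i].page;
                offs[count] = dev[i].offset;
                count++;
        }
        return count;
}

int main(void)
{
        static char pages[DISKS][4096];
        struct r5dev_model dev[DISKS];
        void *srcs[DISKS];
        unsigned int offs[DISKS];
        int i, count;

        for (i = 0; i < DISKS; i++) {
                dev[i].page = pages[i];
                dev[i].offset = 0;
        }
        count = gather_sources(srcs, offs, dev, DISKS);
        printf("gathered %d sources\n", count);
        return 0;
}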
1596 struct page **blocks = to_addr_page(percpu, 0);
1603 struct page *dest;
1622 dest = tgt->page;
1643 blocks[count++] = sh->dev[i].page;
1668 struct page **blocks = to_addr_page(percpu, 0);
1692 blocks[slot] = sh->dev[i].page;
1720 struct page *dest;
1736 blocks[count++] = sh->dev[i].page;
1738 dest = sh->dev[data_target].page;
1796 struct page **xor_srcs = to_addr_page(percpu, 0);
1803 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1815 * page offset is zero.
1821 xor_srcs[count++] = dev->page;
1837 struct page **blocks = to_addr_page(percpu, 0);
1877 * clear R5_InJournal, so when rewriting a page in
1888 WARN_ON(dev->page != dev->orig_page);
1899 tx = async_copy_data(1, wbi, &dev->page,
1903 if (dev->page != dev->orig_page &&
1979 struct page **xor_srcs;
1983 struct page *xor_dest;
2016 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
2022 xor_srcs[count++] = dev->page;
2026 xor_dest = sh->dev[pd_idx].page;
2032 xor_srcs[count++] = dev->page;
2077 struct page **blocks;
2152 struct page *xor_dest;
2154 struct page **xor_srcs = to_addr_page(percpu, 0);
2166 xor_dest = sh->dev[pd_idx].page;
2174 xor_srcs[count++] = sh->dev[i].page;
2190 struct page **srcs = to_addr_page(percpu, 0);
2393 * 1/ a struct page pointer for each device in the array +2
2395 * (dma_map_page()) or page (page_address()) address.
2405 sizeof(struct page *) * (num + 2) +
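The comment fragments and the sizeof expression at 2393-2405 describe the per-CPU scribble buffer: one struct page pointer for each array member plus two, plus room to convert each of those entries to its dma_map_page() or page_address() address. A back-of-the-envelope model of that sizing, with void * standing in for both struct page * and addr_conv_t and any further per-entry bookkeeping ignored:

#include <stdio.h>

/* Model of the scribble sizing: (num + 2) page-pointer slots and a
 * matching address-conversion slot for each, so every pointer can be
 * turned into a DMA or virtual address in place. */
static size_t scribble_bytes_model(int num)
{
        size_t page_ptrs = sizeof(void *) * (num + 2);
        size_t addr_conv = sizeof(void *) * (num + 2);

        return page_ptrs + addr_conv;
}

int main(void)
{
        printf("model scribble size for 8 devices: %zu bytes\n",
               scribble_bytes_model(8));
        return 0;
}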
2464 * New slots in each stripe get 'page' set to a new page.
2546 nsh->dev[i].page = osh->dev[i].page;
2547 nsh->dev[i].orig_page = osh->dev[i].page;
2607 if (nsh->dev[i].page)
2609 nsh->dev[i].page = raid5_get_dev_page(nsh, i);
2610 nsh->dev[i].orig_page = nsh->dev[i].page;
2615 if (nsh->dev[i].page == NULL) {
2616 struct page *p = alloc_page(GFP_NOIO);
2617 nsh->dev[i].page = p;
2718 * end read for a page in journal, this
3246 * orig_page, and xor with page). To keep read path simple, we would
3301 * run rmw and allocates extra page for prexor. However, rcw is
3302 * cheaper later on. We need to free the extra page now,
3475 /* check if page is covered */
3607 sh->dev[i].page = sh->dev[i].orig_page;
3937 * page) in the read path
3987 dev->page = dev->orig_page;
4061 * For RMW in write back cache, we need extra page in prexor to store the
4062 * old data. This page is stored in dev->orig_page.
4139 dev->page == dev->orig_page &&
4141 /* alloc page for prexor */
4142 struct page *p = alloc_page(GFP_NOIO);
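Source lines 4061-4142 cover the write-back-cache RMW case: with the new data already journaled (R5_InJournal), an extra page is allocated and, per the comment above, parked in dev->orig_page so the old data needed by prexor can be read there without disturbing dev->page. A minimal sketch of that pointer detach, reusing the hypothetical dev_slot shape from the earlier sketch and malloc() in place of alloc_page(GFP_NOIO):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096

/* Same hypothetical dev_slot shape as in the earlier sketch. */
struct dev_slot {
        void *page;      /* data being written (already journaled) */
        void *orig_page; /* old data, read back for prexor */
};

/* Model of the "alloc page for prexor" step: only when page and
 * orig_page still alias each other is a second buffer attached. */
static int attach_prexor_page(struct dev_slot *dev)
{
        void *p;

        if (dev->page != dev->orig_page)
                return 0;       /* already detached */
        p = malloc(PAGE_SZ);
        if (!p)
                return -1;      /* the real code has its own fallback here */
        dev->orig_page = p;
        return 0;
}

int main(void)
{
        static char journaled[PAGE_SZ];
        struct dev_slot dev = { journaled, journaled };

        if (attach_prexor_page(&dev))
                return 1;
        printf("page and orig_page now differ: %d\n",
               dev.page != dev.orig_page);
        free(dev.orig_page);
        return 0;
}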
4544 tx = async_memcpy(sh2->dev[dd_idx].page,
4545 sh->dev[i].page, sh2->dev[dd_idx].offset,
6120 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf));
6548 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
6555 ret = sprintf(page, "%d\n", conf->min_nr_stripes);
6592 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
6600 if (kstrtoul(page, 10, &new))
6621 raid5_show_rmw_level(struct mddev *mddev, char *page)
6625 return sprintf(page, "%d\n", conf->rmw_level);
6631 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
6642 if (kstrtoul(page, 10, &new))
6663 raid5_show_stripe_size(struct mddev *mddev, char *page)
6671 ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf));
6678 raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
6687 if (kstrtoul(page, 10, &new))
6758 raid5_show_preread_threshold(struct mddev *mddev, char *page)
6765 ret = sprintf(page, "%d\n", conf->bypass_threshold);
6771 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
6779 if (kstrtoul(page, 10, &new))
6803 raid5_show_skip_copy(struct mddev *mddev, char *page)
6810 ret = sprintf(page, "%d\n", conf->skip_copy);
6816 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
6824 if (kstrtoul(page, 10, &new))
6855 stripe_cache_active_show(struct mddev *mddev, char *page)
6859 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
6868 raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
6875 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
6884 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
6894 if (kstrtouint(page, 10, &new))
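The tail of the listing (source lines 6548 onward) is the md/raid5 sysfs attribute boilerplate, where page is just the buffer sysfs hands in: each *_show() formats the current value into it with sprintf() and each *_store() parses it with kstrtoul()/kstrtouint() before applying the new setting. A standalone userspace round-trip sketch follows, with strtoul() standing in for kstrtoul() and the locking and range validation the real handlers perform omitted:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

#define PAGE_SIZE_MODEL 4096

static int stripe_cache_size = 256;    /* stand-in for conf->min_nr_stripes */

/* show: format the current value into the page buffer and return the
 * number of bytes written, as sysfs expects. */
static ssize_t show_stripe_cache_size_model(char *page)
{
        return sprintf(page, "%d\n", stripe_cache_size);
}

/* store: parse the user-supplied string, reject garbage, and report
 * the whole input as consumed on success. */
static ssize_t store_stripe_cache_size_model(const char *page, size_t len)
{
        char *end;
        unsigned long new_size = strtoul(page, &end, 10);

        if (end == page || (*end != '\0' && *end != '\n'))
                return -EINVAL;
        stripe_cache_size = (int)new_size;
        return (ssize_t)len;
}

int main(void)
{
        char page[PAGE_SIZE_MODEL];

        store_stripe_cache_size_model("512\n", 4);
        show_stripe_cache_size_model(page);
        fputs(page, stdout);
        return 0;
}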