Lines matching defs:page (DRBD page-pool and receive-path code; the leading numbers are source line numbers, and non-matching lines are elided)
68 * some helper functions to deal with singly linked page lists,
69 * page->private being our "next" pointer.
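
The matches below rely on a few chain-walking helpers. A sketch of what they plausibly look like, given the comment's statement that page->private carries the "next" pointer (these mirror macros in mainline DRBD, shown for orientation rather than as authoritative source):

#define page_chain_next(page) \
        ((struct page *)page_private(page))
#define page_chain_for_each(page) \
        for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
                        page = page_chain_next(page))
#define page_chain_for_each_safe(page, n) \
        for (; page && ({ n = page_chain_next(page); 1; }); page = n)

The _safe variant loads the next pointer before the loop body runs, so the body is free to release the current page.
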
76 static struct page *page_chain_del(struct page **head, int n)
78 struct page *page;
79 struct page *tmp;
84 page = *head;
86 if (!page)
89 while (page) {
90 tmp = page_chain_next(page);
96 page = tmp;
100 set_page_private(page, 0);
102 page = *head;
104 return page;
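
Only lines containing "page" survive the filter, which hides the counting logic. A plausible reconstruction of the whole helper, with the elided branches inferred (detach the first n pages from *head, or return NULL without touching the chain if it is shorter than n):

static struct page *page_chain_del(struct page **head, int n)
{
        struct page *page;
        struct page *tmp;

        page = *head;
        if (!page)
                return NULL;

        while (page) {
                tmp = page_chain_next(page);
                if (--n == 0)
                        break;          /* found sufficient pages */
                if (tmp == NULL)
                        return NULL;    /* chain too short, take nothing */
                page = tmp;
        }

        /* terminate the detached sublist ... */
        set_page_private(page, 0);
        /* ... and hand back the old head, advancing *head past it */
        page = *head;
        *head = tmp;
        return page;
}
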
108 * "private" page chain, before adding it back to a global chain head
110 static struct page *page_chain_tail(struct page *page, int *len)
112 struct page *tmp;
114 while ((tmp = page_chain_next(page)))
115 ++i, page = tmp;
118 return page;
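
The counter is declared on a non-matching line; filled in, the helper plausibly reads as follows (walk to the tail and, on request, report the chain length through *len):

static struct page *page_chain_tail(struct page *page, int *len)
{
        struct page *tmp;
        int i = 1;      /* elided above: the line contains no "page" */

        while ((tmp = page_chain_next(page)))
                ++i, page = tmp;
        if (len)
                *len = i;
        return page;
}
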
121 static int page_chain_free(struct page *page)
123 struct page *tmp;
125 page_chain_for_each_safe(page, tmp) {
126 put_page(page);
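
The int return type suggests the helper counts what it releases. A sketch under that reading; note the _safe iterator, since put_page() may free the page whose next pointer the loop still needs:

static int page_chain_free(struct page *page)
{
        struct page *tmp;
        int i = 0;

        page_chain_for_each_safe(page, tmp) {
                put_page(page);
                ++i;
        }
        return i;
}
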
132 static void page_chain_add(struct page **head,
133 struct page *chain_first, struct page *chain_last)
136 struct page *tmp;
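
Given the (chain_first, chain_last) pair in the signature, the splice is presumably O(1): point the tail's private at the old head. The tmp at line 136 likely backs a sanity check; a sketch under those assumptions:

static void page_chain_add(struct page **head,
                struct page *chain_first, struct page *chain_last)
{
        struct page *tmp;

        /* assumed sanity check: chain_last really is the tail */
        tmp = page_chain_tail(chain_first, NULL);
        BUG_ON(tmp != chain_last);

        set_page_private(chain_last, (unsigned long)*head);
        *head = chain_first;
}
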
146 static struct page *__drbd_alloc_pages(struct drbd_device *device,
149 struct page *page = NULL;
150 struct page *tmp = NULL;
157 page = page_chain_del(&drbd_pp_pool, number);
158 if (page)
161 if (page)
162 return page;
172 set_page_private(tmp, (unsigned long)page);
173 page = tmp;
177 return page;
182 if (page) {
183 tmp = page_chain_tail(page, NULL);
185 page_chain_add(&drbd_pp_pool, page, tmp);
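
Read together, lines 157..185 describe a two-stage allocator: take a ready-made chain from the global pool, else build one page by page, and on partial failure donate the partial chain back to the pool rather than returning a short chain. A sketch of that flow, assuming a spinlock drbd_pp_lock guards drbd_pp_pool (the GFP flag is a guess):

static struct page *__drbd_alloc_pages(struct drbd_device *device,
                                       unsigned int number)
{
        struct page *page = NULL;
        struct page *tmp = NULL;
        unsigned int i = 0;

        /* stage 1: the preallocated global pool */
        spin_lock(&drbd_pp_lock);
        page = page_chain_del(&drbd_pp_pool, number);
        spin_unlock(&drbd_pp_lock);
        if (page)
                return page;

        /* stage 2: fresh pages, chained front-to-back via private */
        for (i = 0; i < number; i++) {
                tmp = alloc_page(GFP_NOIO);     /* flag assumed */
                if (!tmp)
                        break;
                set_page_private(tmp, (unsigned long)page);
                page = tmp;
        }
        if (i == number)
                return page;

        /* partial: return what we got to the pool, report failure */
        if (page) {
                tmp = page_chain_tail(page, NULL);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                spin_unlock(&drbd_pp_lock);
        }
        return NULL;
}
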
247 * Tries to allocate number pages, first from our own page pool, then from
259 * Returns a page chain linked via page->private.
261 struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number,
265 struct page *page = NULL;
276 page = __drbd_alloc_pages(device, number);
280 if (page && atomic_read(&device->pp_in_use_by_net) > 512)
283 while (page == NULL) {
289 page = __drbd_alloc_pages(device, number);
290 if (page)
307 if (page)
309 return page;
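
Lines 283..307 outline the slow path: callers sleep until drbd_free_pages() returns pages to the pool. A sketch of the likely loop shape, assuming the third parameter is a bool retry and that drbd_pp_wait is the wait queue the free path wakes (the max_buffers throttling hinted at by the pp_in_use counters is omitted):

        DEFINE_WAIT(wait);

        while (page == NULL) {
                prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

                page = __drbd_alloc_pages(device, number);
                if (page)
                        break;
                if (!retry)
                        break;
                if (signal_pending(current)) {
                        drbd_warn(device, "drbd_alloc_pages interrupted!\n");
                        break;
                }
                schedule();
        }
        finish_wait(&drbd_pp_wait, &wait);

        if (page)
                atomic_add(number, &device->pp_in_use);
        return page;
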
314 * Either links the page chain back to the global pool,
316 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
321 if (page == NULL)
325 i = page_chain_free(page);
327 struct page *tmp;
328 tmp = page_chain_tail(page, &i);
330 page_chain_add(&drbd_pp_pool, page, tmp);
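
The two branches at lines 325..330 are the policy: when the pool is already full enough, give the chain back to the kernel; otherwise splice it onto the pool. Either way the freed count must leave pp_in_use (or pp_in_use_by_net, per is_net) and waiters must be woken. A sketch, with a hypothetical pool_high_water threshold and the vacancy counter drbd_pp_vacant assumed:

/* hypothetical stand-in for the pool's high-water mark */
#define pool_high_water 128

static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
{
        atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
        int i;

        if (page == NULL)
                return;

        if (drbd_pp_vacant > pool_high_water)
                i = page_chain_free(page);      /* back to the kernel */
        else {
                struct page *tmp;
                tmp = page_chain_tail(page, &i);        /* counts pages too */
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                spin_unlock(&drbd_pp_lock);
        }
        i = atomic_sub_return(i, a);
        if (i < 0)
                drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
                          is_net ? "pp_in_use_by_net" : "pp_in_use", i);
        wake_up(&drbd_pp_wait);         /* unblock drbd_alloc_pages() */
}
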
364 struct page *page = NULL;
378 page = drbd_alloc_pages(peer_device, nr_pages,
380 if (!page)
391 peer_req->pages = page;
1632 * single page to an empty bio (which should never happen and likely indicates
1644 struct page *page = peer_req->pages;
1706 page_chain_for_each(page) {
1708 if (!bio_add_page(bio, page, len, 0))
1715 D_ASSERT(device, page == NULL);
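
The loop at lines 1706..1708 maps the page chain onto one or more bios; the assertion at 1715 works because page_chain_for_each() leaves its cursor NULL once the whole chain is consumed. A sketch of the surrounding logic (next_bio is assumed to be a label in the enclosing function that chains up a fresh bio and retries the page):

        page_chain_for_each(page) {
                unsigned int len = min_t(unsigned int, data_size, PAGE_SIZE);

                if (!bio_add_page(bio, page, len, 0))
                        goto next_bio;  /* bio full: start another */
                data_size -= len;
                sector += len >> 9;
        }
        D_ASSERT(device, data_size == 0);
        D_ASSERT(device, page == NULL); /* whole chain consumed */

The "should never happen" case from the comment at 1632 is failing to add even a single page to a freshly allocated, empty bio; anything else just means the current bio is full.
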
1865 struct page *page;
1949 /* receive payload size bytes into page chain */
1951 page = peer_req->pages;
1952 page_chain_for_each(page) {
1954 data = kmap(page);
1960 kunmap(page);
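
Lines 1951..1960 receive the payload directly into the chain: each page is kmap()ed, filled from the socket, and unmapped before moving on. A sketch, with the receive helper named as in mainline DRBD and the error path simplified:

        page = peer_req->pages;
        page_chain_for_each(page) {
                unsigned int len = min_t(int, data_size, PAGE_SIZE);

                data = kmap(page);
                err = drbd_recv_all_warn(peer_device->connection, data, len);
                kunmap(page);
                if (err)
                        return NULL;    /* real code also frees peer_req */
                data_size -= len;
        }
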
1986 struct page *page;
1993 page = drbd_alloc_pages(peer_device, 1, 1);
1995 data = kmap(page);
2004 kunmap(page);
2005 drbd_free_pages(peer_device->device, page, 0);
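
Lines 1993..2005 are the discard path: when the payload cannot be used, it still has to be read off the wire. One scratch page suffices, kmap()ed once and overwritten each round. A plausible shape of the enclosing helper (drbd_drain_block in mainline), under the same naming assumptions as above:

static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
{
        struct page *page;
        int err = 0;
        void *data;

        if (!data_size)
                return 0;

        page = drbd_alloc_pages(peer_device, 1, 1);     /* one page, retry */

        data = kmap(page);
        while (data_size) {
                unsigned int len = min_t(int, data_size, PAGE_SIZE);

                err = drbd_recv_all_warn(peer_device->connection, data, len);
                if (err)
                        break;
                data_size -= len;
        }
        kunmap(page);
        drbd_free_pages(peer_device->device, page, 0);
        return err;
}
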