Lines Matching refs:page
63 #include <asm/page.h>
74 #include <xen/page.h>
121 * Use one extent per PAGE_SIZE to avoid breaking down the page into
150 /* We increase/decrease in batches which fit in a page */
163 /* balloon_append: add the given page to the balloon. */
164 static void balloon_append(struct page *page)
166 __SetPageOffline(page);
169 if (PageHighMem(page)) {
170 list_add_tail(&page->lru, &ballooned_pages);
173 list_add(&page->lru, &ballooned_pages);
179 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
180 static struct page *balloon_retrieve(bool require_lowmem)
182 struct page *page;
187 page = list_entry(ballooned_pages.next, struct page, lru);
188 if (require_lowmem && PageHighMem(page))
190 list_del(&page->lru);
192 if (PageHighMem(page))
197 __ClearPageOffline(page);
198 return page;
201 static struct page *balloon_next_page(struct page *page)
203 struct list_head *next = page->lru.next;
206 return list_entry(next, struct page, lru);
304 * different page granularity.
309 * add_memory() will build page tables for the new memory so
356 static void xen_online_page(struct page *page, unsigned int order)
359 unsigned long start_pfn = page_to_pfn(page);
360 struct page *p;
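The three matched lines at 356-360 come from the memory-hotplug online callback. A hedged sketch of what they imply: every page of a newly onlined range is parked in the balloon rather than released to the page allocator, because the underlying frames are not yet backed by Xen. Only the signature, start_pfn and the struct page *p cursor appear in the matched lines; the loop itself is an assumption, and locking is omitted.

static void xen_online_page_sketch(struct page *page, unsigned int order)
{
	unsigned long i, size = 1UL << order;
	unsigned long start_pfn = page_to_pfn(page);
	struct page *p;

	/* Assumed loop: append each page of the onlined range to the
	 * balloon instead of freeing it to the page allocator. */
	for (i = 0; i < size; i++) {
		p = pfn_to_page(start_pfn + i);
		balloon_append(p);
	}
}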
406 struct page *page;
411 page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
413 if (!page) {
418 frame_list[i] = page_to_xen_pfn(page);
419 page = balloon_next_page(page);
427 page = balloon_retrieve(false);
428 BUG_ON(page == NULL);
430 xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
432 /* Relinquish the page back to the allocator. */
433 free_reserved_page(page);
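The matched lines 406-433 are the inflate path (in the driver this is, to the best of my knowledge, increase_reservation(), whose name does not contain "page" and so was not matched). A hedged sketch of the two passes they imply follows; frame_list, its element type and sizing (taken from the batch comment at line 150), and the xenmem_reservation_increase() wrapper are assumptions.

#include <linux/kernel.h>		/* ARRAY_SIZE */
#include <xen/page.h>			/* page_to_xen_pfn, xen_page_to_gfn */
#include <xen/mem-reservation.h>	/* xenmem_reservation_* helpers */

/* Assumption: a one-page batch buffer of frame numbers (see line 150). */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];

static int increase_reservation_sketch(unsigned long nr_pages)
{
	int rc, i;
	struct page *page;

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	/* Pass 1: collect frame numbers of pages currently in the balloon. */
	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	/* Ask Xen to back those frames with memory (assumed wrapper). */
	rc = xenmem_reservation_increase(nr_pages, frame_list);
	if (rc <= 0)
		return rc;

	/* Pass 2: take each now-populated page off the balloon, restore its
	 * mapping and hand it back to the page allocator. */
	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

		/* Relinquish the page back to the allocator. */
		free_reserved_page(page);
	}

	return 0;
}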
445 struct page *page, *tmp;
453 page = alloc_page(gfp);
454 if (page == NULL) {
459 adjust_managed_page_count(page, -1);
460 xenmem_reservation_scrub_page(page);
461 list_add(&page->lru, &pages);
478 list_for_each_entry_safe(page, tmp, &pages, lru) {
479 frame_list[i++] = xen_page_to_gfn(page);
481 xenmem_reservation_va_mapping_reset(1, &page);
483 list_del(&page->lru);
485 balloon_append(page);
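Lines 445-485 are the matching deflate path (believed to be decrease_reservation() in the driver). A sketch under the same assumptions as above: pages are taken from the kernel allocator, scrubbed and unmapped, their frames handed back to Xen, and the struct pages parked on the balloon list. The return convention and the xenmem_reservation_decrease() call are assumptions.

/* Returns the number of pages actually moved into the balloon. */
static int decrease_reservation_sketch(unsigned long nr_pages, gfp_t gfp)
{
	int i = 0;
	struct page *page, *tmp;
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	/* Pull pages out of the kernel; they will be given back to Xen. */
	while (nr_pages--) {
		page = alloc_page(gfp);
		if (page == NULL)
			break;

		adjust_managed_page_count(page, -1);
		xenmem_reservation_scrub_page(page);
		list_add(&page->lru, &pages);
	}

	/* Record each page's frame, drop its mapping and move the struct
	 * page onto the balloon list. */
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		frame_list[i++] = xen_page_to_gfn(page);

		xenmem_reservation_va_mapping_reset(1, &page);

		list_del(&page->lru);
		balloon_append(page);
	}

	/* Return the collected frames to the hypervisor (assumed wrapper). */
	xenmem_reservation_decrease(i, frame_list);

	return i;
}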
616 int alloc_xenballooned_pages(int nr_pages, struct page **pages)
619 struct page *page;
627 page = balloon_retrieve(true);
628 if (page) {
629 pages[pgno++] = page;
633 * different page granularity.
638 ret = xen_alloc_p2m_entry(page_to_pfn(page));
669 void free_xenballooned_pages(int nr_pages, struct page **pages)
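Lines 616-669 are the exported API: alloc_xenballooned_pages() hands out pages that have no backing frame (so callers such as the grant-table code can map foreign memory into them), and free_xenballooned_pages() returns them to the balloon. The sketch below keeps only what the matched lines show; the empty-balloon fallback, the GFP flags, the PV-MMU guard around xen_alloc_p2m_entry() and the error unwinding are assumptions.

/* Sketch: give nr_pages back to the balloon list. */
void free_xenballooned_pages_sketch(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		balloon_append(pages[i]);
}

/* Sketch: hand out nr_pages ballooned (frame-less) lowmem pages. */
int alloc_xenballooned_pages_sketch(int nr_pages, struct page **pages)
{
	int pgno = 0;
	struct page *page;
	int ret;

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);	/* lowmem only */
		if (page) {
			pages[pgno++] = page;
			/* Assumed guard: a P2M slot is only needed on PV-MMU
			 * guests; the matched comment at line 633 notes this
			 * is unsupported when Linux and Xen use different
			 * page granularity. */
			ret = xen_alloc_p2m_entry(page_to_pfn(page));
			if (ret < 0)
				goto out_undo;
		} else {
			/* Balloon empty: deflate a bit more and retry
			 * (assumption; the driver requests additional
			 * ballooned pages here). */
			ret = decrease_reservation_sketch(nr_pages - pgno,
							  GFP_USER);
			if (ret <= 0) {
				ret = -ENOMEM;
				goto out_undo;
			}
		}
	}
	return 0;

out_undo:
	free_xenballooned_pages_sketch(pgno, pages);
	return ret;
}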