Lines matching references to "page"
64 #include <asm/page.h>
75 #include <xen/page.h>
105 * Use one extent per PAGE_SIZE to avoid breaking the page down into
134 /* We increase/decrease in batches which fit in a page */
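The two comments above belong to the batching machinery: extents are sized to a full PAGE_SIZE so a page never has to be split into multiple Xen frames, and the hypercall frame buffer is sized to fit in one page. A minimal sketch of the declarations these comments plausibly annotate (the EXTENT_ORDER name matches older upstream trees; treat both lines as assumptions):

/* One extent per PAGE_SIZE keeps a Linux page in a single Xen extent,
 * even when XEN_PAGE_SIZE is smaller than PAGE_SIZE. */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

/* Batch buffer for reservation hypercalls; the whole array of frame
 * numbers fits in a single page. */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];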
147 /* balloon_append: add the given page to the balloon. */
148 static void balloon_append(struct page *page)
150 __SetPageOffline(page);
153 if (PageHighMem(page)) {
154 list_add_tail(&page->lru, &ballooned_pages);
157 list_add(&page->lru, &ballooned_pages);
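Taken together, the excerpted lines give balloon_append() a clear shape: mark the page offline, then queue highmem pages at the tail and lowmem pages at the head of ballooned_pages so lowmem is re-populated first. A hedged reconstruction (the stats counters and wakeup are assumptions modeled on the usual upstream code):

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
	__SetPageOffline(page);

	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;	/* assumed stats bookkeeping */
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;	/* assumed stats bookkeeping */
	}
	wake_up(&balloon_wq);			/* assumed waiter wakeup */
}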
163 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
164 static struct page *balloon_retrieve(bool require_lowmem)
166 struct page *page;
171 page = list_entry(ballooned_pages.next, struct page, lru);
172 if (require_lowmem && PageHighMem(page))
174 list_del(&page->lru);
176 if (PageHighMem(page))
181 __ClearPageOffline(page);
182 return page;
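balloon_retrieve() is the inverse operation: pop the first page off ballooned_pages, bailing out when the caller insists on lowmem but only highmem is queued. A sketch consistent with the excerpt (the stats bookkeeping is assumed):

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	if (require_lowmem && PageHighMem(page))
		return NULL;	/* only highmem left; caller wants lowmem */
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;	/* assumed stats bookkeeping */
	else
		balloon_stats.balloon_low--;	/* assumed stats bookkeeping */

	__ClearPageOffline(page);
	return page;
}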
185 static struct page *balloon_next_page(struct page *page)
187 struct list_head *next = page->lru.next;
190 return list_entry(next, struct page, lru);
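balloon_next_page() just walks the list; the only line the excerpt omits is presumably the end-of-list check:

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;

	if (next == &ballooned_pages)	/* wrapped around: no next page */
		return NULL;
	return list_entry(next, struct page, lru);
}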
288 * different page granularity.
293 * add_memory() will build page tables for the new memory so
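These two comment fragments come from the memory-hotplug path (reserve_additional_memory() in upstream trees): because add_memory() will build page tables for the hotplugged range, a PV guest seeds the P2M with INVALID_P2M_ENTRY first so the range is visible before it is onlined. A hedged sketch of that step as a hypothetical helper (the name balloon_seed_p2m and both parameters are assumptions):

#ifdef CONFIG_XEN_HAVE_PVMMU
/* Hypothetical helper: pre-seed the P2M for a hotplugged range so that
 * add_memory() can build page tables before the pages are onlined. */
static int balloon_seed_p2m(phys_addr_t start, unsigned long nr_pages)
{
	unsigned long pfn = PFN_DOWN(start);
	unsigned long i;

	/* PV MMU is unsupported when Linux and Xen page sizes differ. */
	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;	/* HVM/PVH: no P2M to maintain here */

	for (i = 0; i < nr_pages; i++)
		if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY))
			return -ENOMEM;
	return 0;
}
#endif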
340 static void xen_online_page(struct page *page, unsigned int order)
343 unsigned long start_pfn = page_to_pfn(page);
344 struct page *p;
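xen_online_page() is the hotplug online callback: instead of releasing the freshly onlined pages to the page allocator, it parks every page of the order-sized block in the balloon. A sketch under that reading (the locking and debug print are assumptions):

static void xen_online_page(struct page *page, unsigned int order)
{
	unsigned long start_pfn = page_to_pfn(page);
	unsigned long size = (1UL << order);
	unsigned long i;
	struct page *p;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	mutex_lock(&balloon_mutex);	/* assumed: serializes list access */
	for (i = 0; i < size; i++) {
		p = pfn_to_page(start_pfn + i);
		balloon_append(p);	/* park the page in the balloon */
	}
	mutex_unlock(&balloon_mutex);
}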
390 struct page *page;
395 page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
397 if (!page) {
402 frame_list[i] = page_to_xen_pfn(page);
403 page = balloon_next_page(page);
411 page = balloon_retrieve(false);
412 BUG_ON(page == NULL);
414 xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
416 /* Relinquish the page back to the allocator. */
417 free_reserved_page(page);
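Lines 390-417 trace the inflate-memory path, increase_reservation(): gather candidate frames from the balloon list into frame_list, ask Xen to populate them, then for each granted frame retrieve the page, fix up its virtual mapping, and release it to the allocator. A hedged reconstruction (the enum bp_state values and the xenmem_reservation_increase() call are assumed from the usual upstream structure):

static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long i;
	struct page *page;

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);	/* one batch per call */

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;	/* balloon ran dry */
			break;
		}
		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	/* Ask the hypervisor to back the frames (assumed helper). */
	rc = xenmem_reservation_increase(nr_pages, frame_list);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

		/* Relinquish the page back to the allocator. */
		free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;	/* assumed stats update */
	return BP_DONE;
}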
429 struct page *page, *tmp;
437 page = alloc_page(gfp);
438 if (page == NULL) {
443 adjust_managed_page_count(page, -1);
444 xenmem_reservation_scrub_page(page);
445 list_add(&page->lru, &pages);
462 list_for_each_entry_safe(page, tmp, &pages, lru) {
463 frame_list[i++] = xen_page_to_gfn(page);
465 xenmem_reservation_va_mapping_reset(1, &page);
467 list_del(&page->lru);
469 balloon_append(page);
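Lines 429-469 trace the opposite path, decrease_reservation(): allocate pages from the kernel, scrub them so no data leaks to the hypervisor, reset their virtual mappings, queue their frames, and append each page to the balloon before returning the frames to Xen. A sketch along those lines (the bp_state handling and xenmem_reservation_decrease() call are assumed):

static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long i;
	struct page *page, *tmp;
	int ret;
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;	/* retry later */
			break;
		}
		adjust_managed_page_count(page, -1);
		xenmem_reservation_scrub_page(page);	/* no data leaks to Xen */
		list_add(&page->lru, &pages);
	}

	/* Ensure ballooned highmem pages don't have lingering kmaps. */
	kmap_flush_unused();

	i = 0;
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		frame_list[i++] = xen_page_to_gfn(page);

		xenmem_reservation_va_mapping_reset(1, &page);

		list_del(&page->lru);
		balloon_append(page);
	}

	flush_tlb_all();

	/* Return the frames to the hypervisor (assumed helper). */
	ret = xenmem_reservation_decrease(nr_pages, frame_list);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;	/* assumed stats update */
	return state;
}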
600 int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
603 struct page *page;
611 page = balloon_retrieve(true);
612 if (page) {
613 pages[pgno++] = page;
617 * different page granularity.
622 ret = xen_alloc_p2m_entry(page_to_pfn(page));
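xen_alloc_ballooned_pages() lends already-ballooned pages to other drivers (grant mappings and the like). The excerpt shows the lowmem-only retrieve and the P2M entry allocation for PV guests; the surrounding loop plausibly looks like this (add_ballooned_pages() and the out_undo unwinding are assumptions):

int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int pgno = 0;
	struct page *page;
	int ret;

	mutex_lock(&balloon_mutex);
	balloon_stats.target_unpopulated += nr_pages;	/* assumed accounting */

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);	/* lowmem only */
		if (page) {
			pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
			/*
			 * We don't support PV MMU when Linux and Xen are
			 * using different page granularity.
			 */
			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				ret = xen_alloc_p2m_entry(page_to_pfn(page));
				if (ret < 0)
					goto out_undo;
			}
#endif
		} else {
			/* Balloon is empty: grow it first (assumed helper). */
			ret = add_ballooned_pages(nr_pages - pgno);
			if (ret < 0)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;

out_undo:
	mutex_unlock(&balloon_mutex);
	/* The free below only undoes the accounting for pgno pages. */
	balloon_stats.target_unpopulated -= nr_pages - pgno;
	xen_free_ballooned_pages(pgno, pages);
	return ret;
}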
653 void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
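Its counterpart returns the pages to the balloon. A minimal sketch, assuming the same lock and accounting as above (upstream versions additionally kick the balloon worker here):

void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	balloon_stats.target_unpopulated -= nr_pages;	/* assumed accounting */

	mutex_unlock(&balloon_mutex);
}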