Lines matching refs:page (mm/highmem.c)
13 * Rewrote high memory support to move the page cache into
62 * Determine color of virtual address where the page should be mapped.
64 static inline unsigned int get_pkmap_color(struct page *page)
71 * Get next index for mapping inside PKMAP region for page with given color.
82 * Determine if page index inside PKMAP region (pkmap_nr) of given color
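For most architectures there is no cache coloring to do and these helpers collapse to trivial forms. A sketch of the default (no-coloring) variants, assuming the arch does not override them; exact parameter types differ slightly across kernel versions:

static inline unsigned int get_pkmap_color(struct page *page)
{
        return 0;               /* single color: no aliasing constraint */
}

static inline unsigned long get_next_pkmap_nr(unsigned int color)
{
        static unsigned long last_pkmap_nr;

        /* cycle through the PKMAP slots, wrapping at the region end */
        last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
        return last_pkmap_nr;
}

static inline int no_more_pkmaps(unsigned long pkmap_nr, unsigned int color)
{
        /* wrapped back to 0: all slots visited, try flushing unused ones */
        return pkmap_nr == 0;
}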
154 struct page *__kmap_to_page(void *vaddr)
193 struct page *page;
212 * no-one has the page mapped, and cannot get at
217 page = pte_page(ptent);
220 set_page_address(page, NULL);
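The fragments at lines 193-220 sit inside flush_all_zero_pkmaps(), which reclaims every slot whose count has dropped to 1 (no users left, but the TLB entry may still be stale). A condensed reconstruction, not the verbatim function:

        for (i = 0; i < LAST_PKMAP; i++) {
                pte_t ptent;

                if (pkmap_count[i] != 1)        /* in use, or already free */
                        continue;
                pkmap_count[i] = 0;

                /*
                 * no-one has the page mapped, and cannot get at its
                 * virtual address anymore: safe to tear the pte down.
                 */
                ptent = ptep_get_and_clear(&init_mm, PKMAP_ADDR(i),
                                           &pkmap_page_table[i]);
                set_page_address(pte_page(ptent), NULL);
                need_flush = 1;
        }
        if (need_flush)
                flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));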
234 static inline unsigned long map_new_virtual(struct page *page)
239 unsigned int color = get_pkmap_color(page);
271 if (page_address(page))
272 return (unsigned long)page_address(page);
280 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
283 set_page_address(page, (void *)vaddr);
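Once map_new_virtual() has found a free slot, the virtual address is just a function of the slot index. A hedged reconstruction of the tail around line 280, consistent with the hits above:

        vaddr = PKMAP_ADDR(last_pkmap_nr);
        set_pte_at(&init_mm, vaddr,
                   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

        pkmap_count[last_pkmap_nr] = 1; /* kmap_high() bumps this to 2 */
        set_page_address(page, (void *)vaddr);

        return vaddr;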
289 * kmap_high - map a highmem page into memory
290 * @page: &struct page to map
292 * Returns the page's virtual memory address.
296 void *kmap_high(struct page *page)
305 vaddr = (unsigned long)page_address(page);
307 vaddr = map_new_virtual(page);
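kmap_high() is normally reached through kmap(), which short-circuits for lowmem pages. A minimal caller sketch; copy_from_page() here is a hypothetical helper, not a kernel API:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy len bytes out of a possibly-highmem page.
 * May sleep, so this cannot run in atomic context. */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
        void *vaddr = kmap(page);       /* kmap_high() for highmem pages */

        memcpy(dst, vaddr, len);
        kunmap(page);                   /* drops the pkmap reference */
}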
317 * kmap_high_get - pin a highmem page into memory
318 * @page: &struct page to pin
320 * Returns the page's current virtual memory address, or NULL if no mapping
326 void *kmap_high_get(struct page *page)
331 vaddr = (unsigned long)page_address(page);
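kmap_high_get() only exists when the architecture defines ARCH_NEEDS_KMAP_HIGH_GET; ARM uses it from cache maintenance code, where the caller must not create a new mapping. The usual pattern, sketched with ARM's __cpuc_flush_dcache_area() standing in for whatever cache op the caller needs:

        void *vaddr = kmap_high_get(page);

        if (vaddr) {
                /* page was already kmapped; the mapping is now pinned */
                __cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
                kunmap_high(page);      /* drop the extra pin */
        }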
342 * kunmap_high - unmap a highmem page
343 * @page: &struct page to unmap
348 void kunmap_high(struct page *page)
354 unsigned int color = get_pkmap_color(page);
358 vaddr = (unsigned long)page_address(page);
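Internally, kunmap_high() only drops a reference on the slot; a count of 1 marks it reclaimable by the next flush_all_zero_pkmaps(). Sketched with the locking pared down to the essentials:

        nr = PKMAP_NR(vaddr);
        need_wakeup = 0;
        switch (--pkmap_count[nr]) {
        case 0:
                BUG();          /* unbalanced kunmap_high() */
        case 1:
                /*
                 * Last user gone. The slot stays mapped until the next
                 * flush, but wake anyone stuck in map_new_virtual()
                 * waiting for a slot to become reclaimable.
                 */
                need_wakeup = waitqueue_active(&pkmap_map_wait);
        }
        if (need_wakeup)
                wake_up(&pkmap_map_wait);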
392 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
397 BUG_ON(end1 > page_size(page) || end2 > page_size(page));
404 for (i = 0; i < compound_nr(page); i++) {
414 kaddr = kmap_local_page(page + i);
429 kaddr = kmap_local_page(page + i);
438 flush_dcache_page(page + i);
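The point of the two-range signature is that callers can zero everything around a byte range [from, to) in a single call instead of two separate kmaps; a common filesystem-style use:

        /* zero [0, from) and [to, PAGE_SIZE) around the bytes written */
        zero_user_segments(page, 0, from, to, PAGE_SIZE);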
456 * slot is unused which acts as a guard page
500 static inline void *arch_kmap_local_high_get(struct page *page)
564 void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
573 if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
574 return page_address(page);
577 kmap = arch_kmap_local_high_get(page);
581 return __kmap_local_pfn_prot(page_to_pfn(page), prot);
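__kmap_local_page_prot() backs kmap_local_page(). The local mappings are CPU-local and strictly nest, so unmaps must happen in reverse order; a minimal usage sketch:

        void *kaddr = kmap_local_page(page);    /* disables migration */

        memset(kaddr, 0, PAGE_SIZE);
        kunmap_local(kaddr);                    /* reverse order if nested */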
713 * Describes one page->virtual association
716 struct page *page;
731 static struct page_address_slot *page_slot(const struct page *page)
733 return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
737 * page_address - get the mapped virtual address of a page
738 * @page: &struct page to get the virtual address of
740 * Returns the page's virtual address.
742 void *page_address(const struct page *page)
748 if (!PageHighMem(page))
749 return lowmem_page_address(page);
751 pas = page_slot(page);
758 if (pam->page == page) {
771 * set_page_address - set a page's virtual address
772 * @page: &struct page to set
775 void set_page_address(struct page *page, void *virtual)
781 BUG_ON(!PageHighMem(page));
783 pas = page_slot(page);
786 pam->page = page;
795 if (pam->page == page) {