Lines matching refs:head in mm/huge_memory.c (the THP split path: __split_huge_page_tail(), __split_huge_page(), and split_huge_page_to_list()).

2380 static void __split_huge_page_tail(struct page *head, int tail,
2383 struct page *page_tail = head + tail;
2394 page_tail->flags |= (head->flags &
2412 page_tail->mapping = head->mapping;
2413 page_tail->index = head->index + tail;
2427 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2428 PageSwapCache(head)));
2430 if (page_is_young(head))
2432 if (page_is_idle(head))
2435 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2442 lru_add_page_tail(head, page_tail, lruvec, list);
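
The unfreeze at 2427-2428 sets the starting refcount of each freshly split tail: one baseline reference, plus one more when the tail sits in a cache (file-backed pages live in the page cache; anon pages only carry a cache reference while they are in swap cache). A minimal user-space sketch of just that arithmetic; tail_ref_count() is an invented name, not a kernel helper:

#include <stdbool.h>
#include <stdio.h>

/* Models the expression at 2427: 1 + (!PageAnon(head) || PageSwapCache(head)). */
static int tail_ref_count(bool anon, bool swapcache)
{
	return 1 + (!anon || swapcache);
}

int main(void)
{
	printf("file-backed tail: %d\n", tail_ref_count(false, false)); /* 2 */
	printf("plain anon tail:  %d\n", tail_ref_count(true, false));  /* 1 */
	printf("anon in swap:     %d\n", tail_ref_count(true, true));   /* 2 */
	return 0;
}
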
2448 struct page *head = compound_head(page);
2449 pg_data_t *pgdat = page_pgdat(head);
2453 unsigned int nr = thp_nr_pages(head);
2456 lruvec = mem_cgroup_page_lruvec(head, pgdat);
2459 split_page_memcg(head, nr);
2461 if (PageAnon(head) && PageSwapCache(head)) {
2462 swp_entry_t entry = { .val = page_private(head) };
2470 __split_huge_page_tail(head, i, lruvec, list);
2472 if (head[i].index >= end) {
2473 ClearPageDirty(head + i);
2474 __delete_from_page_cache(head + i, NULL);
2475 if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
2476 shmem_uncharge(head->mapping->host, 1);
2477 put_page(head + i);
2479 __xa_store(&head->mapping->i_pages, head[i].index,
2480 head + i, 0);
2483 head + i, 0);
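
Lines 2470-2483 are the per-tail loop of __split_huge_page(): each tail is initialized, then either dropped from the page cache when it lies beyond i_size (end), stored back into the mapping's XArray when file-backed, or stored into the swap address space when it is an anon page in swap cache. A hedged sketch of that dispatch; the enum and classify_tail() are invented for illustration, and for anon mappings the kernel passes end == -1, so the beyond-EOF branch never fires there:

#include <stdbool.h>
#include <stdio.h>

enum tail_disposition {
	DROP_BEYOND_EOF,     /* ClearPageDirty + __delete_from_page_cache + put_page */
	STORE_IN_PAGE_CACHE, /* __xa_store into head->mapping->i_pages */
	STORE_IN_SWAP_CACHE, /* __xa_store into the swap address space */
	ANON_NO_STORE,       /* plain anon tail: nothing to store */
};

static enum tail_disposition classify_tail(unsigned long index, unsigned long end,
					   bool anon, bool in_swap_cache)
{
	if (index >= end)
		return DROP_BEYOND_EOF;
	if (!anon)
		return STORE_IN_PAGE_CACHE;
	if (in_swap_cache)
		return STORE_IN_SWAP_CACHE;
	return ANON_NO_STORE;
}

int main(void)
{
	/* A tail at file index 600 when the file ends at index 512. */
	printf("%d\n", classify_tail(600, 512, false, false)); /* 0: dropped */
	printf("%d\n", classify_tail(100, 512, false, false)); /* 1: page cache */
	return 0;
}
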
2487 ClearPageCompound(head);
2489 split_page_owner(head, nr);
2492 if (PageAnon(head)) {
2494 if (PageSwapCache(head)) {
2495 page_ref_add(head, 2);
2498 page_ref_inc(head);
2502 page_ref_add(head, 2);
2503 xa_unlock(&head->mapping->i_pages);
2508 remap_page(head, nr);
2510 if (PageSwapCache(head)) {
2511 swp_entry_t entry = { .val = page_private(head) };
2517 struct page *subpage = head + i;
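
Lines 2487-2517 finish the split: the head stops being compound, and because its refcount was frozen to zero earlier in split_huge_page_to_list(), references are re-added at 2495-2502 with page_ref_inc()/page_ref_add(): the caller's pin, plus one more for the swap cache or page cache when the page is in one. A rough model of just that count; head_refs_readded() is made up, not a kernel function:

#include <stdbool.h>

/* How many references 2492-2503 re-add to the frozen head. */
static int head_refs_readded(bool anon, bool swapcache)
{
	if (anon)
		return swapcache ? 2 : 1; /* caller, plus swap cache if present */
	return 2;                         /* caller + page cache */
}

int main(void)
{
	return (head_refs_readded(true, false) == 1 &&
		head_refs_readded(false, false) == 2) ? 0 : 1;
}
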
2639 * Both head page and tail pages will inherit mapping, flags, and so on from
2651 struct page *head = compound_head(page);
2652 struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
2653 struct deferred_split *ds_queue = get_deferred_split_queue(head);
2660 VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
2661 VM_BUG_ON_PAGE(!PageLocked(head), head);
2662 VM_BUG_ON_PAGE(!PageCompound(head), head);
2664 if (PageWriteback(head))
2667 if (PageAnon(head)) {
2676 anon_vma = page_get_anon_vma(head);
2685 mapping = head->mapping;
2701 * head page lock is good enough to serialize the trimming.
2710 if (!can_split_huge_page(head, &extra_pins)) {
2715 unmap_page(head);
2721 XA_STATE(xas, &mapping->i_pages, page_index(head));
2724 * Check if the head page is present in page cache.
2725 * We assume all tails are present too, if head is there.
2728 if (xas_load(&xas) != head)
2734 if (page_ref_freeze(head, 1 + extra_pins)) {
2735 if (!list_empty(page_deferred_list(head))) {
2737 list_del(page_deferred_list(head));
2741 if (PageSwapBacked(head))
2742 __dec_node_page_state(head, NR_SHMEM_THPS);
2744 __dec_node_page_state(head, NR_FILE_THPS);
2755 remap_page(head, thp_nr_pages(head));
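
split_huge_page_to_list() (lines 2651-2755) only commits to the split when page_ref_freeze(head, 1 + extra_pins) at 2734 succeeds, i.e. when the only references are the caller's pin plus the mapping pins counted by can_split_huge_page() at 2710; any unexpected extra pin (a concurrent GUP, say) makes the freeze fail and the page is remapped intact at 2755. page_ref_freeze() boils down to a compare-and-swap of the refcount against the expected value. A user-space model with C11 atomics; ref_freeze() is an invented stand-in, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Models page_ref_freeze(): atomically replace the refcount with 0,
 * but only if it is exactly the expected number of pins. */
static bool ref_freeze(atomic_int *refcount, int expected)
{
	return atomic_compare_exchange_strong(refcount, &expected, 0);
}

int main(void)
{
	atomic_int ref = 3;                     /* caller + 2 expected cache pins */
	printf("%d\n", ref_freeze(&ref, 3));    /* 1: frozen, refcount now 0 */

	atomic_int pinned = 4;                  /* one extra (e.g. GUP) pin */
	printf("%d\n", ref_freeze(&pinned, 3)); /* 0: split must back out */
	return 0;
}

Freezing to zero is what makes the split safe: while the count is zero, speculative lookups that use the get_page_unless_zero() pattern cannot take a new reference behind the splitter's back.
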
2854 /* Take pin on all head pages to avoid freeing them under us */
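
Line 2854 is from the debugfs split test loop: each candidate head page is pinned first so it cannot be freed while being split, using the get_page_unless_zero() pattern, where the refcount is incremented only if it is still non-zero. A user-space model of that conditional pin; pin_unless_zero() is an invented name:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Models get_page_unless_zero(): take a reference only while the
 * object is still live; a refcount of 0 means it may already be freed. */
static bool pin_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);
	while (old != 0) {
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; retry */
	}
	return false;
}

int main(void)
{
	atomic_int live = 1, dead = 0;
	printf("%d %d\n", pin_unless_zero(&live), pin_unless_zero(&dead)); /* 1 0 */
	return 0;
}
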