Lines matching refs:new_zhdr

681 struct z3fold_header *new_zhdr = NULL;
713 new_zhdr = __z3fold_alloc(pool, sz, false);
714 if (!new_zhdr)
717 if (WARN_ON(new_zhdr == zhdr))
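The matches at 713-717 are the allocation step: a destination header is obtained (locked) and sanity-checked before anything is moved. A hedged reconstruction of that step follows; the surrounding function appears to be compact_single_buddy(), and the enclosing if/locals shown here are assumptions based on the mainline mm/z3fold.c source, not part of this listing.

    if (sz > 0) {
        enum buddy new_bud = HEADLESS;
        short chunks = size_to_chunks(sz);
        void *q;

        /* grab a page that can host sz bytes; it is returned locked */
        new_zhdr = __z3fold_alloc(pool, sz, false);
        if (!new_zhdr)
            return NULL;            /* nothing to compact into */

        /* moving the object onto its own page would be a bug */
        if (WARN_ON(new_zhdr == zhdr))
            goto out_fail;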
720 new_bud = get_free_buddy(new_zhdr, chunks);
721 q = new_zhdr;
724 new_zhdr->first_chunks = chunks;
728 new_zhdr->middle_chunks = chunks;
729 new_zhdr->start_middle =
730 new_zhdr->first_chunks + ZHDR_CHUNKS;
731 q += new_zhdr->start_middle << CHUNK_SHIFT;
734 new_zhdr->last_chunks = chunks;
735 q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
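Lines 720-735 all sit inside one switch on the buddy slot returned by get_free_buddy(): each case records the chunk count in the matching field of new_zhdr and advances q to the byte offset where that buddy's data lives. A sketch of how those matches fit together; the case structure, the FIRST offset and the default branch are assumptions drawn from the mainline source.

        new_bud = get_free_buddy(new_zhdr, chunks);
        q = new_zhdr;                   /* q walks to the copy destination */
        switch (new_bud) {
        case FIRST:
            new_zhdr->first_chunks = chunks;
            q += ZHDR_SIZE_ALIGNED;     /* data starts right after the header */
            break;
        case MIDDLE:
            new_zhdr->middle_chunks = chunks;
            new_zhdr->start_middle =
                new_zhdr->first_chunks + ZHDR_CHUNKS;
            q += new_zhdr->start_middle << CHUNK_SHIFT;
            break;
        case LAST:
            new_zhdr->last_chunks = chunks;
            q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
            break;
        default:
            goto out_fail;
        }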
740 new_zhdr->foreign_handles++;
743 *(unsigned long *)old_handle = (unsigned long)new_zhdr +
744 __idx(new_zhdr, new_bud);
747 (new_zhdr->last_chunks << BUDDY_SHIFT);
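Lines 740-747 are the handover: the foreign-handle count on the new page is bumped, the payload is copied, and the old handle is rewritten in place so that it now points at new_zhdr plus the index of the chosen buddy; for a LAST buddy the chunk count is additionally folded into the handle above BUDDY_SHIFT. A hedged sketch of that span; the memcpy and the slots-lock lines are assumptions, only the lines listed above appear in this search.

        new_zhdr->foreign_handles++;    /* the handle now lives on a foreign page */
        memcpy(q, p, sz);               /* move the payload */
        write_lock(&zhdr->slots->lock);
        *(unsigned long *)old_handle = (unsigned long)new_zhdr +
            __idx(new_zhdr, new_bud);
        if (new_bud == LAST)
            *(unsigned long *)old_handle |=
                    (new_zhdr->last_chunks << BUDDY_SHIFT);
        write_unlock(&zhdr->slots->lock);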
749 add_to_unbuddied(pool, new_zhdr);
750 z3fold_page_unlock(new_zhdr);
755 return new_zhdr;
758 if (new_zhdr) {
759 if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
762 add_to_unbuddied(pool, new_zhdr);
763 z3fold_page_unlock(new_zhdr);
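The last matches in this function (758-763) are the unwind path: if a new page had already been allocated when something failed, its reference is dropped; if the kref does not reach zero, the page is instead put back on the unbuddied lists and unlocked so it stays usable. A sketch of the whole out_fail block, with the kref-reached-zero branch filled in as an assumption from the mainline source.

    out_fail:
        if (new_zhdr) {
            if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
                atomic64_dec(&pool->pages_nr);   /* page has been freed */
            else {
                /* keep the page around for future allocations */
                add_to_unbuddied(pool, new_zhdr);
                z3fold_page_unlock(new_zhdr);
            }
        }
        return NULL;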
1619 struct z3fold_header *zhdr, *new_zhdr;
1642 new_zhdr = page_address(newpage);
1643 memcpy(new_zhdr, zhdr, PAGE_SIZE);
1647 spin_lock_init(&new_zhdr->page_lock);
1648 INIT_WORK(&new_zhdr->work, compact_page_work);
1650 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
1653 INIT_LIST_HEAD(&new_zhdr->buddy);
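The second group of matches (1619 onward) appears to be the page-migration callback: the whole source page is copied into the new one, after which the fields that must not be inherited byte-for-byte, namely the page lock, the work item and the buddy list head, are reinitialised. A hedged reconstruction of that step; any line not shown in the listing above is an assumption.

        new_zhdr = page_address(newpage);
        memcpy(new_zhdr, zhdr, PAGE_SIZE);      /* header plus all buddies */

        /* locks and work items cannot be copied; re-create them */
        spin_lock_init(&new_zhdr->page_lock);
        INIT_WORK(&new_zhdr->work, compact_page_work);
        /*
         * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
         * so we only have to reinit it.
         */
        INIT_LIST_HEAD(&new_zhdr->buddy);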
1659 z3fold_page_lock(new_zhdr);
1660 if (new_zhdr->first_chunks)
1661 encode_handle(new_zhdr, FIRST);
1662 if (new_zhdr->last_chunks)
1663 encode_handle(new_zhdr, LAST);
1664 if (new_zhdr->middle_chunks)
1665 encode_handle(new_zhdr, MIDDLE);
1667 new_zhdr->cpu = smp_processor_id();
1672 z3fold_page_unlock(new_zhdr);
1674 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
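The remaining matches (1659-1674) re-encode a handle for every buddy populated on the new page, bind the header to the current CPU, and queue the per-page compaction work there so the migrated page gets compacted on its new home. A sketch of how those lines fit together; the set_bit and __SetPageMovable lines are assumptions drawn from the mainline source, not from this listing.

        z3fold_page_lock(new_zhdr);
        if (new_zhdr->first_chunks)
            encode_handle(new_zhdr, FIRST);
        if (new_zhdr->last_chunks)
            encode_handle(new_zhdr, LAST);
        if (new_zhdr->middle_chunks)
            encode_handle(new_zhdr, MIDDLE);
        set_bit(NEEDS_COMPACTING, &page->private);
        new_zhdr->cpu = smp_processor_id();
        __SetPageMovable(newpage, new_mapping);
        z3fold_page_unlock(new_zhdr);

        queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);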