Lines matching defs:zhdr in mm/z3fold.c (the z3fold compressed-page allocator)
221 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
223 spin_lock(&zhdr->page_lock);
227 static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
229 return spin_trylock(&zhdr->page_lock);
233 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
235 spin_unlock(&zhdr->page_lock);
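For context, the three matches at 221-235 make up the complete page-lock helpers; below they are reconstructed with only braces and the kernel's own one-line comments added:

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}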
242 struct z3fold_header *zhdr;
252 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
253 locked = z3fold_page_trylock(zhdr);
256 struct page *page = virt_to_page(zhdr);
260 z3fold_page_unlock(zhdr);
265 zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
268 return zhdr;
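The matches at 242-268 come from get_z3fold_header(), which resolves a handle back to its locked page header. A reconstruction follows; the lines not present in the matches (slots locking, the PAGE_MIGRATED re-check, cpu_relax()) are filled in from the surrounding kernel source and are version-dependent:

static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
        struct z3fold_header *zhdr;
        int locked = 0;

        if (!(handle & (1 << PAGE_HEADLESS))) {
                /* non-headless: the handle points into a slots array */
                struct z3fold_buddy_slots *slots = handle_to_slots(handle);
                do {
                        unsigned long addr;

                        read_lock(&slots->lock);
                        addr = *(unsigned long *)handle;
                        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
                        locked = z3fold_page_trylock(zhdr);
                        read_unlock(&slots->lock);
                        if (locked) {
                                struct page *page = virt_to_page(zhdr);

                                /* done unless the page migrated under us */
                                if (!test_bit(PAGE_MIGRATED, &page->private))
                                        break;
                                z3fold_page_unlock(zhdr);
                                locked = 0;
                        }
                        cpu_relax();
                } while (true);
        } else {
                /* headless: the handle is the page address plus a flag bit */
                zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
        }

        return zhdr;
}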
271 static inline void put_z3fold_header(struct z3fold_header *zhdr)
273 struct page *page = virt_to_page(zhdr);
276 z3fold_page_unlock(zhdr);
279 static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
297 if (zhdr->slots != slots)
298 zhdr->foreign_handles--;
312 if (zhdr->slots == slots)
313 zhdr->slots = NULL;
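The free_handle() matches at 297-313 encode slot ownership. A handle living in a slots object other than zhdr->slots is a "foreign" handle left behind by page migration, so freeing it decrements zhdr->foreign_handles; and when the last handle in the header's own slots object goes away, the header detaches it so the object can be freed. Annotated excerpt (the comments are editorial, not source):

        if (zhdr->slots != slots)
                zhdr->foreign_handles--;        /* handle outlived a migration */
        /* ... all slots scanned and found empty ... */
        if (zhdr->slots == slots)
                zhdr->slots = NULL;             /* detach before freeing slots */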
322 struct z3fold_header *zhdr = page_address(page);
332 return zhdr;
338 memset(zhdr, 0, sizeof(*zhdr));
339 spin_lock_init(&zhdr->page_lock);
340 kref_init(&zhdr->refcount);
341 zhdr->cpu = -1;
342 zhdr->slots = slots;
343 zhdr->pool = pool;
344 INIT_LIST_HEAD(&zhdr->buddy);
345 INIT_WORK(&zhdr->work, compact_page_work);
346 return zhdr;
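The initializer body at 338-346 is complete in the matches; annotated field by field:

        memset(zhdr, 0, sizeof(*zhdr));         /* all chunk counts start at zero */
        spin_lock_init(&zhdr->page_lock);       /* the lock behind 221-235 */
        kref_init(&zhdr->refcount);             /* initial ref, dropped on free */
        zhdr->cpu = -1;                         /* not on any per-CPU unbuddied list yet */
        zhdr->slots = slots;                    /* preallocated handle slots */
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);           /* unbuddied/stale list linkage */
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;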
361 static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
363 return (bud + zhdr->first_num) & BUDDY_MASK;
370 static unsigned long __encode_handle(struct z3fold_header *zhdr,
374 unsigned long h = (unsigned long)zhdr;
385 idx = __idx(zhdr, bud);
388 h |= (zhdr->last_chunks << BUDDY_SHIFT);
396 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
398 return __encode_handle(zhdr, zhdr->slots, bud);
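Handle layout, per __encode_handle() (370-398): a headless handle is the page address with the PAGE_HEADLESS bit set; otherwise h packs the salted buddy index from __idx() into the low bits of the header address, ORs last_chunks in above BUDDY_SHIFT for LAST objects (matched line 388), stores h into the slot, and hands the slot's address back as the handle. A sketch of the non-headless tail; the if (bud == LAST) guard and the slot store come from the surrounding source, not the matches:

        unsigned long h = (unsigned long)zhdr;  /* page-aligned, low bits free */
        int idx = __idx(zhdr, bud);

        h += idx;                               /* low bits: salted buddy index */
        if (bud == LAST)
                h |= (zhdr->last_chunks << BUDDY_SHIFT);  /* object size */

        slots->slot[idx] = h;
        return (unsigned long)&slots->slot[idx];  /* handle = slot address */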
414 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
420 struct z3fold_header *zhdr;
428 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
429 return (addr - zhdr->first_num) & BUDDY_MASK;
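handle_to_buddy() (414-429) inverts __idx(): encode computed (bud + first_num) & BUDDY_MASK, so subtracting first_num modulo BUDDY_MASK + 1 recovers the buddy. A worked example, assuming BUDDY_MASK == 0x3 and LAST == 3 as in the source's enum:

/* first_num == 2, object is the LAST buddy (enum value 3):
 *   encode: idx = (3 + 2) & 3 = 1   -- note idx < first_num, exactly the
 *                                      situation the comment at 414 anticipates
 *   decode: bud = (1 - 2) & 3 = 3 == LAST, recovered correctly because the
 *           subtraction wraps modulo BUDDY_MASK + 1
 */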
432 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
434 return zhdr->pool;
437 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
439 struct page *page = virt_to_page(zhdr);
440 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
442 WARN_ON(!list_empty(&zhdr->buddy));
449 z3fold_page_unlock(zhdr);
452 list_add(&zhdr->buddy, &pool->stale);
461 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
463 WARN_ON(z3fold_page_trylock(zhdr));
464 __release_z3fold_page(zhdr, true);
469 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
471 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
474 list_del_init(&zhdr->buddy);
477 WARN_ON(z3fold_page_trylock(zhdr));
478 __release_z3fold_page(zhdr, true);
481 static inline int put_z3fold_locked(struct z3fold_header *zhdr)
483 return kref_put(&zhdr->refcount, release_z3fold_page_locked);
486 static inline int put_z3fold_locked_list(struct z3fold_header *zhdr)
488 return kref_put(&zhdr->refcount, release_z3fold_page_locked_list);
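Both release callbacks (461-478) run via kref_put() from a context that already holds the page lock, which is what the trylock-inside-WARN_ON checks:

/* WARN_ON(z3fold_page_trylock(zhdr)) fires only if the trylock unexpectedly
 * succeeds, i.e. the caller did NOT already hold the page lock; on the
 * expected path the trylock fails silently.  The _list variant additionally
 * unlinks zhdr->buddy (474) before handing off to __release_z3fold_page(),
 * which parks the header on pool->stale (452) for free_pages_work (497-505).
 */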
497 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
499 struct page *page = virt_to_page(zhdr);
501 list_del(&zhdr->buddy);
505 cancel_work_sync(&zhdr->work);
517 static int num_free_chunks(struct z3fold_header *zhdr)
525 if (zhdr->middle_chunks != 0) {
526 int nfree_before = zhdr->first_chunks ?
527 0 : zhdr->start_middle - ZHDR_CHUNKS;
528 int nfree_after = zhdr->last_chunks ?
530 (zhdr->start_middle + zhdr->middle_chunks);
533 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
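num_free_chunks() (517-533) reports the largest allocation the page can still take. Reconstructed below; the max() pick and the head of the nfree_after expression are filled in from the surrounding source (the matches cut off at 528 and 530):

static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;

        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the size
         * of the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}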
539 struct z3fold_header *zhdr)
541 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
542 zhdr->middle_chunks == 0) {
544 int freechunks = num_free_chunks(zhdr);
549 list_add(&zhdr->buddy, &unbuddied[freechunks]);
551 zhdr->cpu = smp_processor_id();
556 static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
560 if (zhdr->middle_chunks) {
561 if (!zhdr->first_chunks &&
562 chunks <= zhdr->start_middle - ZHDR_CHUNKS)
564 else if (!zhdr->last_chunks)
567 if (!zhdr->first_chunks)
569 else if (!zhdr->last_chunks)
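get_free_buddy() (556-569) maps a request size onto a free buddy slot. In the reconstruction below the conditions are the matched lines; the bud = ... assignments and the HEADLESS default are filled in from the surrounding source:

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
        enum buddy bud = HEADLESS;

        if (zhdr->middle_chunks) {
                /* middle in place: only first or last can still be free */
                if (!zhdr->first_chunks &&
                    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
                        bud = FIRST;
                else if (!zhdr->last_chunks)
                        bud = LAST;
        } else {
                if (!zhdr->first_chunks)
                        bud = FIRST;
                else if (!zhdr->last_chunks)
                        bud = LAST;
                else
                        bud = MIDDLE;
        }

        return bud;
}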
578 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
581 void *beg = zhdr;
583 beg + (zhdr->start_middle << CHUNK_SHIFT),
584 zhdr->middle_chunks << CHUNK_SHIFT);
587 static inline bool buddy_single(struct z3fold_header *zhdr)
589 return !((zhdr->first_chunks && zhdr->middle_chunks) ||
590 (zhdr->first_chunks && zhdr->last_chunks) ||
591 (zhdr->middle_chunks && zhdr->last_chunks));
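buddy_single() (587-591) is a De Morgan form of "at most one buddy is in use": it returns true iff no two of first/middle/last have nonzero chunk counts, which is the precondition for compact_single_buddy() trying to move that lone object to another page.

/* Example: first_chunks = 5, middle_chunks = 0, last_chunks = 0
 *   -> all three pairwise ANDs are 0, so buddy_single() == true
 * Example: first_chunks = 5, last_chunks = 2
 *   -> (first_chunks && last_chunks) != 0, so buddy_single() == false
 */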
594 static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
596 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
597 void *p = zhdr;
601 int first_idx = __idx(zhdr, FIRST);
602 int middle_idx = __idx(zhdr, MIDDLE);
603 int last_idx = __idx(zhdr, LAST);
610 if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
612 sz = zhdr->first_chunks << CHUNK_SHIFT;
613 old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
614 moved_chunks = &zhdr->first_chunks;
615 } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
616 p += zhdr->start_middle << CHUNK_SHIFT;
617 sz = zhdr->middle_chunks << CHUNK_SHIFT;
618 old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
619 moved_chunks = &zhdr->middle_chunks;
620 } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
621 p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
622 sz = zhdr->last_chunks << CHUNK_SHIFT;
623 old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
624 moved_chunks = &zhdr->last_chunks;
636 if (WARN_ON(new_zhdr == zhdr))
661 write_lock(&zhdr->slots->lock);
667 write_unlock(&zhdr->slots->lock);
687 static int z3fold_compact_page(struct z3fold_header *zhdr)
689 struct page *page = virt_to_page(zhdr);
697 if (zhdr->middle_chunks == 0)
700 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
702 mchunk_memmove(zhdr, ZHDR_CHUNKS);
703 zhdr->first_chunks = zhdr->middle_chunks;
704 zhdr->middle_chunks = 0;
705 zhdr->start_middle = 0;
706 zhdr->first_num++;
714 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
715 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
717 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
718 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
720 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
721 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
722 + zhdr->middle_chunks) >=
724 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
725 zhdr->middle_chunks;
726 mchunk_memmove(zhdr, new_start);
727 zhdr->start_middle = new_start;
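The three branches of z3fold_compact_page() (697-727) are easier to see as chunk layouts (an illustration only, not source; BIG_CHUNK_GAP is the threshold truncated from the matched conditions at 715 and 722):

/*
 * 1) middle alone (700-706): promote it to first; the first_num++ at 706
 *    makes any stale MIDDLE handle decode to a different index, so it
 *    cannot alias the new FIRST object:
 *      [ hdr |       ...      | middle | ... ]  ->  [ hdr | first | ... ]
 *
 * 2) first + middle, gap >= BIG_CHUNK_GAP (714-718): slide middle left:
 *      [ hdr | first | ..gap.. | middle | ... ]  ->
 *      [ hdr | first | middle |        ...     ]
 *
 * 3) middle + last, gap >= BIG_CHUNK_GAP (720-727): slide middle right:
 *      [ hdr | ... | middle | ..gap.. | last ]  ->
 *      [ hdr |      ...     | middle | last  ]
 */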
734 static void do_compact_page(struct z3fold_header *zhdr, bool locked)
736 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
739 page = virt_to_page(zhdr);
741 WARN_ON(z3fold_page_trylock(zhdr));
743 z3fold_page_lock(zhdr);
745 z3fold_page_unlock(zhdr);
749 list_del_init(&zhdr->buddy);
752 if (put_z3fold_locked(zhdr))
757 z3fold_page_unlock(zhdr);
761 if (!zhdr->foreign_handles && buddy_single(zhdr) &&
762 zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
763 if (!put_z3fold_locked(zhdr)) {
765 z3fold_page_unlock(zhdr);
770 z3fold_compact_page(zhdr);
771 add_to_unbuddied(pool, zhdr);
773 z3fold_page_unlock(zhdr);
778 struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
781 do_compact_page(zhdr, false);
788 struct z3fold_header *zhdr = NULL;
800 zhdr = list_first_entry_or_null(READ_ONCE(l),
803 if (!zhdr)
808 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
810 !z3fold_page_trylock(zhdr)) {
812 zhdr = NULL;
818 list_del_init(&zhdr->buddy);
819 zhdr->cpu = -1;
822 page = virt_to_page(zhdr);
825 z3fold_page_unlock(zhdr);
826 zhdr = NULL;
839 kref_get(&zhdr->refcount);
844 if (!zhdr) {
855 zhdr = list_first_entry_or_null(READ_ONCE(l),
858 if (!zhdr || !z3fold_page_trylock(zhdr)) {
860 zhdr = NULL;
863 list_del_init(&zhdr->buddy);
864 zhdr->cpu = -1;
867 page = virt_to_page(zhdr);
870 z3fold_page_unlock(zhdr);
871 zhdr = NULL;
876 kref_get(&zhdr->refcount);
881 if (zhdr && !zhdr->slots) {
882 zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
883 if (!zhdr->slots)
886 return zhdr;
889 if (!put_z3fold_locked(zhdr)) {
890 add_to_unbuddied(pool, zhdr);
891 z3fold_page_unlock(zhdr);
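__z3fold_alloc() (788-891) is the fast path: find a partially used page in the per-CPU unbuddied bins, first on the local CPU (800-844), then on the other CPUs (855-876). The matches show its lock-free peek / locked re-check pattern; summarized below, with the locking lines (which the matches elide) hedged as prose:

/*
 * zhdr = list_first_entry_or_null(READ_ONCE(l), ...);  // optimistic peek
 * take the list lock;                                  // elided in matches
 * if (zhdr != list_first_entry(...) ||                 // list changed, or
 *     !z3fold_page_trylock(zhdr))                      // page busy:
 *         retry the whole lookup;
 * list_del_init(&zhdr->buddy);                         // claim the page
 * zhdr->cpu = -1;
 * drop the list lock;
 * kref_get(&zhdr->refcount);   // safe: page lock held, no kref_put race
 *
 * 881-883: a claimed page that arrives without handle slots gets them
 * allocated with GFP_ATOMIC, since the page lock is held at this point.
 */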
1004 struct z3fold_header *zhdr = NULL;
1019 zhdr = __z3fold_alloc(pool, size, can_sleep);
1020 if (zhdr) {
1021 bud = get_free_buddy(zhdr, chunks);
1023 if (!put_z3fold_locked(zhdr))
1024 z3fold_page_unlock(zhdr);
1029 page = virt_to_page(zhdr);
1039 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1040 if (!zhdr) {
1059 z3fold_page_lock(zhdr);
1063 zhdr->first_chunks = chunks;
1065 zhdr->last_chunks = chunks;
1067 zhdr->middle_chunks = chunks;
1068 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
1070 add_to_unbuddied(pool, zhdr);
1074 *handle = encode_handle(zhdr, bud);
1077 z3fold_page_unlock(zhdr);
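z3fold_alloc(), z3fold_free(), z3fold_map() and z3fold_unmap() are static and reached through the zpool driver. A minimal sketch of a kernel client exercising this path, assuming the zpool API of recent kernels (the zpool_create_pool() signature has varied across versions, and z3fold_smoke_test is a made-up name for illustration):

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

static int z3fold_smoke_test(void)      /* hypothetical test function */
{
        struct zpool *pool;
        unsigned long handle;
        char *buf;
        int ret;

        pool = zpool_create_pool("z3fold", "test", GFP_KERNEL);
        if (!pool)
                return -ENOMEM;

        /* lands in z3fold_alloc(): picks a FIRST/MIDDLE/LAST buddy, or a
         * headless page for sizes close to PAGE_SIZE */
        ret = zpool_malloc(pool, 1024, GFP_KERNEL, &handle);
        if (ret)
                goto out;

        /* lands in z3fold_map(): page address plus the buddy's offset */
        buf = zpool_map_handle(pool, handle, ZPOOL_MM_RW);
        memset(buf, 0xaa, 1024);
        zpool_unmap_handle(pool, handle);

        zpool_free(pool, handle);
out:
        zpool_destroy_pool(pool);
        return ret;
}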
1094 struct z3fold_header *zhdr;
1099 zhdr = get_z3fold_header(handle);
1100 page = virt_to_page(zhdr);
1110 put_z3fold_header(zhdr);
1122 zhdr->first_chunks = 0;
1125 zhdr->middle_chunks = 0;
1128 zhdr->last_chunks = 0;
1133 put_z3fold_header(zhdr);
1138 free_handle(handle, zhdr);
1139 if (put_z3fold_locked_list(zhdr))
1143 put_z3fold_header(zhdr);
1148 put_z3fold_header(zhdr);
1151 if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1152 zhdr->cpu = -1;
1153 kref_get(&zhdr->refcount);
1155 do_compact_page(zhdr, true);
1158 kref_get(&zhdr->refcount);
1160 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1161 put_z3fold_header(zhdr);
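The free path (1094-1161) in brief: headless pages are freed outright; otherwise the freed buddy's chunk count is zeroed, the handle slot is released, and compaction is kicked. Note the kref_get() before either compaction path: do_compact_page() or the worker drops that reference when done.

/*
 * 1122/1125/1128: zero the freed buddy's chunk count
 * 1138:           release the handle slot (see free_handle, 279-313)
 * 1139:           drop a ref; if it was the last, the page is released
 * 1151-1155:      no valid CPU binding -> compact inline, lock held
 * 1158-1160:      otherwise queue compact_page_work on zhdr->cpu
 */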
1176 struct z3fold_header *zhdr;
1181 zhdr = get_z3fold_header(handle);
1182 addr = zhdr;
1183 page = virt_to_page(zhdr);
1194 addr += zhdr->start_middle << CHUNK_SHIFT;
1208 zhdr->mapped_count++;
1210 put_z3fold_header(zhdr);
1221 struct z3fold_header *zhdr;
1225 zhdr = get_z3fold_header(handle);
1226 page = virt_to_page(zhdr);
1234 zhdr->mapped_count--;
1235 put_z3fold_header(zhdr);
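The address math in z3fold_map() (1176-1210): start at the header and advance by buddy. Only the MIDDLE case (1194) appears in the matches; the FIRST and LAST cases below are hedged from the surrounding source (handle_to_chunks() reads back the size bits stored at 388):

        addr = zhdr;                            /* page/header address */
        switch (handle_to_buddy(handle)) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;      /* just past the header */
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;  /* match at 1194 */
                break;
        case LAST:
                addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
                break;
        default:
                break;
        }
        zhdr->mapped_count++;   /* 1208: pins the page against migration */

z3fold_unmap() (1221-1235) only undoes the accounting: mapped_count-- at 1234, plus clearing a middle-chunk-mapped page bit that is outside the matches. The mapping itself is just an offset into an already-mapped lowmem page, so nothing needs tearing down.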
1251 struct z3fold_header *zhdr;
1259 zhdr = page_address(page);
1260 z3fold_page_lock(zhdr);
1265 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1270 pool = zhdr_to_pool(zhdr);
1272 if (!list_empty(&zhdr->buddy))
1273 list_del_init(&zhdr->buddy);
1276 kref_get(&zhdr->refcount);
1277 z3fold_page_unlock(zhdr);
1281 z3fold_page_unlock(zhdr);
1288 struct z3fold_header *zhdr, *new_zhdr;
1295 zhdr = page_address(page);
1296 pool = zhdr_to_pool(zhdr);
1298 if (!z3fold_page_trylock(zhdr))
1300 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1302 z3fold_page_unlock(zhdr);
1305 if (work_pending(&zhdr->work)) {
1306 z3fold_page_unlock(zhdr);
1310 memcpy(new_zhdr, zhdr, PAGE_SIZE);
1313 z3fold_page_unlock(zhdr);
1346 struct z3fold_header *zhdr;
1349 zhdr = page_address(page);
1350 pool = zhdr_to_pool(zhdr);
1352 z3fold_page_lock(zhdr);
1353 if (!list_empty(&zhdr->buddy))
1354 list_del_init(&zhdr->buddy);
1356 if (put_z3fold_locked(zhdr))
1358 if (list_empty(&zhdr->buddy))
1359 add_to_unbuddied(pool, zhdr);
1361 z3fold_page_unlock(zhdr);
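The migration hooks (1251-1361) share one invariant: a page with mapped buddies or foreign handles must stay put.

/*
 * isolate (1251-1281): page lock taken; refuse if mapped_count or
 *     foreign_handles is nonzero (1265); otherwise unlink from the
 *     unbuddied list and take a reference (1276).
 * migrate (1288-1313): trylock only (1298); re-check the same counters
 *     (1300) and bail if compaction work is pending (1305); then the
 *     whole PAGE_SIZE is memcpy'd into the new page (1310).
 * putback (1346-1361): relink into unbuddied (1358-1359) unless the
 *     reference drop at 1356 already freed the page.
 */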