Lines matching defs:zhdr in mm/z3fold.c (the z3fold compressed-page allocator)

239 static inline void z3fold_page_lock(struct z3fold_header *zhdr)
241 spin_lock(&zhdr->page_lock);
245 static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
247 return spin_trylock(&zhdr->page_lock);
251 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
253 spin_unlock(&zhdr->page_lock);
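
The three helpers above (lines 239-253) are the whole locking story for a z3fold page: one spinlock embedded in the header serializes every buddy operation on that page, and the trylock variant lets lookup and reclaim back off instead of waiting. A minimal userspace model of the same pattern using POSIX spinlocks (illustrative only; the kernel uses spin_lock() on a spinlock_t inside struct z3fold_header):

    #include <pthread.h>
    #include <stdio.h>

    struct zhdr_model {
        pthread_spinlock_t page_lock;   /* guards all three buddies in the page */
    };

    static void page_lock(struct zhdr_model *z)    { pthread_spin_lock(&z->page_lock); }
    static int  page_trylock(struct zhdr_model *z) { return pthread_spin_trylock(&z->page_lock) == 0; }
    static void page_unlock(struct zhdr_model *z)  { pthread_spin_unlock(&z->page_lock); }

    int main(void)
    {
        struct zhdr_model z;

        pthread_spin_init(&z.page_lock, PTHREAD_PROCESS_PRIVATE);
        page_lock(&z);
        printf("trylock while held: %d\n", page_trylock(&z)); /* prints 0 */
        page_unlock(&z);
        pthread_spin_destroy(&z.page_lock);
        return 0;
    }
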
261 struct z3fold_header *zhdr;
271 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
273 locked = z3fold_page_trylock(zhdr);
280 zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
283 return zhdr;
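
get_z3fold_header() (lines 261-283) maps a handle back to its page header. For a normal handle it reads the encoded address out of the slot, then rounds down with PAGE_MASK; a headless handle is the page address itself, so the same mask applies directly (line 280). The rounding works only because the header always sits at the start of its page. A sketch of the mask arithmetic, assuming 4 KiB pages and a made-up address:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t page    = 0x7f0000123000UL;  /* hypothetical page start */
        uintptr_t encoded = page + 0xc6;       /* header address plus low index bits */

        /* Any in-page value rounds down to the header's address. */
        printf("header = %#lx\n", (unsigned long)(encoded & PAGE_MASK));
        return 0;
    }
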
298 static inline void put_z3fold_header(struct z3fold_header *zhdr)
300 struct page *page = virt_to_page(zhdr);
303 z3fold_page_unlock(zhdr);
306 static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
327 if (zhdr->slots != slots)
328 zhdr->foreign_handles--;
342 if (zhdr->slots == slots)
343 zhdr->slots = NULL;
400 struct z3fold_header *zhdr = page_address(page);
410 return zhdr;
416 spin_lock_init(&zhdr->page_lock);
417 kref_init(&zhdr->refcount);
418 zhdr->first_chunks = 0;
419 zhdr->middle_chunks = 0;
420 zhdr->last_chunks = 0;
421 zhdr->first_num = 0;
422 zhdr->start_middle = 0;
423 zhdr->cpu = -1;
424 zhdr->foreign_handles = 0;
425 zhdr->mapped_count = 0;
426 zhdr->slots = slots;
427 zhdr->pool = pool;
428 INIT_LIST_HEAD(&zhdr->buddy);
429 INIT_WORK(&zhdr->work, compact_page_work);
430 return zhdr;
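
init_z3fold_page() (lines 400-430) starts every field from a known state: all three buddy sizes zero, cpu set to -1 (meaning "on no per-CPU unbuddied list"), and the counters that gate reclaim and migration (foreign_handles, mapped_count) cleared. A userspace model of the bookkeeping fields and their reset, with types chosen for illustration:

    #include <string.h>

    struct zhdr_model {
        unsigned short first_chunks;   /* size of the FIRST buddy, in chunks */
        unsigned short middle_chunks;  /* size of the MIDDLE buddy */
        unsigned short last_chunks;    /* size of the LAST buddy */
        unsigned short start_middle;   /* chunk index where MIDDLE begins */
        unsigned short first_num;      /* rotating base for handle indices */
        int cpu;                       /* CPU whose unbuddied list holds us */
        int foreign_handles;           /* handles living in another page's slots */
        int mapped_count;              /* users currently inside z3fold_map() */
    };

    static void zhdr_model_init(struct zhdr_model *z)
    {
        memset(z, 0, sizeof(*z));
        z->cpu = -1;                   /* not queued on any CPU yet */
    }

    int main(void)
    {
        struct zhdr_model z;

        zhdr_model_init(&z);
        return z.cpu == -1 ? 0 : 1;
    }
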
446 static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
448 return (bud + zhdr->first_num) & BUDDY_MASK;
455 static unsigned long __encode_handle(struct z3fold_header *zhdr,
459 unsigned long h = (unsigned long)zhdr;
470 idx = __idx(zhdr, bud);
473 h |= (zhdr->last_chunks << BUDDY_SHIFT);
481 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
483 return __encode_handle(zhdr, zhdr->slots, bud);
499 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
505 struct z3fold_header *zhdr;
513 zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
514 return (addr - zhdr->first_num) & BUDDY_MASK;
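
Lines 446-514 are the two halves of the handle format. Encoding (lines 455-483) packs the header address with a rotated buddy index in the low BUDDY_MASK bits, and for the LAST buddy also stashes the object's chunk count above BUDDY_SHIFT; the real code then stores that word in a slots array and hands out the slot's address, while the sketch below returns the encoded value directly. Decoding (lines 505-514) subtracts first_num modulo BUDDY_MASK + 1, which is why the comment at line 499 can say that (handle & BUDDY_MASK) < zhdr->first_num is harmless. A round-trip sketch with the z3fold constants (BUDDY_MASK 0x3, BUDDY_SHIFT 2) and a fake page-aligned header address:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define BUDDY_MASK  0x3UL
    #define BUDDY_SHIFT 2

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    static unsigned long encode(unsigned long zhdr, unsigned int first_num,
                                enum buddy bud, unsigned long last_chunks)
    {
        unsigned long h = zhdr + ((bud + first_num) & BUDDY_MASK); /* __idx() */

        if (bud == LAST)                 /* LAST also records its size */
            h |= last_chunks << BUDDY_SHIFT;
        return h;
    }

    static enum buddy decode_buddy(unsigned long h, unsigned int first_num)
    {
        /* Wraps modulo BUDDY_MASK + 1, so idx < first_num is fine. */
        return (enum buddy)((h - first_num) & BUDDY_MASK);
    }

    int main(void)
    {
        unsigned long zhdr = 0x7f0000123000UL;  /* fake, page-aligned */
        unsigned int first_num = 3;             /* already rotated once */
        unsigned long h = encode(zhdr, first_num, LAST, 9);

        printf("buddy = %d (LAST = %d)\n", decode_buddy(h, first_num), LAST);
        printf("last_chunks = %lu\n", (h & (PAGE_SIZE - 1)) >> BUDDY_SHIFT);
        return 0;
    }
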
517 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
519 return zhdr->pool;
522 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
524 struct page *page = virt_to_page(zhdr);
525 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
527 WARN_ON(!list_empty(&zhdr->buddy));
536 z3fold_page_unlock(zhdr);
539 list_add(&zhdr->buddy, &pool->stale);
547 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
549 __release_z3fold_page(zhdr, false);
554 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
556 WARN_ON(z3fold_page_trylock(zhdr));
557 __release_z3fold_page(zhdr, true);
562 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
564 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
567 list_del_init(&zhdr->buddy);
570 WARN_ON(z3fold_page_trylock(zhdr));
571 __release_z3fold_page(zhdr, true);
580 struct z3fold_header *zhdr = list_first_entry(&pool->stale,
582 struct page *page = virt_to_page(zhdr);
584 list_del(&zhdr->buddy);
588 cancel_work_sync(&zhdr->work);
600 static int num_free_chunks(struct z3fold_header *zhdr)
608 if (zhdr->middle_chunks != 0) {
609 int nfree_before = zhdr->first_chunks ?
610 0 : zhdr->start_middle - ZHDR_CHUNKS;
611 int nfree_after = zhdr->last_chunks ?
613 (zhdr->start_middle + zhdr->middle_chunks);
616 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
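
num_free_chunks() (lines 600-616) is what sizes a page for the unbuddied lists. With a middle buddy in place, only a hole next to a free FIRST or LAST slot can take a new allocation, so the function reports the larger of those two holes; with no middle buddy, everything not used by first/last is one contiguous hole. A worked model with illustrative constants (4 KiB page, 64-byte chunks, header rounded to one chunk):

    #include <stdio.h>

    #define TOTAL_CHUNKS 64
    #define ZHDR_CHUNKS  1
    #define NCHUNKS      (TOTAL_CHUNKS - ZHDR_CHUNKS)

    struct zhdr_model {
        int first_chunks, middle_chunks, last_chunks, start_middle;
    };

    static int max(int a, int b) { return a > b ? a : b; }

    static int num_free_chunks(const struct zhdr_model *z)
    {
        if (z->middle_chunks) {
            /* A hole counts only if the buddy slot next to it is
             * free, since that is the only slot a new object can take. */
            int before = z->first_chunks ? 0 : z->start_middle - ZHDR_CHUNKS;
            int after  = z->last_chunks  ? 0 :
                TOTAL_CHUNKS - (z->start_middle + z->middle_chunks);
            return max(before, after);
        }
        return NCHUNKS - z->first_chunks - z->last_chunks;
    }

    int main(void)
    {
        /* MIDDLE of 10 chunks starting at chunk 20, both ends free:
         * hole before = 20 - 1 = 19, hole after = 64 - 30 = 34. */
        struct zhdr_model z = { 0, 10, 0, 20 };

        printf("free chunks = %d\n", num_free_chunks(&z));  /* 34 */
        return 0;
    }
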
622 struct z3fold_header *zhdr)
624 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
625 zhdr->middle_chunks == 0) {
628 int freechunks = num_free_chunks(zhdr);
630 list_add(&zhdr->buddy, &unbuddied[freechunks]);
632 zhdr->cpu = smp_processor_id();
637 static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
641 if (zhdr->middle_chunks) {
642 if (!zhdr->first_chunks &&
643 chunks <= zhdr->start_middle - ZHDR_CHUNKS)
645 else if (!zhdr->last_chunks)
648 if (!zhdr->first_chunks)
650 else if (!zhdr->last_chunks)
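
get_free_buddy() (lines 637-650) picks the slot for a request of `chunks` chunks. With a middle buddy present only the two ends are candidates, and FIRST is checked against the room below start_middle; the LAST branch carries no size check because callers reach it through an unbuddied bucket that already guarantees the fit. HEADLESS doubles as "nothing fits". A sketch with an assumed one-chunk header:

    #include <stdio.h>

    #define ZHDR_CHUNKS 1                  /* assumed: header takes one chunk */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    struct zhdr_model {
        int first_chunks, middle_chunks, last_chunks, start_middle;
    };

    static enum buddy get_free_buddy(const struct zhdr_model *z, int chunks)
    {
        enum buddy bud = HEADLESS;         /* doubles as "no fit" */

        if (z->middle_chunks) {
            /* MIDDLE taken: only the two ends are candidates. */
            if (!z->first_chunks && chunks <= z->start_middle - ZHDR_CHUNKS)
                bud = FIRST;
            else if (!z->last_chunks)
                bud = LAST;   /* fit guaranteed by the caller's bucket */
        } else {
            if (!z->first_chunks)
                bud = FIRST;
            else if (!z->last_chunks)
                bud = LAST;
            else
                bud = MIDDLE;
        }
        return bud;
    }

    int main(void)
    {
        /* MIDDLE of 10 chunks at offset 6: only 5 chunks fit before it,
         * so a 12-chunk request goes to LAST. */
        struct zhdr_model z = { 0, 10, 0, 6 };

        printf("buddy = %d (LAST = %d)\n", get_free_buddy(&z, 12), LAST);
        return 0;
    }
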
659 static inline void *mchunk_memmove(struct z3fold_header *zhdr,
662 void *beg = zhdr;
664 beg + (zhdr->start_middle << CHUNK_SHIFT),
665 zhdr->middle_chunks << CHUNK_SHIFT);
668 static inline bool buddy_single(struct z3fold_header *zhdr)
670 return !((zhdr->first_chunks && zhdr->middle_chunks) ||
671 (zhdr->first_chunks && zhdr->last_chunks) ||
672 (zhdr->middle_chunks && zhdr->last_chunks));
675 static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
677 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
678 void *p = zhdr;
682 int first_idx = __idx(zhdr, FIRST);
683 int middle_idx = __idx(zhdr, MIDDLE);
684 int last_idx = __idx(zhdr, LAST);
691 if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
693 sz = zhdr->first_chunks << CHUNK_SHIFT;
694 old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
695 moved_chunks = &zhdr->first_chunks;
696 } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
697 p += zhdr->start_middle << CHUNK_SHIFT;
698 sz = zhdr->middle_chunks << CHUNK_SHIFT;
699 old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
700 moved_chunks = &zhdr->middle_chunks;
701 } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
702 p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
703 sz = zhdr->last_chunks << CHUNK_SHIFT;
704 old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
705 moved_chunks = &zhdr->last_chunks;
717 if (WARN_ON(new_zhdr == zhdr))
742 write_lock(&zhdr->slots->lock);
748 write_unlock(&zhdr->slots->lock);
772 static int z3fold_compact_page(struct z3fold_header *zhdr)
774 struct page *page = virt_to_page(zhdr);
782 if (zhdr->middle_chunks == 0)
785 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
787 mchunk_memmove(zhdr, ZHDR_CHUNKS);
788 zhdr->first_chunks = zhdr->middle_chunks;
789 zhdr->middle_chunks = 0;
790 zhdr->start_middle = 0;
791 zhdr->first_num++;
799 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
800 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
802 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
803 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
805 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
806 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
807 + zhdr->middle_chunks) >=
809 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
810 zhdr->middle_chunks;
811 mchunk_memmove(zhdr, new_start);
812 zhdr->start_middle = new_start;
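
z3fold_compact_page() (lines 772-812) handles three layouts. If MIDDLE is the only buddy it moves to the front and becomes FIRST, and first_num is incremented so the existing handle, whose index was computed from the old first_num, now decodes to FIRST at the buddy's new position. Otherwise the middle buddy slides against an occupied FIRST or LAST whenever the gap it would close is big enough to bother. A bookkeeping-only model (the kernel also memmove()s the payload via mchunk_memmove(); constants illustrative, BIG_CHUNK_GAP assumed to be 3 chunks):

    #include <stdio.h>

    #define TOTAL_CHUNKS  64
    #define ZHDR_CHUNKS   1
    #define BIG_CHUNK_GAP 3

    struct zhdr_model {
        int first_chunks, middle_chunks, last_chunks, start_middle, first_num;
    };

    static int compact(struct zhdr_model *z)
    {
        if (z->middle_chunks == 0)
            return 0;                    /* nothing to move */

        if (z->first_chunks == 0 && z->last_chunks == 0) {
            /* MIDDLE alone: promote it to FIRST; bumping first_num
             * keeps the old MIDDLE handle decoding to the new slot. */
            z->first_chunks = z->middle_chunks;
            z->middle_chunks = 0;
            z->start_middle = 0;
            z->first_num++;
            return 1;
        }
        if (z->first_chunks && !z->last_chunks &&
            z->start_middle - (z->first_chunks + ZHDR_CHUNKS) >= BIG_CHUNK_GAP) {
            /* Slide MIDDLE left, against FIRST. */
            z->start_middle = z->first_chunks + ZHDR_CHUNKS;
            return 1;
        }
        if (z->last_chunks && !z->first_chunks &&
            TOTAL_CHUNKS - (z->last_chunks + z->start_middle +
                            z->middle_chunks) >= BIG_CHUNK_GAP) {
            /* Slide MIDDLE right, against LAST. */
            z->start_middle = TOTAL_CHUNKS - z->last_chunks - z->middle_chunks;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct zhdr_model z = { 5, 10, 0, 30, 0 };  /* big gap after FIRST */

        if (compact(&z))
            printf("middle now starts at chunk %d\n", z.start_middle); /* 6 */
        return 0;
    }
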
819 static void do_compact_page(struct z3fold_header *zhdr, bool locked)
821 struct z3fold_pool *pool = zhdr_to_pool(zhdr);
824 page = virt_to_page(zhdr);
826 WARN_ON(z3fold_page_trylock(zhdr));
828 z3fold_page_lock(zhdr);
830 z3fold_page_unlock(zhdr);
834 list_del_init(&zhdr->buddy);
837 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
844 z3fold_page_unlock(zhdr);
848 if (!zhdr->foreign_handles && buddy_single(zhdr) &&
849 zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
850 if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
854 z3fold_page_unlock(zhdr);
859 z3fold_compact_page(zhdr);
860 add_to_unbuddied(pool, zhdr);
862 z3fold_page_unlock(zhdr);
867 struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
870 do_compact_page(zhdr, false);
877 struct z3fold_header *zhdr = NULL;
888 zhdr = list_first_entry_or_null(READ_ONCE(l),
891 if (!zhdr)
897 if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
899 !z3fold_page_trylock(zhdr)) {
901 zhdr = NULL;
907 list_del_init(&zhdr->buddy);
908 zhdr->cpu = -1;
911 page = virt_to_page(zhdr);
914 z3fold_page_unlock(zhdr);
915 zhdr = NULL;
928 kref_get(&zhdr->refcount);
933 if (!zhdr) {
944 zhdr = list_first_entry_or_null(READ_ONCE(l),
947 if (!zhdr || !z3fold_page_trylock(zhdr)) {
949 zhdr = NULL;
952 list_del_init(&zhdr->buddy);
953 zhdr->cpu = -1;
956 page = virt_to_page(zhdr);
959 z3fold_page_unlock(zhdr);
960 zhdr = NULL;
965 kref_get(&zhdr->refcount);
970 if (zhdr && !zhdr->slots)
971 zhdr->slots = alloc_slots(pool,
973 return zhdr;
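
__z3fold_alloc() (lines 877-973) looks for a page with a big-enough hole by peeking at the per-CPU unbuddied lists without holding the pool lock, then taking the page trylock and re-checking that the page is still the first entry of its bucket (lines 897-899); a mismatch means another CPU raced us, so everything is dropped and the lookup restarts. The sketch below shows that optimistic peek / trylock / re-check shape in userspace; it is deliberately simplified (the kernel re-checks under the pool spinlock and falls back to other CPUs' lists before giving up), and all names are illustrative:

    #include <pthread.h>
    #include <stddef.h>

    struct entry {
        struct entry *next;
        pthread_mutex_t lock;           /* stands in for the page lock */
    };

    struct bucket {
        struct entry *head;             /* one unbuddied size class */
    };

    static struct entry *grab_first(struct bucket *b)
    {
        for (;;) {
            /* Lockless peek, like list_first_entry_or_null(READ_ONCE(l)). */
            struct entry *e = __atomic_load_n(&b->head, __ATOMIC_RELAXED);

            if (!e)
                return NULL;
            if (pthread_mutex_trylock(&e->lock) != 0)
                continue;               /* page busy: retry the lookup */
            if (__atomic_load_n(&b->head, __ATOMIC_RELAXED) != e) {
                /* Raced: the entry is no longer first. */
                pthread_mutex_unlock(&e->lock);
                continue;
            }
            b->head = e->next;          /* kernel does this under pool->lock */
            return e;                   /* returned with its lock held */
        }
    }

    int main(void)
    {
        struct entry e = { NULL, PTHREAD_MUTEX_INITIALIZER };
        struct bucket b = { &e };

        if (grab_first(&b) == &e)
            pthread_mutex_unlock(&e.lock);
        return 0;
    }
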
1093 struct z3fold_header *zhdr = NULL;
1108 zhdr = __z3fold_alloc(pool, size, can_sleep);
1109 if (zhdr) {
1110 bud = get_free_buddy(zhdr, chunks);
1112 if (kref_put(&zhdr->refcount,
1116 z3fold_page_unlock(zhdr);
1121 page = virt_to_page(zhdr);
1130 zhdr = list_first_entry_or_null(&pool->stale,
1137 if (zhdr) {
1138 list_del(&zhdr->buddy);
1140 cancel_work_sync(&zhdr->work);
1141 page = virt_to_page(zhdr);
1152 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1153 if (!zhdr) {
1173 z3fold_page_lock(zhdr);
1177 zhdr->first_chunks = chunks;
1179 zhdr->last_chunks = chunks;
1181 zhdr->middle_chunks = chunks;
1182 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
1184 add_to_unbuddied(pool, zhdr);
1194 *handle = encode_handle(zhdr, bud);
1197 z3fold_page_unlock(zhdr);
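
z3fold_alloc() (lines 1093-1197) tries, in order: an existing unbuddied page from __z3fold_alloc(), a stale page awaiting release, and finally a fresh page, then claims the chosen buddy by setting its chunk count; a new MIDDLE starts right after FIRST (line 1182). A sketch of the size-to-chunks rounding and the claim step, with illustrative 64-byte chunks and an assumed one-chunk header:

    #include <stdio.h>

    #define CHUNK_SHIFT 6
    #define CHUNK_SIZE  (1 << CHUNK_SHIFT)
    #define ZHDR_CHUNKS 1

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    struct zhdr_model {
        int first_chunks, middle_chunks, last_chunks, start_middle;
    };

    /* Round a byte size up to whole chunks. */
    static int size_to_chunks(unsigned long size)
    {
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
    }

    /* Claim the chosen buddy, mirroring lines 1177-1182 above. */
    static void claim(struct zhdr_model *z, enum buddy bud, int chunks)
    {
        if (bud == FIRST)
            z->first_chunks = chunks;
        else if (bud == LAST)
            z->last_chunks = chunks;
        else if (bud == MIDDLE) {
            z->middle_chunks = chunks;
            z->start_middle = z->first_chunks + ZHDR_CHUNKS;
        }
    }

    int main(void)
    {
        struct zhdr_model z = { 0, 0, 0, 0 };

        claim(&z, FIRST, size_to_chunks(200));   /* 200 B -> 4 chunks */
        claim(&z, MIDDLE, size_to_chunks(700));  /* 700 B -> 11 chunks */
        printf("middle starts at chunk %d\n", z.start_middle);  /* 5 */
        return 0;
    }
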
1214 struct z3fold_header *zhdr;
1219 zhdr = get_z3fold_header(handle);
1220 page = virt_to_page(zhdr);
1233 put_z3fold_header(zhdr);
1245 zhdr->first_chunks = 0;
1248 zhdr->middle_chunks = 0;
1251 zhdr->last_chunks = 0;
1256 put_z3fold_header(zhdr);
1261 free_handle(handle, zhdr);
1262 if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
1268 z3fold_page_unlock(zhdr);
1272 put_z3fold_header(zhdr);
1276 if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
1278 list_del_init(&zhdr->buddy);
1280 zhdr->cpu = -1;
1281 kref_get(&zhdr->refcount);
1283 do_compact_page(zhdr, true);
1286 kref_get(&zhdr->refcount);
1288 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
1289 put_z3fold_header(zhdr);
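
z3fold_free() (lines 1214-1289) clears the freed buddy's chunk count under the page lock (lines 1245-1251), releases the handle, and drops a reference; if the page survives, compaction runs inline when the page has no live owning CPU (zhdr->cpu < 0 or that CPU offline, line 1276) and is otherwise queued to that CPU's compaction worker (line 1288). A model of those two bookkeeping decisions, with the queueing itself stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    struct zhdr_model {
        int first_chunks, middle_chunks, last_chunks;
        int cpu;                      /* -1 when not on a per-CPU list */
    };

    /* Clear the freed buddy's size, as on lines 1245-1251 above. */
    static void clear_buddy(struct zhdr_model *z, enum buddy bud)
    {
        switch (bud) {
        case FIRST:  z->first_chunks = 0;  break;
        case MIDDLE: z->middle_chunks = 0; break;
        case LAST:   z->last_chunks = 0;   break;
        default:     break;               /* HEADLESS frees the whole page */
        }
    }

    /* Compact inline when the page has no live owning CPU; otherwise
     * it would be queued to that CPU's workqueue. */
    static bool compact_inline(const struct zhdr_model *z, bool cpu_online)
    {
        return z->cpu < 0 || !cpu_online;
    }

    int main(void)
    {
        struct zhdr_model z = { 4, 0, 9, -1 };

        clear_buddy(&z, LAST);
        printf("compact inline: %d\n", compact_inline(&z, true));  /* 1 */
        return 0;
    }
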
1331 struct z3fold_header *zhdr = NULL;
1353 zhdr = page_address(page);
1371 if (kref_get_unless_zero(&zhdr->refcount) == 0) {
1372 zhdr = NULL;
1375 if (!z3fold_page_trylock(zhdr)) {
1376 if (kref_put(&zhdr->refcount,
1379 zhdr = NULL;
1387 if (zhdr->foreign_handles ||
1389 if (kref_put(&zhdr->refcount,
1393 z3fold_page_unlock(zhdr);
1394 zhdr = NULL;
1397 list_del_init(&zhdr->buddy);
1398 zhdr->cpu = -1;
1402 if (!zhdr)
1412 * can zero out zhdr->slots and we can't do much
1419 if (zhdr->first_chunks)
1420 first_handle = __encode_handle(zhdr, &slots,
1422 if (zhdr->middle_chunks)
1423 middle_handle = __encode_handle(zhdr, &slots,
1425 if (zhdr->last_chunks)
1426 last_handle = __encode_handle(zhdr, &slots,
1432 z3fold_page_unlock(zhdr);
1434 first_handle = encode_handle(zhdr, HEADLESS);
1465 struct z3fold_buddy_slots *slots = zhdr->slots;
1466 z3fold_page_lock(zhdr);
1467 if (kref_put(&zhdr->refcount,
1481 z3fold_page_unlock(zhdr);
1504 struct z3fold_header *zhdr;
1509 zhdr = get_z3fold_header(handle);
1510 addr = zhdr;
1511 page = virt_to_page(zhdr);
1522 addr += zhdr->start_middle << CHUNK_SHIFT;
1536 zhdr->mapped_count++;
1538 put_z3fold_header(zhdr);
1549 struct z3fold_header *zhdr;
1553 zhdr = get_z3fold_header(handle);
1554 page = virt_to_page(zhdr);
1562 zhdr->mapped_count--;
1563 put_z3fold_header(zhdr);
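
z3fold_map() and z3fold_unmap() (lines 1504-1563) translate a handle into the buddy's payload address inside the page: FIRST begins right after the chunk-aligned header, MIDDLE at start_middle << CHUNK_SHIFT (line 1522), and LAST at PAGE_SIZE minus its chunk count; mapped_count is bumped so reclaim and migration leave the page alone while it is in use. The offset math, with assumed constants (4 KiB page, 64-byte chunks, header rounded to one chunk):

    #include <stdio.h>

    #define PAGE_SIZE         4096UL
    #define CHUNK_SHIFT       6
    #define ZHDR_SIZE_ALIGNED 64UL   /* assumed: header rounds up to one chunk */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    static unsigned long map_offset(enum buddy bud, unsigned int start_middle,
                                    unsigned int last_chunks)
    {
        switch (bud) {
        case FIRST:  return ZHDR_SIZE_ALIGNED;
        case MIDDLE: return (unsigned long)start_middle << CHUNK_SHIFT;
        case LAST:   return PAGE_SIZE -
                            ((unsigned long)last_chunks << CHUNK_SHIFT);
        default:     return 0;       /* HEADLESS: the page itself */
        }
    }

    int main(void)
    {
        printf("MIDDLE at offset %lu\n", map_offset(MIDDLE, 6, 0));  /* 384 */
        return 0;
    }
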
1579 struct z3fold_header *zhdr;
1588 zhdr = page_address(page);
1589 z3fold_page_lock(zhdr);
1594 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
1599 pool = zhdr_to_pool(zhdr);
1601 if (!list_empty(&zhdr->buddy))
1602 list_del_init(&zhdr->buddy);
1607 kref_get(&zhdr->refcount);
1608 z3fold_page_unlock(zhdr);
1612 z3fold_page_unlock(zhdr);
1619 struct z3fold_header *zhdr, *new_zhdr;
1628 zhdr = page_address(page);
1629 pool = zhdr_to_pool(zhdr);
1631 if (!z3fold_page_trylock(zhdr))
1633 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
1634 z3fold_page_unlock(zhdr);
1638 if (work_pending(&zhdr->work)) {
1639 z3fold_page_unlock(zhdr);
1643 memcpy(new_zhdr, zhdr, PAGE_SIZE);
1646 z3fold_page_unlock(zhdr);
1684 struct z3fold_header *zhdr;
1687 zhdr = page_address(page);
1688 pool = zhdr_to_pool(zhdr);
1690 z3fold_page_lock(zhdr);
1691 if (!list_empty(&zhdr->buddy))
1692 list_del_init(&zhdr->buddy);
1694 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1702 z3fold_page_unlock(zhdr);