Lines Matching defs:mblk (uses of struct dmz_mblock in the dm-zoned metadata code)

135 	struct dmz_mblock	*mblk;
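
Line 135 is a pointer to a cached metadata block, kept per superblock set (see the zmd->sb[...].mblk uses further down). The remaining matches touch only a handful of struct dmz_mblock fields; as a reading aid, here is the shape of the structure inferred from those uses alone. The field types are assumptions, not copied from the source:

    /* Inferred from the uses below; exact types are a guess. */
    struct dmz_mblock {
        struct rb_node   node;   /* linkage in zmd->mblk_rbtree, keyed by ->no */
        struct list_head link;   /* linkage in the LRU list or the dirty list */
        sector_t         no;     /* metadata block number */
        unsigned int     ref;    /* number of active users */
        unsigned long    state;  /* DMZ_META_{READING,WRITING,DIRTY,ERROR} bits */
        struct page      *page;  /* backing page, one DMZ_BLOCK_SIZE block */
        void             *data;  /* page_address(page) */
    };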
409 struct dmz_mblock *mblk = NULL;
414 mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
416 if (mblk) {
417 list_del_init(&mblk->link);
418 rb_erase(&mblk->node, &zmd->mblk_rbtree);
419 mblk->no = mblk_no;
422 if (mblk)
423 return mblk;
427 mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
428 if (!mblk)
431 mblk->page = alloc_page(GFP_NOIO);
432 if (!mblk->page) {
433 kfree(mblk);
437 RB_CLEAR_NODE(&mblk->node);
438 INIT_LIST_HEAD(&mblk->link);
439 mblk->ref = 0;
440 mblk->state = 0;
441 mblk->no = mblk_no;
442 mblk->data = page_address(mblk->page);
446 return mblk;
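
Lines 409-446 are the allocation path. Before growing the cache, the code tries to recycle the first (least recently used) entry on the clean LRU list, detaching it from both the LRU list and the rbtree and rebinding it to the new block number; only when nothing can be recycled does it kmalloc() a descriptor and alloc_page() a backing page under GFP_NOIO. The guard that decides when recycling is attempted is not visible in these matches. A minimal user-space sketch of that reuse-or-allocate pattern; struct cblk, lru_pop_clean() and cblk_get_buffer() are made-up names for the sketch:

    #include <stdlib.h>

    struct cblk {
        unsigned long long no;   /* block number this buffer caches */
        void              *data; /* one block worth of payload */
    };

    /* Hypothetical LRU hook: return an idle clean entry to recycle, or NULL.
     * Stubbed out so the sketch stays self-contained. */
    static struct cblk *lru_pop_clean(void) { return NULL; }

    static struct cblk *cblk_get_buffer(unsigned long long no, size_t blksz)
    {
        /* First choice: recycle an idle cached buffer instead of allocating. */
        struct cblk *b = lru_pop_clean();
        if (b) {
            b->no = no;              /* rebind the recycled buffer */
            return b;
        }

        /* Nothing to recycle: allocate a descriptor and its backing buffer. */
        b = malloc(sizeof(*b));
        if (!b)
            return NULL;
        b->data = malloc(blksz);
        if (!b->data) {
            free(b);
            return NULL;
        }
        b->no = no;
        return b;
    }

A recycled entry keeps its backing page, so hitting the reuse path costs two list/tree removals instead of a fresh page allocation on a reclaim-sensitive (GFP_NOIO) path.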
452 static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
454 __free_pages(mblk->page, 0);
455 kfree(mblk);
463 static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
473 new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
477 rb_link_node(&mblk->node, parent, new);
478 rb_insert_color(&mblk->node, root);
490 struct dmz_mblock *mblk;
493 mblk = container_of(node, struct dmz_mblock, node);
494 if (mblk->no == mblk_no) {
499 mblk->ref++;
500 if (mblk->ref == 1 &&
501 !test_bit(DMZ_META_DIRTY, &mblk->state))
502 list_del_init(&mblk->link);
503 return mblk;
505 node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
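
Lines 463-478 insert a block into zmd->mblk_rbtree keyed by its block number, and lines 490-505 (presumably dmz_get_mblock_fast(), called at line 677 below) walk the same tree on lookup. Both walks descend left when the stored number is smaller than the target, so insert and lookup agree on the (reversed-looking but consistent) ordering. On a hit the lookup takes a reference, and the first reference on a clean block also removes it from the LRU list so it cannot be recycled while in use. A compact user-space model of that acquire transition; struct centry, lru_remove() and cache_acquire() are invented names:

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified cache entry: only what the transition needs. */
    struct centry {
        unsigned long no;
        unsigned int  ref;
        bool          dirty;
        bool          on_lru;
    };

    /* Hypothetical LRU removal hook. */
    static void lru_remove(struct centry *e) { e->on_lru = false; }

    /* Take a reference on a cache hit.  The rule mirrored from lines 499-502:
     * only the 0 -> 1 transition of a clean entry pulls it off the LRU list;
     * dirty entries are tracked on the dirty list instead. */
    static struct centry *cache_acquire(struct centry *hit)
    {
        if (!hit)
            return NULL;
        hit->ref++;
        if (hit->ref == 1 && !hit->dirty)
            lru_remove(hit);
        return hit;
    }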
516 struct dmz_mblock *mblk = bio->bi_private;
520 set_bit(DMZ_META_ERROR, &mblk->state);
527 clear_bit_unlock(flag, &mblk->state);
529 wake_up_bit(&mblk->state, flag);
540 struct dmz_mblock *mblk, *m;
549 mblk = dmz_alloc_mblock(zmd, mblk_no);
550 if (!mblk)
565 dmz_free_mblock(zmd, mblk);
570 mblk->ref++;
571 set_bit(DMZ_META_READING, &mblk->state);
572 dmz_insert_mblock(zmd, mblk);
578 bio->bi_private = mblk;
580 __bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
583 return mblk;
592 struct dmz_mblock *mblk;
601 mblk = list_first_entry(&zmd->mblk_lru_list,
603 list_del_init(&mblk->link);
604 rb_erase(&mblk->node, &zmd->mblk_rbtree);
605 dmz_free_mblock(zmd, mblk);
643 struct dmz_mblock *mblk)
646 if (!mblk)
651 mblk->ref--;
652 if (mblk->ref == 0) {
653 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
654 rb_erase(&mblk->node, &zmd->mblk_rbtree);
655 dmz_free_mblock(zmd, mblk);
656 } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
657 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
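
Lines 592-605 and 643-657 are the two exits from active use. The shrink path pops entries off the head of the LRU list and frees them outright, which is safe because only clean, unreferenced blocks are ever placed there. dmz_release_mblock() drops one reference and, on the last put, either frees the block (DMZ_META_ERROR set), re-queues it at the LRU tail (clean), or leaves it alone because the dirty list still holds it. A small user-space sketch of that release decision; the names are invented:

    #include <stdbool.h>
    #include <stdlib.h>

    struct centry {
        unsigned int ref;
        bool         error;   /* I/O on this block failed */
        bool         dirty;   /* write-back still pending */
        bool         on_lru;  /* eligible for recycling/shrinking */
    };

    static void lru_add_tail(struct centry *e) { e->on_lru = true; }

    /* Drop one reference and decide what happens to the block on the last put. */
    static void cache_release(struct centry *e)
    {
        if (!e)
            return;
        if (--e->ref != 0)
            return;                 /* still in use elsewhere */

        if (e->error)
            free(e);                /* broken block: unlink (not shown) and drop it */
        else if (!e->dirty)
            lru_add_tail(e);        /* clean and idle: recyclable again */
        /* else: dirty, it stays reachable via the dirty list until flushed */
    }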
672 struct dmz_mblock *mblk;
677 mblk = dmz_get_mblock_fast(zmd, mblk_no);
680 if (!mblk) {
682 mblk = dmz_get_mblock_slow(zmd, mblk_no);
683 if (IS_ERR(mblk))
684 return mblk;
688 wait_on_bit_io(&mblk->state, DMZ_META_READING,
690 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
691 dmz_release_mblock(zmd, mblk);
696 return mblk;
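
Lines 672-696 tie the cache together: try the in-memory lookup first, fall back to the read path of lines 540-583 (allocate, take a reference, set DMZ_META_READING, insert into the rbtree, attach the page to a read bio), then sleep in wait_on_bit_io() until the completion handler of lines 516-529 clears the state bit and wakes the waiters; if the completion left DMZ_META_ERROR set, the reference is dropped and the call presumably fails. A user-space analogue of that publish-under-a-busy-flag pattern, using a mutex and condition variable where the kernel code uses bit waitqueues; all names are invented:

    #include <pthread.h>
    #include <stdbool.h>

    struct rd_entry {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        bool            reading;   /* analogue of DMZ_META_READING */
        bool            error;     /* analogue of DMZ_META_ERROR */
    };

    /* Completion side: record the outcome, clear the busy flag, wake waiters. */
    static void rd_entry_end_io(struct rd_entry *e, bool failed)
    {
        pthread_mutex_lock(&e->lock);
        if (failed)
            e->error = true;
        e->reading = false;
        pthread_cond_broadcast(&e->done);
        pthread_mutex_unlock(&e->lock);
    }

    /* Reader side: block until the in-flight read finishes, then check for error. */
    static int rd_entry_wait(struct rd_entry *e)
    {
        int ret;

        pthread_mutex_lock(&e->lock);
        while (e->reading)
            pthread_cond_wait(&e->done, &e->lock);
        ret = e->error ? -1 : 0;
        pthread_mutex_unlock(&e->lock);
        return ret;
    }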
702 static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
705 if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
706 list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
713 static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
717 sector_t block = zmd->sb[set].block + mblk->no;
726 set_bit(DMZ_META_WRITING, &mblk->state);
729 bio->bi_private = mblk;
731 __bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
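
Lines 702-706 and 713-731 are the write side. Dirtying is a test_and_set_bit() on DMZ_META_DIRTY plus one move onto the dirty list, so a block is queued for write-back at most once no matter how often it is modified. The write itself targets zmd->sb[set].block + mblk->no, meaning a block number is an offset within a metadata set and the same cached block lands at a different on-disk location for each of the two sets. A runnable sketch of that address computation; the layout values are made up:

    #include <stdio.h>

    /* Hypothetical layout: each metadata set starts at a fixed block. */
    struct meta_set { unsigned long long start_block; };

    static unsigned long long disk_block(const struct meta_set *sets, int set,
                                         unsigned long long mblk_no)
    {
        /* Mirrors sb[set].block + mblk->no: the block number is relative. */
        return sets[set].start_block + mblk_no;
    }

    int main(void)
    {
        struct meta_set sets[2] = { { .start_block = 0 }, { .start_block = 8192 } };

        printf("set 0: %llu\n", disk_block(sets, 0, 5));   /* prints 5 */
        printf("set 1: %llu\n", disk_block(sets, 1, 5));   /* prints 8197 */
        return 0;
    }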
769 struct dmz_mblock *mblk = zmd->sb[set].mblk;
806 mblk->page);
820 struct dmz_mblock *mblk;
827 list_for_each_entry(mblk, write_list, link) {
828 ret = dmz_write_mblock(zmd, mblk, set);
836 list_for_each_entry(mblk, write_list, link) {
839 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
841 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
842 clear_bit(DMZ_META_ERROR, &mblk->state);
886 struct dmz_mblock *mblk;
948 mblk = list_first_entry(&write_list, struct dmz_mblock, link);
949 list_del_init(&mblk->link);
952 clear_bit(DMZ_META_DIRTY, &mblk->state);
953 if (mblk->ref == 0)
954 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
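
Lines 820-842 and 886-954 show the flush protocol in two passes over the dirty list: submit a write for every block (setting DMZ_META_WRITING), then walk the list again to wait for each WRITING bit to clear and to pick up any DMZ_META_ERROR, alongside the superblock write visible around lines 769-806. Once everything has landed, each block loses its DIRTY bit and, if no reference is held any more, returns to the LRU tail. A small sketch of that final pass, mirroring lines 948-954; the names and the singly linked dirty list belong to the sketch only:

    #include <stdbool.h>
    #include <stddef.h>

    struct wb_entry {
        struct wb_entry *next;     /* dirty list for the sketch */
        unsigned int     ref;
        bool             dirty;
        bool             on_lru;
    };

    static void lru_add_tail(struct wb_entry *e) { e->on_lru = true; }

    /* After all write-back I/O has completed: un-dirty every entry and hand
     * the idle ones back to the LRU. */
    static void writeback_finish(struct wb_entry *dirty_list)
    {
        struct wb_entry *e, *next;

        for (e = dirty_list; e; e = next) {
            next = e->next;
            e->next = NULL;            /* detach from the dirty list */
            e->dirty = false;
            if (e->ref == 0)
                lru_add_tail(e);       /* idle again: recyclable */
        }
    }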
1104 sb->block, sb->mblk->page);
1115 struct dmz_mblock *mblk;
1120 mblk = dmz_alloc_mblock(zmd, 0);
1121 if (!mblk)
1124 zmd->sb[1].mblk = mblk;
1125 zmd->sb[1].sb = mblk->data;
1140 dmz_free_mblock(zmd, mblk);
1141 zmd->sb[1].mblk = NULL;
1153 struct dmz_mblock *mblk;
1157 mblk = dmz_alloc_mblock(zmd, 0);
1158 if (!mblk)
1161 sb->mblk = mblk;
1162 sb->sb = mblk->data;
1167 dmz_free_mblock(zmd, mblk);
1168 sb->mblk = NULL;
1209 if (!zmd->sb[dst_set].mblk) {
1210 zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
1211 if (!zmd->sb[dst_set].mblk) {
1215 zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
1337 dmz_free_mblock(zmd, sb->mblk);
1341 dmz_free_mblock(zmd, sb->mblk);
2481 struct dmz_mblock *mblk;
2492 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2493 if (IS_ERR(mblk))
2494 return PTR_ERR(mblk);
2500 count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2502 dmz_dirty_mblock(zmd, mblk);
2505 dmz_release_mblock(zmd, mblk);
2561 struct dmz_mblock *mblk;
2571 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2572 if (IS_ERR(mblk))
2573 return PTR_ERR(mblk);
2579 count = dmz_clear_bits((unsigned long *)mblk->data,
2582 dmz_dirty_mblock(zmd, mblk);
2585 dmz_release_mblock(zmd, mblk);
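
Lines 2481-2505 and 2561-2585 mark a run of chunk blocks valid or invalid by setting or clearing bits in a zone's bitmap block. The surrounding lines suggest the bit helpers return how many bits actually changed and that the bitmap block is only marked dirty when that count is non-zero, so re-validating an already valid range does not trigger a metadata write. A standalone helper with that count-what-changed contract (bits_set_count() is an invented name; the clear-side twin is symmetric):

    #define BITS_PER_WORD (8 * sizeof(unsigned long))

    /* Set nr bits starting at bit; return how many were previously clear. */
    static unsigned int bits_set_count(unsigned long *bitmap,
                                       unsigned int bit, unsigned int nr)
    {
        unsigned int i, changed = 0;

        for (i = bit; i < bit + nr; i++) {
            unsigned long  mask = 1UL << (i % BITS_PER_WORD);
            unsigned long *word = &bitmap[i / BITS_PER_WORD];

            if (!(*word & mask)) {
                *word |= mask;
                changed++;
            }
        }
        return changed;
    }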
2608 struct dmz_mblock *mblk;
2614 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2615 if (IS_ERR(mblk))
2616 return PTR_ERR(mblk);
2620 (unsigned long *) mblk->data) != 0;
2622 dmz_release_mblock(zmd, mblk);
2635 struct dmz_mblock *mblk;
2645 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2646 if (IS_ERR(mblk))
2647 return PTR_ERR(mblk);
2650 bitmap = (unsigned long *) mblk->data;
2657 dmz_release_mblock(zmd, mblk);
2745 struct dmz_mblock *mblk;
2754 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2755 if (IS_ERR(mblk)) {
2761 bitmap = mblk->data;
2766 dmz_release_mblock(zmd, mblk);
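
Lines 2608-2622, 2635-2657 and 2745-2766 are read-only bitmap queries: test a single block with test_bit(), and walk a zone's bitmap data for scanning and counting (the function names are not visible in these matches, but the 2745-2766 range looks like the computation of a zone's count of valid blocks). Counting set bits over the bitmap words is a straightforward population count, as in this sketch (zone_valid_blocks() is an invented name; it uses the GCC/Clang __builtin_popcountl builtin):

    #include <stddef.h>

    /* Count the valid (set) bits across a zone's bitmap words. */
    static unsigned int zone_valid_blocks(const unsigned long *bitmap, size_t nwords)
    {
        unsigned int n = 0;
        size_t i;

        for (i = 0; i < nwords; i++)
            n += (unsigned int)__builtin_popcountl(bitmap[i]);
        return n;
    }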
2781 struct dmz_mblock *mblk, *next;
2794 if (zmd->sb[i].mblk) {
2795 dmz_free_mblock(zmd, zmd->sb[i].mblk);
2796 zmd->sb[i].mblk = NULL;
2802 mblk = list_first_entry(&zmd->mblk_dirty_list,
2805 (u64)mblk->no, mblk->ref);
2806 list_del_init(&mblk->link);
2807 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2808 dmz_free_mblock(zmd, mblk);
2812 mblk = list_first_entry(&zmd->mblk_lru_list,
2814 list_del_init(&mblk->link);
2815 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2816 dmz_free_mblock(zmd, mblk);
2821 rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2823 (u64)mblk->no, mblk->ref);
2824 mblk->ref = 0;
2825 dmz_free_mblock(zmd, mblk);
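
Lines 2781-2825 are the teardown: free the cached superblock copies, drain the dirty list (apparently warning about blocks that never reached disk) and the LRU list, unlinking each block from the rbtree before freeing it, and finally sweep whatever is still left in the tree with rbtree_postorder_for_each_entry_safe(), forcing the leftover reference counts to zero before freeing. The post-order order matters because nodes are freed during the walk; the same idea on a plain binary tree:

    #include <stdlib.h>

    struct tnode {
        struct tnode *left, *right;
        unsigned int  ref;          /* a non-zero count here means a leaked user */
    };

    /* Post-order free: both children are released before the node itself, so
     * the walk never revisits freed memory. */
    static void tree_free_postorder(struct tnode *n)
    {
        if (!n)
            return;
        tree_free_postorder(n->left);
        tree_free_postorder(n->right);
        n->ref = 0;                 /* mirrors mblk->ref = 0 before the free */
        free(n);
    }

rbtree_postorder_for_each_entry_safe() gives the same child-before-parent guarantee without recursion and without rebalancing the tree, which is why it is safe to free entries from inside the loop.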