Lines matching defs:mblk in drivers/md/dm-zoned-metadata.c (Linux dm-zoned target, metadata block cache)
135 struct dmz_mblock *mblk;
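
Line 135 is the per-superblock buffer field in struct dmz_sb. The struct dmz_mblock type behind it is not itself part of the match, but every one of its fields appears in the lines below, so it can be reconstructed as roughly the following (field order is an assumption):

    struct dmz_mblock {
        struct rb_node   node;   /* keyed by block number in mblk_rbtree */
        struct list_head link;   /* LRU-list or dirty-list membership */
        sector_t         no;     /* metadata block number */
        unsigned int     ref;    /* reference count, protected by mblk_lock */
        unsigned long    state;  /* DMZ_META_* bit flags */
        struct page      *page;  /* backing page for the block contents */
        void             *data;  /* page_address(page) */
    };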
409 struct dmz_mblock *mblk = NULL;
414 mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
416 if (mblk) {
417 list_del_init(&mblk->link);
418 rb_erase(&mblk->node, &zmd->mblk_rbtree);
419 mblk->no = mblk_no;
422 if (mblk)
423 return mblk;
427 mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
428 if (!mblk)
431 mblk->page = alloc_page(GFP_NOIO);
432 if (!mblk->page) {
433 kfree(mblk);
437 RB_CLEAR_NODE(&mblk->node);
438 INIT_LIST_HEAD(&mblk->link);
439 mblk->ref = 0;
440 mblk->state = 0;
441 mblk->no = mblk_no;
442 mblk->data = page_address(mblk->page);
446 return mblk;
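
Lines 409-446 are dmz_alloc_mblock(): recycle the coldest clean cache block if possible, otherwise allocate a fresh descriptor and backing page. A sketch of how the fragments fit together; the cache-limit test (field names max_nr_mblks/nr_mblks) and the mblk_lock critical section are not in the matched lines and are assumptions:

    static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
                                               sector_t mblk_no)
    {
        struct dmz_mblock *mblk = NULL;

        /* Assumed: only recycle when the cache is over its limit. */
        if (zmd->max_nr_mblks &&
            atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
            spin_lock(&zmd->mblk_lock);
            mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
                                            struct dmz_mblock, link);
            if (mblk) {
                /* Steal the LRU victim: unlink from list and rbtree. */
                list_del_init(&mblk->link);
                rb_erase(&mblk->node, &zmd->mblk_rbtree);
                mblk->no = mblk_no;
            }
            spin_unlock(&zmd->mblk_lock);
            if (mblk)
                return mblk;
        }

        /* Nothing to recycle: allocate descriptor plus one page. */
        mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
        if (!mblk)
            return NULL;

        mblk->page = alloc_page(GFP_NOIO);
        if (!mblk->page) {
            kfree(mblk);
            return NULL;
        }

        RB_CLEAR_NODE(&mblk->node);
        INIT_LIST_HEAD(&mblk->link);
        mblk->ref = 0;
        mblk->state = 0;
        mblk->no = mblk_no;
        mblk->data = page_address(mblk->page);

        return mblk;
    }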
452 static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
454 __free_pages(mblk->page, 0);
455 kfree(mblk);
463 static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
473 new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
477 rb_link_node(&mblk->node, parent, new);
478 rb_insert_color(&mblk->node, root);
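
Lines 463-478 are the rbtree insertion. Note the ordering: the less-than case descends left, so together with the matching test at line 505 the tree is kept in descending block-number order, which is consistent and therefore correct. A sketch of the full descent (local declarations assumed):

    struct rb_node **new = &zmd->mblk_rbtree.rb_node, *parent = NULL;
    struct dmz_mblock *b;

    /* Walk down to the insertion point. */
    while (*new) {
        b = container_of(*new, struct dmz_mblock, node);
        parent = *new;
        /* Descending order: larger block numbers go left. */
        new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
    }

    /* Link the new node in place and rebalance. */
    rb_link_node(&mblk->node, parent, new);
    rb_insert_color(&mblk->node, &zmd->mblk_rbtree);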
490 struct dmz_mblock *mblk;
493 mblk = container_of(node, struct dmz_mblock, node);
494 if (mblk->no == mblk_no) {
499 mblk->ref++;
500 if (mblk->ref == 1 &&
501 !test_bit(DMZ_META_DIRTY, &mblk->state))
502 list_del_init(&mblk->link);
503 return mblk;
505 node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
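
Lines 490-505 are the cache-hit lookup, using the same descending order. Taking the first reference on a clean block also removes it from the LRU so it cannot be recycled while in use. A sketch of the full loop:

    struct rb_node *node = zmd->mblk_rbtree.rb_node;
    struct dmz_mblock *mblk;

    while (node) {
        mblk = container_of(node, struct dmz_mblock, node);
        if (mblk->no == mblk_no) {
            /* First reference to a clean block: pull it off the LRU. */
            mblk->ref++;
            if (mblk->ref == 1 &&
                !test_bit(DMZ_META_DIRTY, &mblk->state))
                list_del_init(&mblk->link);
            return mblk;
        }
        /* Same descending convention as the insert at line 473. */
        node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
    }
    return NULL;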
516 struct dmz_mblock *mblk = bio->bi_private;
520 set_bit(DMZ_META_ERROR, &mblk->state);
527 clear_bit_unlock(flag, &mblk->state);
529 wake_up_bit(&mblk->state, flag);
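
Lines 516-529 are the shared read/write completion callback: record any error, then release whichever state bit a waiter may be sleeping on. A sketch; the selection of the flag from the request direction is an assumption:

    static void dmz_mblock_bio_end_io(struct bio *bio)
    {
        struct dmz_mblock *mblk = bio->bi_private;
        int flag;

        if (bio->bi_status)
            set_bit(DMZ_META_ERROR, &mblk->state);

        /* Assumed: pick the state bit by request direction. */
        if (bio_op(bio) == REQ_OP_WRITE)
            flag = DMZ_META_WRITING;
        else
            flag = DMZ_META_READING;

        /* Release the bit with unlock semantics, then wake waiters. */
        clear_bit_unlock(flag, &mblk->state);
        smp_mb__after_atomic();
        wake_up_bit(&mblk->state, flag);

        bio_put(bio);
    }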
540 struct dmz_mblock *mblk, *m;
549 mblk = dmz_alloc_mblock(zmd, mblk_no);
550 if (!mblk)
555 dmz_free_mblock(zmd, mblk);
568 dmz_free_mblock(zmd, mblk);
573 mblk->ref++;
574 set_bit(DMZ_META_READING, &mblk->state);
575 dmz_insert_mblock(zmd, mblk);
582 bio->bi_private = mblk;
585 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
588 return mblk;
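
Lines 540-588 are the cache-miss path. The second free at line 568 is the interesting part: after allocating, the function re-checks the rbtree under mblk_lock and discards its own allocation if another context won the race. A condensed sketch; device and target-sector setup for the bio are elided:

    static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
                                                  sector_t mblk_no)
    {
        struct dmz_mblock *mblk, *m;
        struct bio *bio;

        mblk = dmz_alloc_mblock(zmd, mblk_no);
        if (!mblk)
            return ERR_PTR(-ENOMEM);

        bio = bio_alloc(GFP_NOIO, 1);
        if (!bio) {
            dmz_free_mblock(zmd, mblk);
            return ERR_PTR(-ENOMEM);
        }

        spin_lock(&zmd->mblk_lock);

        /* Did another context insert this block while we allocated? */
        m = dmz_get_mblock_fast(zmd, mblk_no);
        if (m) {
            spin_unlock(&zmd->mblk_lock);
            dmz_free_mblock(zmd, mblk);
            bio_put(bio);
            return m;
        }

        /* Publish with one reference and READING set before unlocking. */
        mblk->ref++;
        set_bit(DMZ_META_READING, &mblk->state);
        dmz_insert_mblock(zmd, mblk);

        spin_unlock(&zmd->mblk_lock);

        /* Submit the read; completion clears DMZ_META_READING. */
        bio->bi_private = mblk;
        bio->bi_end_io = dmz_mblock_bio_end_io;
        bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
        submit_bio(bio);

        return mblk;
    }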
597 struct dmz_mblock *mblk;
606 mblk = list_first_entry(&zmd->mblk_lru_list,
608 list_del_init(&mblk->link);
609 rb_erase(&mblk->node, &zmd->mblk_rbtree);
610 dmz_free_mblock(zmd, mblk);
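
Lines 597-610 shrink the cache from the cold end of the LRU; each victim is unlinked from both the list and the rbtree before being freed. A sketch of the loop; the termination conditions and the count/limit names are guesses:

    /* Free clean, unreferenced blocks from the LRU head. */
    while (!list_empty(&zmd->mblk_lru_list) && count < limit) {
        mblk = list_first_entry(&zmd->mblk_lru_list,
                                struct dmz_mblock, link);
        list_del_init(&mblk->link);
        rb_erase(&mblk->node, &zmd->mblk_rbtree);
        dmz_free_mblock(zmd, mblk);
        count++;
    }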
648 struct dmz_mblock *mblk)
651 if (!mblk)
656 mblk->ref--;
657 if (mblk->ref == 0) {
658 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
659 rb_erase(&mblk->node, &zmd->mblk_rbtree);
660 dmz_free_mblock(zmd, mblk);
661 } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
662 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
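
Lines 648-662 drop a reference. At zero, an errored block is discarded outright; a clean one is parked on the LRU tail for reuse; a dirty block stays off both lists until written back. A sketch with the mblk_lock critical section assumed:

    if (!mblk)
        return;

    spin_lock(&zmd->mblk_lock);

    mblk->ref--;
    if (mblk->ref == 0) {
        if (test_bit(DMZ_META_ERROR, &mblk->state)) {
            /* Never cache a block that failed I/O. */
            rb_erase(&mblk->node, &zmd->mblk_rbtree);
            dmz_free_mblock(zmd, mblk);
        } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
            /* Clean and unused: eligible for reuse or reclaim. */
            list_add_tail(&mblk->link, &zmd->mblk_lru_list);
        }
    }

    spin_unlock(&zmd->mblk_lock);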
677 struct dmz_mblock *mblk;
682 mblk = dmz_get_mblock_fast(zmd, mblk_no);
685 if (!mblk) {
687 mblk = dmz_get_mblock_slow(zmd, mblk_no);
688 if (IS_ERR(mblk))
689 return mblk;
693 wait_on_bit_io(&mblk->state, DMZ_META_READING,
695 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
696 dmz_release_mblock(zmd, mblk);
701 return mblk;
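
Lines 677-701 are the public lookup: fast path under the lock, slow path on a miss, then sleep until any in-flight read finishes and fail the lookup if it errored. A sketch; the lock placement and the -EIO return value are assumptions:

    spin_lock(&zmd->mblk_lock);
    mblk = dmz_get_mblock_fast(zmd, mblk_no);
    spin_unlock(&zmd->mblk_lock);

    if (!mblk) {
        /* Cache miss: allocate and read the block from disk. */
        mblk = dmz_get_mblock_slow(zmd, mblk_no);
        if (IS_ERR(mblk))
            return mblk;
    }

    /* Wait for a concurrent or just-issued read to complete. */
    wait_on_bit_io(&mblk->state, DMZ_META_READING,
                   TASK_UNINTERRUPTIBLE);
    if (test_bit(DMZ_META_ERROR, &mblk->state)) {
        dmz_release_mblock(zmd, mblk);
        return ERR_PTR(-EIO);
    }

    return mblk;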
707 static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
710 if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
711 list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
718 static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
722 sector_t block = zmd->sb[set].block + mblk->no;
730 set_bit(DMZ_META_ERROR, &mblk->state);
734 set_bit(DMZ_META_WRITING, &mblk->state);
738 bio->bi_private = mblk;
741 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
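
Lines 718-741 write one dirty block into superblock set `set`; the on-disk location is the set's base block plus the cached block number, so both sets hold identical layouts at different offsets. A sketch; the bio flags and sector conversion are assumptions, and the device assignment is elided:

    sector_t block = zmd->sb[set].block + mblk->no;
    struct bio *bio;

    bio = bio_alloc(GFP_NOIO, 1);
    if (!bio) {
        set_bit(DMZ_META_ERROR, &mblk->state);
        return -ENOMEM;
    }

    set_bit(DMZ_META_WRITING, &mblk->state);

    /* Assumed setup: one page, metadata-priority write. */
    bio->bi_iter.bi_sector = dmz_blk2sect(block);
    bio->bi_private = mblk;
    bio->bi_end_io = dmz_mblock_bio_end_io;
    bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
    bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
    submit_bio(bio);

    return 0;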
783 struct dmz_mblock *mblk = zmd->sb[set].mblk;
820 mblk->page);
834 struct dmz_mblock *mblk;
841 list_for_each_entry(mblk, write_list, link) {
842 ret = dmz_write_mblock(zmd, mblk, set);
850 list_for_each_entry(mblk, write_list, link) {
853 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
855 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
856 clear_bit(DMZ_META_ERROR, &mblk->state);
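
Lines 834-856 flush the dirty list in two passes: submit every write without waiting, then wait on each DMZ_META_WRITING bit and harvest errors. This keeps all writes in flight concurrently instead of serializing them. A sketch (return-value plumbing assumed):

    int ret, blk_ret = 0;

    /* Pass 1: submit all writes back to back. */
    list_for_each_entry(mblk, write_list, link) {
        ret = dmz_write_mblock(zmd, mblk, set);
        if (ret)
            blk_ret = ret;
    }

    /* Pass 2: wait for completions and collect errors. */
    list_for_each_entry(mblk, write_list, link) {
        wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
                       TASK_UNINTERRUPTIBLE);
        if (test_bit(DMZ_META_ERROR, &mblk->state)) {
            clear_bit(DMZ_META_ERROR, &mblk->state);
            blk_ret = -EIO;
        }
    }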
900 struct dmz_mblock *mblk;
962 mblk = list_first_entry(&write_list, struct dmz_mblock, link);
963 list_del_init(&mblk->link);
966 clear_bit(DMZ_META_DIRTY, &mblk->state);
967 if (mblk->ref == 0)
968 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
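
Lines 962-968, in the flush path, retire blocks that were written successfully: clear the dirty bit and, if no reference is held, return the block to the LRU. A sketch with the surrounding loop and locking assumed:

    while (!list_empty(&write_list)) {
        mblk = list_first_entry(&write_list, struct dmz_mblock, link);
        list_del_init(&mblk->link);

        spin_lock(&zmd->mblk_lock);
        clear_bit(DMZ_META_DIRTY, &mblk->state);
        if (mblk->ref == 0)
            list_add_tail(&mblk->link, &zmd->mblk_lru_list);
        spin_unlock(&zmd->mblk_lock);
    }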
1122 sb->block, sb->mblk->page);
1133 struct dmz_mblock *mblk;
1138 mblk = dmz_alloc_mblock(zmd, 0);
1139 if (!mblk)
1142 zmd->sb[1].mblk = mblk;
1143 zmd->sb[1].sb = mblk->data;
1158 dmz_free_mblock(zmd, mblk);
1159 zmd->sb[1].mblk = NULL;
1171 struct dmz_mblock *mblk;
1175 mblk = dmz_alloc_mblock(zmd, 0);
1176 if (!mblk)
1179 sb->mblk = mblk;
1180 sb->sb = mblk->data;
1185 dmz_free_mblock(zmd, mblk);
1186 sb->mblk = NULL;
1227 if (!zmd->sb[dst_set].mblk) {
1228 zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
1229 if (!zmd->sb[dst_set].mblk) {
1233 zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
1355 dmz_free_mblock(zmd, sb->mblk);
1359 dmz_free_mblock(zmd, sb->mblk);
2498 struct dmz_mblock *mblk;
2509 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2510 if (IS_ERR(mblk))
2511 return PTR_ERR(mblk);
2517 count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2519 dmz_dirty_mblock(zmd, mblk);
2522 dmz_release_mblock(zmd, mblk);
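
Lines 2498-2522 mark chunk blocks valid in a cached bitmap block and dirty it only when at least one bit actually changed (the count return). dmz_set_bits() itself is outside the match; a plausible word-at-a-time implementation, written here as an assumption:

    /* Set nr_bits bits starting at bit; return how many changed. */
    static unsigned int dmz_set_bits(unsigned long *bitmap,
                                     unsigned int bit, unsigned int nr_bits)
    {
        unsigned int end = bit + nr_bits;
        unsigned long *addr;
        unsigned int n = 0;

        while (bit < end) {
            if ((bit & (BITS_PER_LONG - 1)) == 0 &&
                (end - bit) >= BITS_PER_LONG) {
                /* Fast path: flip a whole aligned zero word at once. */
                addr = bitmap + BIT_WORD(bit);
                if (*addr == 0) {
                    *addr = ULONG_MAX;
                    n += BITS_PER_LONG;
                    bit += BITS_PER_LONG;
                    continue;
                }
            }

            if (!test_and_set_bit(bit, bitmap))
                n++;
            bit++;
        }

        return n;
    }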
2578 struct dmz_mblock *mblk;
2588 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2589 if (IS_ERR(mblk))
2590 return PTR_ERR(mblk);
2596 count = dmz_clear_bits((unsigned long *)mblk->data,
2599 dmz_dirty_mblock(zmd, mblk);
2602 dmz_release_mblock(zmd, mblk);
2625 struct dmz_mblock *mblk;
2631 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2632 if (IS_ERR(mblk))
2633 return PTR_ERR(mblk);
2637 (unsigned long *) mblk->data) != 0;
2639 dmz_release_mblock(zmd, mblk);
2652 struct dmz_mblock *mblk;
2662 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2663 if (IS_ERR(mblk))
2664 return PTR_ERR(mblk);
2667 bitmap = (unsigned long *) mblk->data;
2674 dmz_release_mblock(zmd, mblk);
2762 struct dmz_mblock *mblk;
2771 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2772 if (IS_ERR(mblk)) {
2778 bitmap = mblk->data;
2783 dmz_release_mblock(zmd, mblk);
2798 struct dmz_mblock *mblk, *next;
2811 if (zmd->sb[i].mblk) {
2812 dmz_free_mblock(zmd, zmd->sb[i].mblk);
2813 zmd->sb[i].mblk = NULL;
2819 mblk = list_first_entry(&zmd->mblk_dirty_list,
2822 (u64)mblk->no, mblk->ref);
2823 list_del_init(&mblk->link);
2824 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2825 dmz_free_mblock(zmd, mblk);
2829 mblk = list_first_entry(&zmd->mblk_lru_list,
2831 list_del_init(&mblk->link);
2832 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2833 dmz_free_mblock(zmd, mblk);
2838 rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2840 (u64)mblk->no, mblk->ref);
2841 mblk->ref = 0;
2842 dmz_free_mblock(zmd, mblk);
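
Lines 2798-2842 are teardown: free any superblock buffers, drain the dirty list (a leak warning at this point), drain the LRU, and finally sweep the rbtree post-order for blocks that still held references. A sketch of the final sweep:

    struct dmz_mblock *mblk, *next;
    struct rb_root *root = &zmd->mblk_rbtree;

    /* Post-order visits children before parents, so nodes can be
     * freed during the walk without any rb_erase() calls. */
    rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
        mblk->ref = 0;
        dmz_free_mblock(zmd, mblk);
    }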