Lines matching refs: mp (struct metapage), excerpted from the JFS metapage code (fs/jfs/jfs_metapage.c). The leading number on each line is its line number in the source file.

32 #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
33 #define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
35 static inline void unlock_metapage(struct metapage *mp)
37 clear_bit_unlock(META_locked, &mp->flag);
38 wake_up(&mp->wait);
41 static inline void __lock_metapage(struct metapage *mp)
45 add_wait_queue_exclusive(&mp->wait, &wait);
48 if (metapage_locked(mp)) {
49 unlock_page(mp->page);
51 lock_page(mp->page);
53 } while (trylock_metapage(mp));
55 remove_wait_queue(&mp->wait, &wait);
59 * Must have mp->page locked
61 static inline void lock_metapage(struct metapage *mp)
63 if (trylock_metapage(mp))
64 __lock_metapage(mp);
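
The excerpt above is the metapage lock protocol: the fast path is a lock-free test_and_set_bit_lock() on META_locked, and the slow path (__lock_metapage()) sleeps exclusively on mp->wait, dropping the page lock on mp->page while it waits so that other paths can still lock the page. A minimal caller-side sketch of that protocol follows, using only the helpers shown above; the function name and comments are illustrative, and the call sequence mirrors grab_metapage() further down.

/*
 * Sketch: mp->page must be locked around lock_metapage(), because the
 * slow path drops and retakes the page lock while sleeping on mp->wait.
 */
static void example_pin_and_lock(struct metapage *mp)
{
        get_page(mp->page);        /* pin the backing page */
        lock_page(mp->page);       /* required before lock_metapage() */
        mp->count++;               /* hold a reference on the metapage */
        lock_metapage(mp);         /* trylock fast path, __lock_metapage() otherwise */
        unlock_page(mp->page);

        /* ... modify metadata through mp->data ... */

        unlock_metapage(mp);       /* clear META_locked and wake a waiter */
}
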
78 struct metapage *mp[MPS_PER_PAGE];
86 return mp_anchor(page)->mp[offset >> L2PSIZE];
89 static inline int insert_metapage(struct page *page, struct metapage *mp)
106 if (mp) {
108 index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
110 a->mp[index] = mp;
116 static inline void remove_metapage(struct page *page, struct metapage *mp)
122 index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
124 BUG_ON(a->mp[index] != mp);
126 a->mp[index] = NULL;
152 static inline int insert_metapage(struct page *page, struct metapage *mp)
154 if (mp) {
155 set_page_private(page, (unsigned long)mp);
162 static inline void remove_metapage(struct page *page, struct metapage *mp)
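
Two build variants are visible here: when several PSIZE metapages share one page, a per-page anchor holds an mp[] array indexed by the block offset within the page (lines 78-126); when one metapage covers the whole page, the metapage pointer is stored directly in page_private() (lines 152-162). A hedged sketch of the anchor structure those helpers imply is below; only the mp[] array appears in the excerpt, and the other fields and the mp_anchor() definition are assumptions.

/* Sketch of the per-page anchor implied by mp_anchor(page)->mp[...]. */
struct meta_anchor {
        int mp_count;                           /* metapages attached to this page (assumed) */
        atomic_t io_count;                      /* in-flight I/O on this page (assumed) */
        struct metapage *mp[MPS_PER_PAGE];      /* one slot per PSIZE block in the page */
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))
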
176 struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);
178 if (mp) {
179 mp->lid = 0;
180 mp->lsn = 0;
181 mp->data = NULL;
182 mp->clsn = 0;
183 mp->log = NULL;
184 init_waitqueue_head(&mp->wait);
186 return mp;
189 static inline void free_metapage(struct metapage *mp)
191 mempool_free(mp, metapage_mempool);
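
alloc_metapage() and free_metapage() draw from a mempool, which keeps a minimum reserve of preallocated metapages so allocation can make progress under memory pressure. A hypothetical initialization sketch follows; the cache and pool names, the minimum reserve size, and the use of a slab-backed mempool are assumptions about how metapage_mempool would be set up, not taken from the excerpt.

static struct kmem_cache *metapage_cache;       /* assumed slab cache */
static mempool_t *metapage_mempool;
#define EXAMPLE_METAPOOL_MIN 32                 /* assumed minimum reserve */

static int example_metapage_pool_init(void)
{
        metapage_cache = kmem_cache_create("example_metapage",
                                           sizeof(struct metapage), 0, 0, NULL);
        if (!metapage_cache)
                return -ENOMEM;

        metapage_mempool = mempool_create_slab_pool(EXAMPLE_METAPOOL_MIN,
                                                    metapage_cache);
        if (!metapage_mempool) {
                kmem_cache_destroy(metapage_cache);
                return -ENOMEM;
        }
        return 0;
}
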
221 static inline void drop_metapage(struct page *page, struct metapage *mp)
223 if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
224 test_bit(META_io, &mp->flag))
226 remove_metapage(page, mp);
228 free_metapage(mp);
280 static void remove_from_logsync(struct metapage *mp)
282 struct jfs_log *log = mp->log;
292 if (mp->lsn) {
293 mp->log = NULL;
294 mp->lsn = 0;
295 mp->clsn = 0;
297 list_del(&mp->synclist);
304 struct metapage *mp;
308 mp = page_to_mp(page, offset);
309 if (mp && test_bit(META_io, &mp->flag)) {
310 if (mp->lsn)
311 remove_from_logsync(mp);
312 clear_bit(META_io, &mp->flag);
339 int block_offset; /* block offset of mp within page */
344 struct metapage *mp;
363 mp = page_to_mp(page, offset);
365 if (!mp || !test_bit(META_dirty, &mp->flag))
368 if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
374 if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
375 jfs_flush_journal(mp->log, 0);
379 clear_bit(META_dirty, &mp->flag);
380 set_bit(META_io, &mp->flag);
529 struct metapage *mp;
534 mp = page_to_mp(&folio->page, offset);
536 if (!mp)
539 jfs_info("metapage_release_folio: mp = 0x%p", mp);
540 if (mp->count || mp->nohomeok ||
541 test_bit(META_dirty, &mp->flag)) {
542 jfs_info("count = %ld, nohomeok = %d", mp->count,
543 mp->nohomeok);
547 if (mp->lsn)
548 remove_from_logsync(mp);
549 remove_metapage(&folio->page, mp);
551 free_metapage(mp);
581 struct metapage *mp = NULL;
628 mp = page_to_mp(page, page_offset);
629 if (mp) {
630 if (mp->logical_size != size) {
634 mp->logical_size, size);
638 mp->count++;
639 lock_metapage(mp);
640 if (test_bit(META_discard, &mp->flag)) {
644 discard_metapage(mp);
647 clear_bit(META_discard, &mp->flag);
651 mp = alloc_metapage(GFP_NOFS);
652 if (!mp)
654 mp->page = page;
655 mp->sb = inode->i_sb;
656 mp->flag = 0;
657 mp->xflag = COMMIT_PAGE;
658 mp->count = 1;
659 mp->nohomeok = 0;
660 mp->logical_size = size;
661 mp->data = page_address(page) + page_offset;
662 mp->index = lblock;
663 if (unlikely(insert_metapage(page, mp))) {
664 free_metapage(mp);
667 lock_metapage(mp);
671 jfs_info("zeroing mp = 0x%p", mp);
672 memset(mp->data, 0, PSIZE);
676 jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
677 return mp;
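
__get_metapage() either finds an existing metapage for the block (bumping mp->count and taking the metapage lock) or allocates and initializes a new one, optionally zeroing it for newly created metadata. A hedged usage sketch follows; the read_metapage() wrapper and the mark_metapage_dirty() helper are recalled from the JFS headers rather than shown in the excerpt, and the function name and error handling are illustrative only.

/* Read-modify-write of one PSIZE metadata block at lblock. */
static int example_touch_block(struct inode *ip, sector_t lblock)
{
        struct metapage *mp;

        mp = read_metapage(ip, lblock, PSIZE, 0);  /* assumed __get_metapage() wrapper */
        if (!mp)
                return -EIO;

        /* ... modify the block through mp->data ... */

        mark_metapage_dirty(mp);   /* assumed helper: sets META_dirty */
        release_metapage(mp);      /* unlocks the metapage and drops the reference */
        return 0;
}
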
684 void grab_metapage(struct metapage * mp)
686 jfs_info("grab_metapage: mp = 0x%p", mp);
687 get_page(mp->page);
688 lock_page(mp->page);
689 mp->count++;
690 lock_metapage(mp);
691 unlock_page(mp->page);
723 void force_metapage(struct metapage *mp)
725 struct page *page = mp->page;
726 jfs_info("force_metapage: mp = 0x%p", mp);
727 set_bit(META_forcewrite, &mp->flag);
728 clear_bit(META_sync, &mp->flag);
733 jfs_error(mp->sb, "metapage_write_one() failed\n");
734 clear_bit(META_forcewrite, &mp->flag);
738 void hold_metapage(struct metapage *mp)
740 lock_page(mp->page);
743 void put_metapage(struct metapage *mp)
745 if (mp->count || mp->nohomeok) {
747 unlock_page(mp->page);
750 get_page(mp->page);
751 mp->count++;
752 lock_metapage(mp);
753 unlock_page(mp->page);
754 release_metapage(mp);
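
hold_metapage() takes only the page lock, which is enough to inspect a metapage without racing against release (release_metapage() also runs under the page lock); put_metapage() then either drops the page lock again, if the metapage is still referenced elsewhere, or takes a full reference so it can safely call release_metapage(). A small sketch of that pairing, with the function name and the printed message being illustrative:

static void example_peek(struct metapage *mp)
{
        hold_metapage(mp);         /* lock_page(mp->page) */
        if (test_bit(META_dirty, &mp->flag))
                pr_debug("metapage %p is dirty\n", mp);
        put_metapage(mp);          /* unlock, or grab a reference and release */
}
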
757 void release_metapage(struct metapage * mp)
759 struct page *page = mp->page;
760 jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);
765 unlock_metapage(mp);
767 assert(mp->count);
768 if (--mp->count || mp->nohomeok) {
774 if (test_bit(META_dirty, &mp->flag)) {
776 if (test_bit(META_sync, &mp->flag)) {
777 clear_bit(META_sync, &mp->flag);
779 jfs_error(mp->sb, "metapage_write_one() failed\n");
782 } else if (mp->lsn) /* discard_metapage doesn't remove it */
783 remove_from_logsync(mp);
786 drop_metapage(page, mp);
800 struct metapage *mp;
814 mp = page_to_mp(page, offset);
815 if (!mp)
817 if (mp->index < addr)
819 if (mp->index >= addr + len)
822 clear_bit(META_dirty, &mp->flag);
823 set_bit(META_discard, &mp->flag);
824 if (mp->lsn)
825 remove_from_logsync(mp);