Lines Matching defs:page

30 * The page->private field is used to reference a struct
31 * ceph_snap_context for _every_ dirty page. This indicates which
32 * snapshot the page was logically dirtied in, and thus which snap
37 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
57 * Invalidate and so forth must take care to ensure the dirty page
69 static inline struct ceph_snap_context *page_snap_context(struct page *page)
71 if (PagePrivate(page))
72 return (void *)page->private;
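The helper at lines 69-72 is shown with only its matching lines; the fall-through case (no private data attached to the page) returns NULL. An illustrative reconstruction of the whole helper from fs/ceph/addr.c, as a sketch rather than verbatim source:

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
        /* page->private holds the snap context attached when the page
         * was dirtied; no private data means no context. */
        if (PagePrivate(page))
                return (void *)page->private;
        return NULL;
}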
77 * Dirty a page. Optimistically adjust accounting, on the assumption
148 dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
155 dout("%p invalidate_folio idx %lu full dirty page\n",
393 * instead of page arrays, and we don't have that as of yet. Once the
398 struct page **pages;
408 /* should always give us a page-aligned read */
517 static void ceph_set_page_fscache(struct page *page)
519 set_page_fscache(page);
539 static inline void ceph_set_page_fscache(struct page *page)
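Lines 517-519 and 539 are the two definitions of ceph_set_page_fscache(): a real wrapper when fscache support is built in, and a no-op stub otherwise. A sketch of that conditional-compilation pattern (the guarding symbol is assumed to be CONFIG_CEPH_FSCACHE):

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
        set_page_fscache(page);         /* flag the page as being written to the cache */
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}
#endif /* CONFIG_CEPH_FSCACHE */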
619 struct page *page, u64 start)
627 snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
642 if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
643 end = ceph_fscrypt_page_offset(page) + thp_size(page);
645 if (ret && fscrypt_is_bounce_page(page))
651 * Write a single page, but leave the page locked.
654 * dirty page accounting (i.e., page is no longer dirty).
656 static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
658 struct folio *folio = page_folio(page);
659 struct inode *inode = page->mapping->host;
663 loff_t page_off = page_offset(page);
665 loff_t len = thp_size(page);
671 struct page *bounce_page = NULL;
673 dout("writepage %p idx %lu\n", page, page->index);
679 snapc = page_snap_context(page);
681 dout("writepage %p page %p not dirty?\n", inode, page);
686 dout("writepage %p page %p snapc %p not writeable - noop\n",
687 inode, page, snapc);
691 redirty_page_for_writepage(wbc, page);
696 /* is this a partial page at end of file? */
708 dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
709 inode, page, page->index, page_off, wlen, snapc, snapc->seq);
721 redirty_page_for_writepage(wbc, page);
728 set_page_writeback(page);
730 ceph_set_page_fscache(page);
734 bounce_page = fscrypt_encrypt_pagecache_blocks(page,
738 redirty_page_for_writepage(wbc, page);
739 end_page_writeback(page);
746 WARN_ON_ONCE(len > thp_size(page));
748 bounce_page ? &bounce_page : &page, wlen, 0,
770 dout("writepage interrupted page %p\n", page);
771 redirty_page_for_writepage(wbc, page);
772 end_page_writeback(page);
777 dout("writepage setting page/mapping error %d %p\n",
778 err, page);
782 dout("writepage cleaned page %p\n", page);
785 oldest = detach_page_private(page);
787 end_page_writeback(page);
789 ceph_put_snap_context(snapc); /* page's reference */
798 static int ceph_writepage(struct page *page, struct writeback_control *wbc)
801 struct inode *inode = page->mapping->host;
809 wait_on_page_fscache(page);
811 err = writepage_nounlock(page, wbc);
814 * to prevent caller from setting mapping/page error */
817 unlock_page(page);
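Lines 798-817 outline the ->writepage entry point: wait for any in-flight fscache write, write the page synchronously via writepage_nounlock(), then unlock it before returning. A condensed sketch of that wrapper (congestion handling and other details elided or approximated, not the verbatim kernel source):

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        BUG_ON(!inode);
        ihold(inode);

        /* don't race with a cache write still in flight */
        wait_on_page_fscache(page);

        err = writepage_nounlock(page, wbc);
        if (err == -ERESTARTSYS) {
                /* interrupted by direct reclaim: keep the page dirty rather
                 * than let the caller set a mapping/page error */
                err = 0;
        }
        unlock_page(page);
        iput(inode);
        return err;
}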
826 * page error bits.
833 struct page *page;
854 * We lost the cache cap, need to truncate the page before
856 * page truncation thread, possibly losing some data that
877 page = osd_data->pages[j];
878 if (fscrypt_is_bounce_page(page)) {
879 page = fscrypt_pagecache_page(page);
881 osd_data->pages[j] = page;
883 BUG_ON(!page);
884 WARN_ON(!PageUptodate(page));
891 ceph_put_snap_context(detach_page_private(page));
892 end_page_writeback(page);
893 dout("unlocking %p\n", page);
897 page);
899 unlock_page(page);
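Lines 877-899 are the per-page completion step inside the writeback-finish callback: unwrap any fscrypt bounce page, drop the snap context reference taken when the page was dirtied, end writeback and unlock. A condensed, illustrative fragment of that loop (the bounce-page free is not among the matched lines and is assumed):

for (j = 0; j < num_pages; j++) {
        struct page *page = osd_data->pages[j];

        if (fscrypt_is_bounce_page(page)) {
                /* swap the bounce page back for the real pagecache page */
                page = fscrypt_pagecache_page(page);
                fscrypt_free_bounce_page(osd_data->pages[j]);
                osd_data->pages[j] = page;
        }
        BUG_ON(!page);
        WARN_ON(!PageUptodate(page));

        /* release the snap_context reference attached at dirty time */
        ceph_put_snap_context(detach_page_private(page));
        end_page_writeback(page);
        unlock_page(page);
}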
1020 struct page **pages = NULL, **data_pages;
1021 struct page *page;
1035 page = &fbatch.folios[i]->page;
1036 dout("? %p idx %lu\n", page, page->index);
1038 lock_page(page); /* first page */
1039 else if (!trylock_page(page))
1043 if (unlikely(!PageDirty(page)) ||
1044 unlikely(page->mapping != mapping)) {
1045 dout("!dirty or !mapping %p\n", page);
1046 unlock_page(page);
1050 pgsnapc = page_snap_context(page);
1052 dout("page snapc %p %lld != oldest %p %lld\n",
1058 unlock_page(page);
1061 if (page_offset(page) >= ceph_wbc.i_size) {
1062 struct folio *folio = page_folio(page);
1074 if (strip_unit_end && (page->index > strip_unit_end)) {
1075 dout("end of strip unit %p\n", page);
1076 unlock_page(page);
1079 if (PageWriteback(page) || PageFsCache(page)) {
1081 dout("%p under writeback\n", page);
1082 unlock_page(page);
1085 dout("waiting on writeback %p\n", page);
1086 wait_on_page_writeback(page);
1087 wait_on_page_fscache(page);
1090 if (!clear_page_dirty_for_io(page)) {
1091 dout("%p !clear_page_dirty_for_io\n", page);
1092 unlock_page(page);
1098 * the first locked page this time through,
1100 * allocate a page array
1108 offset = (u64)page_offset(page);
1116 strip_unit_end = page->index +
1131 } else if (page->index !=
1135 redirty_page_for_writepage(wbc, page);
1136 unlock_page(page);
1141 offset = (u64)page_offset(page);
1145 /* note position of first page in fbatch */
1146 dout("%p will write page %p idx %lu\n",
1147 inode, page, page->index);
1156 fscrypt_encrypt_pagecache_blocks(page,
1163 /* better not fail on first page! */
1166 redirty_page_for_writepage(wbc, page);
1167 unlock_page(page);
1172 pages[locked_pages++] = page;
1176 len += thp_size(page);
1184 /* shift unused page to beginning of fbatch */
1239 struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
1241 u64 cur_offset = page_offset(page);
1243 * Discontinuity in page range? Ceph can handle that by just passing
1270 set_page_writeback(page);
1272 ceph_set_page_fscache(page);
1273 len += thp_size(page);
1283 u64 min_len = len + 1 - thp_size(page);
1363 struct page *page;
1372 page = &fbatch.folios[i]->page;
1373 if (page_snap_context(page) != snapc)
1375 wait_on_page_writeback(page);
1414 * @page: page being dirtied
1416 * We are only allowed to write into/dirty a page if the page is
1421 * Must be called with page lock held.
1424 ceph_find_incompatible(struct page *page)
1426 struct inode *inode = page->mapping->host;
1430 dout(" page %p %llx:%llx is shutdown\n", page,
1438 wait_on_page_writeback(page);
1440 snapc = page_snap_context(page);
1445 * this page is already dirty in another (older) snap
1452 dout(" page %p snapc %p not current or oldest\n", page, snapc);
1457 /* yay, writeable, do it now (without dropping page lock) */
1458 dout(" page %p snapc %p not current, but oldest\n", page, snapc);
1459 if (clear_page_dirty_for_io(page)) {
1460 int r = writepage_nounlock(page, NULL);
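Lines 1424-1460 show ceph_find_incompatible(): given a locked page, decide whether it may be dirtied under the current (head) snap context, returning an older snap context the caller must wait on when it cannot, or writing the page out immediately when the older context is already the oldest. A condensed sketch of that decision; helper names such as get_oldest_context() are taken from the surrounding file and assumed, and dout() calls are dropped:

static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (ceph_inode_is_shutdown(inode))
                return ERR_PTR(-ESTALE);

        for (;;) {
                struct ceph_snap_context *snapc, *oldest;

                wait_on_page_writeback(page);

                snapc = page_snap_context(page);
                if (!snapc || snapc == ci->i_head_snapc)
                        break;  /* clean, or already dirty in the head context */

                /* page is dirty in an older snap context: writeable now? */
                oldest = get_oldest_context(inode, NULL, NULL);
                if (snapc->seq > oldest->seq) {
                        /* not the oldest: caller must wait for it to flush */
                        ceph_put_snap_context(oldest);
                        return ceph_get_snap_context(snapc);
                }
                ceph_put_snap_context(oldest);

                /* writeable: write it now without dropping the page lock */
                if (clear_page_dirty_for_io(page)) {
                        int r = writepage_nounlock(page, NULL);
                        if (r < 0)
                                return ERR_PTR(r);
                }
        }
        return NULL;
}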
1495 * We are only allowed to write into/dirty the page if the page is
1500 struct page **pagep, void **fsdata)
1513 *pagep = &folio->page;
1519 * except adjust dirty page accounting
1523 struct page *subpage, void *fsdata)
1638 struct page *page;
1641 page = find_or_create_page(mapping, 0,
1643 if (!page) {
1647 err = __ceph_do_getattr(inode, page,
1650 unlock_page(page);
1651 put_page(page);
1656 zero_user_segment(page, err, PAGE_SIZE);
1658 flush_dcache_page(page);
1659 SetPageUptodate(page);
1660 vmf->page = page;
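Lines 1638-1660 are the inline-data path of the fault handler: page 0 of the mapping is created (or found), the inline data is fetched into it via __ceph_do_getattr(), the tail is zeroed, and the now-uptodate locked page is handed back through vmf->page. A condensed, illustrative fragment (error paths simplified):

page = find_or_create_page(mapping, 0,
                mapping_gfp_constraint(mapping, ~__GFP_FS));
if (!page)
        return VM_FAULT_OOM;

/* pull the inline file data from the MDS into this locked page */
err = __ceph_do_getattr(inode, page, CEPH_STAT_CAP_INLINE_DATA, true);
if (err < 0) {
        unlock_page(page);
        put_page(page);
        return vmf_error(err);
}
if (err < PAGE_SIZE)
        zero_user_segment(page, err, PAGE_SIZE);
else
        flush_dcache_page(page);
SetPageUptodate(page);
vmf->page = page;               /* returned locked to the fault path */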
1682 struct page *page = vmf->page;
1683 loff_t off = page_offset(page);
1700 if (off + thp_size(page) <= size)
1701 len = thp_size(page);
1703 len = offset_in_thp(page, size);
1720 /* Update time before taking page lock */
1727 lock_page(page);
1729 if (page_mkwrite_check_truncate(page, inode) < 0) {
1730 unlock_page(page);
1735 snapc = ceph_find_incompatible(page);
1737 /* success. we'll keep the page locked. */
1738 set_page_dirty(page);
1743 unlock_page(page);
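Lines 1727-1743 are the core of the ->page_mkwrite handler once caps have been obtained: lock the page, bail if it raced with truncation, check the snap context via ceph_find_incompatible(), and on success mark the page dirty and keep it locked. A condensed, illustrative fragment (cap negotiation, the retry loop and error paths elided):

lock_page(page);

if (page_mkwrite_check_truncate(page, inode) < 0) {
        /* raced with truncate: tell the fault path to retry */
        unlock_page(page);
        ret = VM_FAULT_NOPAGE;
} else {
        struct ceph_snap_context *snapc = ceph_find_incompatible(page);

        if (!snapc) {
                /* success. we'll keep the page locked. */
                set_page_dirty(page);
                ret = VM_FAULT_LOCKED;
        } else {
                /* incompatible (older) snap context: unlock, flush, retry */
                unlock_page(page);
        }
}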
1778 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1782 struct page *page;
1785 page = locked_page;
1789 page = find_or_create_page(mapping, 0,
1792 if (!page)
1794 if (PageUptodate(page)) {
1795 unlock_page(page);
1796 put_page(page);
1805 void *kaddr = kmap_atomic(page);
1810 if (page != locked_page) {
1812 zero_user_segment(page, len, PAGE_SIZE);
1814 flush_dcache_page(page);
1816 SetPageUptodate(page);
1817 unlock_page(page);
1818 put_page(page);
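Lines 1778-1818 show ceph_fill_inline_data(), which copies MDS-supplied inline file data into page 0 of the page cache, reusing a page already locked by the caller when one is passed in. A sketch of the function, close to but not guaranteed to match the kernel source line for line (dout() calls omitted):

void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                           char *data, int len)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        if (locked_page) {
                page = locked_page;
        } else {
                if (i_size_read(inode) == 0)
                        return;
                page = find_or_create_page(mapping, 0,
                                mapping_gfp_constraint(mapping, ~__GFP_FS));
                if (!page)
                        return;
                if (PageUptodate(page)) {
                        unlock_page(page);
                        put_page(page);
                        return;
                }
        }

        if (len > 0) {
                /* copy the inline payload into the start of the file */
                void *kaddr = kmap_atomic(page);
                memcpy(kaddr, data, len);
                kunmap_atomic(kaddr);
        }

        if (page != locked_page) {
                if (len < PAGE_SIZE)
                        zero_user_segment(page, len, PAGE_SIZE);
                else
                        flush_dcache_page(page);

                SetPageUptodate(page);
                unlock_page(page);
                put_page(page);
        }
}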
1831 struct page *pages[1];
1985 struct page **pages;
2084 /* one page should be large enough for STAT data */